Compare commits


1 commit

Author: Sarah Akus
SHA1: 44fa30f91b
Message: Revert "Allow serving app from non-root path in dev env"
Date: 2023-02-27 10:16:28 -05:00
115 changed files with 639 additions and 2779 deletions

View File

@@ -7,7 +7,6 @@ on:
branches:
- devel
- release_*
- feature_*
jobs:
push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
@@ -21,12 +20,6 @@ jobs:
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name
run: |
echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
env:
OWNER: '${{ github.repository_owner }}'
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
@@ -38,18 +31,15 @@ jobs:
- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/} || :
- name: Build images
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
- name: Push image
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
docker push ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/}

View File

@@ -1,5 +1,4 @@
PYTHON ?= python3.9
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no
NODE ?= node
NPM_BIN ?= npm
@@ -204,7 +203,19 @@ uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
uwsgi /etc/tower/uwsgi.ini
uwsgi -b 32768 \
--socket 127.0.0.1:8050 \
--module=awx.wsgi:application \
--home=/var/lib/awx/venv/awx \
--chdir=/awx_devel/ \
--vacuum \
--processes=5 \
--harakiri=120 --master \
--no-orphans \
--max-requests=1000 \
--stats /tmp/stats.socket \
--lazy-apps \
--logformat "%(addr) %(method) %(uri) - %(proto) %(status)"
awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
@@ -411,14 +422,12 @@ ui-release: $(UI_BUILD_FLAG_FILE)
ui-devel: awx/ui/node_modules
@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
@if [ -d "/var/lib/awx" ] ; then \
mkdir -p /var/lib/awx/public/static/css; \
mkdir -p /var/lib/awx/public/static/js; \
mkdir -p /var/lib/awx/public/static/media; \
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
fi
mkdir -p /var/lib/awx/public/static/css
mkdir -p /var/lib/awx/public/static/js
mkdir -p /var/lib/awx/public/static/media
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
ui-devel-instrumented: awx/ui/node_modules
$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -500,20 +509,20 @@ docker-compose-sources: .git/hooks/pre-commit
docker-compose: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose-credential-plugins: awx/projects docker-compose-sources
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
docker-compose-test: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /bin/bash
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /bin/bash
docker-compose-runtest: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /start_tests.sh
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /start_tests.sh
docker-compose-build-swagger: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger
SCHEMA_DIFF_BASE_BRANCH ?= devel
detect-schema-change: genschema
@@ -522,7 +531,7 @@ detect-schema-change: genschema
diff -u -b reference-schema.json schema.json
docker-compose-clean: awx/projects
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml rm -sf
docker-compose -f tools/docker-compose/_sources/docker-compose.yml rm -sf
docker-compose-container-group-clean:
@if [ -f "tools/docker-compose-minikube/_sources/minikube" ]; then \
@@ -538,8 +547,10 @@ docker-compose-build:
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
if [ "$(shell docker images | grep awx_devel)" ]; then \
docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
fi
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -548,10 +559,10 @@ docker-refresh: docker-clean docker-compose
## Docker Development Environment with Elastic Stack Connected
docker-compose-elk: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose-cluster-elk: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose-container-group:
MINIKUBE_CONTAINER_GROUP=true make docker-compose
@@ -573,7 +584,6 @@ VERSION:
PYTHON_VERSION:
@echo "$(PYTHON)" | sed 's:python::'
.PHONY: Dockerfile
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)

View File

@@ -1,4 +1,5 @@
# Django
from django.conf import settings
from django.utils.translation import gettext_lazy as _
# Django REST Framework
@@ -8,7 +9,6 @@ from rest_framework import serializers
from awx.conf import fields, register, register_validate
from awx.api.fields import OAuth2ProviderField
from oauth2_provider.settings import oauth2_settings
from awx.sso.common import is_remote_auth_enabled
register(
@@ -108,8 +108,19 @@ register(
def authentication_validate(serializer, attrs):
if attrs.get('DISABLE_LOCAL_AUTH', False) and not is_remote_auth_enabled():
raise serializers.ValidationError(_("There are no remote authentication systems configured."))
remote_auth_settings = [
'AUTH_LDAP_SERVER_URI',
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
'SOCIAL_AUTH_GITHUB_KEY',
'SOCIAL_AUTH_GITHUB_ORG_KEY',
'SOCIAL_AUTH_GITHUB_TEAM_KEY',
'SOCIAL_AUTH_SAML_ENABLED_IDPS',
'RADIUS_SERVER',
'TACACSPLUS_HOST',
]
if attrs.get('DISABLE_LOCAL_AUTH', False):
if not any(getattr(settings, s, None) for s in remote_auth_settings):
raise serializers.ValidationError(_("There are no remote authentication systems configured."))
return attrs

View File

@@ -155,7 +155,7 @@ class FieldLookupBackend(BaseFilterBackend):
'search',
)
# A list of fields that we know can be filtered on without the possibility
# A list of fields that we know can be filtered on without the possiblity
# of introducing duplicates
NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)
@@ -268,7 +268,7 @@ class FieldLookupBackend(BaseFilterBackend):
continue
# HACK: make `created` available via API for the Django User ORM model
# so it keep compatibility with other objects which exposes the `created` attr.
# so it keep compatiblity with other objects which exposes the `created` attr.
if queryset.model._meta.object_name == 'User' and key.startswith('created'):
key = key.replace('created', 'date_joined')

View File

@@ -674,7 +674,7 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
location = None
created = True
# Retrieve the sub object (whether created or by ID).
# Retrive the sub object (whether created or by ID).
sub = get_object_or_400(self.model, pk=sub_id)
# Verify we have permission to attach.

View File

@@ -60,7 +60,7 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
delattr(renderer_context['view'], '_request')
def get_raw_data_form(self, data, view, method, request):
# Set a flag on the view to indicate to the view/serializer that we're
# Set a flag on the view to indiciate to the view/serializer that we're
# creating a raw data form for the browsable API. Store the original
# request method to determine how to populate the raw data form.
if request.method in {'OPTIONS', 'DELETE'}:

View File

@@ -8,7 +8,6 @@ import logging
import re
from collections import OrderedDict
from datetime import timedelta
from uuid import uuid4
# OAuth2
from oauthlib import oauth2
@@ -109,15 +108,13 @@ from awx.main.utils import (
extract_ansible_vars,
encrypt_dict,
prefetch_page_capabilities,
get_external_account,
truncate_stdout,
get_licenser,
)
from awx.main.utils.filters import SmartFilter
from awx.main.utils.named_url_graph import reset_counters
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.signals import update_inventory_computed_fields
from awx.main.validators import vars_validate_or_raise
@@ -127,8 +124,6 @@ from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, Ver
# AWX Utils
from awx.api.validators import HostnameRegexValidator
from awx.sso.common import get_external_account
logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type.
@@ -160,7 +155,7 @@ SUMMARIZABLE_FK_FIELDS = {
'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
@@ -541,7 +536,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
#
# This logic is to force rendering choice's on an uneditable field.
# Note: Consider expanding this rendering for more than just choices fields
# Note: This logic works in conjunction with
# Note: This logic works in conjuction with
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
@@ -992,8 +987,23 @@ class UserSerializer(BaseSerializer):
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
# Get external password will return something like ldap or enterprise or None if the user isn't external. We only want to allow a password update for a None option
if new_password and not self.get_external_account(obj):
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (
getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)
) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
@@ -1857,7 +1867,7 @@ class HostSerializer(BaseSerializerWithVariables):
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
if inventory and Group.objects.filter(name=name, inventory=inventory).exists():
if Group.objects.filter(name=name, inventory=inventory).exists():
raise serializers.ValidationError(_('A Group with that name already exists.'))
return super(HostSerializer, self).validate(attrs)
@@ -1949,130 +1959,6 @@ class GroupSerializer(BaseSerializerWithVariables):
return ret
class BulkHostSerializer(HostSerializer):
class Meta:
model = Host
fields = (
'name',
'enabled',
'instance_id',
'description',
'variables',
)
class BulkHostCreateSerializer(serializers.Serializer):
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(), required=True, write_only=True, help_text=_('Primary Key ID of inventory to add hosts to.')
)
hosts = serializers.ListField(
child=BulkHostSerializer(),
allow_empty=False,
max_length=100000,
write_only=True,
help_text=_('List of hosts to be created, JSON. e.g. [{"name": "example.com"}, {"name": "127.0.0.1"}]'),
)
class Meta:
model = Inventory
fields = ('inventory', 'hosts')
read_only_fields = ()
def raise_if_host_counts_violated(self, attrs):
validation_info = get_licenser().validate()
org = attrs['inventory'].organization
if org:
org_active_count = Host.objects.org_active_count(org.id)
new_hosts = [h['name'] for h in attrs['hosts']]
org_net_new_host_count = len(new_hosts) - Host.objects.filter(inventory__organization=1, name__in=new_hosts).values('name').distinct().count()
if org.max_hosts > 0 and org_active_count + org_net_new_host_count > org.max_hosts:
raise PermissionDenied(
_(
"You have already reached the maximum number of %s hosts"
" allowed for your organization. Contact your System Administrator"
" for assistance." % org.max_hosts
)
)
# Don't check license if it is open license
if validation_info.get('license_type', 'UNLICENSED') == 'open':
return
sys_free_instances = validation_info.get('free_instances', 0)
system_net_new_host_count = Host.objects.exclude(name__in=new_hosts).count()
if system_net_new_host_count > sys_free_instances:
hard_error = validation_info.get('trial', False) is True or validation_info['instance_count'] == 10
if hard_error:
# Only raise permission error for trial, otherwise just log a warning as we do in other inventory import situations
raise PermissionDenied(_("Host count exceeds available instances."))
logger.warning(_("Number of hosts allowed by license has been exceeded."))
def validate(self, attrs):
request = self.context.get('request', None)
inv = attrs['inventory']
if inv.kind != '':
raise serializers.ValidationError(_('Hosts can only be created in manual inventories (not smart or constructed types).'))
if len(attrs['hosts']) > settings.BULK_HOST_MAX_CREATE:
raise serializers.ValidationError(_('Number of hosts exceeds system setting BULK_HOST_MAX_CREATE'))
if request and not request.user.is_superuser:
if request.user not in inv.admin_role:
raise serializers.ValidationError(_(f'Inventory with id {inv.id} not found or lack permissions to add hosts.'))
current_hostnames = set(inv.hosts.values_list('name', flat=True))
new_names = [host['name'] for host in attrs['hosts']]
duplicate_new_names = [n for n in new_names if n in current_hostnames or new_names.count(n) > 1]
if duplicate_new_names:
raise serializers.ValidationError(_(f'Hostnames must be unique in an inventory. Duplicates found: {duplicate_new_names}'))
self.raise_if_host_counts_violated(attrs)
_now = now()
for host in attrs['hosts']:
host['created'] = _now
host['modified'] = _now
host['inventory'] = inv
return attrs
def create(self, validated_data):
# This assumes total_hosts is up to date, and it can get out of date if the inventory computed fields have not been updated lately.
# If we wanted to side step this we could query Hosts.objects.filter(inventory...)
old_total_hosts = validated_data['inventory'].total_hosts
result = [Host(**attrs) for attrs in validated_data['hosts']]
try:
Host.objects.bulk_create(result)
except Exception as e:
raise serializers.ValidationError({"detail": _(f"cannot create host, host creation error {e}")})
new_total_hosts = old_total_hosts + len(result)
request = self.context.get('request', None)
changes = {'total_hosts': [old_total_hosts, new_total_hosts]}
activity_entry = ActivityStream.objects.create(
operation='update',
object1='inventory',
changes=json.dumps(changes),
actor=request.user,
)
activity_entry.inventory.add(validated_data['inventory'])
# This actually updates the cached "total_hosts" field on the inventory
update_inventory_computed_fields.delay(validated_data['inventory'].id)
return_keys = [k for k in BulkHostSerializer().fields.keys()] + ['id']
return_data = {}
host_data = []
for r in result:
item = {k: getattr(r, k) for k in return_keys}
if not settings.IS_TESTING_MODE:
# sqlite acts different with bulk_create -- it doesn't return the id of the objects
# to get it, you have to do an additional query, which is not useful for our tests
item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
item['inventory'] = reverse('api:inventory_detail', kwargs={'pk': validated_data['inventory'].id})
host_data.append(item)
return_data['url'] = reverse('api:inventory_detail', kwargs={'pk': validated_data['inventory'].id})
return_data['hosts'] = host_data
return return_data
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
@@ -2128,7 +2014,6 @@ class InventorySourceOptionsSerializer(BaseSerializer):
'source',
'source_path',
'source_vars',
'scm_branch',
'credential',
'enabled_var',
'enabled_value',
@@ -2293,14 +2178,10 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
else:
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path']))
if redundant_scm_fields:
raise serializers.ValidationError({"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))})
project = get_field_from_model_or_attrs('source_project')
if get_field_from_model_or_attrs('scm_branch') and not project.allow_override:
raise serializers.ValidationError({'scm_branch': _('Project does not allow overriding branch.')})
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
@@ -4116,7 +3997,7 @@ class JobEventSerializer(BaseSerializer):
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return data
# If the view logic says to not truncate (request was to the detail view or a param was used)
# If the view logic says to not trunctate (request was to the detail view or a param was used)
if self.context.get('no_truncate', False):
return data
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
@@ -4147,7 +4028,7 @@ class ProjectUpdateEventSerializer(JobEventSerializer):
# raw SCM URLs in their stdout (which *could* contain passwords)
# attempt to detect and filter HTTP basic auth passwords in the stdout
# of these types of events
if obj.event_data.get('task_action') in ('git', 'svn', 'ansible.builtin.git', 'ansible.builtin.svn'):
if obj.event_data.get('task_action') in ('git', 'svn'):
try:
return json.loads(UriCleaner.remove_sensitive(json.dumps(obj.event_data)))
except Exception:
@@ -4191,7 +4072,7 @@ class AdHocCommandEventSerializer(BaseSerializer):
def to_representation(self, obj):
data = super(AdHocCommandEventSerializer, self).to_representation(obj)
# If the view logic says to not truncate (request was to the detail view or a param was used)
# If the view logic says to not trunctate (request was to the detail view or a param was used)
if self.context.get('no_truncate', False):
return data
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
@@ -4538,271 +4419,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return accepted
class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
# We don't do a PrimaryKeyRelatedField for unified_job_template and others, because that increases the number
# of database queries, rather we take them as integer and later convert them to objects in get_objectified_jobs
unified_job_template = serializers.IntegerField(
required=True, min_value=1, help_text=_('Primary key of the template for this job, can be a job template or inventory source.')
)
inventory = serializers.IntegerField(required=False, min_value=1)
execution_environment = serializers.IntegerField(required=False, min_value=1)
# many-to-many fields
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
# TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
def validate(self, attrs):
return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
def get_validation_exclusions(self, obj=None):
ret = super().get_validation_exclusions(obj)
ret.extend(['unified_job_template', 'inventory', 'execution_environment'])
return ret
class BulkJobLaunchSerializer(serializers.Serializer):
name = serializers.CharField(default='Bulk Job Launch', max_length=512, write_only=True, required=False, allow_blank=True) # limited by max name of jobs
jobs = BulkJobNodeSerializer(
many=True,
allow_empty=False,
write_only=True,
max_length=100000,
help_text=_('List of jobs to be launched, JSON. e.g. [{"unified_job_template": 7}, {"unified_job_template": 10}]'),
)
description = serializers.CharField(write_only=True, required=False, allow_blank=False)
extra_vars = serializers.JSONField(write_only=True, required=False)
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False,
default=None,
allow_null=True,
write_only=True,
help_text=_('Inherit permissions from this organization. If not provided, a organization the user is a member of will be selected automatically.'),
)
inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True)
limit = serializers.CharField(write_only=True, required=False, allow_blank=False)
scm_branch = serializers.CharField(write_only=True, required=False, allow_blank=False)
skip_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
job_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
class Meta:
model = WorkflowJob
fields = ('name', 'jobs', 'description', 'extra_vars', 'organization', 'inventory', 'limit', 'scm_branch', 'skip_tags', 'job_tags')
read_only_fields = ()
def validate(self, attrs):
request = self.context.get('request', None)
identifiers = set()
if len(attrs['jobs']) > settings.BULK_JOB_MAX_LAUNCH:
raise serializers.ValidationError(_('Number of requested jobs exceeds system setting BULK_JOB_MAX_LAUNCH'))
for node in attrs['jobs']:
if 'identifier' in node:
if node['identifier'] in identifiers:
raise serializers.ValidationError(_(f"Identifier {node['identifier']} not unique"))
identifiers.add(node['identifier'])
else:
node['identifier'] = str(uuid4())
requested_ujts = {j['unified_job_template'] for j in attrs['jobs']}
requested_use_inventories = {job['inventory'] for job in attrs['jobs'] if 'inventory' in job}
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
requested_use_credentials = set()
requested_use_labels = set()
# requested_use_instance_groups = set()
for job in attrs['jobs']:
for cred in job.get('credentials', []):
requested_use_credentials.add(cred)
for label in job.get('labels', []):
requested_use_labels.add(label)
# for instance_group in job.get('instance_groups', []):
# requested_use_instance_groups.add(instance_group)
key_to_obj_map = {
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
# "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
}
ujts = {}
for ujt in key_to_obj_map['unified_job_template'].values():
ujts.setdefault(type(ujt), [])
ujts[type(ujt)].append(ujt)
unallowed_types = set(ujts.keys()) - set([JobTemplate, Project, InventorySource, WorkflowJobTemplate])
if unallowed_types:
type_names = ' '.join([cls._meta.verbose_name.title() for cls in unallowed_types])
raise serializers.ValidationError(_("Template types {type_names} not allowed in bulk jobs").format(type_names=type_names))
for model, obj_list in ujts.items():
role_field = 'execute_role' if issubclass(model, (JobTemplate, WorkflowJobTemplate)) else 'update_role'
self.check_list_permission(model, set([obj.id for obj in obj_list]), role_field)
self.check_organization_permission(attrs, request)
if 'inventory' in attrs:
requested_use_inventories.add(attrs['inventory'].id)
self.check_list_permission(Inventory, requested_use_inventories, 'use_role')
self.check_list_permission(Credential, requested_use_credentials, 'use_role')
self.check_list_permission(Label, requested_use_labels)
# self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
attrs['jobs'] = jobs_object
if 'extra_vars' in attrs:
extra_vars_dict = parse_yaml_or_json(attrs['extra_vars'])
attrs['extra_vars'] = json.dumps(extra_vars_dict)
attrs = super().validate(attrs)
return attrs
def check_list_permission(self, model, id_list, role_field=None):
if not id_list:
return
user = self.context['request'].user
if role_field is None: # implies "read" level permission is required
access_qs = user.get_queryset(model)
else:
access_qs = model.accessible_objects(user, role_field)
not_allowed = set(id_list) - set(access_qs.filter(id__in=id_list).values_list('id', flat=True))
if not_allowed:
raise serializers.ValidationError(
_("{model_name} {not_allowed} not found or you don't have permissions to access it").format(
model_name=model._meta.verbose_name_plural.title(), not_allowed=not_allowed
)
)
def create(self, validated_data):
request = self.context.get('request', None)
launch_user = request.user if request else None
job_node_data = validated_data.pop('jobs')
wfj_deferred_attr_names = ('skip_tags', 'limit', 'job_tags')
wfj_deferred_vals = {}
for item in wfj_deferred_attr_names:
wfj_deferred_vals[item] = validated_data.pop(item, None)
wfj = WorkflowJob.objects.create(**validated_data, is_bulk_job=True, launch_type='manual', created_by=launch_user)
for key, val in wfj_deferred_vals.items():
if val:
setattr(wfj, key, val)
nodes = []
node_m2m_objects = {}
node_m2m_object_types_to_through_model = {
'credentials': WorkflowJobNode.credentials.through,
'labels': WorkflowJobNode.labels.through,
# 'instance_groups': WorkflowJobNode.instance_groups.through,
}
node_deferred_attr_names = (
'limit',
'scm_branch',
'verbosity',
'forks',
'diff_mode',
'job_tags',
'job_type',
'skip_tags',
'job_slice_count',
'timeout',
)
node_deferred_attrs = {}
for node_attrs in job_node_data:
# we need to add any m2m objects after creation via the through model
node_m2m_objects[node_attrs['identifier']] = {}
node_deferred_attrs[node_attrs['identifier']] = {}
for item in node_m2m_object_types_to_through_model.keys():
if item in node_attrs:
node_m2m_objects[node_attrs['identifier']][item] = node_attrs.pop(item)
# Some attributes are not accepted by WorkflowJobNode __init__, we have to set them after
for item in node_deferred_attr_names:
if item in node_attrs:
node_deferred_attrs[node_attrs['identifier']][item] = node_attrs.pop(item)
# Create the node objects
node_obj = WorkflowJobNode(workflow_job=wfj, created=wfj.created, modified=wfj.modified, **node_attrs)
# we can set the deferred attrs now
for item, value in node_deferred_attrs[node_attrs['identifier']].items():
setattr(node_obj, item, value)
# the node is now ready to be bulk created
nodes.append(node_obj)
# we'll need this later when we do the m2m through model bulk create
node_m2m_objects[node_attrs['identifier']]['node'] = node_obj
WorkflowJobNode.objects.bulk_create(nodes)
# Deal with the m2m objects we have to create once the node exists
for field_name, through_model in node_m2m_object_types_to_through_model.items():
through_model_objects = []
for node_identifier in node_m2m_objects.keys():
if field_name in node_m2m_objects[node_identifier] and field_name == 'credentials':
for cred in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(credential=cred, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
for label in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
# if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
# for instance_group in node_m2m_objects[node_identifier][obj_type]:
# through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if through_model_objects:
through_model.objects.bulk_create(through_model_objects)
wfj.save()
wfj.signal_start()
return WorkflowJobSerializer().to_representation(wfj)
def check_organization_permission(self, attrs, request):
# validate Organization
# - If the orgs is not set, set it to the org of the launching user
# - If the user is part of multiple orgs, throw a validation error saying user is part of multiple orgs, please provide one
if not request.user.is_superuser:
read_org_qs = Organization.accessible_objects(request.user, 'member_role')
if 'organization' not in attrs or attrs['organization'] == None or attrs['organization'] == '':
read_org_ct = read_org_qs.count()
if read_org_ct == 1:
attrs['organization'] = read_org_qs.first()
elif read_org_ct > 1:
raise serializers.ValidationError("User has permission to multiple Organizations, please set one of them in the request")
else:
raise serializers.ValidationError("User not part of any organization, please assign an organization to assign to the bulk job")
else:
allowed_orgs = set(read_org_qs.values_list('id', flat=True))
requested_org = attrs['organization']
if requested_org.id not in allowed_orgs:
raise ValidationError(_(f"Organization {requested_org.id} not found or you don't have permissions to access it"))
def get_objectified_jobs(self, attrs, key_to_obj_map):
objectified_jobs = []
# This loop is generalized so we should only have to add related items to the key_to_obj_map
for job in attrs['jobs']:
objectified_job = {}
for key, value in job.items():
if key in key_to_obj_map:
if isinstance(value, int):
objectified_job[key] = key_to_obj_map[key][value]
elif isinstance(value, list):
objectified_job[key] = [key_to_obj_map[key][item] for item in value]
else:
objectified_job[key] = value
objectified_jobs.append(objectified_job)
return objectified_jobs
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [{'copy': 'organization.admin'}]
@@ -5149,7 +4765,7 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
),
)
until = serializers.SerializerMethodField(
help_text=_('The date this schedule will end. This field is computed from the RRULE. If the schedule does not end an empty string will be returned'),
help_text=_('The date this schedule will end. This field is computed from the RRULE. If the schedule does not end an emptry string will be returned'),
)
class Meta:
@@ -5471,8 +5087,6 @@ class InstanceGroupSerializer(BaseSerializer):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
res['access_list'] = self.reverse('api:instance_group_access_list', kwargs={'pk': obj.pk})
res['object_roles'] = self.reverse('api:instance_group_object_role_list', kwargs={'pk': obj.pk})
if obj.credential:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})

View File

@@ -7,12 +7,10 @@ the following fields (some fields may not be visible to all users):
* `project_base_dir`: Path on the server where projects and playbooks are \
stored.
* `project_local_paths`: List of directories beneath `project_base_dir` to
use when creating/editing a manual project.
use when creating/editing a project.
* `time_zone`: The configured time zone for the server.
* `license_info`: Information about the current license.
* `version`: Version of Ansible Tower package installed.
* `custom_virtualenvs`: Deprecated venv locations from before migration to
execution environments. Export tooling is in `awx-manage` commands.
* `eula`: The current End-User License Agreement
{% endifmeth %}
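As an illustration only, a client could read these fields with a GET to the configuration resource; the path /api/v2/config/ and the bearer token below are assumptions, not taken from this diff, while the field names are the ones documented above.

import requests

AWX_URL = "https://awx.example.org"  # hypothetical server
cfg = requests.get(
    f"{AWX_URL}/api/v2/config/",     # assumed location of this resource
    headers={"Authorization": "Bearer <oauth2-token>"},
).json()

# Fields documented above; some may be absent for non-admin users.
print(cfg.get("time_zone"), cfg.get("version"), cfg.get("project_base_dir"))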

View File

@@ -0,0 +1,4 @@
Version 1 of the Ansible Tower REST API.
Make a GET request to this resource to obtain a list of all child resources
available via the API.

View File

@@ -1,41 +0,0 @@
# Bulk Host Create
This endpoint allows the client to create multiple hosts and associate them with an inventory. They may do this by providing the inventory ID and a list of json that would normally be provided to create hosts.
Example:
{
"inventory": 1,
"hosts": [
{"name": "example1.com", "variables": "ansible_connection: local"},
{"name": "example2.com"}
]
}
Return data:
{
"url": "/api/v2/inventories/3/hosts/",
"hosts": [
{
"name": "example1.com",
"enabled": true,
"instance_id": "",
"description": "",
"variables": "ansible_connection: local",
"id": 1255,
"url": "/api/v2/hosts/1255/",
"inventory": "/api/v2/inventories/3/"
},
{
"name": "example2.com",
"enabled": true,
"instance_id": "",
"description": "",
"variables": "",
"id": 1256,
"url": "/api/v2/hosts/1256/",
"inventory": "/api/v2/inventories/3/"
}
]
}
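A minimal client-side sketch of the request above, assuming the endpoint is mounted at /api/v2/bulk/host_create/ (as in the URL configuration later in this diff) and treating the server address and OAuth2 token as placeholders:

import requests

AWX_URL = "https://awx.example.org"   # hypothetical server
HEADERS = {"Authorization": "Bearer <oauth2-token>"}

payload = {
    "inventory": 1,
    "hosts": [
        {"name": "example1.com", "variables": "ansible_connection: local"},
        {"name": "example2.com"},
    ],
}

resp = requests.post(f"{AWX_URL}/api/v2/bulk/host_create/", json=payload, headers=HEADERS)
resp.raise_for_status()
# The response lists the created hosts with their ids and URLs, as shown above.
for host in resp.json()["hosts"]:
    print(host["id"], host["name"], host["url"])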

View File

@@ -1,13 +0,0 @@
# Bulk Job Launch
This endpoint allows the client to launch multiple UnifiedJobTemplates at a time, along side any launch time parameters that they would normally set at launch time.
Example:
{
"name": "my bulk job",
"jobs": [
{"unified_job_template": 7, "inventory": 2},
{"unified_job_template": 7, "credentials": [3]}
]
}
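The same pattern applies for launching, sketched here under the assumption that the route is /api/v2/bulk/job_launch/ (matching the URL configuration later in this diff); per the view code, a successful launch returns 201 and validation failures return 400:

import requests

AWX_URL = "https://awx.example.org"   # hypothetical server
HEADERS = {"Authorization": "Bearer <oauth2-token>"}

payload = {
    "name": "my bulk job",
    "jobs": [
        {"unified_job_template": 7, "inventory": 2},
        {"unified_job_template": 7, "credentials": [3]},
    ],
}

resp = requests.post(f"{AWX_URL}/api/v2/bulk/job_launch/", json=payload, headers=HEADERS)
# 201: wrapping workflow job created and started; 400: serializer validation errors.
print(resp.status_code, resp.json())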

View File

@@ -1,3 +0,0 @@
# Bulk Actions
This endpoint lists available bulk action APIs.

View File

@@ -3,7 +3,7 @@ Make a GET request to this resource to retrieve aggregate statistics about inven
Including fetching the number of total hosts tracked by Tower over an amount of time and the current success or
failed status of hosts which have run jobs within an Inventory.
## Parameters and Filtering
## Parmeters and Filtering
The `period` of the data can be adjusted with:
@@ -24,7 +24,7 @@ Data about the number of hosts will be returned in the following format:
Each element contains an epoch timestamp represented in seconds and a numerical value indicating
the number of hosts that exist at a given moment
Data about failed and successful hosts by inventory will be given as:
Data about failed and successfull hosts by inventory will be given as:
{
"sources": [

View File

@@ -2,7 +2,7 @@
Make a GET request to this resource to retrieve aggregate statistics about job runs suitable for graphing.
## Parameters and Filtering
## Parmeters and Filtering
The `period` of the data can be adjusted with:

View File

@@ -0,0 +1,11 @@
# List Fact Scans for a Host Specific Host Scan
Make a GET request to this resource to retrieve system tracking data for a particular scan
You may filter by datetime:
`?datetime=2015-06-01`
and module
`?datetime=2015-06-01&module=ansible`

View File

@@ -0,0 +1,11 @@
# List Fact Scans for a Host by Module and Date
Make a GET request to this resource to retrieve system tracking scans by module and date/time
You may filter scan runs using the `from` and `to` properties:
`?from=2015-06-01%2012:00:00&to=2015-06-03`
You may also filter by module
`?module=packages`
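A short sketch of applying these filters from Python; the path used for host 1's fact versions is an assumption here, and only the `from`, `to`, and `module` parameters come from the text above:

import requests

AWX_URL = "https://awx.example.org"               # hypothetical server
url = f"{AWX_URL}/api/v2/hosts/1/fact_versions/"  # assumed path for host id 1

resp = requests.get(
    url,
    params={"from": "2015-06-01 12:00:00", "to": "2015-06-03", "module": "packages"},
    headers={"Authorization": "Bearer <oauth2-token>"},
)
print(resp.json())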

View File

@@ -0,0 +1 @@
# List Red Hat Insights for a Host

View File

@@ -18,7 +18,7 @@ inventory sources:
* `inventory_update`: ID of the inventory update job that was started.
(integer, read-only)
* `project_update`: ID of the project update job that was started if this inventory source is an SCM source.
(integer, read-only, optional)
(interger, read-only, optional)
Note: All manual inventory sources (source="") will be ignored by the update_inventory_sources endpoint. This endpoint will not update inventory sources for Smart Inventories.

View File

@@ -0,0 +1,21 @@
{% ifmeth GET %}
# Determine if a Job can be started
Make a GET request to this resource to determine if the job can be started and
whether any passwords are required to start the job. The response will include
the following fields:
* `can_start`: Flag indicating if this job can be started (boolean, read-only)
* `passwords_needed_to_start`: Password names required to start the job (array,
read-only)
{% endifmeth %}
{% ifmeth POST %}
# Start a Job
Make a POST request to this resource to start the job. If any passwords are
required, they must be passed via POST data.
If successful, the response status code will be 202. If any required passwords
are not provided, a 400 status code will be returned. If the job cannot be
started, a 405 status code will be returned.
{% endifmeth %}
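A sketch of the GET-then-POST flow described above; the exact path is an assumption (something like /api/v2/jobs/42/start/ for job 42), while the fields and status codes are the ones documented here:

import requests

AWX_URL = "https://awx.example.org"              # hypothetical server
START_URL = f"{AWX_URL}/api/v2/jobs/42/start/"   # assumed path for job id 42
HEADERS = {"Authorization": "Bearer <oauth2-token>"}

info = requests.get(START_URL, headers=HEADERS).json()
if info.get("can_start"):
    # Provide any prompted passwords named in passwords_needed_to_start.
    body = {name: "secret" for name in info.get("passwords_needed_to_start", [])}
    resp = requests.post(START_URL, json=body, headers=HEADERS)
    # 202 on success; 400 if required passwords are missing; 405 if the job cannot be started.
    print(resp.status_code)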

View File

@@ -3,14 +3,7 @@
from django.urls import re_path
from awx.api.views import (
InstanceGroupList,
InstanceGroupDetail,
InstanceGroupUnifiedJobsList,
InstanceGroupInstanceList,
InstanceGroupAccessList,
InstanceGroupObjectRolesList,
)
from awx.api.views import InstanceGroupList, InstanceGroupDetail, InstanceGroupUnifiedJobsList, InstanceGroupInstanceList
urls = [
@@ -18,8 +11,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', InstanceGroupDetail.as_view(), name='instance_group_detail'),
re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceGroupUnifiedJobsList.as_view(), name='instance_group_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/instances/$', InstanceGroupInstanceList.as_view(), name='instance_group_instance_list'),
re_path(r'^(?P<pk>[0-9]+)/access_list/$', InstanceGroupAccessList.as_view(), name='instance_group_access_list'),
re_path(r'^(?P<pk>[0-9]+)/object_roles/$', InstanceGroupObjectRolesList.as_view(), name='instance_group_object_role_list'),
]
__all__ = ['urls']

View File

@@ -31,13 +31,6 @@ from awx.api.views import (
ApplicationOAuth2TokenList,
OAuth2ApplicationDetail,
)
from awx.api.views.bulk import (
BulkView,
BulkHostCreateView,
BulkJobLaunchView,
)
from awx.api.views.mesh_visualizer import MeshVisualizer
from awx.api.views.metrics import MetricsView
@@ -143,9 +136,6 @@ v2_urls = [
re_path(r'^activity_stream/', include(activity_stream_urls)),
re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
]

View File

@@ -152,7 +152,7 @@ def api_exception_handler(exc, context):
if 'awx.named_url_rewritten' in req.environ and not str(getattr(exc, 'status_code', 0)).startswith('2'):
# if the URL was rewritten, and it's not a 2xx level status code,
# revert the request.path to its original value to avoid leaking
# any context about the existence of resources
# any context about the existance of resources
req.path = req.environ['awx.named_url_rewritten']
if exc.status_code == 403:
exc = NotFound(detail=_('Not found.'))
@@ -172,7 +172,7 @@ class DashboardView(APIView):
user_inventory = get_user_queryset(request.user, models.Inventory)
inventory_with_failed_hosts = user_inventory.filter(hosts_with_active_failures__gt=0)
user_inventory_external = user_inventory.filter(has_inventory_sources=True)
# if there are *zero* inventories, this aggregate query will be None, fall back to 0
# if there are *zero* inventories, this aggregrate query will be None, fall back to 0
failed_inventory = user_inventory.aggregate(Sum('inventory_sources_with_failures'))['inventory_sources_with_failures__sum'] or 0
data['inventories'] = {
'url': reverse('api:inventory_list', request=request),
@@ -466,23 +466,6 @@ class InstanceGroupUnifiedJobsList(SubListAPIView):
relationship = "unifiedjob_set"
class InstanceGroupAccessList(ResourceAccessList):
model = models.User # needs to be User for AccessLists
parent_model = models.InstanceGroup
class InstanceGroupObjectRolesList(SubListAPIView):
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.InstanceGroup
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
po = self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetachAPIView):
name = _("Instance Group's Instances")
model = models.Instance
@@ -1684,7 +1667,7 @@ class GroupList(ListCreateAPIView):
class EnforceParentRelationshipMixin(object):
"""
Useful when you have a self-referring ManyToManyRelationship.
Useful when you have a self-refering ManyToManyRelationship.
* Tower uses a shallow (2-deep only) url pattern. For example:
When an object hangs off of a parent object you would have the url of the
@@ -2432,7 +2415,7 @@ class JobTemplateSurveySpec(GenericAPIView):
status=status.HTTP_400_BAD_REQUEST,
)
# if it's a multiselect or multiple choice, it must have coices listed
# choices and defaults must come in as strings separated by /n characters.
# choices and defualts must come in as strings seperated by /n characters.
if qtype == 'multiselect' or qtype == 'multiplechoice':
if 'choices' in survey_item:
if isinstance(survey_item['choices'], str):
@@ -3095,9 +3078,7 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
search_fields = ('unified_job_template__name', 'unified_job_template__description')
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).order_by('id')
return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobTemplateJobsList(SubListAPIView):
@@ -3191,9 +3172,7 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
search_fields = ('unified_job_template__name', 'unified_job_template__description')
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).order_by('id')
return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobCancel(GenericCancelView):
@@ -3451,7 +3430,7 @@ class JobCreateSchedule(RetrieveAPIView):
config = obj.launch_config
# Make up a name for the schedule, guarantee that it is unique
# Make up a name for the schedule, guarentee that it is unique
name = 'Auto-generated schedule from job {}'.format(obj.id)
existing_names = models.Schedule.objects.filter(name__startswith=name).values_list('name', flat=True)
if name in existing_names:
@@ -3642,7 +3621,7 @@ class JobJobEventsChildrenSummary(APIView):
# key is counter of meta events (i.e. verbose), value is uuid of the assigned parent
map_meta_counter_nested_uuid = {}
# collapsible tree view in the UI only makes sense for tree-like
# collapsable tree view in the UI only makes sense for tree-like
# hierarchy. If ansible is ran with a strategy like free or host_pinned, then
# events can be out of sequential order, and no longer follow a tree structure
# E1

View File

@@ -1,69 +0,0 @@
from collections import OrderedDict
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.response import Response
from awx.main.models import UnifiedJob, Host
from awx.api.generics import (
GenericAPIView,
APIView,
)
from awx.api import (
serializers,
renderers,
)
class BulkView(APIView):
permission_classes = [IsAuthenticated]
renderer_classes = [
renderers.BrowsableAPIRenderer,
JSONRenderer,
]
allowed_methods = ['GET', 'OPTIONS']
def get(self, request, format=None):
'''List top level resources'''
data = OrderedDict()
data['host_create'] = reverse('api:bulk_host_create', request=request)
data['job_launch'] = reverse('api:bulk_job_launch', request=request)
return Response(data)
class BulkJobLaunchView(GenericAPIView):
permission_classes = [IsAuthenticated]
model = UnifiedJob
serializer_class = serializers.BulkJobLaunchSerializer
allowed_methods = ['GET', 'POST', 'OPTIONS']
def get(self, request):
data = OrderedDict()
data['detail'] = "Specify a list of unified job templates to launch alongside their launchtime parameters"
return Response(data, status=status.HTTP_200_OK)
def post(self, request):
bulkjob_serializer = serializers.BulkJobLaunchSerializer(data=request.data, context={'request': request})
if bulkjob_serializer.is_valid():
result = bulkjob_serializer.create(bulkjob_serializer.validated_data)
return Response(result, status=status.HTTP_201_CREATED)
return Response(bulkjob_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BulkHostCreateView(GenericAPIView):
permission_classes = [IsAuthenticated]
model = Host
serializer_class = serializers.BulkHostCreateSerializer
allowed_methods = ['GET', 'POST', 'OPTIONS']
def get(self, request):
return Response({"detail": "Bulk create hosts with this endpoint"}, status=status.HTTP_200_OK)
def post(self, request):
serializer = serializers.BulkHostCreateSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
result = serializer.create(serializer.validated_data)
return Response(result, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

View File

@@ -121,7 +121,6 @@ class ApiVersionRootView(APIView):
data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
data['bulk'] = reverse('api:bulk', request=request)
return Response(data)

View File

@@ -21,7 +21,7 @@ logger = logging.getLogger('awx.conf.fields')
# Use DRF fields to convert/validate settings:
# - to_representation(obj) should convert a native Python object to a primitive
# serializable type. This primitive type will be what is presented in the API
# and stored in the JSON field in the database.
# and stored in the JSON field in the datbase.
# - to_internal_value(data) should convert the primitive type back into the
# appropriate Python type to be used in settings.

View File

@@ -180,7 +180,7 @@ class SettingLoggingTest(GenericAPIView):
if not port:
return Response({'error': 'Port required for ' + protocol}, status=status.HTTP_400_BAD_REQUEST)
else:
# if http/https by this point, domain is reachable
# if http/https by this point, domain is reacheable
return Response(status=status.HTTP_202_ACCEPTED)
if protocol == 'udp':

View File

@@ -1972,7 +1972,7 @@ msgid ""
"HTTP headers and meta keys to search to determine remote host name or IP. "
"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if "
"behind a reverse proxy. See the \"Proxy Support\" section of the "
"Administrator guide for more details."
"Adminstrator guide for more details."
msgstr ""
#: awx/main/conf.py:85
@@ -2457,7 +2457,7 @@ msgid ""
msgstr ""
#: awx/main/conf.py:631
msgid "Maximum disk persistence for external log aggregation (in GB)"
msgid "Maximum disk persistance for external log aggregation (in GB)"
msgstr ""
#: awx/main/conf.py:633
@@ -2548,7 +2548,7 @@ msgid "Enable"
msgstr ""
#: awx/main/constants.py:27
msgid "Does"
msgid "Doas"
msgstr ""
#: awx/main/constants.py:28
@@ -4801,7 +4801,7 @@ msgstr ""
#: awx/main/models/workflow.py:251
msgid ""
"An identifier corresponding to the workflow job template node that this node "
"An identifier coresponding to the workflow job template node that this node "
"was created from."
msgstr ""
@@ -5521,7 +5521,7 @@ msgstr ""
#: awx/sso/conf.py:606
msgid ""
"Extra arguments for Google OAuth2 login. You can restrict it to only allow a "
"single domain to authenticate, even if the user is logged in with multiple "
"single domain to authenticate, even if the user is logged in with multple "
"Google accounts. Refer to the documentation for more detail."
msgstr ""
@@ -5905,7 +5905,7 @@ msgstr ""
#: awx/sso/conf.py:1290
msgid ""
"Create a key pair to use as a service provider (SP) and include the "
"Create a keypair to use as a service provider (SP) and include the "
"certificate content here."
msgstr ""
@@ -5915,7 +5915,7 @@ msgstr ""
#: awx/sso/conf.py:1302
msgid ""
"Create a key pair to use as a service provider (SP) and include the private "
"Create a keypair to use as a service provider (SP) and include the private "
"key content here."
msgstr ""

View File

@@ -1971,7 +1971,7 @@ msgid ""
"HTTP headers and meta keys to search to determine remote host name or IP. "
"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if "
"behind a reverse proxy. See the \"Proxy Support\" section of the "
"Administrator guide for more details."
"Adminstrator guide for more details."
msgstr "Los encabezados HTTP y las llaves de activación para buscar y determinar el nombre de host remoto o IP. Añada elementos adicionales a esta lista, como \"HTTP_X_FORWARDED_FOR\", si está detrás de un proxy inverso. Consulte la sección \"Soporte de proxy\" de la guía del adminstrador para obtener más información."
#: awx/main/conf.py:85
@@ -4804,7 +4804,7 @@ msgstr "Indica que un trabajo no se creará cuando es sea True. La semántica de
#: awx/main/models/workflow.py:251
msgid ""
"An identifier corresponding to the workflow job template node that this node "
"An identifier coresponding to the workflow job template node that this node "
"was created from."
msgstr "Un identificador que corresponde al nodo de plantilla de tarea del flujo de trabajo a partir del cual se creó este nodo."
@@ -5526,7 +5526,7 @@ msgstr "Argumentos adicionales para Google OAuth2"
#: awx/sso/conf.py:606
msgid ""
"Extra arguments for Google OAuth2 login. You can restrict it to only allow a "
"single domain to authenticate, even if the user is logged in with multiple "
"single domain to authenticate, even if the user is logged in with multple "
"Google accounts. Refer to the documentation for more detail."
msgstr "Argumentos adicionales para el inicio de sesión en Google OAuth2. Puede limitarlo para permitir la autenticación de un solo dominio, incluso si el usuario ha iniciado sesión con varias cuentas de Google. Consulte la documentación para obtener información detallada."
@@ -5910,7 +5910,7 @@ msgstr "Certificado público del proveedor de servicio SAML"
#: awx/sso/conf.py:1290
msgid ""
"Create a key pair to use as a service provider (SP) and include the "
"Create a keypair to use as a service provider (SP) and include the "
"certificate content here."
msgstr "Crear un par de claves para usar como proveedor de servicio (SP) e incluir el contenido del certificado aquí."
@@ -5920,7 +5920,7 @@ msgstr "Clave privada del proveedor de servicio SAML"
#: awx/sso/conf.py:1302
msgid ""
"Create a key pair to use as a service provider (SP) and include the private "
"Create a keypair to use as a service provider (SP) and include the private "
"key content here."
msgstr "Crear un par de claves para usar como proveedor de servicio (SP) e incluir el contenido de la clave privada aquí."

View File

@@ -588,39 +588,17 @@ class InstanceAccess(BaseAccess):
class InstanceGroupAccess(BaseAccess):
"""
I can see Instance Groups when I am:
- a superuser(system administrator)
- at least read_role on the instance group
I can edit Instance Groups when I am:
- a superuser
- admin role on the Instance group
I can add/delete Instance Groups:
- a superuser(system administrator)
I can use Instance Groups when I have:
- use_role on the instance group
"""
model = InstanceGroup
prefetch_related = ('instances',)
def filtered_queryset(self):
return self.model.accessible_objects(self.user, 'read_role')
@check_superuser
def can_use(self, obj):
return self.user in obj.use_role
return InstanceGroup.objects.filter(organization__in=Organization.accessible_pk_qs(self.user, 'admin_role')).distinct()
def can_add(self, data):
return self.user.is_superuser
@check_superuser
def can_change(self, obj, data):
return self.can_admin(obj)
@check_superuser
def can_admin(self, obj):
return self.user in obj.admin_role
return self.user.is_superuser
def can_delete(self, obj):
if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
@@ -867,7 +845,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)
if relationship == "instance_groups":
if self.user in obj.admin_role and self.user in sub_obj.use_role:
if self.user.is_superuser:
return True
return False
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@@ -956,7 +934,7 @@ class InventoryAccess(BaseAccess):
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == "instance_groups":
if self.user in sub_obj.use_role and self.user in obj.admin_role:
if self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role:
return True
return False
return super(InventoryAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@@ -1693,12 +1671,11 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
return self.user.is_superuser or self.user in obj.admin_role
@check_superuser
# object here is the job template. sub_object here is what is being attached
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == "instance_groups":
if not obj.organization:
return False
return self.user in sub_obj.use_role and self.user in obj.admin_role
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser
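
The can_attach hunks above switch the instance_groups check between two policies: one requires use_role on the instance group plus admin_role on the object being configured, the other requires read access to the instance group plus admin of the object's organization. A minimal sketch of the two checks, using plain sets and hypothetical users rather than AWX code:

# Sketch only: models the two attach policies with plain sets; names are illustrative.
def can_attach_role_based(user, instance_group, target):
    # use_role on the instance group and admin_role on the target object
    return user in instance_group["use_role"] and user in target["admin_role"]

def can_attach_org_admin_based(user, instance_group, target):
    # read access to the instance group and admin of the target's organization
    return user in instance_group["readers"] and user in target["organization"]["admin_role"]

ig = {"use_role": {"alice"}, "readers": {"alice", "bob"}}
jt = {"admin_role": {"alice"}, "organization": {"admin_role": {"bob"}}}

print(can_attach_role_based("alice", ig, jt))       # True
print(can_attach_org_admin_based("alice", ig, jt))  # False: alice is not an org admin
print(can_attach_org_admin_based("bob", ig, jt))    # True
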
@@ -1875,6 +1852,8 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
def _related_filtered_queryset(self, cls):
if cls is Label:
return LabelAccess(self.user).filtered_queryset()
elif cls is InstanceGroup:
return InstanceGroupAccess(self.user).filtered_queryset()
else:
return cls._accessible_pk_qs(cls, self.user, 'use_role')
@@ -1886,7 +1865,6 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
@check_superuser
def can_add(self, data, template=None):
# WARNING: duplicated with BulkJobLaunchSerializer, check when changing permission levels
# This is a special case, we don't check related many-to-many elsewhere
# launch RBAC checks use this
if 'reference_obj' in data:
@@ -2019,16 +1997,7 @@ class WorkflowJobNodeAccess(BaseAccess):
)
def filtered_queryset(self):
return self.model.objects.filter(
Q(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
| Q(workflow_job__organization__in=Organization.objects.filter(Q(admin_role__members=self.user)))
)
def can_read(self, obj):
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
if obj.workflow_job.is_bulk_job and obj.workflow_job.created_by_id == self.user.id:
return True
return super().can_read(obj)
return self.model.objects.filter(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
@check_superuser
def can_add(self, data):
@@ -2154,16 +2123,7 @@ class WorkflowJobAccess(BaseAccess):
)
def filtered_queryset(self):
return WorkflowJob.objects.filter(
Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
| Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
)
def can_read(self, obj):
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
if obj.is_bulk_job and obj.created_by_id == self.user.id:
return True
return super().can_read(obj)
return WorkflowJob.objects.filter(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
def can_add(self, data):
# Old add-start system for launching jobs is being deprecated, and
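
The longer form of WorkflowJobAccess.filtered_queryset in the hunk above ORs two conditions: workflow jobs whose unified job template the user can read, plus bulk jobs in organizations the user administers; the shorter form keeps only the first condition. A rough plain-Python equivalent of the OR, with made-up record dicts standing in for the ORM:

# Sketch of the OR'd visibility rule; record and field names are illustrative.
def visible_workflow_jobs(jobs, readable_template_ids, admin_org_ids):
    return [
        j for j in jobs
        if j["unified_job_template"] in readable_template_ids
        or (j.get("is_bulk_job") and j["organization"] in admin_org_ids)
    ]

jobs = [
    {"id": 1, "unified_job_template": 10, "organization": 5, "is_bulk_job": False},
    {"id": 2, "unified_job_template": None, "organization": 5, "is_bulk_job": True},
]
print([j["id"] for j in visible_workflow_jobs(jobs, {10}, {5})])   # [1, 2]
print([j["id"] for j in visible_workflow_jobs(jobs, set(), {7})])  # []
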

View File

@@ -233,13 +233,11 @@ def projects_by_scm_type(since, **kwargs):
return counts
@register('instance_info', '1.3', description=_('Cluster topology and capacity'))
@register('instance_info', '1.2', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs):
info = {}
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
tm_models = TaskManagerModels.init_with_consumed_capacity(
instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled', 'node_type']
)
tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
for tm_instance in tm_models.instances.instances_by_hostname.values():
instance = tm_instance.obj
instance_info = {

View File

@@ -282,16 +282,6 @@ register(
placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
)
register(
'AWX_RUNNER_KEEPALIVE_SECONDS',
field_class=fields.IntegerField,
label=_('K8S Ansible Runner Keep-Alive Message Interval'),
help_text=_('Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open.'),
category=_('Jobs'),
category_slug='jobs',
placeholder=240, # intended to be under common 5 minute idle timeout
)
register(
'GALAXY_TASK_ENV',
field_class=fields.KeyValueField,
@@ -775,26 +765,6 @@ register(
help_text=_('Indicates whether the instance is part of a kubernetes-based deployment.'),
)
register(
'BULK_JOB_MAX_LAUNCH',
field_class=fields.IntegerField,
default=100,
label=_('Max jobs to allow bulk jobs to launch'),
help_text=_('Max jobs to allow bulk jobs to launch'),
category=_('Bulk Actions'),
category_slug='bulk',
)
register(
'BULK_HOST_MAX_CREATE',
field_class=fields.IntegerField,
default=100,
label=_('Max number of hosts to allow to be created in a single bulk action'),
help_text=_('Max number of hosts to allow to be created in a single bulk action'),
category=_('Bulk Actions'),
category_slug='bulk',
)
def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
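
The BULK_JOB_MAX_LAUNCH and BULK_HOST_MAX_CREATE registrations above define caps on how many jobs or hosts a single bulk request may contain. A hedged sketch of how such a cap might be enforced in a validator; the function and error text are illustrative, not the actual serializer logic:

# Illustrative cap check, not the real serializer code.
BULK_JOB_MAX_LAUNCH = 100

def validate_bulk_jobs(jobs):
    if len(jobs) > BULK_JOB_MAX_LAUNCH:
        raise ValueError(
            f"Cannot launch {len(jobs)} jobs; the configured maximum is {BULK_JOB_MAX_LAUNCH}."
        )
    return jobs

validate_bulk_jobs([{"unified_job_template": 1}] * 5)      # passes
# validate_bulk_jobs([{"unified_job_template": 1}] * 500)  # would raise ValueError
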

View File

@@ -70,7 +70,7 @@ def aim_backend(**kwargs):
client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None)
verify = kwargs['verify']
webservice_id = kwargs.get('webservice_id', '')
webservice_id = kwargs['webservice_id']
app_id = kwargs['app_id']
object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format']
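
The aim_backend pair above trades kwargs.get('webservice_id', '') for kwargs['webservice_id'], turning a missing value from a silent empty-string default into a hard failure. The difference in plain Python:

kwargs = {"app_id": "my-app"}  # no webservice_id supplied

print(kwargs.get("webservice_id", ""))  # "" is passed along as "not set"
try:
    kwargs["webservice_id"]
except KeyError as exc:
    print(f"missing required input: {exc}")
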

View File

@@ -49,10 +49,7 @@ def tss_backend(**kwargs):
secret_dict = secret_server.get_secret(kwargs['secret_id'])
secret = ServerSecret(**secret_dict)
if isinstance(secret.fields[kwargs['secret_field']].value, str) == False:
return secret.fields[kwargs['secret_field']].value.text
else:
return secret.fields[kwargs['secret_field']].value
return secret.fields[kwargs['secret_field']].value
tss_plugin = CredentialPlugin(
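
In the tss_backend hunk, the longer branch returns .value.text when the field value is not already a string, for example when the SDK wraps file-type fields in an object. A small stand-in class showing that branch; the wrapper class here is assumed for illustration, not taken from the Thycotic SDK:

# Stub illustrating why the isinstance() branch exists; FileFieldValue is hypothetical.
class FileFieldValue:
    def __init__(self, text):
        self.text = text

def extract_field(value):
    if not isinstance(value, str):
        return value.text   # unwrap file-style fields
    return value            # plain string fields pass through unchanged

print(extract_field("s3cr3t"))                    # s3cr3t
print(extract_field(FileFieldValue("key data")))  # key data
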

View File

@@ -70,7 +70,7 @@ def reap_waiting(instance=None, status='failed', job_explanation=None, grace_per
reap_job(j, status, job_explanation=job_explanation)
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None, ref_time=None):
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
"""
Reap all jobs in running for this instance.
"""
@@ -80,7 +80,7 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
hostname = instance.hostname
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(
Q(status='running', modified__lte=ref_time) & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
)
if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
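
The variant of reap() that takes ref_time only fails running jobs whose modified timestamp is at or before a reference time, so a job updated an instant before the heartbeat is left alone. A plain-Python sketch of that filter with illustrative records:

from datetime import datetime, timedelta

def reapable(jobs, hostname, ref_time):
    # Mirrors the modified__lte=ref_time condition plus the execution/controller node match.
    return [
        j for j in jobs
        if j["status"] == "running"
        and j["modified"] <= ref_time
        and hostname in (j["execution_node"], j["controller_node"])
    ]

now = datetime(2023, 3, 1, 12, 0, 0)
jobs = [
    {"id": 1, "status": "running", "modified": now - timedelta(minutes=5),
     "execution_node": "awx", "controller_node": ""},
    {"id": 2, "status": "running", "modified": now + timedelta(seconds=1),
     "execution_node": "awx", "controller_node": ""},
]
print([j["id"] for j in reapable(jobs, "awx", now)])  # [1]
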

View File

@@ -1,143 +0,0 @@
import time
from urllib.parse import urljoin
from argparse import ArgumentTypeError
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils.timezone import now
from awx.main.models import Instance, UnifiedJob
class AWXInstance:
def __init__(self, **filter):
self.filter = filter
self.get_instance()
def get_instance(self):
filter = self.filter if self.filter is not None else dict(hostname=settings.CLUSTER_HOST_ID)
qs = Instance.objects.filter(**filter)
if not qs.exists():
raise ValueError(f"No AWX instance found with {filter} parameters")
self.instance = qs.first()
def disable(self):
if self.instance.enabled:
self.instance.enabled = False
self.instance.save()
return True
def enable(self):
if not self.instance.enabled:
self.instance.enabled = True
self.instance.save()
return True
def jobs(self):
return UnifiedJob.objects.filter(
Q(controller_node=self.instance.hostname) | Q(execution_node=self.instance.hostname), status__in=("running", "waiting")
)
def jobs_pretty(self):
jobs = []
for j in self.jobs():
job_started = j.started if j.started else now()
# similar calculation of `elapsed` as the corresponding serializer
# does
td = now() - job_started
elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / (10**6 * 1.0)
elapsed = float(elapsed)
details = dict(
name=j.name,
url=j.get_ui_url(),
elapsed=elapsed,
)
jobs.append(details)
jobs = sorted(jobs, reverse=True, key=lambda j: j["elapsed"])
return ", ".join([f"[\"{j['name']}\"]({j['url']})" for j in jobs])
def instance_pretty(self):
instance = (
self.instance.hostname,
urljoin(settings.TOWER_URL_BASE, f"/#/instances/{self.instance.pk}/details"),
)
return f"[\"{instance[0]}\"]({instance[1]})"
class Command(BaseCommand):
help = "Disable instance, optionally waiting for all its managed jobs to finish."
@staticmethod
def ge_1(arg):
if arg == "inf":
return float("inf")
int_arg = int(arg)
if int_arg < 1:
raise ArgumentTypeError(f"The value must be a positive number >= 1. Provided: \"{arg}\"")
return int_arg
def add_arguments(self, parser):
filter_group = parser.add_mutually_exclusive_group()
filter_group.add_argument(
"--hostname",
type=str,
default=settings.CLUSTER_HOST_ID,
help=f"{Instance.hostname.field.help_text} Defaults to the hostname of the machine where the Python interpreter is currently executing".strip(),
)
filter_group.add_argument("--id", type=self.ge_1, help=Instance.id.field.help_text)
parser.add_argument(
"--wait",
action="store_true",
help="Wait for jobs managed by the instance to finish. With default retry arguments waits ~1h",
)
parser.add_argument(
"--retry",
type=self.ge_1,
default=120,
help="Number of retries when waiting for jobs to finish. Default: 120. Also accepts \"inf\" to wait indefinitely",
)
parser.add_argument(
"--retry_sleep",
type=self.ge_1,
default=30,
help="Number of seconds to sleep before consequtive retries when waiting. Default: 30",
)
def handle(self, *args, **options):
try:
filter = dict(id=options["id"]) if options["id"] is not None else dict(hostname=options["hostname"])
instance = AWXInstance(**filter)
except ValueError as e:
raise CommandError(e)
if instance.disable():
self.stdout.write(self.style.SUCCESS(f"Instance {instance.instance_pretty()} has been disabled"))
else:
self.stdout.write(f"Instance {instance.instance_pretty()} has already been disabled")
if not options["wait"]:
return
rc = 1
while instance.jobs().count() > 0:
if rc < options["retry"]:
self.stdout.write(
f"{rc}/{options['retry']}: Waiting {options['retry_sleep']}s before the next attempt to see if the following instance' managed jobs have finished: {instance.jobs_pretty()}"
)
rc += 1
time.sleep(options["retry_sleep"])
else:
raise CommandError(
f"{rc}/{options['retry']}: No more retry attempts left, but the instance still has associated managed jobs: {instance.jobs_pretty()}"
)
else:
self.stdout.write(self.style.SUCCESS("Done waiting for instance's managed jobs to finish!"))
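
The management command above disables an instance and can then poll until its managed jobs drain, retrying up to --retry times with --retry_sleep seconds between attempts. The waiting loop reduces to something like this sketch; the poll callable and names are illustrative:

import time

def wait_for_jobs_to_finish(count_jobs, retries=120, retry_sleep=30):
    """Poll count_jobs() until it returns 0 or the retry budget is exhausted."""
    attempt = 1
    while count_jobs() > 0:
        if attempt >= retries:
            raise RuntimeError("instance still has managed jobs after all retries")
        print(f"{attempt}/{retries}: waiting {retry_sleep}s for jobs to finish")
        attempt += 1
        time.sleep(retry_sleep)
    print("done waiting for managed jobs to finish")

# Example with a stub poller that "finishes" after two checks:
remaining = iter([2, 1, 0])
wait_for_jobs_to_finish(lambda: next(remaining), retries=5, retry_sleep=0)
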

View File

@@ -851,7 +851,6 @@ class Command(BaseCommand):
logger.info('Updating inventory %d: %s' % (inventory.pk, inventory.name))
# Create ad-hoc inventory source and inventory update objects
ee = get_default_execution_environment()
with ignore_inventory_computed_fields():
source = Command.get_source_absolute_path(raw_source)
@@ -861,22 +860,14 @@ class Command(BaseCommand):
source_path=os.path.abspath(source),
overwrite=bool(options.get('overwrite', False)),
overwrite_vars=bool(options.get('overwrite_vars', False)),
execution_environment=ee,
)
inventory_update = inventory_source.create_inventory_update(
_eager_fields=dict(
status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd(), execution_environment=ee
)
_eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
)
try:
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
logger.debug('Finished loading from source: %s', source)
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
except SystemExit:
logger.debug("Error occurred while running ansible-inventory")
inventory_update.cancel()
sys.exit(1)
logger.debug('Finished loading from source: %s', source)
status, tb, exc = 'error', '', None
try:
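
The try/except shown in this hunk catches the SystemExit that a failed ansible-inventory run propagates out of AnsibleInventoryLoader, cancels the in-flight inventory update, and exits non-zero instead of leaving the update stuck in 'running'. A stripped-down sketch of that pattern with stand-in objects:

import sys

class FakeLoader:
    """Stand-in for AnsibleInventoryLoader; raises SystemExit like a failed ansible-inventory run."""
    def load(self):
        raise SystemExit(1)

class FakeUpdate:
    def cancel(self):
        print("inventory update marked canceled")

inventory_update = FakeUpdate()
try:
    data = FakeLoader().load()
except SystemExit:
    print("error occurred while running ansible-inventory")
    inventory_update.cancel()
    sys.exit(1)  # mirrors the command exiting with a non-zero status
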

View File

@@ -1,17 +0,0 @@
# Generated by Django 3.2.16 on 2023-01-05 15:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0174_ensure_org_ee_admin_roles'),
]
operations = [
migrations.AddField(
model_name='workflowjob',
name='is_bulk_job',
field=models.BooleanField(default=False),
),
]

View File

@@ -1,32 +0,0 @@
# Generated by Django 3.2.16 on 2023-03-03 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0175_workflowjob_is_bulk_job'),
]
operations = [
migrations.AddField(
model_name='inventorysource',
name='scm_branch',
field=models.CharField(
blank=True,
default='',
help_text='Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.',
max_length=1024,
),
),
migrations.AddField(
model_name='inventoryupdate',
name='scm_branch',
field=models.CharField(
blank=True,
default='',
help_text='Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.',
max_length=1024,
),
),
]

View File

@@ -1,48 +0,0 @@
# Generated by Django 3.2.16 on 2023-02-17 02:45
import awx.main.fields
from django.db import migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0176_inventorysource_scm_branch'),
]
operations = [
migrations.AddField(
model_name='instancegroup',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(
editable=False,
null='True',
on_delete=django.db.models.deletion.CASCADE,
parent_role=['singleton:system_administrator'],
related_name='+',
to='main.role',
),
preserve_default='True',
),
migrations.AddField(
model_name='instancegroup',
name='read_role',
field=awx.main.fields.ImplicitRoleField(
editable=False,
null='True',
on_delete=django.db.models.deletion.CASCADE,
parent_role=['singleton:system_auditor', 'use_role', 'admin_role'],
related_name='+',
to='main.role',
),
preserve_default='True',
),
migrations.AddField(
model_name='instancegroup',
name='use_role',
field=awx.main.fields.ImplicitRoleField(
editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role'], related_name='+', to='main.role'
),
preserve_default='True',
),
]

View File

@@ -1,18 +0,0 @@
# Generated by Django 3.2.16 on 2023-02-17 02:45
from django.db import migrations
from awx.main.migrations import _rbac as rbac
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _OrgAdmin_to_use_ig as oamigrate
from awx.main.migrations import ActivityStreamDisabledMigration
class Migration(ActivityStreamDisabledMigration):
dependencies = [
('main', '0177_instance_group_role_addition'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(rbac.create_roles),
migrations.RunPython(oamigrate.migrate_org_admin_to_use),
]

View File

@@ -1,20 +0,0 @@
import logging
from awx.main.models import Organization
logger = logging.getLogger('awx.main.migrations')
def migrate_org_admin_to_use(apps, schema_editor):
logger.info('Initiated migration from Org admin to use role')
roles_added = 0
for org in Organization.objects.prefetch_related('admin_role__members').iterator():
igs = list(org.instance_groups.all())
if not igs:
continue
for admin in org.admin_role.members.filter(is_superuser=False):
for ig in igs:
ig.use_role.members.add(admin)
roles_added += 1
if roles_added:
logger.info(f'Migration converted {roles_added} from organization admin to use role')

View File

@@ -29,7 +29,6 @@ def create_roles(apps, schema_editor):
'Project',
'Credential',
'JobTemplate',
'InstanceGroup',
]
]

View File

@@ -17,20 +17,15 @@ from django.db.models import Sum
import redis
from solo.models import SingletonModel
# AWX
from awx import __version__ as awx_application_version
from awx.api.versioning import reverse
from awx.main.fields import JSONBlob, ImplicitRoleField
from awx.main.fields import JSONBlob
from awx.main.managers import InstanceManager, UUID_DEFAULT
from awx.main.constants import JOB_FOLDER_PREFIX
from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.models.unified_jobs import UnifiedJob
from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity
from awx.main.models.mixins import RelatedJobsMixin, ResourceMixin
from awx.main.models.mixins import RelatedJobsMixin
# ansible-runner
from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes
@@ -357,7 +352,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(), update_last_seen=True, errors=errors)
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin, ResourceMixin):
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
"""A model representing a Queue/Group of AWX Instances."""
name = models.CharField(max_length=250, unique=True)
@@ -384,24 +379,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin, ResourceMi
default='',
)
)
admin_role = ImplicitRoleField(
parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
]
)
use_role = ImplicitRoleField(
parent_role=[
'admin_role',
]
)
read_role = ImplicitRoleField(
parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
'use_role',
'admin_role',
]
)
max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
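
The ImplicitRoleField block in this hunk gives each instance group an admin_role, a use_role that inherits from it, and a read_role that inherits from both plus the system auditor singleton, so granting a parent role implies the child. A small sketch of that parent-role resolution in plain Python, not the AWX Role model:

# Hypothetical parent-role graph mirroring the field definitions above.
PARENT_ROLES = {
    "admin_role": ["system_administrator"],
    "use_role": ["admin_role"],
    "read_role": ["system_auditor", "use_role", "admin_role"],
}

def has_role(user_roles, wanted, parents=PARENT_ROLES):
    """True if the user holds `wanted` directly or via any ancestor role."""
    if wanted in user_roles:
        return True
    return any(has_role(user_roles, parent, parents) for parent in parents.get(wanted, []))

print(has_role({"admin_role"}, "read_role"))  # True: admin implies read
print(has_role({"use_role"}, "admin_role"))   # False: use does not imply admin
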

View File

@@ -872,12 +872,6 @@ class InventorySourceOptions(BaseModel):
default='',
help_text=_('Inventory source variables in YAML or JSON format.'),
)
scm_branch = models.CharField(
max_length=1024,
default='',
blank=True,
help_text=_('Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.'),
)
enabled_var = models.TextField(
blank=True,
default='',

View File

@@ -14,7 +14,7 @@ from oauth2_provider.models import AbstractApplication, AbstractAccessToken
from oauth2_provider.generators import generate_client_secret
from oauthlib import oauth2
from awx.sso.common import get_external_account
from awx.main.utils import get_external_account
from awx.main.fields import OAuth2ClientSecretField

View File

@@ -650,7 +650,6 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
help_text=_("If automatically created for a sliced job run, the job template " "the workflow job was created from."),
)
is_sliced_job = models.BooleanField(default=False)
is_bulk_job = models.BooleanField(default=False)
def _set_default_dependencies_processed(self):
self.dependencies_processed = True

View File

@@ -27,8 +27,8 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
websocket_urlpatterns = [
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
re_path(r'websocket/', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),
]
application = AWXProtocolTypeRouter(
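
One side of the routing pair above anchors the patterns with a trailing $, the other does not. Without the anchor, 'websocket/' matches any path that begins with that prefix, including 'websocket/broadcast/', so route ordering starts to matter. The difference is easy to see with the re module:

import re

anchored = re.compile(r"websocket/$")
unanchored = re.compile(r"websocket/")

print(bool(anchored.match("websocket/")))              # True
print(bool(anchored.match("websocket/broadcast/")))    # False: $ requires the path to end here
print(bool(unanchored.match("websocket/broadcast/")))  # True: a prefix match is enough
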

View File

@@ -85,8 +85,6 @@ class RunnerCallback:
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if event_data.get('event') == 'keepalive':
return
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
@@ -118,7 +116,7 @@ class RunnerCallback:
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn', 'ansible.builtin.git', 'ansible.builtin.svn'):
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
@@ -221,7 +219,7 @@ class RunnerCallbackForProjectUpdate(RunnerCallback):
def event_handler(self, event_data):
super_return_value = super(RunnerCallbackForProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') in ('set_fact', 'ansible.builtin.set_fact'):
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
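
The pairs above switch between matching only short module names ('git', 'set_fact') and matching both the short and fully-qualified forms ('ansible.builtin.git', 'ansible.builtin.set_fact'). One way to keep such checks readable is to normalize the task action before comparing; a hedged sketch, not the callback's actual code:

def short_action(task_action):
    """Strip an ansible.builtin. prefix so 'ansible.builtin.git' compares equal to 'git'."""
    prefix = "ansible.builtin."
    return task_action[len(prefix):] if task_action.startswith(prefix) else task_action

for action in ("git", "ansible.builtin.git", "ansible.builtin.set_fact", "copy"):
    print(action, "->", short_action(action))
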

View File

@@ -759,7 +759,7 @@ class SourceControlMixin(BaseTask):
def sync_and_copy(self, project, private_data_dir, scm_branch=None):
self.acquire_lock(project, self.instance.id)
is_commit = False
try:
original_branch = None
failed_reason = project.get_reason_if_failed()
@@ -771,7 +771,6 @@ class SourceControlMixin(BaseTask):
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
is_commit = True
original_branch = git_repo.head.commit
else:
original_branch = git_repo.active_branch
@@ -783,11 +782,7 @@ class SourceControlMixin(BaseTask):
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
if is_commit:
git_repo.head.set_commit(original_branch)
git_repo.head.reset(index=True, working_tree=True)
else:
original_branch.checkout()
original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')
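
The is_commit branch in this hunk restores a repository that was on a detached HEAD by pointing HEAD back at the saved commit and hard-resetting, rather than checking out a branch object. A sketch of the two restore paths with GitPython, assuming the library is installed; the throwaway repository is created only so the example can run end to end:

import tempfile
from pathlib import Path
import git  # GitPython; assumed available

# Create a disposable repo with one commit.
repo_dir = tempfile.mkdtemp()
git_repo = git.Repo.init(repo_dir)
Path(repo_dir, "README").write_text("demo\n")
git_repo.index.add(["README"])
commit = git_repo.index.commit("initial")

# Detach HEAD, as a project update pinned to a commit would leave it.
git_repo.git.checkout(commit.hexsha)

if git_repo.head.is_detached:
    original = git_repo.head.commit         # remember the bare commit
    # ... update work would happen here ...
    git_repo.head.set_commit(original)      # point HEAD back at that commit
    git_repo.head.reset(index=True, working_tree=True)
else:
    original = git_repo.active_branch       # remember the branch object
    # ... update work would happen here ...
    original.checkout()                     # restore by checking the branch out again

print("restored to", git_repo.head.commit.hexsha[:8])
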
@@ -1586,7 +1581,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
if inventory_update.source == 'scm':
if not source_project:
raise RuntimeError('Could not find project to run SCM inventory update from.')
self.sync_and_copy(source_project, private_data_dir, scm_branch=inventory_update.inventory_source.scm_branch)
self.sync_and_copy(source_project, private_data_dir)
else:
# If source is not SCM make an empty project directory, content is built inside inventory folder
super(RunInventoryUpdate, self).build_project_dir(inventory_update, private_data_dir)

View File

@@ -526,10 +526,6 @@ class AWXReceptorJob:
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
if settings.AWX_RUNNER_KEEPALIVE_SECONDS:
pod_spec['spec']['containers'][0].setdefault('env', [])
pod_spec['spec']['containers'][0]['env'].append({'name': 'ANSIBLE_RUNNER_KEEPALIVE_SECONDS', 'value': str(settings.AWX_RUNNER_KEEPALIVE_SECONDS)})
# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
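
The lines above inject ANSIBLE_RUNNER_KEEPALIVE_SECONDS into the job pod's container spec when the setting is non-zero. The dictionary manipulation by itself is just the following; the value and image are placeholders:

pod_spec = {"spec": {"containers": [{"image": "example-ee:latest"}]}}
keepalive_seconds = 240

if keepalive_seconds:
    container = pod_spec["spec"]["containers"][0]
    container.setdefault("env", [])
    container["env"].append(
        {"name": "ANSIBLE_RUNNER_KEEPALIVE_SECONDS", "value": str(keepalive_seconds)}
    )

print(pod_spec["spec"]["containers"][0]["env"])
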

View File

@@ -581,7 +581,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
active_task_ids = []
for task_list in worker_tasks.values():
active_task_ids.extend(task_list)
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids)
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))

View File

@@ -14,7 +14,7 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES
@pytest.mark.django_db
def test_subclass_types():
def test_subclass_types(rando):
assert set(UnifiedJobTemplate._submodels_with_roles()) == set(
[
ContentType.objects.get_for_model(JobTemplate).id,

View File

@@ -1,311 +0,0 @@
import pytest
from uuid import uuid4
from awx.api.versioning import reverse
from awx.main.models.jobs import JobTemplate
from awx.main.models import Organization, Inventory, WorkflowJob, ExecutionEnvironment, Host
from awx.main.scheduler import TaskManager
@pytest.mark.django_db
@pytest.mark.parametrize('num_hosts, num_queries', [(1, 15), (10, 15)])
def test_bulk_host_create_num_queries(organization, inventory, post, get, user, num_hosts, num_queries, django_assert_max_num_queries):
'''
If I am a...
org admin
inventory admin at org level
admin of a particular inventory
superuser
Bulk Host create should take under a certain number of queries
'''
inventory.organization = organization
inventory_admin = user('inventory_admin', False)
org_admin = user('org_admin', False)
org_inv_admin = user('org_admin', False)
superuser = user('admin', True)
for u in [org_admin, org_inv_admin, inventory_admin]:
organization.member_role.members.add(u)
organization.admin_role.members.add(org_admin)
organization.inventory_admin_role.members.add(org_inv_admin)
inventory.admin_role.members.add(inventory_admin)
for u in [org_admin, inventory_admin, org_inv_admin, superuser]:
hosts = [{'name': uuid4()} for i in range(num_hosts)]
with django_assert_max_num_queries(num_queries):
bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, u, expect=201).data
assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {u}"
@pytest.mark.django_db
def test_bulk_host_create_rbac(organization, inventory, post, get, user):
'''
If I am a...
org admin
inventory admin at org level
admin of a particular inventory
... I can bulk add hosts
Everyone else cannot
'''
inventory.organization = organization
inventory_admin = user('inventory_admin', False)
org_admin = user('org_admin', False)
org_inv_admin = user('org_admin', False)
auditor = user('auditor', False)
member = user('member', False)
use_inv_member = user('member', False)
for u in [org_admin, org_inv_admin, auditor, member, inventory_admin, use_inv_member]:
organization.member_role.members.add(u)
organization.admin_role.members.add(org_admin)
organization.inventory_admin_role.members.add(org_inv_admin)
inventory.admin_role.members.add(inventory_admin)
inventory.use_role.members.add(use_inv_member)
organization.auditor_role.members.add(auditor)
for indx, u in enumerate([org_admin, inventory_admin, org_inv_admin]):
bulk_host_create_response = post(
reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar-{indx}'}]}, u, expect=201
).data
assert len(bulk_host_create_response['hosts']) == 1, f"unexpected number of hosts created for user {u}"
assert Host.objects.filter(inventory__id=inventory.id)[0].name == 'foobar-0'
for indx, u in enumerate([member, auditor, use_inv_member]):
bulk_host_create_response = post(
reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar2-{indx}'}]}, u, expect=400
).data
assert bulk_host_create_response['__all__'][0] == f'Inventory with id {inventory.id} not found or lack permissions to add hosts.'
@pytest.mark.django_db
@pytest.mark.parametrize('num_jobs, num_queries', [(1, 25), (10, 25)])
def test_bulk_job_launch_queries(job_template, organization, inventory, project, post, get, user, num_jobs, num_queries, django_assert_max_num_queries):
'''
if I have access to the unified job template
... I can launch the bulk job
... and the number of queries should NOT scale with the number of jobs
'''
normal_user = user('normal_user', False)
org_admin = user('org_admin', False)
jt = JobTemplate.objects.create(name='my-jt', ask_inventory_on_launch=True, project=project, playbook='helloworld.yml')
organization.member_role.members.add(normal_user)
organization.admin_role.members.add(org_admin)
jt.execute_role.members.add(normal_user)
inventory.use_role.members.add(normal_user)
jt.save()
inventory.save()
jobs = [{'unified_job_template': jt.id, 'inventory': inventory.id} for _ in range(num_jobs)]
# This is not working; we need to figure it out if we want to include tests for more jobs
# with mock.patch('awx.api.serializers.settings.BULK_JOB_MAX_LAUNCH', num_jobs + 1):
with django_assert_max_num_queries(num_queries):
bulk_job_launch_response = post(reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': jobs}, normal_user, expect=201).data
# Run task manager so the workflow job nodes actually spawn
TaskManager().schedule()
for u in (org_admin, normal_user):
bulk_job = get(bulk_job_launch_response['url'], u, expect=200).data
assert organization.id == bulk_job['summary_fields']['organization']['id']
resp = get(bulk_job_launch_response['related']['workflow_nodes'], u)
assert resp.data['count'] == num_jobs
for item in resp.data['results']:
assert item["unified_job_template"] == jt.id
assert item["inventory"] == inventory.id
@pytest.mark.django_db
def test_bulk_job_launch_no_access_to_job_template(job_template, organization, inventory, project, credential, post, get, user):
'''
if I don't have access to the unified job template
... I can't launch the bulk job
'''
normal_user = user('normal_user', False)
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
jt.save()
organization.member_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}]}, normal_user, expect=400
).data
assert bulk_job_launch_response['__all__'][0] == f'Job Templates {{{jt.id}}} not found or you don\'t have permissions to access it'
@pytest.mark.django_db
def test_bulk_job_launch_no_org_assigned(job_template, organization, inventory, project, credential, post, get, user):
'''
if I am not part of any organization...
... I can't launch the bulk job
'''
normal_user = user('normal_user', False)
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
jt.save()
jt.execute_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}]}, normal_user, expect=400
).data
assert bulk_job_launch_response['__all__'][0] == 'User not part of any organization, please assign an organization to assign to the bulk job'
@pytest.mark.django_db
def test_bulk_job_launch_multiple_org_assigned(job_template, organization, inventory, project, credential, post, get, user):
'''
if I am part of multiple organization...
and if I do not provide org at the launch time
... I can't launch the bulk job
'''
normal_user = user('normal_user', False)
org1 = Organization.objects.create(name='foo1')
org2 = Organization.objects.create(name='foo2')
org1.member_role.members.add(normal_user)
org2.member_role.members.add(normal_user)
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
jt.save()
jt.execute_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}]}, normal_user, expect=400
).data
assert bulk_job_launch_response['__all__'][0] == 'User has permission to multiple Organizations, please set one of them in the request'
@pytest.mark.django_db
def test_bulk_job_launch_specific_org(job_template, organization, inventory, project, credential, post, get, user):
'''
if I am part of multiple organization...
and if I provide org at the launch time
... I can launch the bulk job
'''
normal_user = user('normal_user', False)
org1 = Organization.objects.create(name='foo1')
org2 = Organization.objects.create(name='foo2')
org1.member_role.members.add(normal_user)
org2.member_role.members.add(normal_user)
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
jt.save()
jt.execute_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}], 'organization': org1.id}, normal_user, expect=201
).data
bulk_job_id = bulk_job_launch_response['id']
bulk_job_obj = WorkflowJob.objects.filter(id=bulk_job_id, is_bulk_job=True).first()
assert org1.id == bulk_job_obj.organization.id
@pytest.mark.django_db
def test_bulk_job_launch_inventory_no_access(job_template, organization, inventory, project, credential, post, get, user):
'''
if I don't have access to the inventory...
and if I try to use it at the launch time
... I can't launch the bulk job
'''
normal_user = user('normal_user', False)
org1 = Organization.objects.create(name='foo1')
org2 = Organization.objects.create(name='foo2')
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
jt.save()
org1.member_role.members.add(normal_user)
inv = Inventory.objects.create(name='inv1', organization=org2)
jt.execute_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id, 'inventory': inv.id}]}, normal_user, expect=400
).data
assert bulk_job_launch_response['__all__'][0] == f'Inventories {{{inv.id}}} not found or you don\'t have permissions to access it'
@pytest.mark.django_db
def test_bulk_job_inventory_prompt(job_template, organization, inventory, project, credential, post, get, user):
'''
Job template has an inventory set as prompt_on_launch
and if I provide the inventory as a parameter in bulk job
... job uses that inventory
'''
normal_user = user('normal_user', False)
org1 = Organization.objects.create(name='foo1')
jt = JobTemplate.objects.create(name='my-jt', ask_inventory_on_launch=True, project=project, playbook='helloworld.yml')
jt.save()
org1.member_role.members.add(normal_user)
inv = Inventory.objects.create(name='inv1', organization=org1)
jt.execute_role.members.add(normal_user)
inv.use_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id, 'inventory': inv.id}]}, normal_user, expect=201
).data
bulk_job_id = bulk_job_launch_response['id']
node = WorkflowJob.objects.get(id=bulk_job_id).workflow_job_nodes.all().order_by('created')
assert inv.id == node[0].inventory.id
@pytest.mark.django_db
def test_bulk_job_set_all_prompt(job_template, organization, inventory, project, credentialtype_ssh, post, get, user):
'''
Job template has many fields set as prompt_on_launch
and if I provide all those fields as a parameter in bulk job
... job uses them
'''
normal_user = user('normal_user', False)
jt = JobTemplate.objects.create(
name='my-jt',
ask_inventory_on_launch=True,
ask_diff_mode_on_launch=True,
ask_job_type_on_launch=True,
ask_verbosity_on_launch=True,
ask_execution_environment_on_launch=True,
ask_forks_on_launch=True,
ask_job_slice_count_on_launch=True,
ask_timeout_on_launch=True,
ask_variables_on_launch=True,
ask_scm_branch_on_launch=True,
ask_limit_on_launch=True,
ask_skip_tags_on_launch=True,
ask_tags_on_launch=True,
project=project,
playbook='helloworld.yml',
)
jt.save()
organization.member_role.members.add(normal_user)
inv = Inventory.objects.create(name='inv1', organization=organization)
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
jt.execute_role.members.add(normal_user)
inv.use_role.members.add(normal_user)
bulk_job_launch_response = post(
reverse('api:bulk_job_launch'),
{
'name': 'Bulk Job Launch',
'jobs': [
{
'unified_job_template': jt.id,
'inventory': inv.id,
'diff_mode': True,
'job_type': 'check',
'verbosity': 3,
'execution_environment': ee.id,
'forks': 1,
'job_slice_count': 1,
'timeout': 200,
'extra_data': {'prompted_key': 'prompted_val'},
'scm_branch': 'non_dev',
'limit': 'kansas',
'skip_tags': 'foobar',
'job_tags': 'untagged',
}
],
},
normal_user,
expect=201,
).data
bulk_job_id = bulk_job_launch_response['id']
node = WorkflowJob.objects.get(id=bulk_job_id).workflow_job_nodes.all().order_by('created')
assert node[0].inventory.id == inv.id
assert node[0].diff_mode == True
assert node[0].job_type == 'check'
assert node[0].verbosity == 3
assert node[0].execution_environment.id == ee.id
assert node[0].forks == 1
assert node[0].job_slice_count == 1
assert node[0].timeout == 200
assert node[0].extra_data == {'prompted_key': 'prompted_val'}
assert node[0].scm_branch == 'non_dev'
assert node[0].limit == 'kansas'
assert node[0].skip_tags == 'foobar'
assert node[0].job_tags == 'untagged'

View File

@@ -235,7 +235,6 @@ class TestAutoScaling:
assert len(self.pool) == 10
assert self.pool.workers[0].messages_sent == 2
@pytest.mark.timeout(20)
def test_lost_worker_autoscale(self):
# if a worker exits, it should be replaced automatically up to min_workers
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
@@ -244,8 +243,8 @@ class TestAutoScaling:
assert len(self.pool) == 2
assert not self.pool.should_grow
alive_pid = self.pool.workers[1].pid
self.pool.workers[0].process.kill()
self.pool.workers[0].process.join() # waits for process to full terminate
self.pool.workers[0].process.terminate()
time.sleep(2) # wait a moment for sigterm
# clean up and the dead worker
self.pool.cleanup()
@@ -337,8 +336,6 @@ class TestTaskPublisher:
yesterday = tz_now() - datetime.timedelta(days=1)
minute = tz_now() - datetime.timedelta(seconds=120)
now = tz_now()
@pytest.mark.django_db
@@ -347,8 +344,8 @@ class TestJobReaper(object):
'status, execution_node, controller_node, modified, fail',
[
('running', '', '', None, False), # running, not assigned to the instance
('running', 'awx', '', minute, True), # running, has the instance as its execution_node
('running', '', 'awx', minute, True), # running, has the instance as its controller_node
('running', 'awx', '', None, True), # running, has the instance as its execution_node
('running', '', 'awx', None, True), # running, has the instance as its controller_node
('waiting', '', '', None, False), # waiting, not assigned to the instance
('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago
('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago
@@ -370,7 +367,7 @@ class TestJobReaper(object):
# we have to edit the modification time _without_ calling save()
# (because .save() overwrites it to _now_)
Job.objects.filter(id=j.id).update(modified=modified)
reaper.reap(i, ref_time=now)
reaper.reap(i)
reaper.reap_waiting(i)
job = Job.objects.first()
if fail:
@@ -381,15 +378,13 @@ class TestJobReaper(object):
assert job.status == status
@pytest.mark.parametrize(
'excluded_uuids, fail, modified',
'excluded_uuids, fail',
[
(['abc123'], False, None),
([], False, None),
([], True, minute),
(['abc123'], False),
([], True),
],
)
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail, modified):
"""Modified Test to account for ref_time in reap()"""
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail):
i = Instance(hostname='awx')
i.save()
j = Job(
@@ -400,13 +395,10 @@ class TestJobReaper(object):
celery_task_id='abc123',
)
j.save()
if modified:
Job.objects.filter(id=j.id).update(modified=modified)
# if the UUID is excluded, don't reap it
reaper.reap(i, excluded_uuids=excluded_uuids, ref_time=now)
reaper.reap(i, excluded_uuids=excluded_uuids)
job = Job.objects.first()
if fail:
assert job.status == 'failed'
assert 'marked as failed' in job.job_explanation
@@ -419,6 +411,6 @@ class TestJobReaper(object):
i.save()
j = WorkflowJob(status='running', execution_node='awx')
j.save()
reaper.reap(i, ref_time=now)
reaper.reap(i)
assert WorkflowJob.objects.first().status == 'running'

View File

@@ -99,12 +99,12 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan
list_response = get(reverse('api:instance_list'), user=system_auditor)
api_num_instances_auditor = list(list_response.data.items())[0][1]
ig_all.read_role.members.add(org_admin)
list_response2 = get(reverse('api:instance_list'), user=org_admin)
api_num_instances_oa = list(list_response2.data.items())[0][1]
assert api_num_instances_auditor == actual_num_instances
# Note: The org_admin will not see instances unless at least read_role to the IG has been assigned
# Note: The org_admin will not see the default 'tower' node
# (instance fixture) because it is not in its group, as expected
assert api_num_instances_oa == (actual_num_instances - 1)

View File

@@ -1,16 +0,0 @@
import pytest
from django.apps import apps
from awx.main.models import InstanceGroup
from awx.main.migrations import _OrgAdmin_to_use_ig as orgadmin
@pytest.mark.django_db
def test_migrate_admin_role(org_admin, organization):
instance_group = InstanceGroup.objects.create(name='test')
organization.admin_role.members.add(org_admin)
organization.instance_groups.add(instance_group)
orgadmin.migrate_org_admin_to_use(apps, None)
assert org_admin in instance_group.use_role.members.all()
assert instance_group.use_role.members.count() == 1

View File

@@ -6,47 +6,7 @@ from awx.main.access import (
InventoryAccess,
JobTemplateAccess,
)
@pytest.mark.django_db
@pytest.mark.parametrize(
"obj_perm,allowed,readonly,partial", [("admin_role", True, True, True), ("use_role", False, True, True), ("read_role", False, True, False)]
)
def test_ig_role_base_visibility(default_instance_group, rando, obj_perm, allowed, partial, readonly):
if obj_perm:
getattr(default_instance_group, obj_perm).members.add(rando)
assert readonly == InstanceGroupAccess(rando).can_read(default_instance_group)
assert partial == InstanceGroupAccess(rando).can_use(default_instance_group)
assert not InstanceGroupAccess(rando).can_add(default_instance_group)
assert allowed == InstanceGroupAccess(rando).can_admin(default_instance_group)
assert allowed == InstanceGroupAccess(rando).can_change(default_instance_group, {'name': 'New Name'})
@pytest.mark.django_db
@pytest.mark.parametrize(
"obj_perm,subobj_perm,allowed", [('admin_role', 'use_role', True), ('admin_role', 'read_role', False), ('admin_role', 'admin_role', True)]
)
def test_ig_role_based_associability(default_instance_group, rando, organization, job_template_factory, obj_perm, subobj_perm, allowed):
objects = job_template_factory('jt', organization=organization, project='p', inventory='i', credential='c')
if obj_perm:
getattr(objects.job_template, obj_perm).members.add(rando)
getattr(objects.inventory, obj_perm).members.add(rando)
getattr(objects.organization, obj_perm).members.add(rando)
if subobj_perm:
getattr(default_instance_group, subobj_perm).members.add(rando)
assert allowed == JobTemplateAccess(rando).can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
assert allowed == InventoryAccess(rando).can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
assert allowed == OrganizationAccess(rando).can_attach(objects.organization, default_instance_group, 'instance_groups', None)
@pytest.mark.django_db
def test_ig_use_with_org_admin(default_instance_group, rando, org_admin):
default_instance_group.use_role.members.add(rando)
assert list(InstanceGroupAccess(org_admin).get_queryset()) != [default_instance_group]
assert list(InstanceGroupAccess(rando).get_queryset()) == [default_instance_group]
from awx.main.models import Organization
@pytest.mark.django_db
@@ -64,7 +24,7 @@ def test_ig_admin_user_visibility(organization, default_instance_group, admin, s
assert len(InstanceGroupAccess(system_auditor).get_queryset()) == 1
assert len(InstanceGroupAccess(org_admin).get_queryset()) == 0
organization.instance_groups.add(default_instance_group)
assert len(InstanceGroupAccess(org_admin).get_queryset()) == 0
assert len(InstanceGroupAccess(org_admin).get_queryset()) == 1
@pytest.mark.django_db
@@ -77,6 +37,16 @@ def test_ig_normal_user_associability(organization, default_instance_group, user
assert not access.can_attach(organization, default_instance_group, 'instance_groups', None)
@pytest.mark.django_db
def test_access_via_two_organizations(rando, default_instance_group):
for org_name in ['org1', 'org2']:
org = Organization.objects.create(name=org_name)
org.instance_groups.add(default_instance_group)
org.admin_role.members.add(rando)
access = InstanceGroupAccess(rando)
assert list(access.get_queryset()) == [default_instance_group]
@pytest.mark.django_db
def test_ig_associability(organization, default_instance_group, admin, system_auditor, org_admin, org_member, job_template_factory):
admin_access = OrganizationAccess(admin)
@@ -102,7 +72,7 @@ def test_ig_associability(organization, default_instance_group, admin, system_au
omember_access = InventoryAccess(org_member)
assert admin_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
assert not oadmin_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
assert oadmin_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
assert not auditor_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
assert not omember_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
@@ -112,6 +82,6 @@ def test_ig_associability(organization, default_instance_group, admin, system_au
omember_access = JobTemplateAccess(org_member)
assert admin_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
assert not oadmin_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
assert oadmin_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
assert not auditor_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
assert not omember_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)

View File

@@ -148,7 +148,7 @@ class TestWorkflowJobTemplateNodeAccess:
elif permission_type == 'instance_groups':
sub_obj = InstanceGroup.objects.create()
org = Organization.objects.create()
sub_obj.use_role.members.add(rando) # only admins can see IGs
org.admin_role.members.add(rando) # only admins can see IGs
org.instance_groups.add(sub_obj)
access = WorkflowJobTemplateNodeAccess(rando)

View File

@@ -18,7 +18,7 @@ class DistinctParametrize(object):
@pytest.mark.survey
class TestSurveyVariableValidation:
class SurveyVariableValidation:
def test_survey_answers_as_string(self, job_template_factory):
objects = job_template_factory('job-template-with-survey', survey=[{'variable': 'var1', 'type': 'text'}], persisted=False)
jt = objects.job_template
@@ -57,7 +57,7 @@ class TestSurveyVariableValidation:
accepted, rejected, errors = obj.accept_or_ignore_variables({"a": 5})
assert rejected == {"a": 5}
assert accepted == {}
assert str(errors['variables_needed_to_start'][0]) == "Value 5 for 'a' expected to be a string."
assert str(errors[0]) == "Value 5 for 'a' expected to be a string."
def test_job_template_survey_default_variable_validation(self, job_template_factory):
objects = job_template_factory(
@@ -88,7 +88,7 @@ class TestSurveyVariableValidation:
obj.survey_enabled = True
accepted, _, errors = obj.accept_or_ignore_variables({"a": 2})
assert accepted == {"a": 2.0}
assert accepted == {{"a": 2.0}}
assert not errors

View File

@@ -80,6 +80,7 @@ __all__ = [
'set_environ',
'IllegalArgumentError',
'get_custom_venv_choices',
'get_external_account',
'ScheduleTaskManager',
'ScheduleDependencyManager',
'ScheduleWorkflowManager',
@@ -1088,6 +1089,29 @@ def has_model_field_prefetched(model_obj, field_name):
return getattr(getattr(model_obj, field_name, None), 'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
from django.conf import settings
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (
getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None)
or getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)
) and user.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
account_type = "enterprise"
return account_type
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
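
The get_external_account helper shown here checks the configured auth backends in sequence, so a user who matches several checks ends up classified by the last one that applies (ldap, then social, then enterprise). A simplified stand-alone rendering of that ordering with stub inputs; the real function reads Django settings and the user's related objects:

def classify_account(has_ldap_dn, has_social_auth, has_enterprise_auth):
    account_type = None
    if has_ldap_dn:
        account_type = "ldap"
    if has_social_auth:
        account_type = "social"
    if has_enterprise_auth:
        account_type = "enterprise"
    return account_type

print(classify_account(True, False, False))   # ldap
print(classify_account(True, True, False))    # social: the later check wins
print(classify_account(False, False, False))  # None, i.e. a local account
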

View File

@@ -25,47 +25,42 @@
connection: local
name: Update source tree if necessary
tasks:
- name: Delete project directory before update
ansible.builtin.shell: set -o pipefail && find . -delete -print | tail -2 # volume mounted, cannot delete folder itself
register: reg
changed_when: reg.stdout_lines | length > 1
- name: delete project directory before update
command: "find -delete" # volume mounted, cannot delete folder itself
args:
chdir: "{{ project_path }}"
tags:
- delete
- name: Update project using git
tags:
- update_git
block:
- name: Update project using git
ansible.builtin.git:
dest: "{{ project_path | quote }}"
repo: "{{ scm_url }}"
version: "{{ scm_branch | quote }}"
refspec: "{{ scm_refspec | default(omit) }}"
force: "{{ scm_clean }}"
track_submodules: "{{ scm_track_submodules | default(omit) }}"
accept_hostkey: "{{ scm_accept_hostkey | default(omit) }}"
- block:
- name: update project using git
git:
dest: "{{project_path|quote}}"
repo: "{{scm_url}}"
version: "{{scm_branch|quote}}"
refspec: "{{scm_refspec|default(omit)}}"
force: "{{scm_clean}}"
track_submodules: "{{scm_track_submodules|default(omit)}}"
accept_hostkey: "{{scm_accept_hostkey|default(omit)}}"
register: git_result
- name: Set the git repository version
ansible.builtin.set_fact:
set_fact:
scm_version: "{{ git_result['after'] }}"
when: "'after' in git_result"
- name: Update project using svn
tags:
- update_svn
block:
- name: Update project using svn
ansible.builtin.subversion:
dest: "{{ project_path | quote }}"
repo: "{{ scm_url | quote }}"
revision: "{{ scm_branch | quote }}"
force: "{{ scm_clean }}"
username: "{{ scm_username | default(omit) }}"
password: "{{ scm_password | default(omit) }}"
- update_git
- block:
- name: update project using svn
subversion:
dest: "{{project_path|quote}}"
repo: "{{scm_url|quote}}"
revision: "{{scm_branch|quote}}"
force: "{{scm_clean}}"
username: "{{scm_username|default(omit)}}"
password: "{{scm_password|default(omit)}}"
# must be in_place because folder pre-existing, because it is mounted
in_place: true
environment:
@@ -73,90 +68,85 @@
register: svn_result
- name: Set the svn repository version
ansible.builtin.set_fact:
set_fact:
scm_version: "{{ svn_result['after'] }}"
when: "'after' in svn_result"
- name: Parse subversion version string properly
ansible.builtin.set_fact:
scm_version: "{{ scm_version | regex_replace('^.*Revision: ([0-9]+).*$', '\\1') }}"
- name: Project update for Insights
- name: parse subversion version string properly
set_fact:
scm_version: "{{scm_version|regex_replace('^.*Revision: ([0-9]+).*$', '\\1')}}"
tags:
- update_insights
block:
- update_svn
- block:
- name: Ensure the project directory is present
ansible.builtin.file:
dest: "{{ project_path | quote }}"
file:
dest: "{{project_path|quote}}"
state: directory
mode: '0755'
- name: Fetch Insights Playbook(s)
insights:
insights_url: "{{ insights_url }}"
username: "{{ scm_username }}"
password: "{{ scm_password }}"
project_path: "{{ project_path }}"
awx_license_type: "{{ awx_license_type }}"
awx_version: "{{ awx_version }}"
insights_url: "{{insights_url}}"
username: "{{scm_username}}"
password: "{{scm_password}}"
project_path: "{{project_path}}"
awx_license_type: "{{awx_license_type}}"
awx_version: "{{awx_version}}"
register: results
- name: Save Insights Version
ansible.builtin.set_fact:
scm_version: "{{ results.version }}"
set_fact:
scm_version: "{{results.version}}"
when: results is defined
- name: Update project using archive
tags:
- update_archive
block:
- update_insights
- block:
- name: Ensure the project archive directory is present
ansible.builtin.file:
dest: "{{ project_path | quote }}/.archive"
file:
dest: "{{ project_path|quote }}/.archive"
state: directory
mode: '0755'
- name: Get archive from url
ansible.builtin.get_url:
url: "{{ scm_url | quote }}"
dest: "{{ project_path | quote }}/.archive/"
url_username: "{{ scm_username | default(omit) }}"
url_password: "{{ scm_password | default(omit) }}"
get_url:
url: "{{ scm_url|quote }}"
dest: "{{ project_path|quote }}/.archive/"
url_username: "{{ scm_username|default(omit) }}"
url_password: "{{ scm_password|default(omit) }}"
force_basic_auth: true
mode: '0755'
register: get_archive
- name: Unpack archive
project_archive:
src: "{{ get_archive.dest }}"
project_path: "{{ project_path | quote }}"
project_path: "{{ project_path|quote }}"
force: "{{ scm_clean }}"
when: get_archive.changed or scm_clean
register: unarchived
- name: Find previous archives
ansible.builtin.find:
paths: "{{ project_path | quote }}/.archive/"
find:
paths: "{{ project_path|quote }}/.archive/"
excludes:
- "{{ get_archive.dest | basename }}"
- "{{ get_archive.dest|basename }}"
when: unarchived.changed
register: previous_archive
- name: Remove previous archives
ansible.builtin.file:
file:
path: "{{ item.path }}"
state: absent
loop: "{{ previous_archive.files }}"
when: previous_archive.files | default([])
when: previous_archive.files|default([])
- name: Set scm_version to archive sha1 checksum
ansible.builtin.set_fact:
set_fact:
scm_version: "{{ get_archive.checksum_src }}"
tags:
- update_archive
- name: Repository Version
ansible.builtin.debug:
debug:
msg: "Repository Version {{ scm_version }}"
tags:
- update_git
@@ -193,59 +183,60 @@
additional_collections_env:
# These environment variables are used for installing collections, in addition to galaxy_task_env
# setting the collections paths silences warnings
ANSIBLE_COLLECTIONS_PATHS: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_collections"
ANSIBLE_COLLECTIONS_PATHS: "{{projects_root}}/.__awx_cache/{{local_path}}/stage/requirements_collections"
# Put the local tmp directory in same volume as collection destination
# otherwise, files cannot be moved across volumes and will cause an error
ANSIBLE_LOCAL_TEMP: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/tmp"
ANSIBLE_LOCAL_TEMP: "{{projects_root}}/.__awx_cache/{{local_path}}/stage/tmp"
tasks:
- name: Check content sync settings
when: not roles_enabled | bool and not collections_enabled | bool
tags:
- install_roles
- install_collections
block:
- name: Warn about disabled content sync
ansible.builtin.debug:
- debug:
msg: >
Collection and role syncing disabled. Check the AWX_ROLES_ENABLED and
AWX_COLLECTIONS_ENABLED settings and Galaxy credentials on the project's organization.
- name: End play due to disabled content sync
ansible.builtin.meta: end_play
- name: Fetch galaxy roles from requirements.(yml/yaml)
ansible.builtin.command: >
- meta: end_play
when: not roles_enabled|bool and not collections_enabled|bool
tags:
- install_roles
- install_collections
- name: fetch galaxy roles from requirements.(yml/yaml)
command: >
ansible-galaxy role install -r {{ item }}
--roles-path {{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_roles
--roles-path {{projects_root}}/.__awx_cache/{{local_path}}/stage/requirements_roles
{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
args:
chdir: "{{ project_path | quote }}"
chdir: "{{project_path|quote}}"
register: galaxy_result
with_fileglob:
- "{{ project_path | quote }}/roles/requirements.yaml"
- "{{ project_path | quote }}/roles/requirements.yml"
- "{{project_path|quote}}/roles/requirements.yaml"
- "{{project_path|quote}}/roles/requirements.yml"
changed_when: "'was installed successfully' in galaxy_result.stdout"
environment: "{{ galaxy_task_env }}"
when: roles_enabled | bool
when: roles_enabled|bool
tags:
- install_roles
- name: Fetch galaxy collections from collections/requirements.(yml/yaml)
ansible.builtin.command: >
- name: fetch galaxy collections from collections/requirements.(yml/yaml)
command: >
ansible-galaxy collection install -r {{ item }}
--collections-path {{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_collections
--collections-path {{projects_root}}/.__awx_cache/{{local_path}}/stage/requirements_collections
{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
args:
chdir: "{{ project_path | quote }}"
chdir: "{{project_path|quote}}"
register: galaxy_collection_result
with_fileglob:
- "{{ project_path | quote }}/collections/requirements.yaml"
- "{{ project_path | quote }}/collections/requirements.yml"
- "{{ project_path | quote }}/requirements.yaml"
- "{{ project_path | quote }}/requirements.yml"
- "{{project_path|quote}}/collections/requirements.yaml"
- "{{project_path|quote}}/collections/requirements.yml"
- "{{project_path|quote}}/requirements.yaml"
- "{{project_path|quote}}/requirements.yml"
changed_when: "'Installing ' in galaxy_collection_result.stdout"
environment: "{{ additional_collections_env | combine(galaxy_task_env) }}"
when:
- "ansible_version.full is version_compare('2.9', '>=')"
- collections_enabled | bool
- collections_enabled|bool
tags:
- install_collections

View File

@@ -11,13 +11,11 @@ from datetime import timedelta
if "pytest" in sys.modules:
IS_TESTING_MODE = True
from unittest import mock
with mock.patch('__main__.__builtins__.dir', return_value=[]):
import ldap
else:
IS_TESTING_MODE = False
import ldap
@@ -131,13 +129,6 @@ NAMED_URL_GRAPH = {}
# Note: This setting may be overridden by database settings.
SCHEDULE_MAX_JOBS = 10
# Bulk API related settings
# Maximum number of jobs that can be launched in 1 bulk job
BULK_JOB_MAX_LAUNCH = 100
# Maximum number of hosts that can be created in 1 bulk host create
BULK_HOST_MAX_CREATE = 100
SITE_ID = 1
# Make this unique, and don't share it with anybody.
@@ -938,11 +929,6 @@ AWX_RUNNER_OMIT_ENV_FILES = True
# Allow ansible-runner to save ansible output (may cause performance issues)
AWX_RUNNER_SUPPRESS_OUTPUT_FILE = True
# https://github.com/ansible/ansible-runner/pull/1191/files
# Interval in seconds between the last message and keep-alive messages that
# ansible-runner will send
AWX_RUNNER_KEEPALIVE_SECONDS = 0
# Delete completed work units in receptor
RECEPTOR_RELEASE_WORK = True

View File

@@ -169,45 +169,3 @@ def get_or_create_org_with_default_galaxy_cred(**kwargs):
else:
logger.debug("Could not find default Ansible Galaxy credential to add to org")
return org
def get_external_account(user):
account_type = None
# Previously this method also checked for active configuration which meant that if a user logged in from LDAP
# and then LDAP was no longer configured it would "convert" the user from an LDAP account_type to none.
# This did have one benefit that if a login type was removed intentionally the user could be given a username and password.
# But it had a limitation that the user would have to have an active session (or an admin would have to go set a temp password).
# It also led to the side effect that if LDAP was ever reconfigured the user would convert back to LDAP but still have a local password.
# That local password could then be used to bypass LDAP authentication.
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if user.social_auth.all():
account_type = "social"
if user.enterprise_auth.all():
account_type = "enterprise"
return account_type
def is_remote_auth_enabled():
from django.conf import settings
# Append LDAP, Radius, TACACS+ and SAML options
settings_that_turn_on_remote_auth = [
'AUTH_LDAP_SERVER_URI',
'SOCIAL_AUTH_SAML_ENABLED_IDPS',
'RADIUS_SERVER',
'TACACSPLUS_HOST',
]
# Also include any SOCIAL_AUTH_*_KEY (except SAML)
for social_auth_key in dir(settings):
if social_auth_key.startswith('SOCIAL_AUTH_') and social_auth_key.endswith('_KEY') and 'SAML' not in social_auth_key:
settings_that_turn_on_remote_auth.append(social_auth_key)
return any(getattr(settings, s, None) for s in settings_that_turn_on_remote_auth)

View File

@@ -2,22 +2,9 @@ import pytest
from collections import Counter
from django.core.exceptions import FieldError
from django.utils.timezone import now
from django.test.utils import override_settings
from awx.main.models import Credential, CredentialType, Organization, Team, User
from awx.sso.common import (
get_orgs_by_ids,
reconcile_users_org_team_mappings,
create_org_and_teams,
get_or_create_org_with_default_galaxy_cred,
is_remote_auth_enabled,
get_external_account,
)
class MicroMockObject(object):
def all(self):
return True
from awx.sso.common import get_orgs_by_ids, reconcile_users_org_team_mappings, create_org_and_teams, get_or_create_org_with_default_galaxy_cred
@pytest.mark.django_db
@@ -291,87 +278,3 @@ class TestCommonFunctions:
for o in Organization.objects.all():
assert o.galaxy_credentials.count() == 0
@pytest.mark.parametrize(
"enable_ldap, enable_social, enable_enterprise, expected_results",
[
(False, False, False, None),
(True, False, False, 'ldap'),
(True, True, False, 'social'),
(True, True, True, 'enterprise'),
(False, True, True, 'enterprise'),
(False, False, True, 'enterprise'),
(False, True, False, 'social'),
],
)
def test_get_external_account(self, enable_ldap, enable_social, enable_enterprise, expected_results):
try:
user = User.objects.get(username="external_tester")
except User.DoesNotExist:
user = User(username="external_tester")
user.set_unusable_password()
user.save()
if enable_ldap:
user.profile.ldap_dn = 'test.dn'
if enable_social:
from social_django.models import UserSocialAuth
social_auth, _ = UserSocialAuth.objects.get_or_create(
uid='667ec049-cdf3-45d0-a4dc-0465f7505954',
provider='oidc',
extra_data={},
user_id=user.id,
)
user.social_auth.set([social_auth])
if enable_enterprise:
from awx.sso.models import UserEnterpriseAuth
enterprise_auth = UserEnterpriseAuth(user=user, provider='tacacs+')
enterprise_auth.save()
assert get_external_account(user) == expected_results
@pytest.mark.parametrize(
"setting, expected",
[
# Set none of the social auth settings
('JUNK_SETTING', False),
# Set the hard coded settings
('AUTH_LDAP_SERVER_URI', True),
('SOCIAL_AUTH_SAML_ENABLED_IDPS', True),
('RADIUS_SERVER', True),
('TACACSPLUS_HOST', True),
# Set some SOCIAL_AUTH_*_KEY settings
('SOCIAL_AUTH_AZUREAD_OAUTH2_KEY', True),
('SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY', True),
('SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY', True),
('SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY', True),
('SOCIAL_AUTH_GITHUB_KEY', True),
('SOCIAL_AUTH_GITHUB_ORG_KEY', True),
('SOCIAL_AUTH_GITHUB_TEAM_KEY', True),
('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', True),
('SOCIAL_AUTH_OIDC_KEY', True),
# Try a hypothetical future one
('SOCIAL_AUTH_GIBBERISH_KEY', True),
# Do a SAML one
('SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', False),
],
)
def test_is_remote_auth_enabled(self, setting, expected):
with override_settings(**{setting: True}):
assert is_remote_auth_enabled() == expected
@pytest.mark.parametrize(
"key_one, key_one_value, key_two, key_two_value, expected",
[
('JUNK_SETTING', True, 'JUNK2_SETTING', True, False),
('AUTH_LDAP_SERVER_URI', True, 'SOCIAL_AUTH_AZUREAD_OAUTH2_KEY', True, True),
('JUNK_SETTING', True, 'SOCIAL_AUTH_AZUREAD_OAUTH2_KEY', True, True),
('AUTH_LDAP_SERVER_URI', False, 'SOCIAL_AUTH_AZUREAD_OAUTH2_KEY', False, False),
],
)
def test_is_remote_auth_enabled_multiple_keys(self, key_one, key_one_value, key_two, key_two_value, expected):
with override_settings(**{key_one: key_one_value}):
with override_settings(**{key_two: key_two_value}):
assert is_remote_auth_enabled() == expected

View File

@@ -28,7 +28,7 @@ import { getLanguageWithoutRegionCode } from 'util/language';
import Metrics from 'screens/Metrics';
import SubscriptionEdit from 'screens/Setting/Subscription/SubscriptionEdit';
import useTitle from 'hooks/useTitle';
import { dynamicActivate } from './i18nLoader';
import { dynamicActivate, locales } from './i18nLoader';
import getRouteConfig from './routeConfig';
import { SESSION_REDIRECT_URL } from './constants';
@@ -139,15 +139,16 @@ export function ProtectedRoute({ children, ...rest }) {
function App() {
const history = useHistory();
const { hash, search, pathname } = useLocation();
const searchParams = Object.fromEntries(new URLSearchParams(search));
const pseudolocalization =
searchParams.pseudolocalization === 'true' || false;
const language =
searchParams.lang || getLanguageWithoutRegionCode(navigator) || 'en';
let language = getLanguageWithoutRegionCode(navigator);
if (!Object.keys(locales).includes(language)) {
// If there isn't a string catalog available for the browser's
// preferred language, default to one that has strings.
language = 'en';
}
useEffect(() => {
dynamicActivate(language, pseudolocalization);
}, [language, pseudolocalization]);
dynamicActivate(language);
}, [language]);
useTitle();

View File

@@ -8,7 +8,6 @@ class InstanceGroups extends Base {
this.associateInstance = this.associateInstance.bind(this);
this.disassociateInstance = this.disassociateInstance.bind(this);
this.readInstanceOptions = this.readInstanceOptions.bind(this);
this.readInstanceGroupOptions = this.readInstanceGroupOptions.bind(this);
this.readInstances = this.readInstances.bind(this);
this.readJobs = this.readJobs.bind(this);
}
@@ -34,10 +33,6 @@ class InstanceGroups extends Base {
return this.http.options(`${this.baseUrl}${id}/instances/`);
}
readInstanceGroupOptions(id) {
return this.http.options(`${this.baseUrl}${id}/`);
}
readJobs(id) {
return this.http.get(`${this.baseUrl}${id}/jobs/`);
}

View File

@@ -6,12 +6,7 @@ import { useField } from 'formik';
import styled from 'styled-components';
import { Split, SplitItem, Button, Modal } from '@patternfly/react-core';
import { ExpandArrowsAltIcon } from '@patternfly/react-icons';
import {
yamlToJson,
jsonToYaml,
isJsonString,
parseVariableField,
} from 'util/yaml';
import { yamlToJson, jsonToYaml, isJsonString } from 'util/yaml';
import { CheckboxField } from '../FormField';
import MultiButtonToggle from '../MultiButtonToggle';
import CodeEditor from './CodeEditor';
@@ -42,24 +37,36 @@ function VariablesField({
// track focus manually, because the Code Editor library doesn't wire
// into Formik completely
const [shouldValidate, setShouldValidate] = useState(false);
const [mode, setMode] = useState(initialMode || YAML_MODE);
const validate = useCallback(
(value) => {
if (!shouldValidate) {
return undefined;
}
try {
parseVariableField(value);
if (mode === YAML_MODE) {
yamlToJson(value);
} else {
JSON.parse(value);
}
} catch (error) {
return error.message;
}
return undefined;
},
[shouldValidate]
[shouldValidate, mode]
);
const [field, meta, helpers] = useField({ name, validate });
const [mode, setMode] = useState(() =>
isJsonString(field.value) ? JSON_MODE : initialMode || YAML_MODE
);
useEffect(() => {
if (isJsonString(field.value)) {
// mode's useState above couldn't be initialized to JSON_MODE because
// the field value had to be defined below it
setMode(JSON_MODE);
onModeChange(JSON_MODE);
helpers.setValue(JSON.stringify(JSON.parse(field.value), null, 2));
}
}, []); // eslint-disable-line react-hooks/exhaustive-deps
useEffect(
() => {

View File

@@ -6,7 +6,6 @@ import {
InventoriesAPI,
ProjectsAPI,
OrganizationsAPI,
InstanceGroupsAPI,
} from 'api';
export default function getResourceAccessConfig() {
@@ -211,32 +210,5 @@ export default function getResourceAccessConfig() {
fetchItems: (queryParams) => OrganizationsAPI.read(queryParams),
fetchOptions: () => OrganizationsAPI.readOptions(),
},
{
selectedResource: 'Instance Groups',
label: t`Instance Groups`,
searchColumns: [
{
name: t`Name`,
key: 'name__icontains',
isDefault: true,
},
{
name: t`Created By (Username)`,
key: 'created_by__username__icontains',
},
{
name: t`Modified By (Username)`,
key: 'modified_by__username__icontains',
},
],
sortColumns: [
{
name: t`Name`,
key: 'name',
},
],
fetchItems: (queryParams) => InstanceGroupsAPI.read(queryParams),
fetchOptions: () => InstanceGroupsAPI.readOptions(),
},
];
}

View File

@@ -1,3 +1,4 @@
/* eslint-disable-next-line import/prefer-default-export */
export const JOB_TYPE_URL_SEGMENTS = {
job: 'playbook',
project_update: 'project',

View File

@@ -27,21 +27,8 @@ i18n.loadLocaleData({
* We do a dynamic import of just the catalog that we need
* @param locale any locale string
*/
export async function dynamicActivate(locale, pseudolocalization = false) {
export async function dynamicActivate(locale) {
const { messages } = await import(`./locales/${locale}/messages`);
if (pseudolocalization) {
Object.keys(messages).forEach((key) => {
if (Array.isArray(messages[key])) {
// t`Foo ${param}` -> ["Foo ", ['param']] => [">>", "Foo ", ['param'], "<<"]
messages[key] = ['»', ...messages[key], '«'];
} else {
// simple string
messages[key] = `»${messages[key]}«`;
}
});
}
i18n.load(locale, messages);
i18n.activate(locale);
}

View File

@@ -184,6 +184,7 @@ function getRouteConfig(userProfile = {}) {
deleteRouteGroup('settings');
deleteRoute('management_jobs');
if (userProfile?.isOrgAdmin) return routeConfig;
deleteRoute('instance_groups');
deleteRoute('topology_view');
deleteRoute('instances');
if (!userProfile?.isNotificationAdmin) deleteRoute('notification_templates');

View File

@@ -127,7 +127,6 @@ describe('getRouteConfig', () => {
'/teams',
'/credential_types',
'/notification_templates',
'/instance_groups',
'/applications',
'/execution_environments',
]);
@@ -151,7 +150,6 @@ describe('getRouteConfig', () => {
'/users',
'/teams',
'/credential_types',
'/instance_groups',
'/applications',
'/execution_environments',
]);
@@ -175,7 +173,6 @@ describe('getRouteConfig', () => {
'/users',
'/teams',
'/credential_types',
'/instance_groups',
'/applications',
'/execution_environments',
]);
@@ -204,7 +201,6 @@ describe('getRouteConfig', () => {
'/teams',
'/credential_types',
'/notification_templates',
'/instance_groups',
'/applications',
'/execution_environments',
]);

View File

@@ -22,19 +22,14 @@ import { CredentialsAPI } from 'api';
import CredentialDetail from './CredentialDetail';
import CredentialEdit from './CredentialEdit';
const unacceptableCredentialTypes = [
'centrify_vault_kv',
'aim',
'conjur',
'hashivault_kv',
'hashivault_ssh',
'azure_kv',
'thycotic_dsv',
'thycotic_tss',
'galaxy_api_token',
'insights',
'registry',
'scm',
const jobTemplateCredentialTypes = [
'machine',
'cloud',
'net',
'ssh',
'vault',
'kubernetes',
'cryptography',
];
function Credential({ setBreadcrumb }) {
@@ -91,10 +86,7 @@ function Credential({ setBreadcrumb }) {
id: 1,
},
];
if (
!unacceptableCredentialTypes.includes(credential?.kind) &&
credential !== null
) {
if (jobTemplateCredentialTypes.includes(credential?.kind)) {
tabsArray.push({
name: t`Job Templates`,
link: `/credentials/${id}/job_templates`,
@@ -123,14 +115,12 @@ function Credential({ setBreadcrumb }) {
</PageSection>
);
}
if (hasContentLoading) {
return <ContentLoading />;
}
return (
<PageSection>
<Card>
{showCardHeader && <RoutedTabs tabsArray={tabsArray} />}
{hasContentLoading && <ContentLoading />}
{!hasContentLoading && credential && (
<Switch>
<Redirect

View File

@@ -8,7 +8,6 @@ import {
} from '../../../testUtils/enzymeHelpers';
import mockMachineCredential from './shared/data.machineCredential.json';
import mockSCMCredential from './shared/data.scmCredential.json';
import mockCyberArkCredential from './shared/data.cyberArkCredential.json';
import Credential from './Credential';
jest.mock('../../api');
@@ -22,11 +21,6 @@ jest.mock('react-router-dom', () => ({
describe('<Credential />', () => {
let wrapper;
afterEach(() => {
jest.clearAllMocks();
wrapper.unmount();
});
test('initially renders user-based machine credential successfully', async () => {
CredentialsAPI.readDetail.mockResolvedValueOnce({
@@ -67,19 +61,6 @@ describe('<Credential />', () => {
});
});
test('should not render job template tab', async () => {
CredentialsAPI.readDetail.mockResolvedValueOnce({
data: { ...mockCyberArkCredential, kind: 'registry' },
});
const expectedTabs = ['Back to Credentials', 'Details', 'Access'];
await act(async () => {
wrapper = mountWithContexts(<Credential setBreadcrumb={() => {}} />);
});
wrapper.find('RoutedTabs li').forEach((tab, index) => {
expect(tab.text()).toEqual(expectedTabs[index]);
});
});
test('should show content error when user attempts to navigate to erroneous route', async () => {
const history = createMemoryHistory({
initialEntries: ['/credentials/2/foobar'],
@@ -104,4 +85,3 @@ describe('<Credential />', () => {
await waitForElement(wrapper, 'ContentError', (el) => el.length === 1);
});
});
describe('<Credential> should not show job template tab', () => {});

View File

@@ -21,11 +21,9 @@ function ContainerGroupEdit({ instanceGroup }) {
result: initialPodSpec,
} = useRequest(
useCallback(async () => {
const { data } = await InstanceGroupsAPI.readInstanceGroupOptions(
instanceGroup.id
);
return data.actions.PUT.pod_spec_override.default;
}, [instanceGroup.id]),
const { data } = await InstanceGroupsAPI.readOptions();
return data.actions.POST.pod_spec_override.default;
}, []),
{
initialPodSpec: {},
}

View File

@@ -48,7 +48,6 @@ function InventorySourceDetail({ inventorySource }) {
source,
source_path,
source_vars,
scm_branch,
update_cache_timeout,
update_on_launch,
verbosity,
@@ -234,11 +233,6 @@ function InventorySourceDetail({ inventorySource }) {
helpText={helpText.subFormVerbosityFields}
value={VERBOSITY()[verbosity]}
/>
<Detail
label={t`Source Control Branch`}
helpText={helpText.sourceControlBranch}
value={scm_branch}
/>
<Detail
label={t`Cache timeout`}
value={`${update_cache_timeout} ${t`seconds`}`}

View File

@@ -152,7 +152,6 @@ const getInventoryHelpTextStrings = () => ({
},
enabledVariableField: t`Retrieve the enabled state from the given dict of host variables.
The enabled variable may be specified using dot notation, e.g: 'foo.bar'`,
sourceControlBranch: t`Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true.`,
enabledValue: t`This field is ignored unless an Enabled Variable is set. If the enabled variable matches this value, the host will be enabled on import.`,
hostFilter: t`Regular expression where only matching host names will be imported. The filter is applied as a post-processing step after any inventory plugin filters are applied.`,
sourceVars: (docsBaseUrl, source) => {

View File

@@ -71,7 +71,6 @@ const InventorySourceFormFields = ({
source_project: null,
source_script: null,
source_vars: '---\n',
scm_branch: null,
update_cache_timeout: 0,
update_on_launch: false,
verbosity: 1,
@@ -249,7 +248,6 @@ const InventorySourceForm = ({
source_project: source?.summary_fields?.source_project || null,
source_script: source?.summary_fields?.source_script || null,
source_vars: source?.source_vars || '---\n',
scm_branch: source?.scm_branch || '',
update_cache_timeout: source?.update_cache_timeout || 0,
update_on_launch: source?.update_on_launch || false,
verbosity: source?.verbosity || 1,

View File

@@ -13,7 +13,6 @@ import { required } from 'util/validators';
import CredentialLookup from 'components/Lookup/CredentialLookup';
import ProjectLookup from 'components/Lookup/ProjectLookup';
import Popover from 'components/Popover';
import FormField from 'components/FormField';
import {
OptionsField,
SourceVarsField,
@@ -37,6 +36,7 @@ const SCMSubForm = ({ autoPopulateProject }) => {
name: 'source_path',
validate: required(t`Select a value for this field`),
});
const { error: sourcePathError, request: fetchSourcePath } = useRequest(
useCallback(async (projectId) => {
const { data } = await ProjectsAPI.readInventories(projectId);
@@ -44,6 +44,7 @@ const SCMSubForm = ({ autoPopulateProject }) => {
}, []),
[]
);
useEffect(() => {
if (projectMeta.initialValue) {
fetchSourcePath(projectMeta.initialValue.id);
@@ -57,7 +58,6 @@ const SCMSubForm = ({ autoPopulateProject }) => {
(value) => {
setFieldValue('source_project', value);
setFieldTouched('source_project', true, false);
setFieldValue('scm_branch', '', false);
if (sourcePathField.value) {
setFieldValue('source_path', '');
setFieldTouched('source_path', false);
@@ -68,6 +68,7 @@ const SCMSubForm = ({ autoPopulateProject }) => {
},
[fetchSourcePath, setFieldValue, setFieldTouched, sourcePathField.value]
);
const handleCredentialUpdate = useCallback(
(value) => {
setFieldValue('credential', value);
@@ -75,17 +76,9 @@ const SCMSubForm = ({ autoPopulateProject }) => {
},
[setFieldValue, setFieldTouched]
);
return (
<>
{projectField.value?.allow_override && (
<FormField
id="project-scm-branch"
name="scm_branch"
type="text"
label={t`Source Control Branch/Tag/Commit`}
tooltip={helpText.sourceControlBranch}
/>
)}
<CredentialLookup
credentialTypeKind="cloud"
label={t`Credential`}

View File

@@ -208,7 +208,6 @@ function AWXLogin({ alt, isAuthenticated }) {
>
{(formik) => (
<LoginForm
autoComplete="off"
data-cy="login-form"
className={authError ? 'pf-m-error' : ''}
helperText={helperText}

View File

@@ -115,14 +115,6 @@ describe('<Login />', () => {
);
});
test.only('form has autocomplete off', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(<AWXLogin isAuthenticated={() => false} />);
});
expect(wrapper.find('form[autoComplete="off"]').length).toBe(1);
});
test('custom logo renders Brand component with correct src and alt', async () => {
let wrapper;
await act(async () => {

View File

@@ -150,11 +150,6 @@ function JobsEdit() {
type={options?.SCHEDULE_MAX_JOBS ? 'number' : undefined}
isRequired={Boolean(options?.SCHEDULE_MAX_JOBS)}
/>
<InputField
name="AWX_RUNNER_KEEPALIVE_SECONDS"
config={jobs.AWX_RUNNER_KEEPALIVE_SECONDS}
type="number"
/>
<InputField
name="DEFAULT_JOB_TIMEOUT"
config={jobs.DEFAULT_JOB_TIMEOUT}

View File

@@ -344,16 +344,6 @@
"category_slug": "jobs",
"default": 10
},
"AWX_RUNNER_KEEPALIVE_SECONDS": {
"type": "integer",
"required": true,
"label": "K8S Ansible Runner Keep-Alive Message Interval",
"help_text": "Only applies to K8S deployments and container_group jobs. If not 0, send a message every so-many seconds to keep connection open.",
"category": "Jobs",
"category_slug": "jobs",
"placeholder": 240,
"default": 0
},
"AWX_ANSIBLE_CALLBACK_PLUGINS": {
"type": "list",
"required": false,
@@ -4108,15 +4098,6 @@
"category_slug": "jobs",
"defined_in_file": false
},
"AWX_RUNNER_KEEPALIVE_SECONDS": {
"type": "integer",
"label": "K8S Ansible Runner Keep-Alive Message Interval",
"help_text": "Only applies to K8S deployments and container_group jobs. If not 0, send a message every so-many seconds to keep connection open.",
"category": "Jobs",
"category_slug": "jobs",
"placeholder": 240,
"default": 0
},
"AWX_ANSIBLE_CALLBACK_PLUGINS": {
"type": "list",
"label": "Ansible Callback Plugins",

View File

@@ -51,7 +51,6 @@
"STDOUT_MAX_BYTES_DISPLAY":1048576,
"EVENT_STDOUT_MAX_BYTES_DISPLAY":1024,
"SCHEDULE_MAX_JOBS":10,
"AWX_RUNNER_KEEPALIVE_SECONDS": 0,
"AWX_ANSIBLE_CALLBACK_PLUGINS":[],
"DEFAULT_JOB_TIMEOUT":0,
"DEFAULT_JOB_IDLE_TIMEOUT":0,

View File

@@ -19,7 +19,6 @@
"STDOUT_MAX_BYTES_DISPLAY": 1048576,
"EVENT_STDOUT_MAX_BYTES_DISPLAY": 1024,
"SCHEDULE_MAX_JOBS": 10,
"AWX_RUNNER_KEEPALIVE_SECONDS": 0,
"AWX_ANSIBLE_CALLBACK_PLUGINS": [],
"DEFAULT_JOB_TIMEOUT": 0,
"DEFAULT_JOB_IDLE_TIMEOUT": 0,

View File

@@ -6,8 +6,6 @@ action_groups:
- ad_hoc_command_cancel
- ad_hoc_command_wait
- application
- bulk_job_launch
- bulk_host_create
- controller_meta
- credential_input_source
- credential

View File

@@ -196,7 +196,7 @@ class LookupModule(LookupBase):
if isinstance(rule[field_name], int):
rule[field_name] = [rule[field_name]]
# If it's not a list, we need to split it into a list
if not isinstance(rule[field_name], list):
if isinstance(rule[field_name], list):
rule[field_name] = rule[field_name].split(',')
for value in rule[field_name]:
# If they have a list of strs we want to strip the str in case it's space delimited
@@ -210,8 +210,7 @@ class LookupModule(LookupBase):
def process_list(self, field_name, rule, valid_list, rule_number):
return_values = []
# If it's not a list, we need to split it into a list
if not isinstance(rule[field_name], list):
if isinstance(rule[field_name], list):
rule[field_name] = rule[field_name].split(',')
for value in rule[field_name]:
value = value.strip()

View File

@@ -1,102 +0,0 @@
#!/usr/bin/python
# coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bulk_host_create
author: "Seth Foster (@fosterseth)"
short_description: Bulk host create in Automation Platform Controller
description:
- Single-request bulk host creation in Automation Platform Controller.
- Provides a way to add many hosts at once to an inventory in Controller.
options:
hosts:
description:
- List of hosts to add to inventory.
required: True
type: list
elements: dict
suboptions:
name:
description:
- The name to use for the host.
type: str
required: True
description:
description:
- The description to use for the host.
type: str
enabled:
description:
- If the host should be enabled.
type: bool
variables:
description:
- Variables to use for the host.
type: dict
instance_id:
description:
- instance_id to use for the host.
type: str
inventory:
description:
- Inventory name or ID the hosts should be made a member of.
required: True
type: str
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- name: Bulk host create
bulk_host_create:
inventory: 1
hosts:
- name: foobar.org
- name: 127.0.0.1
'''
from ..module_utils.controller_api import ControllerAPIModule
import json
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
hosts=dict(required=True, type='list', elements='dict'),
inventory=dict(required=True, type='str'),
)
# Create a module for ourselves
module = ControllerAPIModule(argument_spec=argument_spec)
# Extract our parameters
inv_name = module.params.get('inventory')
hosts = module.params.get('hosts')
for h in hosts:
if 'variables' in h:
h['variables'] = json.dumps(h['variables'])
inv_id = module.resolve_name_to_id('inventories', inv_name)
# Launch the jobs
result = module.post_endpoint("bulk/host_create", data={"inventory": inv_id, "hosts": hosts})
if result['status_code'] != 201:
module.fail_json(msg="Failed to create hosts, see response for details", response=result)
module.json_output['changed'] = True
module.exit_json(**module.json_output)
if __name__ == '__main__':
main()
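For reference, a minimal sketch of the single-request call this module wraps, based on the endpoint and payload visible above (the controller URL, token, and inventory ID below are hypothetical placeholders, and the example assumes the requests library):
import requests
CONTROLLER = "https://awx.example.com"  # hypothetical controller URL
HEADERS = {"Authorization": "Bearer REPLACE_ME"}  # hypothetical OAuth2 token
# Host variables are passed as a JSON-encoded string, mirroring the json.dumps() above.
payload = {
    "inventory": 1,
    "hosts": [
        {"name": "foobar.org"},
        {"name": "127.0.0.1", "variables": "{\"food\": \"carrot\"}"},
    ],
}
resp = requests.post(f"{CONTROLLER}/api/v2/bulk/host_create/", json=payload, headers=HEADERS)
# The module treats any status other than 201 as a failure.
assert resp.status_code == 201, resp.text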

View File

@@ -1,281 +0,0 @@
#!/usr/bin/python
# coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bulk_job_launch
author: "Seth Foster (@fosterseth)"
short_description: Bulk job launch in Automation Platform Controller
description:
- Single-request bulk job launch in Automation Platform Controller.
- Creates a workflow where each node corresponds to an item specified in the jobs option.
- Any options specified at the top level will be inherited by the launched jobs (if prompt on launch is enabled for those fields).
- Provides a way to submit many jobs at once to Controller.
options:
jobs:
description:
- List of jobs to create.
required: True
type: list
elements: dict
suboptions:
unified_job_template:
description:
- Job template ID to use when launching.
type: int
required: True
inventory:
description:
- Inventory ID applied as a prompt, if job template prompts for inventory
type: int
execution_environment:
description:
- Execution environment ID applied as a prompt, if job template prompts for execution environments
type: int
instance_groups:
description:
- Instance group IDs applied as a prompt, if job template prompts for instance groups
type: list
elements: int
credentials:
description:
- Credential IDs applied as a prompt, if job template prompts for credentials
type: list
elements: int
labels:
description:
- Label IDs to use for the job, if job template prompts for labels
type: list
elements: int
extra_data:
description:
- Extra variables to apply at launch time, if job template prompts for extra variables
type: dict
default: {}
diff_mode:
description:
- Show the changes made by Ansible tasks where supported
type: bool
verbosity:
description:
- Verbosity level for this ad hoc command run
type: int
choices: [ 0, 1, 2, 3, 4, 5 ]
scm_branch:
description:
- SCM branch applied as a prompt, if job template prompts for SCM branch
- This is only applicable if the project allows for branch override
type: str
job_type:
description:
- Job type applied as a prompt, if job template prompts for job type
type: str
choices:
- 'run'
- 'check'
job_tags:
description:
- Job tags applied as a prompt, if job template prompts for job tags
type: str
skip_tags:
description:
- Tags to skip, applied as a prompt, if job template prompts for job tags
type: str
limit:
description:
- Limit to act on, applied as a prompt, if job template prompts for limit
type: str
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook, if job template prompts for forks
type: int
job_slice_count:
description:
- The number of jobs to slice into at runtime, if job template prompts for job slices.
- Will cause the Job Template to launch a workflow if value is greater than 1.
type: int
default: '1'
identifier:
description:
- Identifier for the resulting workflow node that represents this job
type: str
timeout:
description:
- Maximum time in seconds to wait for a job to finish (server-side), if job template prompts for timeout.
type: int
name:
description:
- The name of the bulk job that is created
required: False
type: str
description:
description:
- Optional description of this bulk job.
type: str
organization:
description:
- If not provided, will use the organization the user is in.
- Required if the user belongs to more than one organization.
- Affects who can see the resulting bulk job.
type: str
inventory:
description:
- Inventory name or ID to use for the jobs run within the bulk job, only used if prompt for inventory is set.
type: str
scm_branch:
description:
- A specific branch of the SCM project to run the template on.
- This is only applicable if the project allows for branch override.
type: str
extra_vars:
description:
- Any extra vars required to launch the job.
- Extends the extra_data field at the individual job level.
type: dict
limit:
description:
- Limit to use for the bulk job.
type: str
job_tags:
description:
- A comma-separated list of playbook tags to specify what parts of the playbooks should be executed.
type: str
skip_tags:
description:
- A comma-separated list of playbook tags to skip certain tasks or parts of the playbooks to be executed.
type: str
wait:
description:
- Wait for the bulk job to complete.
default: True
type: bool
interval:
description:
- The interval to request an update from the controller.
required: False
default: 2
type: float
extends_documentation_fragment: awx.awx.auth
'''
RETURN = '''
job_info:
description: dictionary containing information about the bulk job executed
returned: If bulk job launched
type: dict
'''
EXAMPLES = '''
- name: Launch bulk jobs
bulk_job_launch:
name: My Bulk Job Launch
jobs:
- unified_job_template: 7
skip_tags: foo
- unified_job_template: 10
limit: foo
extra_data:
food: carrot
color: orange
limit: bar
extra_vars: # these override / extend extra_data at the job level
food: grape
animal: owl
organization: Default
inventory: Demo Inventory
- name: Launch bulk jobs with lookup plugin
bulk_job_launch:
name: My Bulk Job Launch
jobs:
- unified_job_template: 7
- unified_job_template: "{{ lookup('awx.awx.controller_api', 'job_templates', query_params={'name': 'Demo Job Template'},
return_ids=True, expect_one=True) }}"
'''
from ..module_utils.controller_api import ControllerAPIModule
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
jobs=dict(required=True, type='list', elements='dict'),
name=dict(),
description=dict(),
organization=dict(type='str'),
inventory=dict(type='str'),
limit=dict(),
scm_branch=dict(),
extra_vars=dict(type='dict'),
job_tags=dict(),
skip_tags=dict(),
wait=dict(required=False, default=True, type='bool'),
interval=dict(required=False, default=2.0, type='float'),
)
# Create a module for ourselves
module = ControllerAPIModule(argument_spec=argument_spec)
post_data_names = (
'jobs',
'name',
'description',
'limit',
'scm_branch',
'extra_vars',
'job_tags',
'skip_tags',
)
post_data = {}
for p in post_data_names:
val = module.params.get(p)
if val:
post_data[p] = val
# Resolve name to ID for related resources
# Do not resolve name for "jobs" suboptions, for optimization
org_name = module.params.get('organization')
if org_name:
post_data['organization'] = module.resolve_name_to_id('organizations', org_name)
inv_name = module.params.get('inventory')
if inv_name:
post_data['inventory'] = module.resolve_name_to_id('inventories', inv_name)
# Extract our parameters
wait = module.params.get('wait')
timeout = module.params.get('timeout')
interval = module.params.get('interval')
name = module.params.get('name')
# Launch the jobs
result = module.post_endpoint("bulk/job_launch", data=post_data)
if result['status_code'] != 201:
module.fail_json(msg="Failed to launch bulk jobs, see response for details", response=result)
module.json_output['changed'] = True
module.json_output['id'] = result['json']['id']
module.json_output['status'] = result['json']['status']
# This is for backwards compatibility
module.json_output['job_info'] = result['json']
if not wait:
module.exit_json(**module.json_output)
# Invoke wait function
module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Bulk Job Launch', timeout=timeout, interval=interval)
module.exit_json(**module.json_output)
if __name__ == '__main__':
main()
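Similarly, a minimal sketch of the raw bulk/job_launch request this module posts, with a simple poll of the returned workflow URL standing in for the module's wait_on_url helper (URL, token, and job template IDs are hypothetical placeholders):
import time
import requests
CONTROLLER = "https://awx.example.com"  # hypothetical controller URL
HEADERS = {"Authorization": "Bearer REPLACE_ME"}  # hypothetical OAuth2 token
payload = {
    "name": "My Bulk Job Launch",
    "jobs": [
        {"unified_job_template": 7, "skip_tags": "foo"},
        {"unified_job_template": 10, "limit": "foo"},
    ],
    "extra_vars": {"animal": "owl"},
}
resp = requests.post(f"{CONTROLLER}/api/v2/bulk/job_launch/", json=payload, headers=HEADERS)
assert resp.status_code == 201, resp.text  # the module fails on anything other than 201
# Poll the workflow job URL returned in the response until it leaves a non-terminal state
# (status names here assume the usual AWX job states).
job_url = CONTROLLER + resp.json()["url"]
while requests.get(job_url, headers=HEADERS).json()["status"] in ("pending", "waiting", "running"):
    time.sleep(2)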

View File

@@ -105,11 +105,6 @@ options:
description:
- Project to use as source with scm option
type: str
scm_branch:
description:
- Inventory source SCM branch.
- Project must have branch override enabled.
type: str
state:
description:
- Desired state of the resource.
@@ -183,7 +178,6 @@ def main():
update_on_launch=dict(type='bool'),
update_cache_timeout=dict(type='int'),
source_project=dict(),
scm_branch=dict(type='str'),
notification_templates_started=dict(type="list", elements='str'),
notification_templates_success=dict(type="list", elements='str'),
notification_templates_error=dict(type="list", elements='str'),
@@ -278,7 +272,6 @@ def main():
'enabled_var',
'enabled_value',
'host_filter',
'scm_branch',
)
# Layer in all remaining optional information

View File

@@ -1,43 +0,0 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from awx.main.models import WorkflowJob
@pytest.mark.django_db
def test_bulk_job_launch(run_module, admin_user, job_template):
jobs = [dict(unified_job_template=job_template.id)]
run_module(
'bulk_job_launch',
{
'name': "foo-bulk-job",
'jobs': jobs,
'extra_vars': {'animal': 'owl'},
'limit': 'foo',
'wait': False,
},
admin_user,
)
bulk_job = WorkflowJob.objects.get(name="foo-bulk-job")
assert bulk_job.extra_vars == '{"animal": "owl"}'
assert bulk_job.limit == "foo"
@pytest.mark.django_db
def test_bulk_host_create(run_module, admin_user, inventory):
hosts = [dict(name="127.0.0.1"), dict(name="foo.dns.org")]
run_module(
'bulk_host_create',
{
'inventory': inventory.name,
'hosts': hosts,
},
admin_user,
)
resp_hosts = inventory.hosts.all().values_list('name', flat=True)
for h in hosts:
assert h['name'] in resp_hosts

View File

@@ -44,12 +44,6 @@ no_endpoint_for_module = [
'subscriptions', # Subscription deals with config/subscriptions
]
# Add modules with endpoints that are not at /api/v2
extra_endpoints = {
'bulk_job_launch': '/api/v2/bulk/job_launch/',
'bulk_host_create': '/api/v2/bulk/host_create/',
}
# Global module parameters we can ignore
ignore_parameters = ['state', 'new_name', 'update_secrets', 'copy_from']
@@ -79,8 +73,6 @@ no_api_parameter_ok = {
'user': ['new_username', 'organization'],
# workflow_approval parameters that do not apply when approving an approval node.
'workflow_approval': ['action', 'interval', 'timeout', 'workflow_job_id'],
# bulk
'bulk_job_launch': ['interval', 'wait'],
}
# When this tool was created we were not feature complete. Adding something in here indicates a module
@@ -236,10 +228,6 @@ def test_completeness(collection_import, request, admin_user, job_template, exec
user=admin_user,
expect=None,
)
for key, val in extra_endpoints.items():
endpoint_response.data[key] = val
for endpoint in endpoint_response.data.keys():
# Module names are singular and endpoints are plural so we need to convert to singular
singular_endpoint = '{0}'.format(endpoint)

View File

@@ -112,7 +112,6 @@ def test_falsy_value(run_module, admin_user, base_inventory):
# credential ? ? o o r r r r r r r o
# source_project ? ? r - - - - - - - - -
# source_path ? ? r - - - - - - - - -
# scm_branch ? ? r - - - - - - - - -
# verbosity ? ? o o o o o o o o o o
# overwrite ? ? o o o o o o o o o o
# overwrite_vars ? ? o o o o o o o o o o

View File

@@ -1,51 +0,0 @@
---
- name: Generate a random string for test
set_fact:
test_id: "{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}"
when: test_id is not defined
- name: Generate a unique name
set_fact:
bulk_inv_name: "AWX-Collection-tests-bulk_host_create-{{ test_id }}"
- name: Get our collection package
controller_meta:
register: controller_meta
- name: Generate the name of our plugin
set_fact:
plugin_name: "{{ controller_meta.prefix }}.controller_api"
- name: Create an inventory
inventory:
name: "{{ bulk_inv_name }}"
organization: Default
state: present
register: inventory_result
- name: Bulk Host Create
bulk_host_create:
hosts:
- name: "123.456.789.123"
description: "myhost1"
variables:
food: carrot
color: orange
- name: example.dns.gg
description: "myhost2"
enabled: false
inventory: "{{ bulk_inv_name }}"
register: result
- assert:
that:
- result is not failed
# cleanup
- name: Delete inventory
inventory:
name: "{{ bulk_inv_name }}"
organization: Default
state: absent

View File

@@ -1,70 +0,0 @@
---
- name: Generate a random string for test
set_fact:
test_id: "{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}"
when: test_id is not defined
- name: Generate a unique name
set_fact:
bulk_job_name: "AWX-Collection-tests-bulk_job_launch-{{ test_id }}"
- name: Get our collection package
controller_meta:
register: controller_meta
- name: Generate the name of our plugin
set_fact:
plugin_name: "{{ controller_meta.prefix }}.controller_api"
- name: Get Inventory
set_fact:
inventory_id: "{{ lookup(plugin_name, 'inventories', query_params={'name': 'Demo Inventory'}, return_ids=True ) }}"
- name: Create a Job Template
job_template:
name: "{{ bulk_job_name }}"
copy_from: "Demo Job Template"
ask_variables_on_launch: true
ask_inventory_on_launch: true
ask_skip_tags_on_launch: true
allow_simultaneous: true
state: present
register: jt_result
- name: Create Bulk Job
bulk_job_launch:
name: "{{ bulk_job_name }}"
jobs:
- unified_job_template: "{{ jt_result.id }}"
inventory: "{{ inventory_id }}"
skip_tags: "skipfoo,skipbar"
extra_data:
animal: fish
color: orange
- unified_job_template: "{{ jt_result.id }}"
extra_vars:
animal: bear
food: carrot
skip_tags: "skipbaz"
job_tags: "Hello World"
limit: "localhost"
wait: True
inventory: Demo Inventory
organization: Default
register: result
- assert:
that:
- result is not failed
- "'id' in result"
- result['job_info']['skip_tags'] == "skipbaz"
- result['job_info']['limit'] == "localhost"
- result['job_info']['job_tags'] == "Hello World"
- result['job_info']['inventory'] == {{ inventory_id }}
- "result['job_info']['extra_vars'] == '{\"animal\": \"bear\", \"food\": \"carrot\"}'"
# cleanup
- name: Delete Job Template
job_template:
name: "{{ bulk_job_name }}"
state: absent

View File

@@ -95,22 +95,6 @@
- results is failed
- "'In rule 2 end_on must either be an integer or in the format YYYY-MM-DD [HH:MM:SS]' in results.msg"
- name: Every Mondays
set_fact:
complex_rule: "{{ query(ruleset_plugin_name, '2022-04-30 10:30:45', rules=rrules, timezone='UTC' ) }}"
ignore_errors: True
register: results
vars:
rrules:
- frequency: 'day'
interval: 1
byweekday: 'monday'
- assert:
that:
- results is success
- "'DTSTART;TZID=UTC:20220430T103045 RRULE:FREQ=DAILY;BYDAY=MO;INTERVAL=1' == complex_rule"
- name: call rruleset with an invalid byweekday
set_fact:

View File

@@ -1,7 +1,6 @@
# Order matters
from .page import * # NOQA
from .base import * # NOQA
from .bulk import * # NOQA
from .access_list import * # NOQA
from .api import * # NOQA
from .authtoken import * # NOQA

View File

@@ -1,24 +0,0 @@
from awxkit.api.resources import resources
from . import base
from . import page
class Bulk(base.Base):
def get(self, **query_parameters):
request = self.connection.get(self.endpoint, query_parameters, headers={'Accept': 'application/json'})
return self.page_identity(request)
page.register_page([resources.bulk, (resources.bulk, 'get')], Bulk)
class BulkJobLaunch(base.Base):
def post(self, payload={}):
result = self.connection.post(self.endpoint, payload)
if 'url' in result.json():
return self.walk(result.json()['url'])
else:
return self.page_identity(result, request_json={})
page.register_page(resources.bulk_job_launch, BulkJobLaunch)

View File

@@ -319,7 +319,6 @@ class InventorySource(HasCreate, HasNotifications, UnifiedJobTemplate):
optional_fields = (
'source_path',
'source_vars',
'scm_branch',
'timeout',
'overwrite',
'overwrite_vars',

View File

@@ -13,8 +13,6 @@ class Resources(object):
_applications = 'applications/'
_auth = 'auth/'
_authtoken = 'authtoken/'
_bulk = 'bulk/'
_bulk_job_launch = 'bulk/job_launch/'
_config = 'config/'
_config_attach = 'config/attach/'
_credential = r'credentials/\d+/'

Some files were not shown because too many files have changed in this diff.