Compare commits

..

1 Commits

Author SHA1 Message Date
Luiz Costa
7f25309078 WIP Makefile 2022-09-02 15:01:00 -03:00
357 changed files with 3879 additions and 14168 deletions

View File

@@ -19,11 +19,8 @@ jobs:
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
OWNER: ${{ github.repository_owner }} OWNER: ${{ github.repository_owner }}
REPO: ${{ github.event.repository.name }} REPO: ${{ github.event.repository.name }}
PR: ${{github.event.pull_request.number}} BRANCH: ${{github.event.pull_request.head.ref}}
PR_BODY: ${{github.event.pull_request.body}} PR: ${{github.event.pull_request}}
run: | run: |
gh pr checkout ${{ env.PR }} gh pr checkout ${{ env.BRANCH }}
echo "${{ env.PR_BODY }}" > my_pr_body.txt gh pr edit --body "${{ env.PR }}\nBug, Docs Fix or other nominal change"
echo "" >> my_pr_body.txt
echo "Bug, Docs Fix or other nominal change" >> my_pr_body.txt
gh pr edit ${{env.PR}} --body-file my_pr_body.txt

View File

@@ -8,8 +8,6 @@ ignore: |
awx/ui/test/e2e/tests/smoke-vars.yml awx/ui/test/e2e/tests/smoke-vars.yml
awx/ui/node_modules awx/ui/node_modules
tools/docker-compose/_sources tools/docker-compose/_sources
# django template files
awx/api/templates/instance_install_bundle/**
extends: default extends: default

View File

@@ -3,7 +3,7 @@ recursive-include awx *.po
recursive-include awx *.mo recursive-include awx *.mo
recursive-include awx/static * recursive-include awx/static *
recursive-include awx/templates *.html recursive-include awx/templates *.html
recursive-include awx/api/templates *.md *.html *.yml recursive-include awx/api/templates *.md *.html
recursive-include awx/ui/build *.html recursive-include awx/ui/build *.html
recursive-include awx/ui/build * recursive-include awx/ui/build *
recursive-include awx/playbooks *.yml recursive-include awx/playbooks *.yml

105
Makefile
View File

@@ -54,45 +54,6 @@ I18N_FLAG_FILE = .i18n_built
VERSION PYTHON_VERSION docker-compose-sources \ VERSION PYTHON_VERSION docker-compose-sources \
.git/hooks/pre-commit .git/hooks/pre-commit
clean-tmp:
rm -rf tmp/
clean-venv:
rm -rf venv/
clean-dist:
rm -rf dist
clean-schema:
rm -rf swagger.json
rm -rf schema.json
rm -rf reference-schema.json
clean-languages:
rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex ".*\.mo$" -delete
## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist
rm -rf awx/public
rm -rf awx/lib/site-packages
rm -rf awx/job_status
rm -rf awx/job_output
rm -rf reports
rm -rf tmp
rm -rf $(I18N_FLAG_FILE)
mkdir tmp
clean-api:
rm -rf build $(NAME)-$(VERSION) *.egg-info
find . -type f -regex ".*\.py[co]$$" -delete
find . -type d -name "__pycache__" -delete
rm -f awx/awx_test.sqlite3*
rm -rf requirements/vendor
rm -rf awx/projects
clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
## convenience target to assert environment variables are defined ## convenience target to assert environment variables are defined
guard-%: guard-%:
@@ -365,13 +326,75 @@ bulk_data:
fi; \ fi; \
$(PYTHON) tools/data_generators/rbac_dummy_data_generator.py --preset=$(DATA_GEN_PRESET) $(PYTHON) tools/data_generators/rbac_dummy_data_generator.py --preset=$(DATA_GEN_PRESET)
# CLEANUP COMMANDS
# --------------------------------------
## Clean everything. Including temporary build files, compiled Python files.
clean: clean-tmp clean-ui clean-api clean-awxkit clean-dist
rm -rf awx/public
rm -rf awx/lib/site-packages
rm -rf awx/job_status
rm -rf awx/job_output
rm -rf reports
rm -rf $(I18N_FLAG_FILE)
clean-tmp:
rm -rf tmp/
mkdir tmp
clean-venv:
rm -rf venv/
clean-dist:
rm -rf dist
clean-schema:
rm -rf swagger.json
rm -rf schema.json
rm -rf reference-schema.json
clean-languages:
rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex ".*\.mo$" -delete
clean-api:
rm -rf build $(NAME)-$(VERSION) *.egg-info
find . -type f -regex ".*\.py[co]$$" -delete
find . -type d -name "__pycache__" -delete
rm -f awx/awx_test.sqlite3*
rm -rf requirements/vendor
rm -rf awx/projects
## Clean UI builded static files (alias for ui-clean)
clean-ui: ui-clean
## Clean temp build files from the awxkit
clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
clean-docker-images:
IMAGES_TO_BE_DELETE=' \
quay.io/ansible/receptor \
quay.io/awx/awx_devel \
ansible/receptor \
postgres \
redis \
' && \
for IMAGE in $$IMAGES_TO_BE_DELETE; do \
echo "Removing image '$$IMAGE'" && \
IMAGE_IDS=$$(docker image ls -a | grep $$IMAGE | awk '{print $$3}') echo "oi" \
done
clean-docker-containers:
clean-docker-volumes:
# UI TASKS # UI TASKS
# -------------------------------------- # --------------------------------------
UI_BUILD_FLAG_FILE = awx/ui/.ui-built UI_BUILD_FLAG_FILE = awx/ui/.ui-built
clean-ui: ui-clean:
rm -rf node_modules rm -rf node_modules
rm -rf awx/ui/node_modules rm -rf awx/ui/node_modules
rm -rf awx/ui/build rm -rf awx/ui/build
@@ -379,7 +402,7 @@ clean-ui:
rm -rf $(UI_BUILD_FLAG_FILE) rm -rf $(UI_BUILD_FLAG_FILE)
awx/ui/node_modules: awx/ui/node_modules:
NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn ci
$(UI_BUILD_FLAG_FILE): $(UI_BUILD_FLAG_FILE):
$(MAKE) awx/ui/node_modules $(MAKE) awx/ui/node_modules

View File

@@ -6,6 +6,7 @@ import inspect
import logging import logging
import time import time
import uuid import uuid
import urllib.parse
# Django # Django
from django.conf import settings from django.conf import settings
@@ -13,7 +14,7 @@ from django.contrib.auth import views as auth_views
from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache from django.core.cache import cache
from django.core.exceptions import FieldDoesNotExist from django.core.exceptions import FieldDoesNotExist
from django.db import connection, transaction from django.db import connection
from django.db.models.fields.related import OneToOneRel from django.db.models.fields.related import OneToOneRel
from django.http import QueryDict from django.http import QueryDict
from django.shortcuts import get_object_or_404 from django.shortcuts import get_object_or_404
@@ -29,7 +30,7 @@ from rest_framework.response import Response
from rest_framework import status from rest_framework import status
from rest_framework import views from rest_framework import views
from rest_framework.permissions import AllowAny from rest_framework.permissions import AllowAny
from rest_framework.renderers import StaticHTMLRenderer from rest_framework.renderers import StaticHTMLRenderer, JSONRenderer
from rest_framework.negotiation import DefaultContentNegotiation from rest_framework.negotiation import DefaultContentNegotiation
# AWX # AWX
@@ -40,7 +41,7 @@ from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd,
from awx.main.utils.db import get_all_field_names from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name from awx.main.utils.licensing import server_product_name
from awx.main.views import ApiErrorView from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning from awx.api.versioning import URLPathVersioning
from awx.api.metadata import SublistAttachDetatchMetadata, Metadata from awx.api.metadata import SublistAttachDetatchMetadata, Metadata
from awx.conf import settings_registry from awx.conf import settings_registry
@@ -62,9 +63,9 @@ __all__ = [
'SubDetailAPIView', 'SubDetailAPIView',
'ResourceAccessList', 'ResourceAccessList',
'ParentMixin', 'ParentMixin',
'DeleteLastUnattachLabelMixin',
'SubListAttachDetachAPIView', 'SubListAttachDetachAPIView',
'CopyAPIView', 'CopyAPIView',
'GenericCancelView',
'BaseUsersList', 'BaseUsersList',
] ]
@@ -90,9 +91,14 @@ class LoggedLoginView(auth_views.LoginView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
ret = super(LoggedLoginView, self).post(request, *args, **kwargs) ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
current_user = getattr(request, 'user', None)
if request.user.is_authenticated: if request.user.is_authenticated:
logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None)))) logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
ret.set_cookie('userLoggedIn', 'true') ret.set_cookie('userLoggedIn', 'true')
current_user = UserSerializer(self.request.user)
current_user = smart_str(JSONRenderer().render(current_user.data))
current_user = urllib.parse.quote('%s' % current_user, '')
ret.set_cookie('current_user', current_user, secure=settings.SESSION_COOKIE_SECURE or None)
ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid')) ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
return ret return ret
@@ -249,7 +255,7 @@ class APIView(views.APIView):
response['X-API-Query-Time'] = '%0.3fs' % sum(q_times) response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)
if getattr(self, 'deprecated', False): if getattr(self, 'deprecated', False):
response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."' response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."' # noqa
return response return response
@@ -769,6 +775,28 @@ class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView):
return {'id': None} return {'id': None}
class DeleteLastUnattachLabelMixin(object):
"""
Models for which you want the last instance to be deleted from the database
when the last disassociate is called should inherit from this class. Further,
the model should implement is_detached()
"""
def unattach(self, request, *args, **kwargs):
(sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
if res:
return res
res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id)
obj = self.model.objects.get(id=sub_id)
if obj.is_detached():
obj.delete()
return res
class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView): class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
pass pass
@@ -986,23 +1014,6 @@ class CopyAPIView(GenericAPIView):
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class GenericCancelView(RetrieveAPIView):
# In subclass set model, serializer_class
obj_permission_type = 'cancel'
@transaction.non_atomic_requests
def dispatch(self, *args, **kwargs):
return super(GenericCancelView, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class BaseUsersList(SubListCreateAttachDetachAPIView): class BaseUsersList(SubListCreateAttachDetachAPIView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
ret = super(BaseUsersList, self).post(request, *args, **kwargs) ret = super(BaseUsersList, self).post(request, *args, **kwargs)

View File

@@ -24,6 +24,7 @@ __all__ = [
'InventoryInventorySourcesUpdatePermission', 'InventoryInventorySourcesUpdatePermission',
'UserPermission', 'UserPermission',
'IsSystemAdminOrAuditor', 'IsSystemAdminOrAuditor',
'InstanceGroupTowerPermission',
'WorkflowApprovalPermission', 'WorkflowApprovalPermission',
] ]

View File

@@ -29,7 +29,6 @@ from django.utils.translation import gettext_lazy as _
from django.utils.encoding import force_str from django.utils.encoding import force_str
from django.utils.text import capfirst from django.utils.text import capfirst
from django.utils.timezone import now from django.utils.timezone import now
from django.core.validators import RegexValidator, MaxLengthValidator
# Django REST Framework # Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied from rest_framework.exceptions import ValidationError, PermissionDenied
@@ -121,9 +120,6 @@ from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, VerbatimField, DeprecatedCredentialField from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, VerbatimField, DeprecatedCredentialField
# AWX Utils
from awx.api.validators import HostnameRegexValidator
logger = logging.getLogger('awx.api.serializers') logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type. # Fields that should be summarized regardless of object type.
@@ -158,7 +154,6 @@ SUMMARIZABLE_FK_FIELDS = {
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'), 'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'), 'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'), 'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'), 'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
'job_template': DEFAULT_SUMMARY_FIELDS, 'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS, 'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
@@ -619,7 +614,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
def validate(self, attrs): def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs) attrs = super(BaseSerializer, self).validate(attrs)
try: try:
# Create/update a model instance and run its full_clean() method to # Create/update a model instance and run it's full_clean() method to
# do any validation implemented on the model class. # do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance) exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model() obj = self.instance or self.Meta.model()
@@ -1475,7 +1470,6 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
'allow_override', 'allow_override',
'custom_virtualenv', 'custom_virtualenv',
'default_environment', 'default_environment',
'signature_validation_credential',
) + ( ) + (
'last_update_failed', 'last_update_failed',
'last_updated', 'last_updated',
@@ -1684,7 +1678,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
'total_inventory_sources', 'total_inventory_sources',
'inventory_sources_with_failures', 'inventory_sources_with_failures',
'pending_deletion', 'pending_deletion',
'prevent_instance_group_fallback',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -2237,7 +2230,6 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
'source_project_update', 'source_project_update',
'custom_virtualenv', 'custom_virtualenv',
'instance_group', 'instance_group',
'scm_revision',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -2928,12 +2920,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled', 'survey_enabled',
'become_enabled', 'become_enabled',
'diff_mode', 'diff_mode',
@@ -2942,7 +2928,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'job_slice_count', 'job_slice_count',
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'prevent_instance_group_fallback',
) )
read_only_fields = ('*', 'custom_virtualenv') read_only_fields = ('*', 'custom_virtualenv')
@@ -3197,7 +3182,7 @@ class JobRelaunchSerializer(BaseSerializer):
return attrs return attrs
class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer): class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField() can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField() prompts = serializers.SerializerMethodField()
@@ -3223,17 +3208,14 @@ class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer):
try: try:
config = obj.launch_config config = obj.launch_config
ret = config.prompts_dict(display=True) ret = config.prompts_dict(display=True)
for field_name in ('inventory', 'execution_environment'): if 'inventory' in ret:
if field_name in ret: ret['inventory'] = self._summarize('inventory', ret['inventory'])
ret[field_name] = self._summarize(field_name, ret[field_name]) if 'credentials' in ret:
for field_name, singular in (('credentials', 'credential'), ('instance_groups', 'instance_group')): all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
if field_name in ret: ret['credentials'] = all_creds
ret[field_name] = [self._summarize(singular, obj) for obj in ret[field_name]]
if 'labels' in ret:
ret['labels'] = self._summary_field_labels(config)
return ret return ret
except JobLaunchConfig.DoesNotExist: except JobLaunchConfig.DoesNotExist:
return {'all': _('Unknown, job may have been run before launch configurations were saved.')} return {'all': _('Unknown, job may have been ran before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer): class AdHocCommandSerializer(UnifiedJobSerializer):
@@ -3403,9 +3385,6 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta: class Meta:
model = WorkflowJobTemplate model = WorkflowJobTemplate
fields = ( fields = (
@@ -3424,11 +3403,6 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'-execution_environment', '-execution_environment',
'ask_labels_on_launch',
'ask_skip_tags_on_launch',
'ask_tags_on_launch',
'skip_tags',
'job_tags',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -3472,7 +3446,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
# process char_prompts, these are not direct fields on the model # process char_prompts, these are not direct fields on the model
mock_obj = self.Meta.model() mock_obj = self.Meta.model()
for field_name in ('scm_branch', 'limit', 'skip_tags', 'job_tags'): for field_name in ('scm_branch', 'limit'):
if field_name in attrs: if field_name in attrs:
setattr(mock_obj, field_name, attrs[field_name]) setattr(mock_obj, field_name, attrs[field_name])
attrs.pop(field_name) attrs.pop(field_name)
@@ -3498,9 +3472,6 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta: class Meta:
model = WorkflowJob model = WorkflowJob
fields = ( fields = (
@@ -3520,8 +3491,6 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'webhook_guid', 'webhook_guid',
'skip_tags',
'job_tags',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -3638,9 +3607,6 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None) diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES) verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
forks = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
job_slice_count = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
timeout = serializers.IntegerField(required=False, allow_null=True, default=None)
exclude_errors = () exclude_errors = ()
class Meta: class Meta:
@@ -3656,21 +3622,13 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
'skip_tags', 'skip_tags',
'diff_mode', 'diff_mode',
'verbosity', 'verbosity',
'execution_environment',
'forks',
'job_slice_count',
'timeout',
) )
def get_related(self, obj): def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj) res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id: if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id}) res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.execution_environment_id:
res['execution_environment'] = self.reverse('api:execution_environment_detail', kwargs={'pk': obj.execution_environment_id})
res['labels'] = self.reverse('api:{}_labels_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk}) res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:{}_instance_groups_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
return res return res
def _build_mock_obj(self, attrs): def _build_mock_obj(self, attrs):
@@ -3750,11 +3708,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
# Build unsaved version of this config, use it to detect prompts errors # Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs) mock_obj = self._build_mock_obj(attrs)
if set(list(ujt.get_ask_mapping().keys()) + ['extra_data']) & set(attrs.keys()):
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict()) accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
else:
# Only perform validation of prompts if prompts fields are provided
errors = {}
# Remove all unprocessed $encrypted$ strings, indicating default usage # Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict: if 'extra_data' in attrs and password_dict:
@@ -4126,6 +4080,7 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
class JobLaunchSerializer(BaseSerializer): class JobLaunchSerializer(BaseSerializer):
# Representational fields # Representational fields
passwords_needed_to_start = serializers.ReadOnlyField() passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True) can_start_without_user_input = serializers.BooleanField(read_only=True)
@@ -4148,12 +4103,6 @@ class JobLaunchSerializer(BaseSerializer):
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True) limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True) verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
forks = serializers.IntegerField(required=False, write_only=True, min_value=0)
job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0)
timeout = serializers.IntegerField(required=False, write_only=True)
instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)
class Meta: class Meta:
model = JobTemplate model = JobTemplate
@@ -4181,12 +4130,6 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled', 'survey_enabled',
'variables_needed_to_start', 'variables_needed_to_start',
'credential_needed_to_start', 'credential_needed_to_start',
@@ -4194,12 +4137,6 @@ class JobLaunchSerializer(BaseSerializer):
'job_template_data', 'job_template_data',
'defaults', 'defaults',
'verbosity', 'verbosity',
'execution_environment',
'labels',
'forks',
'job_slice_count',
'timeout',
'instance_groups',
) )
read_only_fields = ( read_only_fields = (
'ask_scm_branch_on_launch', 'ask_scm_branch_on_launch',
@@ -4212,12 +4149,6 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
) )
def get_credential_needed_to_start(self, obj): def get_credential_needed_to_start(self, obj):
@@ -4242,17 +4173,6 @@ class JobLaunchSerializer(BaseSerializer):
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields: if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None) cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict) defaults_dict.setdefault(field_name, []).append(cred_dict)
elif field_name == 'execution_environment':
if obj.execution_environment_id:
defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name}
else:
defaults_dict[field_name] = {}
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {'id': label.id, 'name': label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
elif field_name == 'instance_groups':
defaults_dict[field_name] = []
else: else:
defaults_dict[field_name] = getattr(obj, field_name) defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict return defaults_dict
@@ -4275,15 +4195,6 @@ class JobLaunchSerializer(BaseSerializer):
elif template.project.status in ('error', 'failed'): elif template.project.status in ('error', 'failed'):
errors['playbook'] = _("Missing a revision to run due to failed project update.") errors['playbook'] = _("Missing a revision to run due to failed project update.")
latest_update = template.project.project_updates.last()
if latest_update is not None and latest_update.failed:
failed_validation_tasks = latest_update.project_update_events.filter(
event='runner_on_failed',
play="Perform project signature/checksum verification",
)
if failed_validation_tasks:
errors['playbook'] = _("Last project update failed due to signature validation failure.")
# cannot run a playbook without an inventory # cannot run a playbook without an inventory
if template.inventory and template.inventory.pending_deletion is True: if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.") errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
@@ -4360,10 +4271,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True) scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
workflow_job_template_data = serializers.SerializerMethodField() workflow_job_template_data = serializers.SerializerMethodField()
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
class Meta: class Meta:
model = WorkflowJobTemplate model = WorkflowJobTemplate
fields = ( fields = (
@@ -4383,22 +4290,8 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
'workflow_job_template_data', 'workflow_job_template_data',
'survey_enabled', 'survey_enabled',
'ask_variables_on_launch', 'ask_variables_on_launch',
'ask_labels_on_launch',
'labels',
'ask_skip_tags_on_launch',
'ask_tags_on_launch',
'skip_tags',
'job_tags',
)
read_only_fields = (
'ask_inventory_on_launch',
'ask_variables_on_launch',
'ask_skip_tags_on_launch',
'ask_labels_on_launch',
'ask_limit_on_launch',
'ask_scm_branch_on_launch',
'ask_tags_on_launch',
) )
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj): def get_survey_enabled(self, obj):
if obj: if obj:
@@ -4406,15 +4299,10 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return False return False
def get_defaults(self, obj): def get_defaults(self, obj):
defaults_dict = {} defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys(): for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory': if field_name == 'inventory':
defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None)) defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {"id": label.id, "name": label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
else: else:
defaults_dict[field_name] = getattr(obj, field_name) defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict return defaults_dict
@@ -4423,7 +4311,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return dict(name=obj.name, id=obj.id, description=obj.description) return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs): def validate(self, attrs):
template = self.instance template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs) accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
@@ -4441,7 +4328,6 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
WFJT_inventory = template.inventory WFJT_inventory = template.inventory
WFJT_limit = template.limit WFJT_limit = template.limit
WFJT_scm_branch = template.scm_branch WFJT_scm_branch = template.scm_branch
super(WorkflowJobLaunchSerializer, self).validate(attrs) super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory template.inventory = WFJT_inventory
@@ -4833,8 +4719,6 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
if isinstance(obj.unified_job_template, SystemJobTemplate): if isinstance(obj.unified_job_template, SystemJobTemplate):
summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type
# We are not showing instance groups on summary fields because JTs don't either
if 'inventory' in summary_fields: if 'inventory' in summary_fields:
return summary_fields return summary_fields
@@ -4869,7 +4753,7 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer): class InstanceLinkSerializer(BaseSerializer):
class Meta: class Meta:
model = InstanceLink model = InstanceLink
fields = ('source', 'target', 'link_state') fields = ('source', 'target')
source = serializers.SlugRelatedField(slug_field="hostname", read_only=True) source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True) target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
@@ -4878,93 +4762,63 @@ class InstanceLinkSerializer(BaseSerializer):
class InstanceNodeSerializer(BaseSerializer): class InstanceNodeSerializer(BaseSerializer):
class Meta: class Meta:
model = Instance model = Instance
fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled') fields = ('id', 'hostname', 'node_type', 'node_state')
node_state = serializers.SerializerMethodField()
def get_node_state(self, obj):
if not obj.enabled:
return "disabled"
return "error" if obj.errors else "healthy"
class InstanceSerializer(BaseSerializer): class InstanceSerializer(BaseSerializer):
show_capabilities = ['edit']
consumed_capacity = serializers.SerializerMethodField() consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField() percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True) jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance'), read_only=True)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True) jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
health_check_pending = serializers.SerializerMethodField()
class Meta: class Meta:
model = Instance model = Instance
read_only_fields = ('ip_address', 'uuid', 'version') read_only_fields = ('uuid', 'hostname', 'version', 'node_type')
fields = ( fields = (
'id', "id",
'hostname', "type",
'type', "url",
'url', "related",
'related', "uuid",
'summary_fields', "hostname",
'uuid', "created",
'created', "modified",
'modified', "last_seen",
'last_seen', "last_health_check",
'health_check_started', "errors",
'health_check_pending',
'last_health_check',
'errors',
'capacity_adjustment', 'capacity_adjustment',
'version', "version",
'capacity', "capacity",
'consumed_capacity', "consumed_capacity",
'percent_capacity_remaining', "percent_capacity_remaining",
'jobs_running', "jobs_running",
'jobs_total', "jobs_total",
'cpu', "cpu",
'memory', "memory",
'cpu_capacity', "cpu_capacity",
'mem_capacity', "mem_capacity",
'enabled', "enabled",
'managed_by_policy', "managed_by_policy",
'node_type', "node_type",
'node_state',
'ip_address',
'listener_port',
) )
extra_kwargs = {
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
'node_state': {'initial': Instance.States.INSTALLED, 'default': Instance.States.INSTALLED},
'hostname': {
'validators': [
MaxLengthValidator(limit_value=250),
validators.UniqueValidator(queryset=Instance.objects.all()),
RegexValidator(
regex=r'^localhost$|^127(?:\.[0-9]+){0,2}\.[0-9]+$|^(?:0*\:)*?:?0*1$',
flags=re.IGNORECASE,
inverse_match=True,
message="hostname cannot be localhost or 127.0.0.1",
),
HostnameRegexValidator(),
],
},
}
def get_related(self, obj): def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj) res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk}) res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk}) res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor: if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type != 'hop': if obj.node_type != 'hop':
res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk}) res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
return res return res
def get_summary_fields(self, obj):
summary = super().get_summary_fields(obj)
# use this handle to distinguish between a listView and a detailView
if self.is_detail_view:
summary['links'] = InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source').filter(source=obj), many=True).data
return summary
def get_consumed_capacity(self, obj): def get_consumed_capacity(self, obj):
return obj.consumed_capacity return obj.consumed_capacity
@@ -4974,58 +4828,10 @@ class InstanceSerializer(BaseSerializer):
else: else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100)) return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
def get_health_check_pending(self, obj): def validate(self, attrs):
return obj.health_check_pending if self.instance.node_type == 'hop':
raise serializers.ValidationError(_('Hop node instances may not be changed.'))
def validate(self, data): return attrs
if self.instance:
if self.instance.node_type == Instance.Types.HOP:
raise serializers.ValidationError("Hop node instances may not be changed.")
else:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
return data
def validate_node_type(self, value):
if not self.instance:
if value not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only create execution nodes.")
else:
if self.instance.node_type != value:
raise serializers.ValidationError("Cannot change node type.")
return value
def validate_node_state(self, value):
if self.instance:
if value != self.instance.node_state:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
if value != Instance.States.DEPROVISIONING:
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
if self.instance.node_type not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only deprovision execution nodes.")
else:
if value and value != Instance.States.INSTALLED:
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
return value
def validate_hostname(self, value):
"""
- Hostname cannot be "localhost" - but can be something like localhost.domain
- Cannot change the hostname of an-already instantiated & initialized Instance object
"""
if self.instance and self.instance.hostname != value:
raise serializers.ValidationError("Cannot change hostname.")
return value
def validate_listener_port(self, value):
if self.instance and self.instance.listener_port != value:
raise serializers.ValidationError("Cannot change listener port.")
return value
class InstanceHealthCheckSerializer(BaseSerializer): class InstanceHealthCheckSerializer(BaseSerializer):

View File

@@ -1,23 +0,0 @@
receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_work_commands:
ansible-runner:
command: ansible-runner
params: worker
allowruntimeparams: true
verifysignature: true
custom_worksign_public_keyfile: receptor/work-public-key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
receptor_protocol: 'tcp'
receptor_listener: true
receptor_port: {{ instance.listener_port }}
receptor_dependencies:
- python39-pip
{% verbatim %}
podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}"
{% endverbatim %}

View File

@@ -1,20 +0,0 @@
{% verbatim %}
---
- hosts: all
become: yes
tasks:
- name: Create the receptor user
user:
name: "{{ receptor_user }}"
shell: /bin/bash
- name: Enable Copr repo for Receptor
command: dnf copr enable ansible-awx/receptor -y
- import_role:
name: ansible.receptor.podman
- import_role:
name: ansible.receptor.setup
- name: Install ansible-runner
pip:
name: ansible-runner
executable: pip3.9
{% endverbatim %}

View File

@@ -1,7 +0,0 @@
---
all:
hosts:
remote-execution:
ansible_host: {{ instance.hostname }}
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa

View File

@@ -1,4 +0,0 @@
---
collections:
- name: ansible.receptor
version: 1.1.0

View File

@@ -3,15 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import ( from awx.api.views import InstanceList, InstanceDetail, InstanceUnifiedJobsList, InstanceInstanceGroupsList, InstanceHealthCheck
InstanceList,
InstanceDetail,
InstanceUnifiedJobsList,
InstanceInstanceGroupsList,
InstanceHealthCheck,
InstancePeersList,
)
from awx.api.views.instance_install_bundle import InstanceInstallBundle
urls = [ urls = [
@@ -20,8 +12,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceUnifiedJobsList.as_view(), name='instance_unified_jobs_list'), re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceUnifiedJobsList.as_view(), name='instance_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'), re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'), re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -3,28 +3,26 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.inventory import ( from awx.api.views import (
InventoryList, InventoryList,
InventoryDetail, InventoryDetail,
InventoryHostsList,
InventoryGroupsList,
InventoryRootGroupsList,
InventoryVariableData,
InventoryScriptView,
InventoryTreeView,
InventoryInventorySourcesList,
InventoryInventorySourcesUpdate,
InventoryActivityStreamList, InventoryActivityStreamList,
InventoryJobTemplateList, InventoryJobTemplateList,
InventoryAdHocCommandsList,
InventoryAccessList, InventoryAccessList,
InventoryObjectRolesList, InventoryObjectRolesList,
InventoryInstanceGroupsList, InventoryInstanceGroupsList,
InventoryLabelList, InventoryLabelList,
InventoryCopy, InventoryCopy,
) )
from awx.api.views import (
InventoryHostsList,
InventoryGroupsList,
InventoryInventorySourcesList,
InventoryInventorySourcesUpdate,
InventoryAdHocCommandsList,
InventoryRootGroupsList,
InventoryScriptView,
InventoryTreeView,
InventoryVariableData,
)
urls = [ urls = [

View File

@@ -3,9 +3,6 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.inventory import (
InventoryUpdateEventsList,
)
from awx.api.views import ( from awx.api.views import (
InventoryUpdateList, InventoryUpdateList,
InventoryUpdateDetail, InventoryUpdateDetail,
@@ -13,6 +10,7 @@ from awx.api.views import (
InventoryUpdateStdout, InventoryUpdateStdout,
InventoryUpdateNotificationsList, InventoryUpdateNotificationsList,
InventoryUpdateCredentialsList, InventoryUpdateCredentialsList,
InventoryUpdateEventsList,
) )

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.labels import LabelList, LabelDetail from awx.api.views import LabelList, LabelDetail
urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')] urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')]

View File

@@ -10,7 +10,7 @@ from oauthlib import oauth2
from oauth2_provider import views from oauth2_provider import views
from awx.main.models import RefreshToken from awx.main.models import RefreshToken
from awx.api.views.root import ApiOAuthAuthorizationRootView from awx.api.views import ApiOAuthAuthorizationRootView
class TokenView(views.TokenView): class TokenView(views.TokenView):

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.organization import ( from awx.api.views import (
OrganizationList, OrganizationList,
OrganizationDetail, OrganizationDetail,
OrganizationUsersList, OrganizationUsersList,
@@ -14,6 +14,7 @@ from awx.api.views.organization import (
OrganizationJobTemplatesList, OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList, OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList, OrganizationTeamsList,
OrganizationCredentialList,
OrganizationActivityStreamList, OrganizationActivityStreamList,
OrganizationNotificationTemplatesList, OrganizationNotificationTemplatesList,
OrganizationNotificationTemplatesErrorList, OrganizationNotificationTemplatesErrorList,
@@ -24,8 +25,8 @@ from awx.api.views.organization import (
OrganizationGalaxyCredentialsList, OrganizationGalaxyCredentialsList,
OrganizationObjectRolesList, OrganizationObjectRolesList,
OrganizationAccessList, OrganizationAccessList,
OrganizationApplicationList,
) )
from awx.api.views import OrganizationCredentialList, OrganizationApplicationList
urls = [ urls = [

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList, ScheduleLabelsList, ScheduleInstanceGroupList from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList
urls = [ urls = [
@@ -11,8 +11,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'), re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'),
re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'), re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', ScheduleLabelsList.as_view(), name='schedule_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', ScheduleInstanceGroupList.as_view(), name='schedule_instance_groups_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -6,15 +6,13 @@ from django.urls import include, re_path
from awx import MODE from awx import MODE
from awx.api.generics import LoggedLoginView, LoggedLogoutView from awx.api.generics import LoggedLoginView, LoggedLogoutView
from awx.api.views.root import ( from awx.api.views import (
ApiRootView, ApiRootView,
ApiV2RootView, ApiV2RootView,
ApiV2PingView, ApiV2PingView,
ApiV2ConfigView, ApiV2ConfigView,
ApiV2SubscriptionView, ApiV2SubscriptionView,
ApiV2AttachView, ApiV2AttachView,
)
from awx.api.views import (
AuthView, AuthView,
UserMeList, UserMeList,
DashboardView, DashboardView,
@@ -30,8 +28,8 @@ from awx.api.views import (
OAuth2TokenList, OAuth2TokenList,
ApplicationOAuth2TokenList, ApplicationOAuth2TokenList,
OAuth2ApplicationDetail, OAuth2ApplicationDetail,
MeshVisualizer,
) )
from awx.api.views.mesh_visualizer import MeshVisualizer
from awx.api.views.metrics import MetricsView from awx.api.views.metrics import MetricsView

View File

@@ -1,6 +1,6 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver from awx.api.views import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
urlpatterns = [ urlpatterns = [

View File

@@ -10,8 +10,6 @@ from awx.api.views import (
WorkflowJobNodeFailureNodesList, WorkflowJobNodeFailureNodesList,
WorkflowJobNodeAlwaysNodesList, WorkflowJobNodeAlwaysNodesList,
WorkflowJobNodeCredentialsList, WorkflowJobNodeCredentialsList,
WorkflowJobNodeLabelsList,
WorkflowJobNodeInstanceGroupsList,
) )
@@ -22,8 +20,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobNodeLabelsList.as_view(), name='workflow_job_node_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobNodeInstanceGroupsList.as_view(), name='workflow_job_node_instance_groups_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -11,8 +11,6 @@ from awx.api.views import (
WorkflowJobTemplateNodeAlwaysNodesList, WorkflowJobTemplateNodeAlwaysNodesList,
WorkflowJobTemplateNodeCredentialsList, WorkflowJobTemplateNodeCredentialsList,
WorkflowJobTemplateNodeCreateApproval, WorkflowJobTemplateNodeCreateApproval,
WorkflowJobTemplateNodeLabelsList,
WorkflowJobTemplateNodeInstanceGroupsList,
) )
@@ -23,8 +21,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobTemplateNodeLabelsList.as_view(), name='workflow_job_template_node_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobTemplateNodeInstanceGroupsList.as_view(), name='workflow_job_template_node_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'), re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'),
] ]

View File

@@ -1,55 +0,0 @@
import re
from django.core.validators import RegexValidator, validate_ipv46_address
from django.core.exceptions import ValidationError
class HostnameRegexValidator(RegexValidator):
"""
Fully validates a domain name that is compliant with norms in Linux/RHEL
- Cannot start with a hyphen
- Cannot begin with, or end with a "."
- Cannot contain any whitespaces
- Entire hostname is max 255 chars (including dots)
- Each domain/label is between 1 and 63 characters, except top level domain, which must be at least 2 characters
- Supports ipv4, ipv6, simple hostnames and FQDNs
- Follows RFC 9210 (modern RFC 1123, 1178) requirements
Accepts an IP Address or Hostname as the argument
"""
regex = '^[a-z0-9][-a-z0-9]*$|^([a-z0-9][-a-z0-9]{0,62}[.])*[a-z0-9][-a-z0-9]{1,62}$'
flags = re.IGNORECASE
def __call__(self, value):
regex_matches, err = self.__validate(value)
invalid_input = regex_matches if self.inverse_match else not regex_matches
if invalid_input:
if err is None:
err = ValidationError(self.message, code=self.code, params={"value": value})
raise err
def __str__(self):
return f"regex={self.regex}, message={self.message}, code={self.code}, inverse_match={self.inverse_match}, flags={self.flags}"
def __validate(self, value):
if ' ' in value:
return False, ValidationError("whitespaces in hostnames are illegal")
"""
If we have an IP address, try and validate it.
"""
try:
validate_ipv46_address(value)
return True, None
except ValidationError:
pass
"""
By this point in the code, we probably have a simple hostname, FQDN or a strange hostname like "192.localhost.domain.101"
"""
if not self.regex.match(value):
return False, ValidationError(f"illegal characters detected in hostname={value}. Please verify.")
return True, None

View File

@@ -22,7 +22,6 @@ from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.shortcuts import get_object_or_404 from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe from django.utils.safestring import mark_safe
from django.utils.timezone import now from django.utils.timezone import now
@@ -69,7 +68,7 @@ from awx.api.generics import (
APIView, APIView,
BaseUsersList, BaseUsersList,
CopyAPIView, CopyAPIView,
GenericCancelView, DeleteLastUnattachLabelMixin,
GenericAPIView, GenericAPIView,
ListAPIView, ListAPIView,
ListCreateAPIView, ListCreateAPIView,
@@ -86,7 +85,6 @@ from awx.api.generics import (
SubListCreateAttachDetachAPIView, SubListCreateAttachDetachAPIView,
SubListDestroyAPIView, SubListDestroyAPIView,
) )
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main import models from awx.main import models
from awx.main.utils import ( from awx.main.utils import (
@@ -123,9 +121,59 @@ from awx.api.views.mixin import (
UnifiedJobDeletionMixin, UnifiedJobDeletionMixin,
NoTruncateMixin, NoTruncateMixin,
) )
from awx.api.views.organization import ( # noqa
OrganizationList,
OrganizationDetail,
OrganizationInventoriesList,
OrganizationUsersList,
OrganizationAdminsList,
OrganizationExecutionEnvironmentsList,
OrganizationProjectsList,
OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList,
OrganizationActivityStreamList,
OrganizationNotificationTemplatesList,
OrganizationNotificationTemplatesAnyList,
OrganizationNotificationTemplatesErrorList,
OrganizationNotificationTemplatesStartedList,
OrganizationNotificationTemplatesSuccessList,
OrganizationNotificationTemplatesApprovalList,
OrganizationInstanceGroupsList,
OrganizationGalaxyCredentialsList,
OrganizationAccessList,
OrganizationObjectRolesList,
)
from awx.api.views.inventory import ( # noqa
InventoryList,
InventoryDetail,
InventoryUpdateEventsList,
InventoryList,
InventoryDetail,
InventoryActivityStreamList,
InventoryInstanceGroupsList,
InventoryAccessList,
InventoryObjectRolesList,
InventoryJobTemplateList,
InventoryLabelList,
InventoryCopy,
)
from awx.api.views.mesh_visualizer import MeshVisualizer # noqa
from awx.api.views.root import ( # noqa
ApiRootView,
ApiOAuthAuthorizationRootView,
ApiVersionRootView,
ApiV2RootView,
ApiV2PingView,
ApiV2ConfigView,
ApiV2SubscriptionView,
ApiV2AttachView,
)
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver # noqa
from awx.api.pagination import UnifiedJobEventPagination from awx.api.pagination import UnifiedJobEventPagination
from awx.main.utils import set_environ from awx.main.utils import set_environ
logger = logging.getLogger('awx.api.views') logger = logging.getLogger('awx.api.views')
@@ -310,7 +358,7 @@ class DashboardJobsGraphView(APIView):
return Response(dashboard_data) return Response(dashboard_data)
class InstanceList(ListCreateAPIView): class InstanceList(ListAPIView):
name = _("Instances") name = _("Instances")
model = models.Instance model = models.Instance
@@ -349,17 +397,6 @@ class InstanceUnifiedJobsList(SubListAPIView):
return qs return qs
class InstancePeersList(SubListAPIView):
name = _("Instance Peers")
parent_model = models.Instance
model = models.Instance
serializer_class = serializers.InstanceSerializer
parent_access = 'read'
search_fields = {'hostname'}
relationship = 'peers'
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView): class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
name = _("Instance's Instance Groups") name = _("Instance's Instance Groups")
@@ -402,21 +439,40 @@ class InstanceHealthCheck(GenericAPIView):
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
obj = self.get_object() obj = self.get_object()
if obj.health_check_pending:
return Response({'msg': f"Health check was already in progress for {obj.hostname}."}, status=status.HTTP_200_OK)
# Note: hop nodes are already excluded by the get_queryset method if obj.node_type == 'execution':
obj.health_check_started = now()
obj.save(update_fields=['health_check_started'])
if obj.node_type == models.Instance.Types.EXECUTION:
from awx.main.tasks.system import execution_node_health_check from awx.main.tasks.system import execution_node_health_check
execution_node_health_check.apply_async([obj.hostname]) runner_data = execution_node_health_check(obj.hostname)
obj.refresh_from_db()
data = self.get_serializer(data=request.data).to_representation(obj)
# Add in some extra unsaved fields
for extra_field in ('transmit_timing', 'run_timing'):
if extra_field in runner_data:
data[extra_field] = runner_data[extra_field]
else: else:
from awx.main.tasks.system import cluster_node_health_check from awx.main.tasks.system import cluster_node_health_check
if settings.CLUSTER_HOST_ID == obj.hostname:
cluster_node_health_check(obj.hostname)
else:
cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname) cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
return Response({'msg': f"Health check is running for {obj.hostname}."}, status=status.HTTP_200_OK) start_time = time.time()
prior_check_time = obj.last_health_check
while time.time() - start_time < 50.0:
obj.refresh_from_db(fields=['last_health_check'])
if obj.last_health_check != prior_check_time:
break
if time.time() - start_time < 1.0:
time.sleep(0.1)
else:
time.sleep(1.0)
else:
obj.mark_offline(errors=_('Health check initiated by user determined this instance to be unresponsive'))
obj.refresh_from_db()
data = self.get_serializer(data=request.data).to_representation(obj)
return Response(data, status=status.HTTP_200_OK)
class InstanceGroupList(ListCreateAPIView): class InstanceGroupList(ListCreateAPIView):
@@ -561,19 +617,6 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.Schedule parent_model = models.Schedule
class ScheduleLabelsList(LabelSubListCreateAttachDetachView):
parent_model = models.Schedule
class ScheduleInstanceGroupList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.Schedule
relationship = 'instance_groups'
class ScheduleUnifiedJobsList(SubListAPIView): class ScheduleUnifiedJobsList(SubListAPIView):
model = models.UnifiedJob model = models.UnifiedJob
@@ -977,11 +1020,20 @@ class SystemJobEventsList(SubListAPIView):
return job.get_event_queryset() return job.get_event_queryset()
class ProjectUpdateCancel(GenericCancelView): class ProjectUpdateCancel(RetrieveAPIView):
model = models.ProjectUpdate model = models.ProjectUpdate
obj_permission_type = 'cancel'
serializer_class = serializers.ProjectUpdateCancelSerializer serializer_class = serializers.ProjectUpdateCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class ProjectUpdateNotificationsList(SubListAPIView): class ProjectUpdateNotificationsList(SubListAPIView):
@@ -2254,11 +2306,20 @@ class InventoryUpdateCredentialsList(SubListAPIView):
relationship = 'credentials' relationship = 'credentials'
class InventoryUpdateCancel(GenericCancelView): class InventoryUpdateCancel(RetrieveAPIView):
model = models.InventoryUpdate model = models.InventoryUpdate
obj_permission_type = 'cancel'
serializer_class = serializers.InventoryUpdateCancelSerializer serializer_class = serializers.InventoryUpdateCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class InventoryUpdateNotificationsList(SubListAPIView): class InventoryUpdateNotificationsList(SubListAPIView):
@@ -2320,13 +2381,10 @@ class JobTemplateLaunch(RetrieveAPIView):
for field, ask_field_name in modified_ask_mapping.items(): for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name): if not getattr(obj, ask_field_name):
data.pop(field, None) data.pop(field, None)
elif isinstance(getattr(obj.__class__, field).field, ForeignKey): elif field == 'inventory':
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField): elif field == 'credentials':
if field == 'instance_groups': data[field] = [cred.id for cred in obj.credentials.all()]
data[field] = []
continue
data[field] = [item.id for item in getattr(obj, field).all()]
else: else:
data[field] = getattr(obj, field) data[field] = getattr(obj, field)
return data return data
@@ -2339,8 +2397,9 @@ class JobTemplateLaunch(RetrieveAPIView):
""" """
modern_data = data.copy() modern_data = data.copy()
if 'inventory' not in modern_data and 'inventory_id' in modern_data: id_fd = '{}_id'.format('inventory')
modern_data['inventory'] = modern_data['inventory_id'] if 'inventory' not in modern_data and id_fd in modern_data:
modern_data['inventory'] = modern_data[id_fd]
# credential passwords were historically provided as top-level attributes # credential passwords were historically provided as top-level attributes
if 'credential_passwords' not in modern_data: if 'credential_passwords' not in modern_data:
@@ -2660,9 +2719,28 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created) return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
class JobTemplateLabelList(LabelSubListCreateAttachDetachView): class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.JobTemplate parent_model = models.JobTemplate
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(JobTemplateLabelList, self).post(request, *args, **kwargs)
class JobTemplateCallback(GenericAPIView): class JobTemplateCallback(GenericAPIView):
@@ -2888,22 +2966,6 @@ class WorkflowJobNodeCredentialsList(SubListAPIView):
relationship = 'credentials' relationship = 'credentials'
class WorkflowJobNodeLabelsList(SubListAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.WorkflowJobNode
relationship = 'labels'
class WorkflowJobNodeInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.WorkflowJobNode
relationship = 'instance_groups'
class WorkflowJobTemplateNodeList(ListCreateAPIView): class WorkflowJobTemplateNodeList(ListCreateAPIView):
model = models.WorkflowJobTemplateNode model = models.WorkflowJobTemplateNode
@@ -2922,19 +2984,6 @@ class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.WorkflowJobTemplateNode parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeLabelsList(LabelSubListCreateAttachDetachView):
parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.WorkflowJobTemplateNode
relationship = 'instance_groups'
class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView): class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = models.WorkflowJobTemplateNode model = models.WorkflowJobTemplateNode
@@ -3033,7 +3082,8 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
search_fields = ('unified_job_template__name', 'unified_job_template__description') search_fields = ('unified_job_template__name', 'unified_job_template__description')
# #
# Limit the set of WorkflowJobNodes to the related nodes of specified by self.relationship # Limit the set of WorkflowJobeNodes to the related nodes of specified by
#'relationship'
# #
def get_queryset(self): def get_queryset(self):
parent = self.get_parent_object() parent = self.get_parent_object()
@@ -3146,17 +3196,13 @@ class WorkflowJobTemplateLaunch(RetrieveAPIView):
data['extra_vars'] = extra_vars data['extra_vars'] = extra_vars
modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping() modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping()
modified_ask_mapping.pop('extra_vars') modified_ask_mapping.pop('extra_vars')
for field_name, ask_field_name in obj.get_ask_mapping().items():
for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name): if not getattr(obj, ask_field_name):
data.pop(field, None) data.pop(field_name, None)
elif isinstance(getattr(obj.__class__, field).field, ForeignKey): elif field_name == 'inventory':
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None)
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
data[field] = [item.id for item in getattr(obj, field).all()]
else: else:
data[field] = getattr(obj, field) data[field_name] = getattr(obj, field_name)
return data return data
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
@@ -3335,15 +3381,20 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id') return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
class WorkflowJobCancel(GenericCancelView): class WorkflowJobCancel(RetrieveAPIView):
model = models.WorkflowJob model = models.WorkflowJob
obj_permission_type = 'cancel'
serializer_class = serializers.WorkflowJobCancelSerializer serializer_class = serializers.WorkflowJobCancelSerializer
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
r = super().post(request, *args, **kwargs) obj = self.get_object()
if obj.can_cancel:
obj.cancel()
ScheduleWorkflowManager().schedule() ScheduleWorkflowManager().schedule()
return r return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class WorkflowJobNotificationsList(SubListAPIView): class WorkflowJobNotificationsList(SubListAPIView):
@@ -3499,11 +3550,20 @@ class JobActivityStreamList(SubListAPIView):
search_fields = ('changes',) search_fields = ('changes',)
class JobCancel(GenericCancelView): class JobCancel(RetrieveAPIView):
model = models.Job model = models.Job
obj_permission_type = 'cancel'
serializer_class = serializers.JobCancelSerializer serializer_class = serializers.JobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class JobRelaunch(RetrieveAPIView): class JobRelaunch(RetrieveAPIView):
@@ -3629,21 +3689,15 @@ class JobCreateSchedule(RetrieveAPIView):
extra_data=config.extra_data, extra_data=config.extra_data,
survey_passwords=config.survey_passwords, survey_passwords=config.survey_passwords,
inventory=config.inventory, inventory=config.inventory,
execution_environment=config.execution_environment,
char_prompts=config.char_prompts, char_prompts=config.char_prompts,
credentials=set(config.credentials.all()), credentials=set(config.credentials.all()),
labels=set(config.labels.all()),
instance_groups=list(config.instance_groups.all()),
) )
if not request.user.can_access(models.Schedule, 'add', schedule_data): if not request.user.can_access(models.Schedule, 'add', schedule_data):
raise PermissionDenied() raise PermissionDenied()
related_fields = ('credentials', 'labels', 'instance_groups') creds_list = schedule_data.pop('credentials')
related = [schedule_data.pop(relationship) for relationship in related_fields]
schedule = models.Schedule.objects.create(**schedule_data) schedule = models.Schedule.objects.create(**schedule_data)
for relationship, items in zip(related_fields, related): schedule.credentials.add(*creds_list)
for item in items:
getattr(schedule, relationship).add(item)
data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data
data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model
@@ -3974,11 +4028,20 @@ class AdHocCommandDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
serializer_class = serializers.AdHocCommandDetailSerializer serializer_class = serializers.AdHocCommandDetailSerializer
class AdHocCommandCancel(GenericCancelView): class AdHocCommandCancel(RetrieveAPIView):
model = models.AdHocCommand model = models.AdHocCommand
obj_permission_type = 'cancel'
serializer_class = serializers.AdHocCommandCancelSerializer serializer_class = serializers.AdHocCommandCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class AdHocCommandRelaunch(GenericAPIView): class AdHocCommandRelaunch(GenericAPIView):
@@ -4113,11 +4176,20 @@ class SystemJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
serializer_class = serializers.SystemJobSerializer serializer_class = serializers.SystemJobSerializer
class SystemJobCancel(GenericCancelView): class SystemJobCancel(RetrieveAPIView):
model = models.SystemJob model = models.SystemJob
obj_permission_type = 'cancel'
serializer_class = serializers.SystemJobCancelSerializer serializer_class = serializers.SystemJobCancelSerializer
def post(self, request, *args, **kwargs):
obj = self.get_object()
if obj.can_cancel:
obj.cancel()
return Response(status=status.HTTP_202_ACCEPTED)
else:
return self.http_method_not_allowed(request, *args, **kwargs)
class SystemJobNotificationsList(SubListAPIView): class SystemJobNotificationsList(SubListAPIView):
@@ -4356,6 +4428,18 @@ class NotificationDetail(RetrieveAPIView):
serializer_class = serializers.NotificationSerializer serializer_class = serializers.NotificationSerializer
class LabelList(ListCreateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class LabelDetail(RetrieveUpdateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class ActivityStreamList(SimpleListAPIView): class ActivityStreamList(SimpleListAPIView):
model = models.ActivityStream model = models.ActivityStream

View File

@@ -1,199 +0,0 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
import datetime
import io
import ipaddress
import os
import tarfile
import asn1
from awx.api import serializers
from awx.api.generics import GenericAPIView, Response
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.main import models
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509 import DNSName, IPAddress, ObjectIdentifier, OtherName
from cryptography.x509.oid import NameOID
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from rest_framework import status
# Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.
# Used below as the ObjectIdentifier for the OtherName SAN entry in generated receptor certs.
RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# generate install bundle for the instance
# install bundle directory structure
# ├── install_receptor.yml (playbook)
# ├── inventory.yml
# ├── group_vars
# │ └── all.yml
# ├── receptor
# │ ├── tls
# │ │ ├── ca
# │ │ │ └── receptor-ca.crt
# │ │ ├── receptor.crt
# │ │ └── receptor.key
# │ └── work-public-key.pem
# └── requirements.yml
class InstanceInstallBundle(GenericAPIView):
    """
    Build and download a gzipped tar "install bundle" for a single execution node.

    GET returns a ``<hostname>_install_bundle.tar.gz`` attachment containing the
    receptor CA cert, work-signing public key, a freshly generated per-node TLS
    key/cert pair, and the rendered playbook/inventory/group_vars/requirements
    files (see the directory-layout comment above this class).

    Only system admins/auditors may download bundles, and only for instances
    whose node_type is 'execution' (anything else gets a 400).
    """

    name = _('Install Bundle')
    model = models.Instance
    serializer_class = serializers.InstanceSerializer
    permission_classes = (IsSystemAdminOrAuditor,)

    def get(self, request, *args, **kwargs):
        instance_obj = self.get_object()

        # Bundles only make sense for execution nodes; reject anything else.
        if instance_obj.node_type not in ('execution',):
            return Response(
                data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Assemble the tarball fully in memory; nothing is written to disk.
        with io.BytesIO() as f:
            with tarfile.open(fileobj=f, mode='w:gz') as tar:
                # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
                # (realpath in case the cert on disk is a symlink)
                tar.add(
                    os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
                )

                # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
                tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")

                # generate and write the receptor key to receptor/tls/receptor.key in the tar file
                # key/cert are PEM bytes; TarInfo.size must be set before addfile()
                key, cert = generate_receptor_tls(instance_obj)

                key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
                key_tarinfo.size = len(key)
                tar.addfile(key_tarinfo, io.BytesIO(key))

                cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
                cert_tarinfo.size = len(cert)
                tar.addfile(cert_tarinfo, io.BytesIO(cert))

                # generate and write install_receptor.yml to the tar file
                playbook = generate_playbook().encode('utf-8')
                playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
                playbook_tarinfo.size = len(playbook)
                tar.addfile(playbook_tarinfo, io.BytesIO(playbook))

                # generate and write inventory.yml to the tar file
                inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
                inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
                inventory_yml_tarinfo.size = len(inventory_yml)
                tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))

                # generate and write group_vars/all.yml to the tar file
                group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
                group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
                group_vars_tarinfo.size = len(group_vars)
                tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))

                # generate and write requirements.yml to the tar file
                requirements_yml = generate_requirements_yml().encode('utf-8')
                requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
                requirements_yml_tarinfo.size = len(requirements_yml)
                tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))

            # respond with the tarfile
            # (read only after the tarfile context closes, so the gzip stream is finalized)
            f.seek(0)
            response = HttpResponse(f.read(), status=status.HTTP_200_OK)
            response['Content-Disposition'] = f"attachment; filename={instance_obj.hostname}_install_bundle.tar.gz"
            return response
def generate_playbook():
    """Render the bundled install_receptor.yml playbook template as a string."""
    template = "instance_install_bundle/install_receptor.yml"
    return render_to_string(template)
def generate_requirements_yml():
    """Render the requirements.yml template (ansible collection requirements) as a string."""
    template = "instance_install_bundle/requirements.yml"
    return render_to_string(template)
def generate_inventory_yml(instance_obj):
    """Render inventory.yml for the given instance, exposing it as ``instance`` in the template context."""
    context = dict(instance=instance_obj)
    return render_to_string("instance_install_bundle/inventory.yml", context=context)
def generate_group_vars_all_yml(instance_obj):
    """Render group_vars/all.yml for the given instance, exposing it as ``instance`` in the template context."""
    context = dict(instance=instance_obj)
    return render_to_string("instance_install_bundle/group_vars/all.yml", context=context)
def generate_receptor_tls(instance_obj):
    """
    Generate a per-node TLS keypair for receptor, signed by the local receptor CA.

    Builds a 2048-bit RSA key and a CSR whose SubjectAlternativeName contains the
    node hostname as a DNSName, as an OtherName under RECEPTOR_OID (ASN.1
    UTF8String — required by receptor for node identity), and as an IPAddress
    when the hostname parses as an IPv4 address. The CSR is then signed with the
    CA key/cert read from /etc/receptor/tls/ca/, valid for ~10 years.

    Returns a ``(key, cert)`` tuple of PEM-encoded bytes.
    """
    # generate private key for the receptor
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

    # encode receptor hostname to asn1 (receptor expects a UTF8String OtherName value)
    hostname = instance_obj.hostname
    encoder = asn1.Encoder()
    encoder.start()
    encoder.write(hostname.encode(), nr=asn1.Numbers.UTF8String)
    hostname_asn1 = encoder.output()

    san_params = [
        DNSName(hostname),
        OtherName(ObjectIdentifier(RECEPTOR_OID), hostname_asn1),
    ]

    # If the "hostname" is actually an IPv4 address, add an IPAddress SAN too;
    # otherwise AddressValueError is expected and the entry is simply skipped.
    try:
        san_params.append(IPAddress(ipaddress.IPv4Address(hostname)))
    except ipaddress.AddressValueError:
        pass

    # generate certificate signing request for the receptor
    csr = (
        x509.CertificateSigningRequestBuilder()
        .subject_name(
            x509.Name(
                [
                    x509.NameAttribute(NameOID.COMMON_NAME, hostname),
                ]
            )
        )
        .add_extension(
            x509.SubjectAlternativeName(san_params),
            critical=False,
        )
        .sign(key, hashes.SHA256())
    )

    # sign csr with the receptor ca key from /etc/receptor/tls/ca/receptor-ca.key
    with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
        ca_key = serialization.load_pem_private_key(
            f.read(),
            password=None,
        )

    with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
        ca_cert = x509.load_pem_x509_certificate(f.read())

    # NOTE(review): issuer is taken from ca_cert.issuer rather than ca_cert.subject;
    # these are identical only while the CA certificate is self-signed — confirm.
    cert = (
        x509.CertificateBuilder()
        .subject_name(csr.subject)
        .issuer_name(ca_cert.issuer)
        .public_key(csr.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
        .add_extension(
            # carry the SAN extension (and its criticality) over from the CSR
            csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,
            critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,
        )
        .sign(ca_key, hashes.SHA256())
    )

    # serialize both to PEM bytes for the caller (unencrypted private key)
    key = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )

    cert = cert.public_bytes(
        encoding=serialization.Encoding.PEM,
    )

    return key, cert

View File

@@ -18,6 +18,8 @@ from rest_framework import status
# AWX # AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
from awx.main.models.label import Label
from awx.api.generics import ( from awx.api.generics import (
ListCreateAPIView, ListCreateAPIView,
RetrieveUpdateDestroyAPIView, RetrieveUpdateDestroyAPIView,
@@ -25,8 +27,9 @@ from awx.api.generics import (
SubListAttachDetachAPIView, SubListAttachDetachAPIView,
ResourceAccessList, ResourceAccessList,
CopyAPIView, CopyAPIView,
DeleteLastUnattachLabelMixin,
SubListCreateAttachDetachAPIView,
) )
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.serializers import ( from awx.api.serializers import (
@@ -36,6 +39,7 @@ from awx.api.serializers import (
InstanceGroupSerializer, InstanceGroupSerializer,
InventoryUpdateEventSerializer, InventoryUpdateEventSerializer,
JobTemplateSerializer, JobTemplateSerializer,
LabelSerializer,
) )
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin from awx.api.views.mixin import RelatedJobsPreventDeleteMixin
@@ -153,9 +157,28 @@ class InventoryJobTemplateList(SubListAPIView):
return qs.filter(inventory=parent) return qs.filter(inventory=parent)
class InventoryLabelList(LabelSubListCreateAttachDetachView): class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView):
model = Label
serializer_class = LabelSerializer
parent_model = Inventory parent_model = Inventory
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(InventoryLabelList, self).post(request, *args, **kwargs)
class InventoryCopy(CopyAPIView): class InventoryCopy(CopyAPIView):

View File

@@ -1,71 +0,0 @@
# AWX
from awx.api.generics import SubListCreateAttachDetachAPIView, RetrieveUpdateAPIView, ListCreateAPIView
from awx.main.models import Label
from awx.api.serializers import LabelSerializer
# Django
from django.utils.translation import gettext_lazy as _
# Django REST Framework
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView):
    """
    For related labels lists like /api/v2/inventories/N/labels/

    We want the last instance to be deleted from the database
    when the last disassociate happens.

    Subclasses need to define parent_model
    """

    model = Label
    serializer_class = LabelSerializer
    relationship = 'labels'

    def unattach(self, request, *args, **kwargs):
        # Validate before detaching so nothing is deleted on a bad request.
        (sub_id, res) = super().unattach_validate(request)
        if res:
            return res

        res = super().unattach_by_id(request, sub_id)

        # Garbage-collect the label once nothing references it anymore.
        obj = self.model.objects.get(id=sub_id)

        if obj.is_detached():
            obj.delete()

        return res

    def post(self, request, *args, **kwargs):
        # If a label already exists in the database, attach it instead of erroring out
        # that it already exists
        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
            if existing.exists():
                existing = existing[0]
                request.data['id'] = existing.id
                del request.data['name']
                del request.data['organization']

        # Give a 400 error if we have attached too many labels to this object.
        # Resolve the reverse accessor name for self.relationship on the parent model
        # so one base class serves every parent (inventory_labels, unifiedjobtemplate_labels, ...).
        label_filter = self.parent_model._meta.get_field(self.relationship).remote_field.name
        if Label.objects.filter(**{label_filter: self.kwargs['pk']}).count() > 100:
            # Translate the placeholder template first, then interpolate. An f-string
            # inside _() interpolates before the catalog lookup, so the message could
            # never match a translation entry.
            return Response(
                dict(msg=_('Maximum number of labels for {} reached.').format(self.parent_model._meta.verbose_name_raw)),
                status=HTTP_400_BAD_REQUEST,
            )

        return super().post(request, *args, **kwargs)
class LabelDetail(RetrieveUpdateAPIView):
    # Retrieve or update a single Label; the base class offers no delete method,
    # so labels are removed only via disassociation from their parents.

    model = Label
    serializer_class = LabelSerializer
class LabelList(ListCreateAPIView):
    # Top-level /labels/ endpoint: list all labels or create a new one.

    name = _("Labels")
    model = Label
    serializer_class = LabelSerializer

View File

@@ -12,7 +12,7 @@ from django.conf import settings
from django.db.models import Q, Prefetch from django.db.models import Q, Prefetch
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist from django.core.exceptions import ObjectDoesNotExist
# Django REST Framework # Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied from rest_framework.exceptions import ParseError, PermissionDenied
@@ -281,23 +281,13 @@ class BaseAccess(object):
""" """
return True return True
def assure_relationship_exists(self, obj, relationship):
if '.' in relationship:
return # not attempting validation for complex relationships now
try:
obj._meta.get_field(relationship)
except FieldDoesNotExist:
raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}')
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
self.assure_relationship_exists(obj, relationship)
if skip_sub_obj_read_check: if skip_sub_obj_read_check:
return self.can_change(obj, None) return self.can_change(obj, None)
else: else:
return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj)) return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj))
def can_unattach(self, obj, sub_obj, relationship, data=None): def can_unattach(self, obj, sub_obj, relationship, data=None):
self.assure_relationship_exists(obj, relationship)
return self.can_change(obj, data) return self.can_change(obj, data)
def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False): def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False):
@@ -338,8 +328,6 @@ class BaseAccess(object):
role = getattr(resource, role_field, None) role = getattr(resource, role_field, None)
if role is None: if role is None:
# Handle special case where resource does not have direct roles # Handle special case where resource does not have direct roles
if role_field == 'read_role':
return self.user.can_access(type(resource), 'read', resource)
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field] access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
return self.user.can_access(type(resource), access_method_type, resource, None) return self.user.can_access(type(resource), access_method_type, resource, None)
return self.user in role return self.user in role
@@ -511,21 +499,6 @@ class BaseAccess(object):
return False return False
class UnifiedCredentialsMixin(BaseAccess):
"""
The credentials many-to-many is a standard relationship for JT, jobs, and others
Permission to attach is always use permission, and permission to unattach is admin to the parent object
"""
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == 'credentials':
if not isinstance(sub_obj, Credential):
raise RuntimeError(f'Can only attach credentials to credentials relationship, got {type(sub_obj)}')
return self.can_change(obj, None) and (self.user in sub_obj.use_role)
return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
class NotificationAttachMixin(BaseAccess): class NotificationAttachMixin(BaseAccess):
"""For models that can have notifications attached """For models that can have notifications attached
@@ -579,8 +552,7 @@ class InstanceAccess(BaseAccess):
return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, relationship, data=data) return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, relationship, data=data)
def can_add(self, data): def can_add(self, data):
return False
return self.user.is_superuser
def can_change(self, obj, data): def can_change(self, obj, data):
return False return False
@@ -993,6 +965,9 @@ class HostAccess(BaseAccess):
if data and 'name' in data: if data and 'name' in data:
self.check_license(add_host_name=data['name']) self.check_license(add_host_name=data['name'])
# Check the per-org limit
self.check_org_host_limit({'inventory': obj.inventory}, add_host_name=data['name'])
# Checks for admin or change permission on inventory, controls whether # Checks for admin or change permission on inventory, controls whether
# the user can edit variable data. # the user can edit variable data.
return obj and self.user in obj.inventory.admin_role return obj and self.user in obj.inventory.admin_role
@@ -1056,7 +1031,7 @@ class GroupAccess(BaseAccess):
return bool(obj and self.user in obj.inventory.admin_role) return bool(obj and self.user in obj.inventory.admin_role)
class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess): class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
""" """
I can see inventory sources whenever I can see their inventory. I can see inventory sources whenever I can see their inventory.
I can change inventory sources whenever I can change their inventory. I can change inventory sources whenever I can change their inventory.
@@ -1100,6 +1075,18 @@ class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, Ba
return self.user in obj.inventory.update_role return self.user in obj.inventory.update_role
return False return False
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return obj and obj.inventory and self.user in obj.inventory.admin_role and self.user in sub_obj.use_role
return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return obj and obj.inventory and self.user in obj.inventory.admin_role
return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
class InventoryUpdateAccess(BaseAccess): class InventoryUpdateAccess(BaseAccess):
""" """
@@ -1498,7 +1485,7 @@ class ProjectUpdateAccess(BaseAccess):
return obj and self.user in obj.project.admin_role return obj and self.user in obj.project.admin_role
class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess): class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
""" """
I can see job templates when: I can see job templates when:
- I have read role for the job template. - I have read role for the job template.
@@ -1562,7 +1549,8 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
if self.user not in inventory.use_role: if self.user not in inventory.use_role:
return False return False
if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'): ee = get_value(ExecutionEnvironment, 'execution_environment')
if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False return False
project = get_value(Project, 'project') project = get_value(Project, 'project')
@@ -1612,7 +1600,9 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
if self.changes_are_non_sensitive(obj, data): if self.changes_are_non_sensitive(obj, data):
return True return True
if not self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'): if data.get('execution_environment'):
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False return False
for required_field, cls in (('inventory', Inventory), ('project', Project)): for required_field, cls in (('inventory', Inventory), ('project', Project)):
@@ -1677,13 +1667,17 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
if not obj.organization: if not obj.organization:
return False return False
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return self.user in obj.admin_role and self.user in sub_obj.use_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser @check_superuser
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs): def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == "instance_groups": if relationship == "instance_groups":
return self.can_attach(obj, sub_obj, relationship, *args, **kwargs) return self.can_attach(obj, sub_obj, relationship, *args, **kwargs)
return super(JobTemplateAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs) if relationship == 'credentials' and isinstance(sub_obj, Credential):
return self.user in obj.admin_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
class JobAccess(BaseAccess): class JobAccess(BaseAccess):
@@ -1830,7 +1824,7 @@ class SystemJobAccess(BaseAccess):
return False # no relaunching of system jobs return False # no relaunching of system jobs
class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess): class JobLaunchConfigAccess(BaseAccess):
""" """
Launch configs must have permissions checked for Launch configs must have permissions checked for
- relaunching - relaunching
@@ -1838,69 +1832,63 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
In order to create a new object with a copy of this launch config, I need: In order to create a new object with a copy of this launch config, I need:
- use access to related inventory (if present) - use access to related inventory (if present)
- read access to Execution Environment (if present), unless the specified ee is already in the template
- use role to many-related credentials (if any present) - use role to many-related credentials (if any present)
- read access to many-related labels (if any present), unless the specified label is already in the template
- read access to many-related instance groups (if any present), unless the specified instance group is already in the template
""" """
model = JobLaunchConfig model = JobLaunchConfig
select_related = 'job' select_related = 'job'
prefetch_related = ('credentials', 'inventory') prefetch_related = ('credentials', 'inventory')
M2M_CHECKS = {'credentials': Credential, 'labels': Label, 'instance_groups': InstanceGroup} def _unusable_creds_exist(self, qs):
return qs.exclude(pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')).exists()
def _related_filtered_queryset(self, cls): def has_credentials_access(self, obj):
if cls is Label: # user has access if no related credentials exist that the user lacks use role for
return LabelAccess(self.user).filtered_queryset() return not self._unusable_creds_exist(obj.credentials)
elif cls is InstanceGroup:
return InstanceGroupAccess(self.user).filtered_queryset()
else:
return cls._accessible_pk_qs(cls, self.user, 'use_role')
def has_obj_m2m_access(self, obj):
for relationship, cls in self.M2M_CHECKS.items():
if getattr(obj, relationship).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
return False
return True
@check_superuser @check_superuser
def can_add(self, data, template=None): def can_add(self, data, template=None):
# This is a special case, we don't check related many-to-many elsewhere # This is a special case, we don't check related many-to-many elsewhere
# launch RBAC checks use this # launch RBAC checks use this
if 'credentials' in data and data['credentials'] or 'reference_obj' in data:
if 'reference_obj' in data: if 'reference_obj' in data:
if not self.has_obj_m2m_access(data['reference_obj']): prompted_cred_qs = data['reference_obj'].credentials.all()
return False
else: else:
for relationship, cls in self.M2M_CHECKS.items():
if relationship in data and data[relationship]:
# If given model objects, only use the primary key from them # If given model objects, only use the primary key from them
sub_obj_pks = [sub_obj.pk for sub_obj in data[relationship]] cred_pks = [cred.pk for cred in data['credentials']]
if template: if template:
for sub_obj in getattr(template, relationship).all(): for cred in template.credentials.all():
if sub_obj.pk in sub_obj_pks: if cred.pk in cred_pks:
sub_obj_pks.remove(sub_obj.pk) cred_pks.remove(cred.pk)
if cls.objects.filter(pk__in=sub_obj_pks).exclude(pk__in=self._related_filtered_queryset(cls)).exists(): prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
if self._unusable_creds_exist(prompted_cred_qs):
return False return False
return self.check_related('inventory', Inventory, data, role_field='use_role') and self.check_related( return self.check_related('inventory', Inventory, data, role_field='use_role')
'execution_environment', ExecutionEnvironment, data, role_field='read_role'
)
@check_superuser @check_superuser
def can_use(self, obj): def can_use(self, obj):
return ( return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj)
self.has_obj_m2m_access(obj)
and self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True)
and self.check_related('execution_environment', ExecutionEnvironment, {}, obj=obj, role_field='read_role')
)
def can_change(self, obj, data): def can_change(self, obj, data):
return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') and self.check_related( return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role')
'execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'
) def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if isinstance(sub_obj, Credential) and relationship == 'credentials':
return self.user in sub_obj.use_role
else:
raise NotImplementedError('Only credentials can be attached to launch configurations.')
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if isinstance(sub_obj, Credential) and relationship == 'credentials':
if skip_sub_obj_read_check:
return True
else:
return self.user in sub_obj.read_role
else:
raise NotImplementedError('Only credentials can be attached to launch configurations.')
class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess): class WorkflowJobTemplateNodeAccess(BaseAccess):
""" """
I can see/use a WorkflowJobTemplateNode if I have read permission I can see/use a WorkflowJobTemplateNode if I have read permission
to associated Workflow Job Template to associated Workflow Job Template
@@ -1923,7 +1911,7 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
""" """
model = WorkflowJobTemplateNode model = WorkflowJobTemplateNode
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'workflow_job_template') prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'credentials', 'workflow_job_template')
def filtered_queryset(self): def filtered_queryset(self):
return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role')) return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role'))
@@ -1935,8 +1923,7 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
return ( return (
self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True) self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True)
and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role')
and self.check_related('inventory', Inventory, data, role_field='use_role') and JobLaunchConfigAccess(self.user).can_add(data)
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
) )
def wfjt_admin(self, obj): def wfjt_admin(self, obj):
@@ -1945,14 +1932,17 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
else: else:
return self.user in obj.workflow_job_template.admin_role return self.user in obj.workflow_job_template.admin_role
def ujt_execute(self, obj, data=None): def ujt_execute(self, obj):
if not obj.unified_job_template: if not obj.unified_job_template:
return True return True
return self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True) return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj, role_field='execute_role', mandatory=True)
def can_change(self, obj, data): def can_change(self, obj, data):
if not data:
return True
# should not be able to edit the prompts if lacking access to UJT or WFJT # should not be able to edit the prompts if lacking access to UJT or WFJT
return self.ujt_execute(obj, data=data) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data) return self.ujt_execute(obj) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)
def can_delete(self, obj): def can_delete(self, obj):
return self.wfjt_admin(obj) return self.wfjt_admin(obj)
@@ -1965,14 +1955,29 @@ class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
return True return True
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): if not self.wfjt_admin(obj):
return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj) return False
return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) if relationship == 'credentials':
# Need permission to related template to attach a credential
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
def can_unattach(self, obj, sub_obj, relationship, data=None): def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): if not self.wfjt_admin(obj):
return self.wfjt_admin(obj) return False
return super().can_unattach(obj, sub_obj, relationship, data=None) if relationship == 'credentials':
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
class WorkflowJobNodeAccess(BaseAccess): class WorkflowJobNodeAccess(BaseAccess):
@@ -2047,10 +2052,13 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if not data: # So the browseable API will work if not data: # So the browseable API will work
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists() return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
return bool( if data.get('execution_environment'):
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
and self.check_related('inventory', Inventory, data, role_field='use_role') if not self.user.can_access(ExecutionEnvironment, 'read', ee):
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role') return False
return self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and self.check_related(
'inventory', Inventory, data, role_field='use_role'
) )
def can_copy(self, obj): def can_copy(self, obj):
@@ -2096,10 +2104,14 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if self.user.is_superuser: if self.user.is_superuser:
return True return True
if data and data.get('execution_environment'):
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False
return ( return (
self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj)
and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj) and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj)
and self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role')
and self.user in obj.admin_role and self.user in obj.admin_role
) )
@@ -2506,7 +2518,7 @@ class UnifiedJobAccess(BaseAccess):
return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True) return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True)
class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess): class ScheduleAccess(BaseAccess):
""" """
I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access
""" """
@@ -2547,6 +2559,12 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
def can_delete(self, obj): def can_delete(self, obj):
return self.can_change(obj, {}) return self.can_change(obj, {})
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
class NotificationTemplateAccess(BaseAccess): class NotificationTemplateAccess(BaseAccess):
""" """

View File

@@ -3,7 +3,6 @@ from prometheus_client import CollectorRegistry, Gauge, Info, generate_latest
from awx.conf.license import get_license from awx.conf.license import get_license
from awx.main.utils import get_awx_version from awx.main.utils import get_awx_version
from awx.main.models import UnifiedJob
from awx.main.analytics.collectors import ( from awx.main.analytics.collectors import (
counts, counts,
instance_info, instance_info,
@@ -170,9 +169,8 @@ def metrics():
all_job_data = job_counts(None) all_job_data = job_counts(None)
statuses = all_job_data.get('status', {}) statuses = all_job_data.get('status', {})
states = set(dict(UnifiedJob.STATUS_CHOICES).keys()) - set(['new']) for status, value in statuses.items():
for state in states: STATUS.labels(status=status).set(value)
STATUS.labels(status=state).set(statuses.get(state, 0))
RUNNING_JOBS.set(current_counts['running_jobs']) RUNNING_JOBS.set(current_counts['running_jobs'])
PENDING_JOBS.set(current_counts['pending_jobs']) PENDING_JOBS.set(current_counts['pending_jobs'])

View File

@@ -166,7 +166,11 @@ class Metrics:
elif settings.IS_TESTING(): elif settings.IS_TESTING():
self.instance_name = "awx_testing" self.instance_name = "awx_testing"
else: else:
self.instance_name = Instance.objects.my_hostname() try:
self.instance_name = Instance.objects.me().hostname
except Exception as e:
self.instance_name = settings.CLUSTER_HOST_ID
logger.info(f'Instance {self.instance_name} seems to be unregistered, error: {e}')
# metric name, help_text # metric name, help_text
METRICSLIST = [ METRICSLIST = [

View File

@@ -3,7 +3,6 @@ import uuid
import json import json
from django.conf import settings from django.conf import settings
from django.db import connection
import redis import redis
from awx.main.dispatch import get_local_queuename from awx.main.dispatch import get_local_queuename
@@ -38,27 +37,18 @@ class Control(object):
def running(self, *args, **kwargs): def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs) return self.control_with_reply('running', *args, **kwargs)
def cancel(self, task_ids, *args, **kwargs):
return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
@classmethod @classmethod
def generate_reply_queue_name(cls): def generate_reply_queue_name(cls):
return f"reply_to_{str(uuid.uuid4()).replace('-','_')}" return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"
def control_with_reply(self, command, timeout=5, extra_data=None): def control_with_reply(self, command, timeout=5):
logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename)) logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename))
reply_queue = Control.generate_reply_queue_name() reply_queue = Control.generate_reply_queue_name()
self.result = None self.result = None
if not connection.get_autocommit(): with pg_bus_conn(new_connection=True) as conn:
raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')
with pg_bus_conn() as conn:
conn.listen(reply_queue) conn.listen(reply_queue)
send_data = {'control': command, 'reply_to': reply_queue} conn.notify(self.queuename, json.dumps({'control': command, 'reply_to': reply_queue}))
if extra_data:
send_data.update(extra_data)
conn.notify(self.queuename, json.dumps(send_data))
for reply in conn.events(select_timeout=timeout, yield_timeouts=True): for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
if reply is None: if reply is None:

View File

@@ -387,8 +387,6 @@ class AutoscalePool(WorkerPool):
reaper.reap_job(j, 'failed') reaper.reap_job(j, 'failed')
except Exception: except Exception:
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid'])) logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
else:
logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
orphaned.extend(w.orphaned_tasks) orphaned.extend(w.orphaned_tasks)
self.workers.remove(w) self.workers.remove(w)
elif w.idle and len(self.workers) > self.min_workers: elif w.idle and len(self.workers) > self.min_workers:
@@ -452,6 +450,9 @@ class AutoscalePool(WorkerPool):
try: try:
if isinstance(body, dict) and body.get('bind_kwargs'): if isinstance(body, dict) and body.get('bind_kwargs'):
self.add_bind_kwargs(body) self.add_bind_kwargs(body)
# when the cluster heartbeat occurs, clean up internally
if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
self.cleanup()
if self.should_grow: if self.should_grow:
self.up() self.up()
# we don't care about "preferred queue" round robin distribution, just # we don't care about "preferred queue" round robin distribution, just

View File

@@ -16,7 +16,12 @@ def startup_reaping():
If this particular instance is starting, then we know that any running jobs are invalid If this particular instance is starting, then we know that any running jobs are invalid
so we will reap those jobs as a special action here so we will reap those jobs as a special action here
""" """
jobs = UnifiedJob.objects.filter(status='running', controller_node=Instance.objects.my_hostname()) try:
me = Instance.objects.me()
except RuntimeError as e:
logger.warning(f'Local instance is not registered, not running startup reaper: {e}')
return
jobs = UnifiedJob.objects.filter(status='running', controller_node=me.hostname)
job_ids = [] job_ids = []
for j in jobs: for j in jobs:
job_ids.append(j.id) job_ids.append(j.id)
@@ -57,13 +62,16 @@ def reap_waiting(instance=None, status='failed', job_explanation=None, grace_per
if grace_period is None: if grace_period is None:
grace_period = settings.JOB_WAITING_GRACE_PERIOD + settings.TASK_MANAGER_TIMEOUT grace_period = settings.JOB_WAITING_GRACE_PERIOD + settings.TASK_MANAGER_TIMEOUT
if instance is None: me = instance
hostname = Instance.objects.my_hostname() if me is None:
else: try:
hostname = instance.hostname me = Instance.objects.me()
except RuntimeError as e:
logger.warning(f'Local instance is not registered, not running reaper: {e}')
return
if ref_time is None: if ref_time is None:
ref_time = tz_now() ref_time = tz_now()
jobs = UnifiedJob.objects.filter(status='waiting', modified__lte=ref_time - timedelta(seconds=grace_period), controller_node=hostname) jobs = UnifiedJob.objects.filter(status='waiting', modified__lte=ref_time - timedelta(seconds=grace_period), controller_node=me.hostname)
if excluded_uuids: if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids) jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
for j in jobs: for j in jobs:
@@ -74,13 +82,16 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
""" """
Reap all jobs in running for this instance. Reap all jobs in running for this instance.
""" """
if instance is None: me = instance
hostname = Instance.objects.my_hostname() if me is None:
else: try:
hostname = instance.hostname me = Instance.objects.me()
except RuntimeError as e:
logger.warning(f'Local instance is not registered, not running reaper: {e}')
return
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter( jobs = UnifiedJob.objects.filter(
Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id) Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
) )
if excluded_uuids: if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids) jobs = jobs.exclude(celery_task_id__in=excluded_uuids)

View File

@@ -63,7 +63,7 @@ class AWXConsumerBase(object):
def control(self, body): def control(self, body):
logger.warning(f'Received control signal:\n{body}') logger.warning(f'Received control signal:\n{body}')
control = body.get('control') control = body.get('control')
if control in ('status', 'running', 'cancel'): if control in ('status', 'running'):
reply_queue = body['reply_to'] reply_queue = body['reply_to']
if control == 'status': if control == 'status':
msg = '\n'.join([self.listening_on, self.pool.debug()]) msg = '\n'.join([self.listening_on, self.pool.debug()])
@@ -72,17 +72,6 @@ class AWXConsumerBase(object):
for worker in self.pool.workers: for worker in self.pool.workers:
worker.calculate_managed_tasks() worker.calculate_managed_tasks()
msg.extend(worker.managed_tasks.keys()) msg.extend(worker.managed_tasks.keys())
elif control == 'cancel':
msg = []
task_ids = set(body['task_ids'])
for worker in self.pool.workers:
task = worker.current_task
if task and task['uuid'] in task_ids:
logger.warn(f'Sending SIGTERM to task id={task["uuid"]}, task={task.get("task")}, args={task.get("args")}')
os.kill(worker.pid, signal.SIGTERM)
msg.append(task['uuid'])
if task_ids and not msg:
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
with pg_bus_conn() as conn: with pg_bus_conn() as conn:
conn.notify(reply_queue, json.dumps(msg)) conn.notify(reply_queue, json.dumps(msg))
@@ -114,6 +103,7 @@ class AWXConsumerBase(object):
queue = 0 queue = 0
self.pool.write(queue, body) self.pool.write(queue, body)
self.total_messages += 1 self.total_messages += 1
self.record_statistics()
@log_excess_runtime(logger) @log_excess_runtime(logger)
def record_statistics(self): def record_statistics(self):
@@ -155,16 +145,6 @@ class AWXConsumerPG(AWXConsumerBase):
# if no successful loops have ran since startup, then we should fail right away # if no successful loops have ran since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup self.pg_is_down = True # set so that we fail if we get database errors on startup
self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period
self.last_cleanup = time.time()
def run_periodic_tasks(self):
self.record_statistics() # maintains time buffer in method
if time.time() - self.last_cleanup > 60: # same as cluster_node_heartbeat
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
self.pool.cleanup()
self.last_cleanup = time.time()
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
super(AWXConsumerPG, self).run(*args, **kwargs) super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -180,10 +160,8 @@ class AWXConsumerPG(AWXConsumerBase):
if init is False: if init is False:
self.worker.on_start() self.worker.on_start()
init = True init = True
for e in conn.events(yield_timeouts=True): for e in conn.events():
if e is not None:
self.process_task(json.loads(e.payload)) self.process_task(json.loads(e.payload))
self.run_periodic_tasks()
self.pg_is_down = False self.pg_is_down = False
if self.should_stop: if self.should_stop:
return return
@@ -240,8 +218,6 @@ class BaseWorker(object):
# so we can establish a new connection # so we can establish a new connection
conn.close_if_unusable_or_obsolete() conn.close_if_unusable_or_obsolete()
self.perform_work(body, *args) self.perform_work(body, *args)
except Exception:
logger.exception(f'Unhandled exception in perform_work in worker pid={os.getpid()}')
finally: finally:
if 'uuid' in body: if 'uuid' in body:
uuid = body['uuid'] uuid = body['uuid']

View File

@@ -54,7 +54,7 @@ class Command(BaseCommand):
capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else '' capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
version = f" version={x.version or '?'}" if x.node_type != 'hop' else '' version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else '' heartbeat = f' heartbeat="{x.modified:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m') print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
print() print()

View File

@@ -27,9 +27,7 @@ class Command(BaseCommand):
) )
def handle(self, **options): def handle(self, **options):
# provides a mapping of hostname to Instance objects
nodes = Instance.objects.in_bulk(field_name='hostname') nodes = Instance.objects.in_bulk(field_name='hostname')
if options['source'] not in nodes: if options['source'] not in nodes:
raise CommandError(f"Host {options['source']} is not a registered instance.") raise CommandError(f"Host {options['source']} is not a registered instance.")
if not (options['peers'] or options['disconnect'] or options['exact'] is not None): if not (options['peers'] or options['disconnect'] or options['exact'] is not None):
@@ -59,9 +57,7 @@ class Command(BaseCommand):
results = 0 results = 0
for target in options['peers']: for target in options['peers']:
_, created = InstanceLink.objects.update_or_create( _, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
)
if created: if created:
results += 1 results += 1
@@ -84,9 +80,7 @@ class Command(BaseCommand):
links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True)) links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete() removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
for target in peers - links: for target in peers - links:
_, created = InstanceLink.objects.update_or_create( _, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
)
if created: if created:
additions += 1 additions += 1

View File

@@ -1,7 +1,6 @@
# Copyright (c) 2015 Ansible, Inc. # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
import logging import logging
import yaml
from django.conf import settings from django.conf import settings
from django.core.cache import cache as django_cache from django.core.cache import cache as django_cache
@@ -31,16 +30,7 @@ class Command(BaseCommand):
'--reload', '--reload',
dest='reload', dest='reload',
action='store_true', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes; running jobs will run to completion first'), help=('cause the dispatcher to recycle all of its worker processes;' 'running jobs will run to completion first'),
)
parser.add_argument(
'--cancel',
dest='cancel',
help=(
'Cancel a particular task id. Takes either a single id string, or a JSON list of multiple ids. '
'Can take in output from the --running argument as input to cancel all tasks. '
'Only running tasks can be canceled, queued tasks must be started before they can be canceled.'
),
) )
def handle(self, *arg, **options): def handle(self, *arg, **options):
@@ -52,16 +42,6 @@ class Command(BaseCommand):
return return
if options.get('reload'): if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'}) return Control('dispatcher').control({'control': 'reload'})
if options.get('cancel'):
cancel_str = options.get('cancel')
try:
cancel_data = yaml.safe_load(cancel_str)
except Exception:
cancel_data = [cancel_str]
if not isinstance(cancel_data, list):
cancel_data = [cancel_str]
print(Control('dispatcher').cancel(cancel_data))
return
# It's important to close these because we're _about_ to fork, and we # It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets # don't want the forked processes to inherit the open sockets

View File

@@ -53,7 +53,7 @@ class Command(BaseCommand):
return lines return lines
@classmethod @classmethod
def get_connection_status(cls, hostnames, data): def get_connection_status(cls, me, hostnames, data):
host_stats = [('hostname', 'state', 'start time', 'duration (sec)')] host_stats = [('hostname', 'state', 'start time', 'duration (sec)')]
for h in hostnames: for h in hostnames:
connection_color = '91' # red connection_color = '91' # red
@@ -78,7 +78,7 @@ class Command(BaseCommand):
return host_stats return host_stats
@classmethod @classmethod
def get_connection_stats(cls, hostnames, data): def get_connection_stats(cls, me, hostnames, data):
host_stats = [('hostname', 'total', 'per minute')] host_stats = [('hostname', 'total', 'per minute')]
for h in hostnames: for h in hostnames:
h_safe = safe_name(h) h_safe = safe_name(h)
@@ -119,8 +119,8 @@ class Command(BaseCommand):
return return
try: try:
my_hostname = Instance.objects.my_hostname() me = Instance.objects.me()
logger.info('Active instance with hostname {} is registered.'.format(my_hostname)) logger.info('Active instance with hostname {} is registered.'.format(me.hostname))
except RuntimeError as e: except RuntimeError as e:
# the CLUSTER_HOST_ID in the task, and web instance must match and # the CLUSTER_HOST_ID in the task, and web instance must match and
# ensure network connectivity between the task and web instance # ensure network connectivity between the task and web instance
@@ -145,19 +145,19 @@ class Command(BaseCommand):
else: else:
data[family.name] = family.samples[0].value data[family.name] = family.samples[0].value
my_hostname = Instance.objects.my_hostname() me = Instance.objects.me()
hostnames = [i.hostname for i in Instance.objects.exclude(hostname=my_hostname)] hostnames = [i.hostname for i in Instance.objects.exclude(hostname=me.hostname)]
host_stats = Command.get_connection_status(hostnames, data) host_stats = Command.get_connection_status(me, hostnames, data)
lines = Command._format_lines(host_stats) lines = Command._format_lines(host_stats)
print(f'Broadcast websocket connection status from "{my_hostname}" to:') print(f'Broadcast websocket connection status from "{me.hostname}" to:')
print('\n'.join(lines)) print('\n'.join(lines))
host_stats = Command.get_connection_stats(hostnames, data) host_stats = Command.get_connection_stats(me, hostnames, data)
lines = Command._format_lines(host_stats) lines = Command._format_lines(host_stats)
print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:') print(f'\nBroadcast websocket connection stats from "{me.hostname}" to:')
print('\n'.join(lines)) print('\n'.join(lines))
return return

View File

@@ -99,12 +99,9 @@ class InstanceManager(models.Manager):
instance or role. instance or role.
""" """
def my_hostname(self):
return settings.CLUSTER_HOST_ID
def me(self): def me(self):
"""Return the currently active instance.""" """Return the currently active instance."""
node = self.filter(hostname=self.my_hostname()) node = self.filter(hostname=settings.CLUSTER_HOST_ID)
if node.exists(): if node.exists():
return node[0] return node[0]
raise RuntimeError("No instance found with the current cluster host id") raise RuntimeError("No instance found with the current cluster host id")
@@ -132,13 +129,10 @@ class InstanceManager(models.Manager):
# if instance was not retrieved by uuid and hostname was, use the hostname # if instance was not retrieved by uuid and hostname was, use the hostname
instance = self.filter(hostname=hostname) instance = self.filter(hostname=hostname)
from awx.main.models import Instance
# Return existing instance # Return existing instance
if instance.exists(): if instance.exists():
instance = instance.first() # in the unusual occasion that there is more than one, only get one instance = instance.first() # in the unusual occasion that there is more than one, only get one
instance.node_state = Instance.States.INSTALLED # Wait for it to show up on the mesh update_fields = []
update_fields = ['node_state']
# if instance was retrieved by uuid and hostname has changed, update hostname # if instance was retrieved by uuid and hostname has changed, update hostname
if instance.hostname != hostname: if instance.hostname != hostname:
logger.warning("passed in hostname {0} is different from the original hostname {1}, updating to {0}".format(hostname, instance.hostname)) logger.warning("passed in hostname {0} is different from the original hostname {1}, updating to {0}".format(hostname, instance.hostname))
@@ -147,7 +141,6 @@ class InstanceManager(models.Manager):
# if any other fields are to be updated # if any other fields are to be updated
if instance.ip_address != ip_address: if instance.ip_address != ip_address:
instance.ip_address = ip_address instance.ip_address = ip_address
update_fields.append('ip_address')
if instance.node_type != node_type: if instance.node_type != node_type:
instance.node_type = node_type instance.node_type = node_type
update_fields.append('node_type') update_fields.append('node_type')
@@ -158,12 +151,12 @@ class InstanceManager(models.Manager):
return (False, instance) return (False, instance)
# Create new instance, and fill in default values # Create new instance, and fill in default values
create_defaults = {'node_state': Instance.States.INSTALLED, 'capacity': 0} create_defaults = dict(capacity=0)
if defaults is not None: if defaults is not None:
create_defaults.update(defaults) create_defaults.update(defaults)
uuid_option = {} uuid_option = {}
if uuid is not None: if uuid is not None:
uuid_option = {'uuid': uuid} uuid_option = dict(uuid=uuid)
if node_type == 'execution' and 'version' not in create_defaults: if node_type == 'execution' and 'version' not in create_defaults:
create_defaults['version'] = RECEPTOR_PENDING create_defaults['version'] = RECEPTOR_PENDING
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option) instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)

View File

@@ -1,57 +0,0 @@
# Generated by Django 3.2.13 on 2022-08-24 14:02

from django.db import migrations, models
import django.db.models.deletion

from awx.main.models import CredentialType
from awx.main.utils.common import set_current_apps


def setup_tower_managed_defaults(apps, schema_editor):
    # Re-run registration of the managed (built-in) credential types using
    # the historical app registry, so that default rows for the new
    # 'cryptography' kind introduced below get created.
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):
    # Adds Project.signature_validation_credential and a new 'cryptography'
    # credential-type kind, used for validating project content signatures.

    dependencies = [
        ('main', '0166_alter_jobevent_host'),
    ]

    operations = [
        # Optional credential a project can reference to validate its files.
        migrations.AddField(
            model_name='project',
            name='signature_validation_credential',
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='projects_signature_validation',
                to='main.credential',
                help_text='An optional credential used for validating files in the project against unexpected changes.',
            ),
        ),
        # Extend the CredentialType.kind choices with 'cryptography'.
        migrations.AlterField(
            model_name='credentialtype',
            name='kind',
            field=models.CharField(
                choices=[
                    ('ssh', 'Machine'),
                    ('vault', 'Vault'),
                    ('net', 'Network'),
                    ('scm', 'Source Control'),
                    ('cloud', 'Cloud'),
                    ('registry', 'Container Registry'),
                    ('token', 'Personal Access Token'),
                    ('insights', 'Insights'),
                    ('external', 'External'),
                    ('kubernetes', 'Kubernetes'),
                    ('galaxy', 'Galaxy/Automation Hub'),
                    ('cryptography', 'Cryptography'),
                ],
                max_length=32,
            ),
        ),
        # Data step: seed defaults for the (now extended) managed types.
        migrations.RunPython(setup_tower_managed_defaults),
    ]

View File

@@ -1,25 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-08 16:03

from django.db import migrations, models


class Migration(migrations.Migration):
    # Records on InventoryUpdate which project SCM revision an SCM-sourced
    # inventory update used (read-only bookkeeping field).

    dependencies = [
        ('main', '0167_project_signature_validation_credential'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventoryupdate',
            name='scm_revision',
            field=models.CharField(
                blank=True,
                default='',
                editable=False,
                help_text='The SCM Revision from the Project used for this inventory update. Only applicable to inventories source from scm',
                max_length=1024,
                verbose_name='SCM Revision',
            ),
        ),
    ]

View File

@@ -1,225 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-15 14:07

import awx.main.fields
import awx.main.utils.polymorphic
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # "Prompt for everything on launch": adds ask_*_on_launch flags to job
    # and workflow templates, execution_environment / labels prompts to
    # launch configs, schedules and workflow nodes, and ordered M2M
    # through-tables backing the new instance_groups relations.

    dependencies = [
        ('main', '0168_inventoryupdate_scm_revision'),
    ]

    operations = [
        # --- execution_environment / labels prompts on JobLaunchConfig ---
        migrations.AddField(
            model_name='joblaunchconfig',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='joblaunchconfig_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='joblaunchconfig',
            name='labels',
            field=models.ManyToManyField(related_name='joblaunchconfig_labels', to='main.Label'),
        ),
        # --- new ask_*_on_launch flags on JobTemplate ---
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_execution_environment_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_forks_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_instance_groups_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_job_slice_count_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_labels_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_timeout_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        # --- execution_environment / labels prompts on Schedule ---
        migrations.AddField(
            model_name='schedule',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='schedule_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='schedule',
            name='labels',
            field=models.ManyToManyField(related_name='schedule_labels', to='main.Label'),
        ),
        # --- execution_environment / labels prompts on WorkflowJobNode ---
        migrations.AddField(
            model_name='workflowjobnode',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='workflowjobnode_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='labels',
            field=models.ManyToManyField(related_name='workflowjobnode_labels', to='main.Label'),
        ),
        # --- new ask_*_on_launch flags on WorkflowJobTemplate ---
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_labels_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_skip_tags_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_tags_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        # --- execution_environment / labels prompts on WorkflowJobTemplateNode ---
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='workflowjobtemplatenode_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='labels',
            field=models.ManyToManyField(related_name='workflowjobtemplatenode_labels', to='main.Label'),
        ),
        # --- through-tables carrying an explicit ordering ('position') for
        # --- the OrderedManyToManyField instance_groups relations below ---
        migrations.CreateModel(
            name='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('workflowjobtemplatenode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobtemplatenode')),
            ],
        ),
        migrations.CreateModel(
            name='WorkflowJobNodeBaseInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobnode')),
            ],
        ),
        migrations.CreateModel(
            name='WorkflowJobInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                # NOTE(review): column is named 'workflowjobnode' but targets
                # 'main.workflowjob' — looks intentional per the model
                # definition elsewhere, but worth confirming.
                ('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjob')),
            ],
        ),
        migrations.CreateModel(
            name='ScheduleInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.schedule')),
            ],
        ),
        migrations.CreateModel(
            name='JobLaunchConfigInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('joblaunchconfig', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.joblaunchconfig')),
            ],
        ),
        # --- ordered instance_groups relations routed through the tables above ---
        migrations.AddField(
            model_name='joblaunchconfig',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True, editable=False, related_name='joblaunchconfigs', through='main.JobLaunchConfigInstanceGroupMembership', to='main.InstanceGroup'
            ),
        ),
        migrations.AddField(
            model_name='schedule',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True, editable=False, related_name='schedule_instance_groups', through='main.ScheduleInstanceGroupMembership', to='main.InstanceGroup'
            ),
        ),
        migrations.AddField(
            model_name='workflowjob',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                editable=False,
                related_name='workflow_job_instance_groups',
                through='main.WorkflowJobInstanceGroupMembership',
                to='main.InstanceGroup',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                editable=False,
                related_name='workflow_job_node_instance_groups',
                through='main.WorkflowJobNodeBaseInstanceGroupMembership',
                to='main.InstanceGroup',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                editable=False,
                related_name='workflow_job_template_node_instance_groups',
                through='main.WorkflowJobTemplateNodeBaseInstanceGroupMembership',
                to='main.InstanceGroup',
            ),
        ),
    ]

View File

@@ -1,79 +0,0 @@
# Generated by Django 3.2.13 on 2022-08-02 17:53

import django.core.validators
from django.db import migrations, models


def forwards(apps, schema_editor):
    # Backfill node_state for pre-existing Instances: healthy nodes become
    # 'ready', nodes with recorded errors become 'unavailable'.
    # All existing InstanceLink objects need to be in the state
    # 'Established', which is the default, so nothing needs to be done
    # for that.
    Instance = apps.get_model('main', 'Instance')
    for instance in Instance.objects.all():
        instance.node_state = 'ready' if not instance.errors else 'unavailable'
        instance.save(update_fields=['node_state'])


class Migration(migrations.Migration):
    # Adds mesh life-cycle state to Instance/InstanceLink plus the Receptor
    # listener_port, and attaches help_text to the node_type choices.

    dependencies = [
        ('main', '0169_jt_prompt_everything_on_launch'),
    ]

    operations = [
        migrations.AddField(
            model_name='instance',
            name='listener_port',
            field=models.PositiveIntegerField(
                blank=True,
                default=27199,
                help_text='Port that Receptor will listen for incoming connections on.',
                validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)],
            ),
        ),
        migrations.AddField(
            model_name='instance',
            name='node_state',
            field=models.CharField(
                choices=[
                    ('provisioning', 'Provisioning'),
                    ('provision-fail', 'Provisioning Failure'),
                    ('installed', 'Installed'),
                    ('ready', 'Ready'),
                    ('unavailable', 'Unavailable'),
                    ('deprovisioning', 'De-provisioning'),
                    ('deprovision-fail', 'De-provisioning Failure'),
                ],
                default='ready',
                help_text='Indicates the current life cycle stage of this instance.',
                max_length=16,
            ),
        ),
        migrations.AddField(
            model_name='instancelink',
            name='link_state',
            field=models.CharField(
                choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
                default='established',
                help_text='Indicates the current life cycle stage of this peer link.',
                max_length=16,
            ),
        ),
        migrations.AlterField(
            model_name='instance',
            name='node_type',
            field=models.CharField(
                choices=[
                    ('control', 'Control plane node'),
                    ('execution', 'Execution plane node'),
                    ('hybrid', 'Controller and execution'),
                    ('hop', 'Message-passing node, no execution capability'),
                ],
                default='hybrid',
                help_text='Role that this node plays in the mesh.',
                max_length=16,
            ),
        ),
        # Data step only needed going forward; reversing just drops the columns.
        migrations.RunPython(forwards, reverse_code=migrations.RunPython.noop),
    ]

View File

@@ -1,18 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-26 20:54

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds Instance.health_check_started, a timestamp set when a health
    # check begins (complementing the existing last_health_check field).

    dependencies = [
        ('main', '0170_node_and_link_state'),
    ]

    operations = [
        migrations.AddField(
            model_name='instance',
            name='health_check_started',
            field=models.DateTimeField(editable=False, help_text='The last time a health check was initiated on this instance.', null=True),
        ),
    ]

View File

@@ -1,29 +0,0 @@
# Generated by Django 3.2.13 on 2022-09-29 18:10

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds prevent_instance_group_fallback opt-out flags on Inventory and
    # JobTemplate, which stop organization/inventory instance groups from
    # being appended to a job's preferred instance-group list.

    dependencies = [
        ('main', '0171_add_health_check_started'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventory',
            name='prevent_instance_group_fallback',
            field=models.BooleanField(
                default=False,
                help_text='If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.',
            ),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='prevent_instance_group_fallback',
            field=models.BooleanField(
                default=False,
                help_text='If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on.If this setting is enabled and you provided an empty list, the global instance groups will be applied.',
            ),
        ),
    ]

View File

@@ -4,7 +4,7 @@ from django.utils.timezone import now
logger = logging.getLogger('awx.main.migrations') logger = logging.getLogger('awx.main.migrations')
__all__ = ['create_clearsessions_jt', 'create_cleartokens_jt'] __all__ = ['create_collection_jt', 'create_clearsessions_jt', 'create_cleartokens_jt']
''' '''
These methods are called by migrations to create various system job templates These methods are called by migrations to create various system job templates

View File

@@ -44,7 +44,7 @@ def migrate_galaxy_settings(apps, schema_editor):
credential_type=galaxy_type, credential_type=galaxy_type,
inputs={'url': 'https://galaxy.ansible.com/'}, inputs={'url': 'https://galaxy.ansible.com/'},
) )
except Exception: except:
# Needed for new migrations, tests # Needed for new migrations, tests
public_galaxy_credential = Credential( public_galaxy_credential = Credential(
created=now(), modified=now(), name='Ansible Galaxy', managed=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'} created=now(), modified=now(), name='Ansible Galaxy', managed=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'}

View File

@@ -228,14 +228,15 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
@property @property
def preferred_instance_groups(self): def preferred_instance_groups(self):
selected_groups = [] if self.inventory is not None and self.inventory.organization is not None:
organization_groups = [x for x in self.inventory.organization.instance_groups.all()]
else:
organization_groups = []
if self.inventory is not None: if self.inventory is not None:
for instance_group in self.inventory.instance_groups.all(): inventory_groups = [x for x in self.inventory.instance_groups.all()]
selected_groups.append(instance_group) else:
if not self.inventory.prevent_instance_group_fallback and self.inventory.organization is not None: inventory_groups = []
for instance_group in self.inventory.organization.instance_groups.all(): selected_groups = inventory_groups + organization_groups
selected_groups.append(instance_group)
if not selected_groups: if not selected_groups:
return self.global_instance_groups return self.global_instance_groups
return selected_groups return selected_groups

View File

@@ -282,7 +282,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
return field['default'] return field['default']
if 'default' in kwargs: if 'default' in kwargs:
return kwargs['default'] return kwargs['default']
raise AttributeError(field_name) raise AttributeError
if field_name in self.inputs: if field_name in self.inputs:
return self.inputs[field_name] return self.inputs[field_name]
if 'default' in kwargs: if 'default' in kwargs:
@@ -336,7 +336,6 @@ class CredentialType(CommonModelNameNotUnique):
('external', _('External')), ('external', _('External')),
('kubernetes', _('Kubernetes')), ('kubernetes', _('Kubernetes')),
('galaxy', _('Galaxy/Automation Hub')), ('galaxy', _('Galaxy/Automation Hub')),
('cryptography', _('Cryptography')),
) )
kind = models.CharField(max_length=32, choices=KIND_CHOICES) kind = models.CharField(max_length=32, choices=KIND_CHOICES)
@@ -1172,25 +1171,6 @@ ManagedCredentialType(
}, },
) )
ManagedCredentialType(
namespace='gpg_public_key',
kind='cryptography',
name=gettext_noop('GPG Public Key'),
inputs={
'fields': [
{
'id': 'gpg_public_key',
'label': gettext_noop('GPG Public Key'),
'type': 'string',
'secret': True,
'multiline': True,
'help_text': gettext_noop('GPG Public Key used to validate content signatures.'),
},
],
'required': ['gpg_public_key'],
},
)
class CredentialInputSource(PrimordialModel): class CredentialInputSource(PrimordialModel):
class Meta: class Meta:

View File

@@ -5,7 +5,7 @@ from decimal import Decimal
import logging import logging
import os import os
from django.core.validators import MinValueValidator, MaxValueValidator from django.core.validators import MinValueValidator
from django.db import models, connection from django.db import models, connection
from django.db.models.signals import post_save, post_delete from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver from django.dispatch import receiver
@@ -59,15 +59,6 @@ class InstanceLink(BaseModel):
source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+') source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+')
target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers') target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers')
class States(models.TextChoices):
ADDING = 'adding', _('Adding')
ESTABLISHED = 'established', _('Established')
REMOVING = 'removing', _('Removing')
link_state = models.CharField(
choices=States.choices, default=States.ESTABLISHED, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
)
class Meta: class Meta:
unique_together = ('source', 'target') unique_together = ('source', 'target')
@@ -114,11 +105,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
editable=False, editable=False,
help_text=_('Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes.'), help_text=_('Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes.'),
) )
health_check_started = models.DateTimeField(
null=True,
editable=False,
help_text=_("The last time a health check was initiated on this instance."),
)
last_health_check = models.DateTimeField( last_health_check = models.DateTimeField(
null=True, null=True,
editable=False, editable=False,
@@ -141,33 +127,13 @@ class Instance(HasPolicyEditsMixin, BaseModel):
default=0, default=0,
editable=False, editable=False,
) )
NODE_TYPE_CHOICES = [
class Types(models.TextChoices): ("control", "Control plane node"),
CONTROL = 'control', _("Control plane node") ("execution", "Execution plane node"),
EXECUTION = 'execution', _("Execution plane node") ("hybrid", "Controller and execution"),
HYBRID = 'hybrid', _("Controller and execution") ("hop", "Message-passing node, no execution capability"),
HOP = 'hop', _("Message-passing node, no execution capability") ]
node_type = models.CharField(default='hybrid', choices=NODE_TYPE_CHOICES, max_length=16)
node_type = models.CharField(default=Types.HYBRID, choices=Types.choices, max_length=16, help_text=_("Role that this node plays in the mesh."))
class States(models.TextChoices):
PROVISIONING = 'provisioning', _('Provisioning')
PROVISION_FAIL = 'provision-fail', _('Provisioning Failure')
INSTALLED = 'installed', _('Installed')
READY = 'ready', _('Ready')
UNAVAILABLE = 'unavailable', _('Unavailable')
DEPROVISIONING = 'deprovisioning', _('De-provisioning')
DEPROVISION_FAIL = 'deprovision-fail', _('De-provisioning Failure')
node_state = models.CharField(
choices=States.choices, default=States.READY, max_length=16, help_text=_("Indicates the current life cycle stage of this instance.")
)
listener_port = models.PositiveIntegerField(
blank=True,
default=27199,
validators=[MinValueValidator(1), MaxValueValidator(65535)],
help_text=_("Port that Receptor will listen for incoming connections on."),
)
peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target')) peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'))
@@ -212,14 +178,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
def jobs_total(self): def jobs_total(self):
return UnifiedJob.objects.filter(execution_node=self.hostname).count() return UnifiedJob.objects.filter(execution_node=self.hostname).count()
@property
def health_check_pending(self):
if self.health_check_started is None:
return False
if self.last_health_check is None:
return True
return self.health_check_started > self.last_health_check
def get_cleanup_task_kwargs(self, **kwargs): def get_cleanup_task_kwargs(self, **kwargs):
""" """
Produce options to use for the command: ansible-runner worker cleanup Produce options to use for the command: ansible-runner worker cleanup
@@ -255,22 +213,18 @@ class Instance(HasPolicyEditsMixin, BaseModel):
return self.last_seen < ref_time - timedelta(seconds=grace_period) return self.last_seen < ref_time - timedelta(seconds=grace_period)
def mark_offline(self, update_last_seen=False, perform_save=True, errors=''): def mark_offline(self, update_last_seen=False, perform_save=True, errors=''):
if self.node_state not in (Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED): if self.cpu_capacity == 0 and self.mem_capacity == 0 and self.capacity == 0 and self.errors == errors and (not update_last_seen):
return [] return
if self.node_state == Instance.States.UNAVAILABLE and self.errors == errors and (not update_last_seen):
return []
self.node_state = Instance.States.UNAVAILABLE
self.cpu_capacity = self.mem_capacity = self.capacity = 0 self.cpu_capacity = self.mem_capacity = self.capacity = 0
self.errors = errors self.errors = errors
if update_last_seen: if update_last_seen:
self.last_seen = now() self.last_seen = now()
update_fields = ['node_state', 'capacity', 'cpu_capacity', 'mem_capacity', 'errors'] if perform_save:
update_fields = ['capacity', 'cpu_capacity', 'mem_capacity', 'errors']
if update_last_seen: if update_last_seen:
update_fields += ['last_seen'] update_fields += ['last_seen']
if perform_save:
self.save(update_fields=update_fields) self.save(update_fields=update_fields)
return update_fields
def set_capacity_value(self): def set_capacity_value(self):
"""Sets capacity according to capacity adjustment rule (no save)""" """Sets capacity according to capacity adjustment rule (no save)"""
@@ -324,12 +278,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
if not errors: if not errors:
self.refresh_capacity_fields() self.refresh_capacity_fields()
self.errors = '' self.errors = ''
if self.node_state in (Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
self.node_state = Instance.States.READY
update_fields.append('node_state')
else: else:
fields_to_update = self.mark_offline(perform_save=False, errors=errors) self.mark_offline(perform_save=False, errors=errors)
update_fields.extend(fields_to_update)
update_fields.extend(['cpu_capacity', 'mem_capacity', 'capacity']) update_fields.extend(['cpu_capacity', 'mem_capacity', 'capacity'])
# disabling activity stream will avoid extra queries, which is important for heatbeat actions # disabling activity stream will avoid extra queries, which is important for heatbeat actions
@@ -346,7 +296,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
# playbook event data; we should consider this a zero capacity event # playbook event data; we should consider this a zero capacity event
redis.Redis.from_url(settings.BROKER_URL).ping() redis.Redis.from_url(settings.BROKER_URL).ping()
except redis.ConnectionError: except redis.ConnectionError:
errors = _('Failed to connect to Redis') errors = _('Failed to connect ot Redis')
self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(), update_last_seen=True, errors=errors) self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(), update_last_seen=True, errors=errors)
@@ -438,20 +388,6 @@ def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs
@receiver(post_save, sender=Instance) @receiver(post_save, sender=Instance)
def on_instance_saved(sender, instance, created=False, raw=False, **kwargs): def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION,):
if instance.node_state == Instance.States.DEPROVISIONING:
from awx.main.tasks.receptor import remove_deprovisioned_node # prevents circular import
# wait for jobs on the node to complete, then delete the
# node and kick off write_receptor_config
connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))
if instance.node_state == Instance.States.INSTALLED:
from awx.main.tasks.receptor import write_receptor_config # prevents circular import
# broadcast to all control instances to update their receptor configs
connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
if created or instance.has_policy_changes(): if created or instance.has_policy_changes():
schedule_policy_task() schedule_policy_task()
@@ -498,58 +434,3 @@ class InventoryInstanceGroupMembership(models.Model):
default=None, default=None,
db_index=True, db_index=True,
) )
class JobLaunchConfigInstanceGroupMembership(models.Model):
joblaunchconfig = models.ForeignKey('JobLaunchConfig', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)
class ScheduleInstanceGroupMembership(models.Model):
schedule = models.ForeignKey('Schedule', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)
class WorkflowJobTemplateNodeBaseInstanceGroupMembership(models.Model):
workflowjobtemplatenode = models.ForeignKey('WorkflowJobTemplateNode', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)
class WorkflowJobNodeBaseInstanceGroupMembership(models.Model):
workflowjobnode = models.ForeignKey('WorkflowJobNode', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)
class WorkflowJobInstanceGroupMembership(models.Model):
workflowjobnode = models.ForeignKey('WorkflowJob', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)

View File

@@ -63,7 +63,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
an inventory source contains lists and hosts. an inventory source contains lists and hosts.
""" """
FIELDS_TO_PRESERVE_AT_COPY = ['hosts', 'groups', 'instance_groups', 'prevent_instance_group_fallback'] FIELDS_TO_PRESERVE_AT_COPY = ['hosts', 'groups', 'instance_groups']
KIND_CHOICES = [ KIND_CHOICES = [
('', _('Hosts have a direct link to this inventory.')), ('', _('Hosts have a direct link to this inventory.')),
('smart', _('Hosts for inventory generated using the host_filter property.')), ('smart', _('Hosts for inventory generated using the host_filter property.')),
@@ -175,16 +175,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
related_name='inventory_labels', related_name='inventory_labels',
help_text=_('Labels associated with this inventory.'), help_text=_('Labels associated with this inventory.'),
) )
prevent_instance_group_fallback = models.BooleanField(
default=False,
help_text=(
"If enabled, the inventory will prevent adding any organization "
"instance groups to the list of preferred instances groups to run "
"associated job templates on."
"If this setting is enabled and you provided an empty list, the global instance "
"groups will be applied."
),
)
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
@@ -1201,14 +1191,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
default=None, default=None,
null=True, null=True,
) )
scm_revision = models.CharField(
max_length=1024,
blank=True,
default='',
editable=False,
verbose_name=_('SCM Revision'),
help_text=_('The SCM Revision from the Project used for this inventory update. Only applicable to inventories source from scm'),
)
@property @property
def is_container_group_task(self): def is_container_group_task(self):
@@ -1278,19 +1260,15 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
@property @property
def preferred_instance_groups(self): def preferred_instance_groups(self):
selected_groups = [] if self.inventory_source.inventory is not None and self.inventory_source.inventory.organization is not None:
organization_groups = [x for x in self.inventory_source.inventory.organization.instance_groups.all()]
else:
organization_groups = []
if self.inventory_source.inventory is not None: if self.inventory_source.inventory is not None:
# Add the inventory sources IG to the selected IGs first inventory_groups = [x for x in self.inventory_source.inventory.instance_groups.all()]
for instance_group in self.inventory_source.inventory.instance_groups.all(): else:
selected_groups.append(instance_group) inventory_groups = []
# If the inventory allows for fallback and we have an organization then also append the orgs IGs to the end of the list selected_groups = inventory_groups + organization_groups
if (
not getattr(self.inventory_source.inventory, 'prevent_instance_group_fallback', False)
and self.inventory_source.inventory.organization is not None
):
for instance_group in self.inventory_source.inventory.organization.instance_groups.all():
selected_groups.append(instance_group)
if not selected_groups: if not selected_groups:
return self.global_instance_groups return self.global_instance_groups
return selected_groups return selected_groups

View File

@@ -43,8 +43,8 @@ from awx.main.models.notifications import (
NotificationTemplate, NotificationTemplate,
JobNotificationMixin, JobNotificationMixin,
) )
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
SurveyJobTemplateMixin, SurveyJobTemplateMixin,
@@ -203,7 +203,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
playbook) to an inventory source with a given credential. playbook) to an inventory source with a given credential.
""" """
FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials', 'survey_spec', 'prevent_instance_group_fallback'] FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials', 'survey_spec']
FIELDS_TO_DISCARD_AT_COPY = ['vault_credential', 'credential'] FIELDS_TO_DISCARD_AT_COPY = ['vault_credential', 'credential']
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')] SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
@@ -227,6 +227,15 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
blank=True, blank=True,
default=False, default=False,
) )
ask_limit_on_launch = AskForField(
blank=True,
default=False,
)
ask_tags_on_launch = AskForField(blank=True, default=False, allows_field='job_tags')
ask_skip_tags_on_launch = AskForField(
blank=True,
default=False,
)
ask_job_type_on_launch = AskForField( ask_job_type_on_launch = AskForField(
blank=True, blank=True,
default=False, default=False,
@@ -235,27 +244,12 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
blank=True, blank=True,
default=False, default=False,
) )
ask_inventory_on_launch = AskForField(
blank=True,
default=False,
)
ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials') ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials')
ask_execution_environment_on_launch = AskForField( ask_scm_branch_on_launch = AskForField(blank=True, default=False, allows_field='scm_branch')
blank=True,
default=False,
)
ask_forks_on_launch = AskForField(
blank=True,
default=False,
)
ask_job_slice_count_on_launch = AskForField(
blank=True,
default=False,
)
ask_timeout_on_launch = AskForField(
blank=True,
default=False,
)
ask_instance_groups_on_launch = AskForField(
blank=True,
default=False,
)
job_slice_count = models.PositiveIntegerField( job_slice_count = models.PositiveIntegerField(
blank=True, blank=True,
default=1, default=1,
@@ -274,15 +268,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
'admin_role', 'admin_role',
], ],
) )
prevent_instance_group_fallback = models.BooleanField(
default=False,
help_text=(
"If enabled, the job template will prevent adding any inventory or organization "
"instance groups to the list of preferred instances groups to run on."
"If this setting is enabled and you provided an empty list, the global instance "
"groups will be applied."
),
)
@classmethod @classmethod
def _get_unified_job_class(cls): def _get_unified_job_class(cls):
@@ -291,17 +276,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
return set(f.name for f in JobOptions._meta.fields) | set( return set(f.name for f in JobOptions._meta.fields) | set(
[ ['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials', 'job_slice_number', 'job_slice_count', 'execution_environment']
'name',
'description',
'organization',
'survey_passwords',
'labels',
'credentials',
'job_slice_number',
'job_slice_count',
'execution_environment',
]
) )
@property @property
@@ -339,13 +314,10 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
actual_inventory = self.inventory actual_inventory = self.inventory
if self.ask_inventory_on_launch and 'inventory' in kwargs: if self.ask_inventory_on_launch and 'inventory' in kwargs:
actual_inventory = kwargs['inventory'] actual_inventory = kwargs['inventory']
actual_slice_count = self.job_slice_count
if self.ask_job_slice_count_on_launch and 'job_slice_count' in kwargs:
actual_slice_count = kwargs['job_slice_count']
if actual_inventory: if actual_inventory:
return min(actual_slice_count, actual_inventory.hosts.count()) return min(self.job_slice_count, actual_inventory.hosts.count())
else: else:
return actual_slice_count return self.job_slice_count
def save(self, *args, **kwargs): def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', []) update_fields = kwargs.get('update_fields', [])
@@ -453,11 +425,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
field = self._meta.get_field(field_name) field = self._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField): if isinstance(field, models.ManyToManyField):
if field_name == 'instance_groups':
# Instance groups are ordered so we can't make a set out of them
old_value = old_value.all()
elif field_name == 'credentials':
# Credentials have a weird pattern because of how they are layered
old_value = set(old_value.all()) old_value = set(old_value.all())
new_value = set(kwargs[field_name]) - old_value new_value = set(kwargs[field_name]) - old_value
if not new_value: if not new_value:
@@ -482,10 +449,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
rejected_data[field_name] = new_value rejected_data[field_name] = new_value
errors_dict[field_name] = _('Project does not allow override of branch.') errors_dict[field_name] = _('Project does not allow override of branch.')
continue continue
elif field_name == 'job_slice_count' and (new_value > 1) and (self.get_effective_slice_ct(kwargs) <= 1):
rejected_data[field_name] = new_value
errors_dict[field_name] = _('Job inventory does not have enough hosts for slicing')
continue
# accepted prompt # accepted prompt
prompted_data[field_name] = new_value prompted_data[field_name] = new_value
else: else:
@@ -804,15 +767,19 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
@property @property
def preferred_instance_groups(self): def preferred_instance_groups(self):
# If the user specified instance groups those will be handled by the unified_job.create_unified_job if self.organization is not None:
# This function handles only the defaults for a template w/o user specification organization_groups = [x for x in self.organization.instance_groups.all()]
selected_groups = [] else:
for obj_type in ['job_template', 'inventory', 'organization']: organization_groups = []
if getattr(self, obj_type) is not None: if self.inventory is not None:
for instance_group in getattr(self, obj_type).instance_groups.all(): inventory_groups = [x for x in self.inventory.instance_groups.all()]
selected_groups.append(instance_group) else:
if getattr(getattr(self, obj_type), 'prevent_instance_group_fallback', False): inventory_groups = []
break if self.job_template is not None:
template_groups = [x for x in self.job_template.instance_groups.all()]
else:
template_groups = []
selected_groups = template_groups + inventory_groups + organization_groups
if not selected_groups: if not selected_groups:
return self.global_instance_groups return self.global_instance_groups
return selected_groups return selected_groups
@@ -939,36 +906,10 @@ class LaunchTimeConfigBase(BaseModel):
# This is a solution to the nullable CharField problem, specific to prompting # This is a solution to the nullable CharField problem, specific to prompting
char_prompts = JSONBlob(default=dict, blank=True) char_prompts = JSONBlob(default=dict, blank=True)
# Define fields that are not really fields, but alias to char_prompts lookups def prompts_dict(self, display=False):
limit = NullablePromptPseudoField('limit')
scm_branch = NullablePromptPseudoField('scm_branch')
job_tags = NullablePromptPseudoField('job_tags')
skip_tags = NullablePromptPseudoField('skip_tags')
diff_mode = NullablePromptPseudoField('diff_mode')
job_type = NullablePromptPseudoField('job_type')
verbosity = NullablePromptPseudoField('verbosity')
forks = NullablePromptPseudoField('forks')
job_slice_count = NullablePromptPseudoField('job_slice_count')
timeout = NullablePromptPseudoField('timeout')
# NOTE: additional fields are assumed to exist but must be defined in subclasses
# due to technical limitations
SUBCLASS_FIELDS = (
'instance_groups', # needs a through model defined
'extra_vars', # alternates between extra_vars and extra_data
'credentials', # already a unified job and unified JT field
'labels', # already a unified job and unified JT field
'execution_environment', # already a unified job and unified JT field
)
def prompts_dict(self, display=False, for_cls=None):
data = {} data = {}
if for_cls:
cls = for_cls
else:
cls = JobTemplate
# Some types may have different prompts, but always subset of JT prompts # Some types may have different prompts, but always subset of JT prompts
for prompt_name in cls.get_ask_mapping().keys(): for prompt_name in JobTemplate.get_ask_mapping().keys():
try: try:
field = self._meta.get_field(prompt_name) field = self._meta.get_field(prompt_name)
except FieldDoesNotExist: except FieldDoesNotExist:
@@ -976,23 +917,18 @@ class LaunchTimeConfigBase(BaseModel):
if isinstance(field, models.ManyToManyField): if isinstance(field, models.ManyToManyField):
if not self.pk: if not self.pk:
continue # unsaved object can't have related many-to-many continue # unsaved object can't have related many-to-many
prompt_values = list(getattr(self, prompt_name).all()) prompt_val = set(getattr(self, prompt_name).all())
# Many to manys can't distinguish between None and [] if len(prompt_val) > 0:
# Because of this, from a config perspective, we assume [] is none and we don't save [] into the config data[prompt_name] = prompt_val
if len(prompt_values) > 0:
data[prompt_name] = prompt_values
elif prompt_name == 'extra_vars': elif prompt_name == 'extra_vars':
if self.extra_vars: if self.extra_vars:
extra_vars = {}
if display: if display:
extra_vars = self.display_extra_vars() data[prompt_name] = self.display_extra_vars()
else: else:
extra_vars = self.extra_vars data[prompt_name] = self.extra_vars
# Depending on model, field type may save and return as string # Depending on model, field type may save and return as string
if isinstance(extra_vars, str): if isinstance(data[prompt_name], str):
extra_vars = parse_yaml_or_json(extra_vars) data[prompt_name] = parse_yaml_or_json(data[prompt_name])
if extra_vars:
data['extra_vars'] = extra_vars
if self.survey_passwords and not display: if self.survey_passwords and not display:
data['survey_passwords'] = self.survey_passwords data['survey_passwords'] = self.survey_passwords
else: else:
@@ -1002,6 +938,15 @@ class LaunchTimeConfigBase(BaseModel):
return data return data
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'extra_vars':
continue
try:
LaunchTimeConfigBase._meta.get_field(field_name)
except FieldDoesNotExist:
setattr(LaunchTimeConfigBase, field_name, NullablePromptPseudoField(field_name))
class LaunchTimeConfig(LaunchTimeConfigBase): class LaunchTimeConfig(LaunchTimeConfigBase):
""" """
Common model for all objects that save details of a saved launch config Common model for all objects that save details of a saved launch config
@@ -1020,18 +965,8 @@ class LaunchTimeConfig(LaunchTimeConfigBase):
blank=True, blank=True,
) )
) )
# Fields needed for non-unified job / unified JT models, because they are defined on unified models # Credentials needed for non-unified job / unified JT models
credentials = models.ManyToManyField('Credential', related_name='%(class)ss') credentials = models.ManyToManyField('Credential', related_name='%(class)ss')
labels = models.ManyToManyField('Label', related_name='%(class)s_labels')
execution_environment = models.ForeignKey(
'ExecutionEnvironment',
null=True,
blank=True,
default=None,
on_delete=polymorphic.SET_NULL,
related_name='%(class)s_as_prompt',
help_text="The container image to be used for execution.",
)
@property @property
def extra_vars(self): def extra_vars(self):
@@ -1075,11 +1010,6 @@ class JobLaunchConfig(LaunchTimeConfig):
editable=False, editable=False,
) )
# Instance Groups needed for non-unified job / unified JT models
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='%(class)ss', blank=True, editable=False, through='JobLaunchConfigInstanceGroupMembership'
)
def has_user_prompts(self, template): def has_user_prompts(self, template):
""" """
Returns True if any fields exist in the launch config that are Returns True if any fields exist in the launch config that are

View File

@@ -10,8 +10,6 @@ from awx.api.versioning import reverse
from awx.main.models.base import CommonModelNameNotUnique from awx.main.models.base import CommonModelNameNotUnique
from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
from awx.main.models.inventory import Inventory from awx.main.models.inventory import Inventory
from awx.main.models.schedules import Schedule
from awx.main.models.workflow import WorkflowJobTemplateNode, WorkflowJobNode
__all__ = ('Label',) __all__ = ('Label',)
@@ -36,22 +34,16 @@ class Label(CommonModelNameNotUnique):
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:label_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:label_detail', kwargs={'pk': self.pk}, request=request)
@staticmethod
def get_orphaned_labels():
return Label.objects.filter(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
def is_detached(self): def is_detached(self):
return Label.objects.filter( return Label.objects.filter(id=self.id, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True).exists()
id=self.id,
unifiedjob_labels__isnull=True,
unifiedjobtemplate_labels__isnull=True,
inventory_labels__isnull=True,
schedule_labels__isnull=True,
workflowjobtemplatenode_labels__isnull=True,
workflowjobnode_labels__isnull=True,
).exists()
def is_candidate_for_detach(self): def is_candidate_for_detach(self):
count = UnifiedJob.objects.filter(labels__in=[self.id]).count() # Both Jobs and WFJobs
count += UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count() # Both JTs and WFJT c1 = UnifiedJob.objects.filter(labels__in=[self.id]).count()
count += Inventory.objects.filter(labels__in=[self.id]).count() c2 = UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count()
count += Schedule.objects.filter(labels__in=[self.id]).count() c3 = Inventory.objects.filter(labels__in=[self.id]).count()
count += WorkflowJobTemplateNode.objects.filter(labels__in=[self.id]).count() return (c1 + c2 + c3 - 1) == 0
count += WorkflowJobNode.objects.filter(labels__in=[self.id]).count()
return (count - 1) == 0

View File

@@ -104,33 +104,6 @@ class SurveyJobTemplateMixin(models.Model):
default=False, default=False,
) )
survey_spec = prevent_search(JSONBlob(default=dict, blank=True)) survey_spec = prevent_search(JSONBlob(default=dict, blank=True))
ask_inventory_on_launch = AskForField(
blank=True,
default=False,
)
ask_limit_on_launch = AskForField(
blank=True,
default=False,
)
ask_scm_branch_on_launch = AskForField(
blank=True,
default=False,
allows_field='scm_branch',
)
ask_labels_on_launch = AskForField(
blank=True,
default=False,
)
ask_tags_on_launch = AskForField(
blank=True,
default=False,
allows_field='job_tags',
)
ask_skip_tags_on_launch = AskForField(
blank=True,
default=False,
)
ask_variables_on_launch = AskForField(blank=True, default=False, allows_field='extra_vars') ask_variables_on_launch = AskForField(blank=True, default=False, allows_field='extra_vars')
def survey_password_variables(self): def survey_password_variables(self):
@@ -439,11 +412,6 @@ class TaskManagerJobMixin(TaskManagerUnifiedJobMixin):
class Meta: class Meta:
abstract = True abstract = True
def get_jobs_fail_chain(self):
if self.project_update_id:
return [self.project_update]
return []
class TaskManagerUpdateOnLaunchMixin(TaskManagerUnifiedJobMixin): class TaskManagerUpdateOnLaunchMixin(TaskManagerUnifiedJobMixin):
class Meta: class Meta:

View File

@@ -284,17 +284,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'), help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'),
) )
# credential (keys) used to validate content signature
signature_validation_credential = models.ForeignKey(
'Credential',
related_name='%(class)ss_signature_validation',
blank=True,
null=True,
default=None,
on_delete=models.SET_NULL,
help_text=_('An optional credential used for validating files in the project against unexpected changes.'),
)
scm_revision = models.CharField( scm_revision = models.CharField(
max_length=1024, max_length=1024,
blank=True, blank=True,
@@ -631,10 +620,6 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
added_update_fields = [] added_update_fields = []
if not self.job_tags: if not self.job_tags:
job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections'] job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections']
if self.project.signature_validation_credential is not None:
credential_type = self.project.signature_validation_credential.credential_type.namespace
job_tags.append(f'validation_{credential_type}')
job_tags.append('validation_checksum_manifest')
self.job_tags = ','.join(job_tags) self.job_tags = ','.join(job_tags)
added_update_fields.append('job_tags') added_update_fields.append('job_tags')
if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check': if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':

View File

@@ -18,7 +18,6 @@ from django.utils.translation import gettext_lazy as _
# AWX # AWX
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.fields import OrderedManyToManyField
from awx.main.models.base import PrimordialModel from awx.main.models.base import PrimordialModel
from awx.main.models.jobs import LaunchTimeConfig from awx.main.models.jobs import LaunchTimeConfig
from awx.main.utils import ignore_inventory_computed_fields from awx.main.utils import ignore_inventory_computed_fields
@@ -84,13 +83,6 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
) )
rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule.")) rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule."))
next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run.")) next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run."))
instance_groups = OrderedManyToManyField(
'InstanceGroup',
related_name='schedule_instance_groups',
blank=True,
editable=False,
through='ScheduleInstanceGroupMembership',
)
@classmethod @classmethod
def get_zoneinfo(cls): def get_zoneinfo(cls):
@@ -153,7 +145,7 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
# #
# Find the DTSTART rule or raise an error, its usually the first rule but that is not strictly enforced # Find the DTSTART rule or raise an error, its usually the first rule but that is not strictly enforced
start_date_rule = re.sub(r'^.*(DTSTART[^\s]+)\s.*$', r'\1', rrule) start_date_rule = re.sub('^.*(DTSTART[^\s]+)\s.*$', r'\1', rrule)
if not start_date_rule: if not start_date_rule:
raise ValueError('A DTSTART field needs to be in the rrule') raise ValueError('A DTSTART field needs to be in the rrule')

View File

@@ -332,11 +332,10 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
return NotificationTemplate.objects.none() return NotificationTemplate.objects.none()
def create_unified_job(self, instance_groups=None, **kwargs): def create_unified_job(self, **kwargs):
""" """
Create a new unified job based on this unified job template. Create a new unified job based on this unified job template.
""" """
# TODO: rename kwargs to prompts, to set expectation that these are runtime values
new_job_passwords = kwargs.pop('survey_passwords', {}) new_job_passwords = kwargs.pop('survey_passwords', {})
eager_fields = kwargs.pop('_eager_fields', None) eager_fields = kwargs.pop('_eager_fields', None)
@@ -383,9 +382,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
unified_job.survey_passwords = new_job_passwords unified_job.survey_passwords = new_job_passwords
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
if instance_groups:
unified_job.preferred_instance_groups_cache = [ig.id for ig in instance_groups]
else:
unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache() unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache()
unified_job._set_default_dependencies_processed() unified_job._set_default_dependencies_processed()
@@ -416,17 +412,13 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
unified_job.handle_extra_data(validated_kwargs['extra_vars']) unified_job.handle_extra_data(validated_kwargs['extra_vars'])
# Create record of provided prompts for relaunch and rescheduling # Create record of provided prompts for relaunch and rescheduling
config = unified_job.create_config_from_prompts(kwargs, parent=self) unified_job.create_config_from_prompts(kwargs, parent=self)
if instance_groups:
for ig in instance_groups:
config.instance_groups.add(ig)
# manually issue the create activity stream entry _after_ M2M relations # manually issue the create activity stream entry _after_ M2M relations
# have been associated to the UJ # have been associated to the UJ
if unified_job.__class__ in activity_stream_registrar.models: if unified_job.__class__ in activity_stream_registrar.models:
activity_stream_create(None, unified_job, True) activity_stream_create(None, unified_job, True)
unified_job.log_lifecycle("created") unified_job.log_lifecycle("created")
return unified_job return unified_job
@classmethod @classmethod
@@ -981,38 +973,22 @@ class UnifiedJob(
valid_fields.extend(['survey_passwords', 'extra_vars']) valid_fields.extend(['survey_passwords', 'extra_vars'])
else: else:
kwargs.pop('survey_passwords', None) kwargs.pop('survey_passwords', None)
many_to_many_fields = []
for field_name, value in kwargs.items(): for field_name, value in kwargs.items():
if field_name not in valid_fields: if field_name not in valid_fields:
raise Exception('Unrecognized launch config field {}.'.format(field_name)) raise Exception('Unrecognized launch config field {}.'.format(field_name))
field = None if field_name == 'credentials':
# may use extra_data as a proxy for extra_vars
if field_name in config.SUBCLASS_FIELDS and field_name != 'extra_vars':
field = config._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
many_to_many_fields.append(field_name)
continue continue
if isinstance(field, (models.ForeignKey)) and (value is None): key = field_name
continue # the null value indicates not-provided for ForeignKey case if key == 'extra_vars':
setattr(config, field_name, value) key = 'extra_data'
setattr(config, key, value)
config.save() config.save()
for field_name in many_to_many_fields: job_creds = set(kwargs.get('credentials', []))
prompted_items = kwargs.get(field_name, []) if 'credentials' in [field.name for field in parent._meta.get_fields()]:
if not prompted_items: job_creds = job_creds - set(parent.credentials.all())
continue if job_creds:
if field_name == 'instance_groups': config.credentials.add(*job_creds)
# Here we are doing a loop to make sure we preserve order for this Ordered field
# also do not merge IGs with parent, so this saves the literal list
for item in prompted_items:
getattr(config, field_name).add(item)
else:
# Assuming this field merges prompts with parent, save just the diff
if field_name in [field.name for field in parent._meta.get_fields()]:
prompted_items = set(prompted_items) - set(getattr(parent, field_name).all())
if prompted_items:
getattr(config, field_name).add(*prompted_items)
return config return config
@property @property
@@ -1305,8 +1281,6 @@ class UnifiedJob(
status_data['instance_group_name'] = None status_data['instance_group_name'] = None
elif status in ['successful', 'failed', 'canceled'] and self.finished: elif status in ['successful', 'failed', 'canceled'] and self.finished:
status_data['finished'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ") status_data['finished'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ")
elif status == 'running':
status_data['started'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ")
status_data.update(self.websocket_emit_data()) status_data.update(self.websocket_emit_data())
status_data['group_name'] = 'jobs' status_data['group_name'] = 'jobs'
if getattr(self, 'unified_job_template_id', None): if getattr(self, 'unified_job_template_id', None):
@@ -1421,6 +1395,23 @@ class UnifiedJob(
# Done! # Done!
return True return True
@property
def actually_running(self):
# returns True if the job is running in the appropriate dispatcher process
running = False
if all([self.status == 'running', self.celery_task_id, self.execution_node]):
# If the job is marked as running, but the dispatcher
# doesn't know about it (or the dispatcher doesn't reply),
# then cancel the job
timeout = 5
try:
running = self.celery_task_id in ControlDispatcher('dispatcher', self.controller_node or self.execution_node).running(timeout=timeout)
except socket.timeout:
logger.error('could not reach dispatcher on {} within {}s'.format(self.execution_node, timeout))
except Exception:
logger.exception("error encountered when checking task status")
return running
@property @property
def can_cancel(self): def can_cancel(self):
return bool(self.status in CAN_CANCEL) return bool(self.status in CAN_CANCEL)
@@ -1430,61 +1421,27 @@ class UnifiedJob(
return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (self.model_to_str(), self.name, self.id) return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (self.model_to_str(), self.name, self.id)
return None return None
def fallback_cancel(self):
if not self.celery_task_id:
self.refresh_from_db(fields=['celery_task_id'])
self.cancel_dispatcher_process()
def cancel_dispatcher_process(self):
"""Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
if not self.celery_task_id:
return
canceled = []
try:
# Use control and reply mechanism to cancel and obtain confirmation
timeout = 5
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
except socket.timeout:
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
except Exception:
logger.exception("error encountered when checking task status")
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
def cancel(self, job_explanation=None, is_chain=False): def cancel(self, job_explanation=None, is_chain=False):
if self.can_cancel: if self.can_cancel:
if not is_chain: if not is_chain:
for x in self.get_jobs_fail_chain(): for x in self.get_jobs_fail_chain():
x.cancel(job_explanation=self._build_job_explanation(), is_chain=True) x.cancel(job_explanation=self._build_job_explanation(), is_chain=True)
cancel_fields = []
if not self.cancel_flag: if not self.cancel_flag:
self.cancel_flag = True self.cancel_flag = True
self.start_args = '' # blank field to remove encrypted passwords self.start_args = '' # blank field to remove encrypted passwords
cancel_fields.extend(['cancel_flag', 'start_args']) cancel_fields = ['cancel_flag', 'start_args']
connection.on_commit(lambda: self.websocket_emit_status("canceled")) if self.status in ('pending', 'waiting', 'new'):
self.status = 'canceled'
cancel_fields.append('status')
if self.status == 'running' and not self.actually_running:
self.status = 'canceled'
cancel_fields.append('status')
if job_explanation is not None: if job_explanation is not None:
self.job_explanation = job_explanation self.job_explanation = job_explanation
cancel_fields.append('job_explanation') cancel_fields.append('job_explanation')
# Important to save here before sending cancel signal to dispatcher to cancel because
# the job control process will use the cancel_flag to distinguish a shutdown from a cancel
self.save(update_fields=cancel_fields) self.save(update_fields=cancel_fields)
self.websocket_emit_status("canceled")
controller_notified = False
if self.celery_task_id:
controller_notified = self.cancel_dispatcher_process()
# If a SIGTERM signal was sent to the control process, and acked by the dispatcher
# then we want to let its own cleanup change status, otherwise change status now
if not controller_notified:
if self.status != 'canceled':
self.status = 'canceled'
self.save(update_fields=['status'])
# Avoid race condition where we have stale model from pending state but job has already started,
# its checking signal but not cancel_flag, so re-send signal after updating cancel fields
self.fallback_cancel()
return self.cancel_flag return self.cancel_flag
@property @property

View File

@@ -29,7 +29,7 @@ from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, Un
from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin
from awx.main.models.base import CreatedModifiedModel, VarsDictProperty from awx.main.models.base import CreatedModifiedModel, VarsDictProperty
from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
from awx.main.fields import ImplicitRoleField, JSONBlob, OrderedManyToManyField from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
SurveyJobTemplateMixin, SurveyJobTemplateMixin,
@@ -114,9 +114,6 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
'credentials', 'credentials',
'char_prompts', 'char_prompts',
'all_parents_must_converge', 'all_parents_must_converge',
'labels',
'instance_groups',
'execution_environment',
] ]
def create_workflow_job_node(self, **kwargs): def create_workflow_job_node(self, **kwargs):
@@ -125,7 +122,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
""" """
create_kwargs = {} create_kwargs = {}
for field_name in self._get_workflow_job_field_names(): for field_name in self._get_workflow_job_field_names():
if field_name in ['credentials', 'labels', 'instance_groups']: if field_name == 'credentials':
continue continue
if field_name in kwargs: if field_name in kwargs:
create_kwargs[field_name] = kwargs[field_name] create_kwargs[field_name] = kwargs[field_name]
@@ -135,20 +132,10 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
new_node = WorkflowJobNode.objects.create(**create_kwargs) new_node = WorkflowJobNode.objects.create(**create_kwargs)
if self.pk: if self.pk:
allowed_creds = self.credentials.all() allowed_creds = self.credentials.all()
allowed_labels = self.labels.all()
allowed_instance_groups = self.instance_groups.all()
else: else:
allowed_creds = [] allowed_creds = []
allowed_labels = []
allowed_instance_groups = []
for cred in allowed_creds: for cred in allowed_creds:
new_node.credentials.add(cred) new_node.credentials.add(cred)
for label in allowed_labels:
new_node.labels.add(label)
for instance_group in allowed_instance_groups:
new_node.instance_groups.add(instance_group)
return new_node return new_node
@@ -166,9 +153,6 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
'char_prompts', 'char_prompts',
'all_parents_must_converge', 'all_parents_must_converge',
'identifier', 'identifier',
'labels',
'execution_environment',
'instance_groups',
] ]
REENCRYPTION_BLOCKLIST_AT_COPY = ['extra_data', 'survey_passwords'] REENCRYPTION_BLOCKLIST_AT_COPY = ['extra_data', 'survey_passwords']
@@ -183,13 +167,6 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
blank=False, blank=False,
help_text=_('An identifier for this node that is unique within its workflow. ' 'It is copied to workflow job nodes corresponding to this node.'), help_text=_('An identifier for this node that is unique within its workflow. ' 'It is copied to workflow job nodes corresponding to this node.'),
) )
instance_groups = OrderedManyToManyField(
'InstanceGroup',
related_name='workflow_job_template_node_instance_groups',
blank=True,
editable=False,
through='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
)
class Meta: class Meta:
app_label = 'main' app_label = 'main'
@@ -234,7 +211,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
approval_template = WorkflowApprovalTemplate(**kwargs) approval_template = WorkflowApprovalTemplate(**kwargs)
approval_template.save() approval_template.save()
self.unified_job_template = approval_template self.unified_job_template = approval_template
self.save(update_fields=['unified_job_template']) self.save()
return approval_template return approval_template
@@ -273,9 +250,6 @@ class WorkflowJobNode(WorkflowNodeBase):
blank=True, # blank denotes pre-migration job nodes blank=True, # blank denotes pre-migration job nodes
help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'), help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'),
) )
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='workflow_job_node_instance_groups', blank=True, editable=False, through='WorkflowJobNodeBaseInstanceGroupMembership'
)
class Meta: class Meta:
app_label = 'main' app_label = 'main'
@@ -291,6 +265,19 @@ class WorkflowJobNode(WorkflowNodeBase):
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
def prompts_dict(self, *args, **kwargs):
r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs)
# Explanation - WFJT extra_vars still break pattern, so they are not
# put through prompts processing, but inventory and others are only accepted
# if JT prompts for it, so it goes through this mechanism
if self.workflow_job:
if self.workflow_job.inventory_id:
# workflow job inventory takes precedence
r['inventory'] = self.workflow_job.inventory
if self.workflow_job.char_prompts:
r.update(self.workflow_job.char_prompts)
return r
def get_job_kwargs(self): def get_job_kwargs(self):
""" """
In advance of creating a new unified job as part of a workflow, In advance of creating a new unified job as part of a workflow,
@@ -300,38 +287,16 @@ class WorkflowJobNode(WorkflowNodeBase):
""" """
# reject/accept prompted fields # reject/accept prompted fields
data = {} data = {}
wj_special_vars = {}
wj_special_passwords = {}
ujt_obj = self.unified_job_template ujt_obj = self.unified_job_template
if ujt_obj is not None: if ujt_obj is not None:
node_prompts_data = self.prompts_dict(for_cls=ujt_obj.__class__) # MERGE note: move this to prompts_dict method on node when merging
wj_prompts_data = self.workflow_job.prompts_dict(for_cls=ujt_obj.__class__) # with the workflow inventory branch
# Explanation - special historical case prompts_data = self.prompts_dict()
# WFJT extra_vars ignored JobTemplate.ask_variables_on_launch, bypassing _accept_or_ignore_job_kwargs if isinstance(ujt_obj, WorkflowJobTemplate):
# inventory and others are only accepted if JT prompts for it with related ask_ field if self.workflow_job.extra_vars:
# this is inconsistent, but maintained prompts_data.setdefault('extra_vars', {})
if not isinstance(ujt_obj, WorkflowJobTemplate): prompts_data['extra_vars'].update(self.workflow_job.extra_vars_dict)
wj_special_vars = wj_prompts_data.pop('extra_vars', {}) accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_data)
wj_special_passwords = wj_prompts_data.pop('survey_passwords', {})
elif 'extra_vars' in node_prompts_data:
# Follow the vars combination rules
node_prompts_data['extra_vars'].update(wj_prompts_data.pop('extra_vars', {}))
elif 'survey_passwords' in node_prompts_data:
node_prompts_data['survey_passwords'].update(wj_prompts_data.pop('survey_passwords', {}))
# Follow the credential combination rules
if ('credentials' in wj_prompts_data) and ('credentials' in node_prompts_data):
wj_pivoted_creds = Credential.unique_dict(wj_prompts_data['credentials'])
node_pivoted_creds = Credential.unique_dict(node_prompts_data['credentials'])
node_pivoted_creds.update(wj_pivoted_creds)
wj_prompts_data['credentials'] = [cred for cred in node_pivoted_creds.values()]
# NOTE: no special rules for instance_groups, because they do not merge
# or labels, because they do not propogate WFJT-->node at all
# Combine WFJT prompts with node here, WFJT at higher level
node_prompts_data.update(wj_prompts_data)
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**node_prompts_data)
if errors: if errors:
logger.info( logger.info(
_('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format( _('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format(
@@ -339,6 +304,15 @@ class WorkflowJobNode(WorkflowNodeBase):
) )
) )
data.update(accepted_fields) # missing fields are handled in the scheduler data.update(accepted_fields) # missing fields are handled in the scheduler
try:
# config saved on the workflow job itself
wj_config = self.workflow_job.launch_config
except ObjectDoesNotExist:
wj_config = None
if wj_config:
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later
data.update(accepted_fields)
# build ancestor artifacts, save them to node model for later # build ancestor artifacts, save them to node model for later
aa_dict = {} aa_dict = {}
is_root_node = True is_root_node = True
@@ -351,12 +325,15 @@ class WorkflowJobNode(WorkflowNodeBase):
self.ancestor_artifacts = aa_dict self.ancestor_artifacts = aa_dict
self.save(update_fields=['ancestor_artifacts']) self.save(update_fields=['ancestor_artifacts'])
# process password list # process password list
password_dict = data.get('survey_passwords', {}) password_dict = {}
if '_ansible_no_log' in aa_dict: if '_ansible_no_log' in aa_dict:
for key in aa_dict: for key in aa_dict:
if key != '_ansible_no_log': if key != '_ansible_no_log':
password_dict[key] = REPLACE_STR password_dict[key] = REPLACE_STR
password_dict.update(wj_special_passwords) if self.workflow_job.survey_passwords:
password_dict.update(self.workflow_job.survey_passwords)
if self.survey_passwords:
password_dict.update(self.survey_passwords)
if password_dict: if password_dict:
data['survey_passwords'] = password_dict data['survey_passwords'] = password_dict
# process extra_vars # process extra_vars
@@ -366,12 +343,12 @@ class WorkflowJobNode(WorkflowNodeBase):
functional_aa_dict = copy(aa_dict) functional_aa_dict = copy(aa_dict)
functional_aa_dict.pop('_ansible_no_log', None) functional_aa_dict.pop('_ansible_no_log', None)
extra_vars.update(functional_aa_dict) extra_vars.update(functional_aa_dict)
if ujt_obj and isinstance(ujt_obj, JobTemplate):
# Workflow Job extra_vars higher precedence than ancestor artifacts # Workflow Job extra_vars higher precedence than ancestor artifacts
extra_vars.update(wj_special_vars) if self.workflow_job and self.workflow_job.extra_vars:
extra_vars.update(self.workflow_job.extra_vars_dict)
if extra_vars: if extra_vars:
data['extra_vars'] = extra_vars data['extra_vars'] = extra_vars
# ensure that unified jobs created by WorkflowJobs are marked # ensure that unified jobs created by WorkflowJobs are marked
data['_eager_fields'] = {'launch_type': 'workflow'} data['_eager_fields'] = {'launch_type': 'workflow'}
if self.workflow_job and self.workflow_job.created_by: if self.workflow_job and self.workflow_job.created_by:
@@ -397,10 +374,6 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
) )
) )
) )
# Workflow jobs are used for sliced jobs, and thus, must be a conduit for any JT prompts
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='workflow_job_instance_groups', blank=True, editable=False, through='WorkflowJobInstanceGroupMembership'
)
allow_simultaneous = models.BooleanField(default=False) allow_simultaneous = models.BooleanField(default=False)
extra_vars_dict = VarsDictProperty('extra_vars', True) extra_vars_dict = VarsDictProperty('extra_vars', True)
@@ -412,7 +385,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
r = set(f.name for f in WorkflowJobOptions._meta.fields) | set( r = set(f.name for f in WorkflowJobOptions._meta.fields) | set(
['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch', 'job_tags', 'skip_tags'] ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch']
) )
r.remove('char_prompts') # needed due to copying launch config to launch config r.remove('char_prompts') # needed due to copying launch config to launch config
return r return r
@@ -452,29 +425,26 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin, WebhookTemplateMixin): class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin, WebhookTemplateMixin):
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')] SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
FIELDS_TO_PRESERVE_AT_COPY = [ FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'organization', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec']
'labels',
'organization',
'instance_groups',
'workflow_job_template_nodes',
'credentials',
'survey_spec',
'skip_tags',
'job_tags',
'execution_environment',
]
class Meta: class Meta:
app_label = 'main' app_label = 'main'
notification_templates_approvals = models.ManyToManyField( ask_inventory_on_launch = AskForField(
"NotificationTemplate",
blank=True, blank=True,
related_name='%(class)s_notification_templates_for_approvals', default=False,
) )
admin_role = ImplicitRoleField( ask_limit_on_launch = AskForField(
parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'], blank=True,
default=False,
) )
ask_scm_branch_on_launch = AskForField(
blank=True,
default=False,
)
notification_templates_approvals = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_approvals')
admin_role = ImplicitRoleField(parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'])
execute_role = ImplicitRoleField( execute_role = ImplicitRoleField(
parent_role=[ parent_role=[
'admin_role', 'admin_role',
@@ -743,25 +713,6 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set)) artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set))
return artifacts return artifacts
def prompts_dict(self, *args, **kwargs):
if self.job_template_id:
# HACK: Exception for sliced jobs here, this is bad
# when sliced jobs were introduced, workflows did not have all the prompted JT fields
# so to support prompting with slicing, we abused the workflow job launch config
# these would be more properly saved on the workflow job, but it gets the wrong fields now
try:
wj_config = self.launch_config
r = wj_config.prompts_dict(*args, **kwargs)
except ObjectDoesNotExist:
r = {}
else:
r = super().prompts_dict(*args, **kwargs)
# Workflow labels and job labels are treated separately
# that means that they do not propogate from WFJT / workflow job to jobs in workflow
r.pop('labels', None)
return r
def get_notification_templates(self): def get_notification_templates(self):
return self.workflow_job_template.notification_templates return self.workflow_job_template.notification_templates
@@ -772,10 +723,11 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
def preferred_instance_groups(self): def preferred_instance_groups(self):
return [] return []
def cancel_dispatcher_process(self): @property
def actually_running(self):
# WorkflowJobs don't _actually_ run anything in the dispatcher, so # WorkflowJobs don't _actually_ run anything in the dispatcher, so
# there's no point in asking the dispatcher if it knows about this task # there's no point in asking the dispatcher if it knows about this task
return True return self.status == 'running'
class WorkflowApprovalTemplate(UnifiedJobTemplate, RelatedJobsMixin): class WorkflowApprovalTemplate(UnifiedJobTemplate, RelatedJobsMixin):

View File

@@ -609,6 +609,8 @@ class TaskManager(TaskBase):
found_acceptable_queue = False found_acceptable_queue = False
preferred_instance_groups = self.instance_groups.get_instance_groups_from_task_cache(task)
# Determine if there is control capacity for the task # Determine if there is control capacity for the task
if task.capacity_type == 'control': if task.capacity_type == 'control':
control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
@@ -634,12 +636,16 @@ class TaskManager(TaskBase):
found_acceptable_queue = True found_acceptable_queue = True
continue continue
for instance_group in self.instance_groups.get_instance_groups_from_task_cache(task): for instance_group in preferred_instance_groups:
if instance_group.is_container_group: if instance_group.is_container_group:
self.start_task(task, instance_group, task.get_jobs_fail_chain(), None) self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
found_acceptable_queue = True found_acceptable_queue = True
break break
# TODO: remove this after we have confidence that OCP control nodes are reporting node_type=control
if settings.IS_K8S and task.capacity_type == 'execution':
logger.debug("Skipping group {}, task cannot run on control plane".format(instance_group.name))
continue
# at this point we know the instance group is NOT a container group # at this point we know the instance group is NOT a container group
# because if it was, it would have started the task and broke out of the loop. # because if it was, it would have started the task and broke out of the loop.
execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance( execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(

View File

@@ -37,11 +37,7 @@ class TaskManagerInstances:
def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')): def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
self.instances_by_hostname = dict() self.instances_by_hostname = dict()
if instances is None: if instances is None:
instances = ( instances = Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop').only(*instance_fields)
Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True)
.exclude(node_type='hop')
.only('node_type', 'node_state', 'capacity', 'hostname', 'enabled')
)
for instance in instances: for instance in instances:
self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance) self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)

View File

@@ -6,16 +6,17 @@ import os
import stat import stat
# Django # Django
from django.utils.timezone import now
from django.conf import settings from django.conf import settings
from django_guid import get_guid from django_guid import get_guid
from django.utils.functional import cached_property from django.utils.functional import cached_property
from django.db import connections
# AWX # AWX
from awx.main.redact import UriCleaner from awx.main.redact import UriCleaner
from awx.main.constants import MINIMAL_EVENTS, ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE from awx.main.constants import MINIMAL_EVENTS, ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE
from awx.main.utils.update_model import update_model from awx.main.utils.update_model import update_model
from awx.main.queue import CallbackQueueDispatcher from awx.main.queue import CallbackQueueDispatcher
from awx.main.tasks.signals import signal_callback
logger = logging.getLogger('awx.main.tasks.callback') logger = logging.getLogger('awx.main.tasks.callback')
@@ -174,6 +175,28 @@ class RunnerCallback:
return False return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
if signal_callback():
return True
try:
self.instance = self.update_model(unified_job_id)
except Exception:
logger.exception(f'Encountered error during cancel check for {unified_job_id}, canceling now')
return True
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj): def finished_callback(self, runner_obj):
""" """
Ansible runner callback triggered on finished run Ansible runner callback triggered on finished run
@@ -204,8 +227,6 @@ class RunnerCallback:
with disable_activity_stream(): with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env) self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
# We opened a connection just for that save, close it here now
connections.close_all()
elif status_data['status'] == 'failed': elif status_data['status'] == 'failed':
# For encrypted ssh_key_data, ansible-runner worker will open and write the # For encrypted ssh_key_data, ansible-runner worker will open and write the
# ssh_key_data to a named pipe. Then, once the podman container starts, ssh-agent will # ssh_key_data to a named pipe. Then, once the podman container starts, ssh-agent will

View File

@@ -145,7 +145,7 @@ class BaseTask(object):
""" """
Return params structure to be executed by the container runtime Return params structure to be executed by the container runtime
""" """
if settings.IS_K8S and instance.instance_group.is_container_group: if settings.IS_K8S:
return {} return {}
image = instance.execution_environment.image image = instance.execution_environment.image
@@ -487,7 +487,6 @@ class BaseTask(object):
self.instance.log_lifecycle("preparing_playbook") self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag or signal_callback(): if self.instance.cancel_flag or signal_callback():
self.instance = self.update_model(self.instance.pk, status='canceled') self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running': if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has # Stop the task chain and prevent starting the job if it has
# already been canceled. # already been canceled.
@@ -590,7 +589,7 @@ class BaseTask(object):
event_handler=self.runner_callback.event_handler, event_handler=self.runner_callback.event_handler,
finished_callback=self.runner_callback.finished_callback, finished_callback=self.runner_callback.finished_callback,
status_handler=self.runner_callback.status_handler, status_handler=self.runner_callback.status_handler,
cancel_callback=signal_callback, cancel_callback=self.runner_callback.cancel_callback,
**params, **params,
) )
else: else:
@@ -700,7 +699,7 @@ class SourceControlMixin(BaseTask):
def spawn_project_sync(self, project, sync_needs, scm_branch=None): def spawn_project_sync(self, project, sync_needs, scm_branch=None):
pu_ig = self.instance.instance_group pu_ig = self.instance.instance_group
pu_en = Instance.objects.my_hostname() pu_en = Instance.objects.me().hostname
sync_metafields = dict( sync_metafields = dict(
launch_type="sync", launch_type="sync",
@@ -739,6 +738,7 @@ class SourceControlMixin(BaseTask):
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir) sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id) sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db() local_project_sync.refresh_from_db()
if isinstance(self.instance, Job):
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision) self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
except Exception: except Exception:
local_project_sync.refresh_from_db() local_project_sync.refresh_from_db()
@@ -758,6 +758,7 @@ class SourceControlMixin(BaseTask):
else: else:
# Case where a local sync is not needed, meaning that local tree is # Case where a local sync is not needed, meaning that local tree is
# up-to-date with project, job is running project current version # up-to-date with project, job is running project current version
if isinstance(self.instance, Job):
self.instance = self.update_model(self.instance.pk, scm_revision=project.scm_revision) self.instance = self.update_model(self.instance.pk, scm_revision=project.scm_revision)
# Project update does not copy the folder, so copy here # Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(project, private_data_dir) RunProjectUpdate.make_local_copy(project, private_data_dir)
@@ -1269,10 +1270,6 @@ class RunProjectUpdate(BaseTask):
# for raw archive, prevent error moving files between volumes # for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp') extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
if project_update.project.signature_validation_credential is not None:
pubkey = project_update.project.signature_validation_credential.get_input('gpg_public_key')
extra_vars['gpg_pubkey'] = pubkey
self._write_extra_vars_file(private_data_dir, extra_vars) self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir): def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
@@ -1625,7 +1622,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
handler = SpecialInventoryHandler( handler = SpecialInventoryHandler(
self.runner_callback.event_handler, self.runner_callback.event_handler,
signal_callback, self.runner_callback.cancel_callback,
verbosity=inventory_update.verbosity, verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance), job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started, start_time=inventory_update.started,

View File

@@ -12,7 +12,6 @@ import yaml
# Django # Django
from django.conf import settings from django.conf import settings
from django.db import connections
# Runner # Runner
import ansible_runner import ansible_runner
@@ -26,19 +25,12 @@ from awx.main.utils.common import (
cleanup_new_process, cleanup_new_process,
) )
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.publish import task
# Receptorctl # Receptorctl
from receptorctl.socket_interface import ReceptorControl from receptorctl.socket_interface import ReceptorControl
from filelock import FileLock
logger = logging.getLogger('awx.main.tasks.receptor') logger = logging.getLogger('awx.main.tasks.receptor')
__RECEPTOR_CONF = '/etc/receptor/receptor.conf' __RECEPTOR_CONF = '/etc/receptor/receptor.conf'
__RECEPTOR_CONF_LOCKFILE = f'{__RECEPTOR_CONF}.lock'
RECEPTOR_ACTIVE_STATES = ('Pending', 'Running') RECEPTOR_ACTIVE_STATES = ('Pending', 'Running')
@@ -48,22 +40,9 @@ class ReceptorConnectionType(Enum):
STREAMTLS = 2 STREAMTLS = 2
def read_receptor_config():
# for K8S deployments, getting a lock is necessary as another process
# may be re-writing the config at this time
if settings.IS_K8S:
lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
with lock:
with open(__RECEPTOR_CONF, 'r') as f:
return yaml.safe_load(f)
else:
with open(__RECEPTOR_CONF, 'r') as f:
return yaml.safe_load(f)
def get_receptor_sockfile(): def get_receptor_sockfile():
data = read_receptor_config() with open(__RECEPTOR_CONF, 'r') as f:
data = yaml.safe_load(f)
for section in data: for section in data:
for entry_name, entry_data in section.items(): for entry_name, entry_data in section.items():
if entry_name == 'control-service': if entry_name == 'control-service':
@@ -79,7 +58,8 @@ def get_tls_client(use_stream_tls=None):
if not use_stream_tls: if not use_stream_tls:
return None return None
data = read_receptor_config() with open(__RECEPTOR_CONF, 'r') as f:
data = yaml.safe_load(f)
for section in data: for section in data:
for entry_name, entry_data in section.items(): for entry_name, entry_data in section.items():
if entry_name == 'tls-client': if entry_name == 'tls-client':
@@ -96,25 +76,12 @@ def get_receptor_ctl():
return ReceptorControl(receptor_sockfile) return ReceptorControl(receptor_sockfile)
def find_node_in_mesh(node_name, receptor_ctl): def get_conn_type(node_name, receptor_ctl):
attempts = 10
backoff = 1
for attempt in range(attempts):
all_nodes = receptor_ctl.simple_command("status").get('Advertisements', None) all_nodes = receptor_ctl.simple_command("status").get('Advertisements', None)
for node in all_nodes: for node in all_nodes:
if node.get('NodeID') == node_name: if node.get('NodeID') == node_name:
return node
else:
logger.warning(f"Instance {node_name} is not in the receptor mesh. {attempts-attempt} attempts left.")
time.sleep(backoff)
backoff += 1
else:
raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')
def get_conn_type(node_name, receptor_ctl):
node = find_node_in_mesh(node_name, receptor_ctl)
return ReceptorConnectionType(node.get('ConnType')) return ReceptorConnectionType(node.get('ConnType'))
raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')
def administrative_workunit_reaper(work_list=None): def administrative_workunit_reaper(work_list=None):
@@ -167,7 +134,8 @@ def run_until_complete(node, timing_data=None, **kwargs):
kwargs.setdefault('payload', '') kwargs.setdefault('payload', '')
transmit_start = time.time() transmit_start = time.time()
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=True, **kwargs) sign_work = False if settings.IS_K8S else True
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=sign_work, **kwargs)
unit_id = result['unitid'] unit_id = result['unitid']
run_start = time.time() run_start = time.time()
@@ -242,7 +210,7 @@ def worker_info(node_name, work_type='ansible-runner'):
else: else:
error_list.append(details) error_list.append(details)
except Exception as exc: except (ReceptorNodeNotFound, RuntimeError) as exc:
error_list.append(str(exc)) error_list.append(str(exc))
# If we have a connection error, missing keys would be trivial consequence of that # If we have a connection error, missing keys would be trivial consequence of that
@@ -313,6 +281,10 @@ class AWXReceptorJob:
except Exception: except Exception:
logger.exception(f"Error releasing work unit {self.unit_id}.") logger.exception(f"Error releasing work unit {self.unit_id}.")
@property
def sign_work(self):
return False if settings.IS_K8S else True
def _run_internal(self, receptor_ctl): def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload # Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for # (private data dir, kwargs). The right side will be passed to Receptor for
@@ -363,32 +335,24 @@ class AWXReceptorJob:
shutil.rmtree(artifact_dir) shutil.rmtree(artifact_dir)
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True) resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
connections.close_all() # We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# "processor" and the main thread will be separate threads. # to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# If a cancel happens, the main thread will encounter an exception, in which case # Which exits if the job has finished normally. The context manager ensures we do not
# we yank the socket out from underneath the processor, which will cause it to exit. # leave any threads laying around.
# The ThreadPoolExecutor context manager ensures we do not leave any threads laying around. with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
processor_future = executor.submit(self.processor, resultfile) processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
try: res = list(first_future.done)[0].result()
signal_state.raise_exception = True if res.status == 'canceled':
# address race condition where SIGTERM was issued after this dispatcher task started
if signal_callback():
raise SignalExit()
res = processor_future.result()
except SignalExit:
receptor_ctl.simple_command(f"work cancel {self.unit_id}") receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR) resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close() resultfile.close()
result = namedtuple('result', ['status', 'rc']) elif res.status == 'error':
res = result('canceled', 1)
finally:
signal_state.raise_exception = False
if res.status == 'error':
# If ansible-runner ran, but an error occured at runtime, the traceback information # If ansible-runner ran, but an error occured at runtime, the traceback information
# is saved via the status_handler passed in to the processor. # is saved via the status_handler passed in to the processor.
if 'result_traceback' in self.task.runner_callback.extra_update_fields: if 'result_traceback' in self.task.runner_callback.extra_update_fields:
@@ -472,10 +436,6 @@ class AWXReceptorJob:
return receptor_params return receptor_params
@property
def sign_work(self):
return True if self.work_type in ('ansible-runner', 'local') else False
@property @property
def work_type(self): def work_type(self):
if self.task.instance.is_container_group_task: if self.task.instance.is_container_group_task:
@@ -486,6 +446,18 @@ class AWXReceptorJob:
return 'local' return 'local'
return 'ansible-runner' return 'ansible-runner'
@cleanup_new_process
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.runner_callback.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
time.sleep(1)
@property @property
def pod_definition(self): def pod_definition(self):
ee = self.task.instance.execution_environment ee = self.task.instance.execution_environment
@@ -604,105 +576,3 @@ class AWXReceptorJob:
else: else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config return config
# TODO: receptor reload expects ordering within config items to be preserved
# if python dictionary is not preserving order properly, may need to find a
# solution. yaml.dump does not seem to work well with OrderedDict. below line may help
# yaml.add_representer(OrderedDict, lambda dumper, data: dumper.represent_mapping('tag:yaml.org,2002:map', data.items()))
#
RECEPTOR_CONFIG_STARTER = (
{'local-only': None},
{'log-level': 'debug'},
{'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
{'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
{'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
{'work-signing': {'privatekey': '/etc/receptor/signing/work-private-key.pem', 'tokenexpiration': '1m'}},
{
'work-kubernetes': {
'worktype': 'kubernetes-runtime-auth',
'authmethod': 'runtime',
'allowruntimeauth': True,
'allowruntimepod': True,
'allowruntimeparams': True,
}
},
{
'work-kubernetes': {
'worktype': 'kubernetes-incluster-auth',
'authmethod': 'incluster',
'allowruntimeauth': True,
'allowruntimepod': True,
'allowruntimeparams': True,
}
},
{
'tls-client': {
'name': 'tlsclient',
'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
'cert': '/etc/receptor/tls/receptor.crt',
'key': '/etc/receptor/tls/receptor.key',
}
},
)
@task()
def write_receptor_config():
lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
with lock:
receptor_config = list(RECEPTOR_CONFIG_STARTER)
this_inst = Instance.objects.me()
instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)
existing_peers = {link.target_id for link in InstanceLink.objects.filter(source=this_inst)}
new_links = []
for instance in instances:
peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
receptor_config.append(peer)
if instance.id not in existing_peers:
new_links.append(InstanceLink(source=this_inst, target=instance, link_state=InstanceLink.States.ADDING))
InstanceLink.objects.bulk_create(new_links)
with open(__RECEPTOR_CONF, 'w') as file:
yaml.dump(receptor_config, file, default_flow_style=False)
# This needs to be outside of the lock because this function itself will acquire the lock.
receptor_ctl = get_receptor_ctl()
attempts = 10
for backoff in range(1, attempts + 1):
try:
receptor_ctl.simple_command("reload")
break
except ValueError:
logger.warning(f"Unable to reload Receptor configuration. {attempts-backoff} attempts left.")
time.sleep(backoff)
else:
raise RuntimeError("Receptor reload failed")
links = InstanceLink.objects.filter(source=this_inst, target__in=instances, link_state=InstanceLink.States.ADDING)
links.update(link_state=InstanceLink.States.ESTABLISHED)
@task(queue=get_local_queuename)
def remove_deprovisioned_node(hostname):
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
node_jobs = UnifiedJob.objects.filter(
execution_node=hostname,
status__in=(
'running',
'waiting',
),
)
while node_jobs.exists():
time.sleep(60)
# This will as a side effect also delete the InstanceLinks that are tied to it.
Instance.objects.filter(hostname=hostname).delete()
# Update the receptor configs for all of the control-plane.
write_receptor_config.apply_async(queue='tower_broadcast_all')

View File

@@ -9,17 +9,12 @@ logger = logging.getLogger('awx.main.tasks.signals')
__all__ = ['with_signal_handling', 'signal_callback'] __all__ = ['with_signal_handling', 'signal_callback']
class SignalExit(Exception):
pass
class SignalState: class SignalState:
def reset(self): def reset(self):
self.sigterm_flag = False self.sigterm_flag = False
self.is_active = False self.is_active = False
self.original_sigterm = None self.original_sigterm = None
self.original_sigint = None self.original_sigint = None
self.raise_exception = False
def __init__(self): def __init__(self):
self.reset() self.reset()
@@ -27,9 +22,6 @@ class SignalState:
def set_flag(self, *args): def set_flag(self, *args):
"""Method to pass into the python signal.signal method to receive signals""" """Method to pass into the python signal.signal method to receive signals"""
self.sigterm_flag = True self.sigterm_flag = True
if self.raise_exception:
self.raise_exception = False # so it is not raised a second time in error handling
raise SignalExit()
def connect_signals(self): def connect_signals(self):
self.original_sigterm = signal.getsignal(signal.SIGTERM) self.original_sigterm = signal.getsignal(signal.SIGTERM)

View File

@@ -61,7 +61,7 @@ from awx.main.utils.common import (
from awx.main.utils.external_logging import reconfigure_rsyslog from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.reload import stop_local_services from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock from awx.main.utils.pglock import advisory_lock
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper
from awx.main.consumers import emit_channel_notification from awx.main.consumers import emit_channel_notification
from awx.main import analytics from awx.main import analytics
from awx.conf import settings_registry from awx.conf import settings_registry
@@ -81,10 +81,6 @@ Try upgrading OpenSSH or providing your private key in an different format. \
def dispatch_startup(): def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks') startup_logger = logging.getLogger('awx.main.tasks')
# TODO: Enable this on VM installs
if settings.IS_K8S:
write_receptor_config()
startup_logger.debug("Syncing Schedules") startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all(): for sch in Schedule.objects.all():
try: try:
@@ -126,7 +122,7 @@ def inform_cluster_of_shutdown():
reaper.reap_waiting(this_inst, grace_period=0) reaper.reap_waiting(this_inst, grace_period=0)
except Exception: except Exception:
logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname)) logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname)) logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception: except Exception:
logger.exception('Encountered problem with normal shutdown signal.') logger.exception('Encountered problem with normal shutdown signal.')
@@ -353,13 +349,9 @@ def _cleanup_images_and_files(**kwargs):
logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}') logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')
# if we are the first instance alphabetically, then run cleanup on execution nodes # if we are the first instance alphabetically, then run cleanup on execution nodes
checker_instance = ( checker_instance = Instance.objects.filter(node_type__in=['hybrid', 'control'], enabled=True, capacity__gt=0).order_by('-hostname').first()
Instance.objects.filter(node_type__in=['hybrid', 'control'], node_state=Instance.States.READY, enabled=True, capacity__gt=0)
.order_by('-hostname')
.first()
)
if checker_instance and this_inst.hostname == checker_instance.hostname: if checker_instance and this_inst.hostname == checker_instance.hostname:
for inst in Instance.objects.filter(node_type='execution', node_state=Instance.States.READY, enabled=True, capacity__gt=0): for inst in Instance.objects.filter(node_type='execution', enabled=True, capacity__gt=0):
runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs) runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs)
if not runner_cleanup_kwargs: if not runner_cleanup_kwargs:
continue continue
@@ -413,12 +405,7 @@ def execution_node_health_check(node):
return return
if instance.node_type != 'execution': if instance.node_type != 'execution':
logger.warning(f'Execution node health check ran against {instance.node_type} node {instance.hostname}') raise RuntimeError(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')
return
if instance.node_state not in (Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
logger.warning(f"Execution node health check ran against node {instance.hostname} in state {instance.node_state}")
return
data = worker_info(node) data = worker_info(node)
@@ -453,7 +440,6 @@ def inspect_execution_nodes(instance_list):
nowtime = now() nowtime = now()
workers = mesh_status['Advertisements'] workers = mesh_status['Advertisements']
for ad in workers: for ad in workers:
hostname = ad['NodeID'] hostname = ad['NodeID']
@@ -464,23 +450,25 @@ def inspect_execution_nodes(instance_list):
continue continue
# Control-plane nodes are dealt with via local_health_check instead. # Control-plane nodes are dealt with via local_health_check instead.
if instance.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID): if instance.node_type in ('control', 'hybrid'):
continue continue
was_lost = instance.is_lost(ref_time=nowtime)
last_seen = parse_date(ad['Time']) last_seen = parse_date(ad['Time'])
if instance.last_seen and instance.last_seen >= last_seen: if instance.last_seen and instance.last_seen >= last_seen:
continue continue
instance.last_seen = last_seen instance.last_seen = last_seen
instance.save(update_fields=['last_seen']) instance.save(update_fields=['last_seen'])
# Only execution nodes should be dealt with by execution_node_health_check # Only execution nodes should be dealt with by execution_node_health_check
if instance.node_type == Instance.Types.HOP: if instance.node_type == 'hop':
if instance.node_state in (Instance.States.UNAVAILABLE, Instance.States.INSTALLED): if was_lost and (not instance.is_lost(ref_time=nowtime)):
logger.warning(f'Hop node {hostname}, has rejoined the receptor mesh') logger.warning(f'Hop node {hostname}, has rejoined the receptor mesh')
instance.save_health_data(errors='') instance.save_health_data(errors='')
continue continue
if instance.node_state in (Instance.States.UNAVAILABLE, Instance.States.INSTALLED): if was_lost:
# if the instance *was* lost, but has appeared again, # if the instance *was* lost, but has appeared again,
# attempt to re-establish the initial capacity and version # attempt to re-establish the initial capacity and version
# check # check
@@ -499,7 +487,7 @@ def inspect_execution_nodes(instance_list):
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None): def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
logger.debug("Cluster node heartbeat task.") logger.debug("Cluster node heartbeat task.")
nowtime = now() nowtime = now()
instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED))) instance_list = list(Instance.objects.all())
this_inst = None this_inst = None
lost_instances = [] lost_instances = []
@@ -561,11 +549,11 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
except Exception: except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname)) logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try: try:
if settings.AWX_AUTO_DEPROVISION_INSTANCES and other_inst.node_type == "control": if settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname deprovision_hostname = other_inst.hostname
other_inst.delete() # FIXME: what about associated inbound links? other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname)) logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
elif other_inst.node_state == Instance.States.READY: elif other_inst.capacity != 0 or (not other_inst.errors):
other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive')) other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive'))
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen)) logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))

View File

@@ -210,7 +210,7 @@ def mk_workflow_job_template(name, extra_vars='', spec=None, organization=None,
if extra_vars: if extra_vars:
extra_vars = json.dumps(extra_vars) extra_vars = json.dumps(extra_vars)
wfjt = WorkflowJobTemplate.objects.create(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service) wfjt = WorkflowJobTemplate(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service)
if spec: if spec:
wfjt.survey_spec = spec wfjt.survey_spec = spec

View File

@@ -19,7 +19,8 @@ EXPECTED_VALUES = {
'awx_hosts_total': 1.0, 'awx_hosts_total': 1.0,
'awx_schedules_total': 1.0, 'awx_schedules_total': 1.0,
'awx_sessions_total': 0.0, 'awx_sessions_total': 0.0,
'awx_status_total': 0.0, 'awx_sessions_total': 0.0,
'awx_sessions_total': 0.0,
'awx_running_jobs_total': 0.0, 'awx_running_jobs_total': 0.0,
'awx_instance_capacity': 100.0, 'awx_instance_capacity': 100.0,
'awx_instance_consumed_capacity': 0.0, 'awx_instance_consumed_capacity': 0.0,

View File

@@ -1,9 +1,14 @@
import pytest import pytest
from unittest import mock
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.models.activity_stream import ActivityStream from awx.main.models.activity_stream import ActivityStream
from awx.main.models.ha import Instance from awx.main.models.ha import Instance
import redis
# Django
from django.test.utils import override_settings from django.test.utils import override_settings
@@ -45,44 +50,33 @@ def test_enabled_sets_capacity(patch, admin_user):
def test_auditor_user_health_check(get, post, system_auditor): def test_auditor_user_health_check(get, post, system_auditor):
instance = Instance.objects.create(**INSTANCE_KWARGS) instance = Instance.objects.create(**INSTANCE_KWARGS)
url = reverse('api:instance_health_check', kwargs={'pk': instance.pk}) url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
get(url=url, user=system_auditor, expect=200) r = get(url=url, user=system_auditor, expect=200)
assert r.data['cpu_capacity'] == instance.cpu_capacity
post(url=url, user=system_auditor, expect=403) post(url=url, user=system_auditor, expect=403)
@pytest.mark.django_db @pytest.mark.django_db
def test_health_check_throws_error(post, admin_user):
instance = Instance.objects.create(node_type='execution', **INSTANCE_KWARGS)
url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
# we will simulate a receptor error, similar to this one
# https://github.com/ansible/receptor/blob/156e6e24a49fbf868734507f9943ac96208ed8f5/receptorctl/receptorctl/socket_interface.py#L204
# related to issue https://github.com/ansible/tower/issues/5315
with mock.patch('awx.main.tasks.receptor.run_until_complete', side_effect=RuntimeError('Remote error: foobar')):
post(url=url, user=admin_user, expect=200)
instance.refresh_from_db()
assert 'Remote error: foobar' in instance.errors
assert instance.capacity == 0
@pytest.mark.django_db
@mock.patch.object(redis.client.Redis, 'ping', lambda self: True)
def test_health_check_usage(get, post, admin_user): def test_health_check_usage(get, post, admin_user):
instance = Instance.objects.create(**INSTANCE_KWARGS) instance = Instance.objects.create(**INSTANCE_KWARGS)
url = reverse('api:instance_health_check', kwargs={'pk': instance.pk}) url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
get(url=url, user=admin_user, expect=200) r = get(url=url, user=admin_user, expect=200)
assert r.data['cpu_capacity'] == instance.cpu_capacity
assert r.data['last_health_check'] is None
with override_settings(CLUSTER_HOST_ID=instance.hostname): # force direct call of cluster_node_health_check
r = post(url=url, user=admin_user, expect=200) r = post(url=url, user=admin_user, expect=200)
assert r.data['msg'] == f"Health check is running for {instance.hostname}." assert r.data['last_health_check'] is not None
def test_custom_hostname_regex(post, admin_user):
url = reverse('api:instance_list')
with override_settings(IS_K8S=True):
for value in [
("foo.bar.baz", 201),
("f.bar.bz", 201),
("foo.bar.b", 400),
("a.b.c", 400),
("localhost", 400),
("127.0.0.1", 400),
("192.168.56.101", 201),
("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 201),
("foobar", 201),
("--yoooo", 400),
("$3$@foobar@#($!@#*$", 400),
("999.999.999.999", 201),
("0000:0000:0000:0000:0000:0000:0000:0001", 400),
("whitespaces are bad for hostnames", 400),
("0:0:0:0:0:0:0:1", 400),
("192.localhost.domain.101", 201),
("F@$%(@#$H%^(I@#^HCTQEWRFG", 400),
]:
data = {
"hostname": value[0],
"node_type": "execution",
"node_state": "installed",
}
post(url=url, user=admin_user, data=data, expect=value[1])

View File

@@ -216,7 +216,7 @@ def test_instance_attach_to_instance_group(post, instance_group, node_type_insta
count = ActivityStream.objects.count() count = ActivityStream.objects.count()
url = reverse('api:instance_group_instance_list', kwargs={'pk': instance_group.pk}) url = reverse(f'api:instance_group_instance_list', kwargs={'pk': instance_group.pk})
post(url, {'associate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400) post(url, {'associate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400)
new_activity = ActivityStream.objects.all()[count:] new_activity = ActivityStream.objects.all()[count:]
@@ -240,7 +240,7 @@ def test_instance_unattach_from_instance_group(post, instance_group, node_type_i
count = ActivityStream.objects.count() count = ActivityStream.objects.count()
url = reverse('api:instance_group_instance_list', kwargs={'pk': instance_group.pk}) url = reverse(f'api:instance_group_instance_list', kwargs={'pk': instance_group.pk})
post(url, {'disassociate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400) post(url, {'disassociate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400)
new_activity = ActivityStream.objects.all()[count:] new_activity = ActivityStream.objects.all()[count:]
@@ -263,7 +263,7 @@ def test_instance_group_attach_to_instance(post, instance_group, node_type_insta
count = ActivityStream.objects.count() count = ActivityStream.objects.count()
url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk}) url = reverse(f'api:instance_instance_groups_list', kwargs={'pk': instance.pk})
post(url, {'associate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400) post(url, {'associate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400)
new_activity = ActivityStream.objects.all()[count:] new_activity = ActivityStream.objects.all()[count:]
@@ -287,7 +287,7 @@ def test_instance_group_unattach_from_instance(post, instance_group, node_type_i
count = ActivityStream.objects.count() count = ActivityStream.objects.count()
url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk}) url = reverse(f'api:instance_instance_groups_list', kwargs={'pk': instance.pk})
post(url, {'disassociate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400) post(url, {'disassociate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400)
new_activity = ActivityStream.objects.all()[count:] new_activity = ActivityStream.objects.all()[count:]
@@ -314,4 +314,4 @@ def test_cannot_remove_controlplane_hybrid_instances(post, controlplane_instance
url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk}) url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk})
r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400) r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400)
assert 'Cannot disassociate hybrid instance' in str(r.data) assert f'Cannot disassociate hybrid instance' in str(r.data)

View File

@@ -13,11 +13,17 @@ from django.utils import timezone
# AWX # AWX
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin
from awx.main.models import JobTemplate, User, Job, AdHocCommand, ProjectUpdate, InstanceGroup, Label, Organization from awx.main.models import (
JobTemplate,
User,
Job,
AdHocCommand,
ProjectUpdate,
)
@pytest.mark.django_db @pytest.mark.django_db
def test_job_relaunch_permission_denied_response(post, get, inventory, project, net_credential, machine_credential): def test_job_relaunch_permission_denied_response(post, get, inventory, project, credential, net_credential, machine_credential):
jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True) jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True)
jt.credentials.add(machine_credential) jt.credentials.add(machine_credential)
jt_user = User.objects.create(username='jobtemplateuser') jt_user = User.objects.create(username='jobtemplateuser')
@@ -33,22 +39,6 @@ def test_job_relaunch_permission_denied_response(post, get, inventory, project,
job.launch_config.credentials.add(net_credential) job.launch_config.credentials.add(net_credential)
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403) r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail'] assert 'launched with prompted fields you do not have access to' in r.data['detail']
job.launch_config.credentials.clear()
# Job has prompted instance group that user cannot see
job.launch_config.instance_groups.add(InstanceGroup.objects.create())
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail']
job.launch_config.instance_groups.clear()
# Job has prompted label that user cannot see
job.launch_config.labels.add(Label.objects.create(organization=Organization.objects.create()))
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail']
job.launch_config.labels.clear()
# without any of those prompts, user can launch
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=201)
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -4,7 +4,8 @@ import yaml
import json import json
from awx.api.serializers import JobLaunchSerializer from awx.api.serializers import JobLaunchSerializer
from awx.main.models import Credential, Inventory, Host, ExecutionEnvironment, Label, InstanceGroup from awx.main.models.credential import Credential
from awx.main.models.inventory import Inventory, Host
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
from awx.api.versioning import reverse from awx.api.versioning import reverse
@@ -14,11 +15,6 @@ from awx.api.versioning import reverse
def runtime_data(organization, credentialtype_ssh): def runtime_data(organization, credentialtype_ssh):
cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'}) cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'})
inv_obj = organization.inventories.create(name="runtime-inv") inv_obj = organization.inventories.create(name="runtime-inv")
inv_obj.hosts.create(name='foo1')
inv_obj.hosts.create(name='foo2')
ee_obj = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
ig_obj = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
labels_obj = Label.objects.create(name='foo', description='bar', organization=organization)
return dict( return dict(
extra_vars='{"job_launch_var": 4}', extra_vars='{"job_launch_var": 4}',
limit='test-servers', limit='test-servers',
@@ -29,12 +25,6 @@ def runtime_data(organization, credentialtype_ssh):
credentials=[cred_obj.pk], credentials=[cred_obj.pk],
diff_mode=True, diff_mode=True,
verbosity=2, verbosity=2,
execution_environment=ee_obj.pk,
labels=[labels_obj.pk],
forks=7,
job_slice_count=2,
timeout=10,
instance_groups=[ig_obj.pk],
) )
@@ -64,12 +54,6 @@ def job_template_prompts(project, inventory, machine_credential):
ask_credential_on_launch=on_off, ask_credential_on_launch=on_off,
ask_diff_mode_on_launch=on_off, ask_diff_mode_on_launch=on_off,
ask_verbosity_on_launch=on_off, ask_verbosity_on_launch=on_off,
ask_execution_environment_on_launch=on_off,
ask_labels_on_launch=on_off,
ask_forks_on_launch=on_off,
ask_job_slice_count_on_launch=on_off,
ask_timeout_on_launch=on_off,
ask_instance_groups_on_launch=on_off,
) )
jt.credentials.add(machine_credential) jt.credentials.add(machine_credential)
return jt return jt
@@ -93,12 +77,6 @@ def job_template_prompts_null(project):
ask_credential_on_launch=True, ask_credential_on_launch=True,
ask_diff_mode_on_launch=True, ask_diff_mode_on_launch=True,
ask_verbosity_on_launch=True, ask_verbosity_on_launch=True,
ask_execution_environment_on_launch=True,
ask_labels_on_launch=True,
ask_forks_on_launch=True,
ask_job_slice_count_on_launch=True,
ask_timeout_on_launch=True,
ask_instance_groups_on_launch=True,
) )
@@ -114,12 +92,6 @@ def data_to_internal(data):
internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials']) internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])
if 'inventory' in data: if 'inventory' in data:
internal['inventory'] = Inventory.objects.get(pk=data['inventory']) internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
if 'execution_environment' in data:
internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment'])
if 'labels' in data:
internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']]
if 'instance_groups' in data:
internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']]
return internal return internal
@@ -152,12 +124,6 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad
assert 'credentials' in response.data['ignored_fields'] assert 'credentials' in response.data['ignored_fields']
assert 'job_tags' in response.data['ignored_fields'] assert 'job_tags' in response.data['ignored_fields']
assert 'skip_tags' in response.data['ignored_fields'] assert 'skip_tags' in response.data['ignored_fields']
assert 'execution_environment' in response.data['ignored_fields']
assert 'labels' in response.data['ignored_fields']
assert 'forks' in response.data['ignored_fields']
assert 'job_slice_count' in response.data['ignored_fields']
assert 'timeout' in response.data['ignored_fields']
assert 'instance_groups' in response.data['ignored_fields']
@pytest.mark.django_db @pytest.mark.django_db
@@ -196,34 +162,6 @@ def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
mock_job.signal_start.assert_called_once() mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_slice_timeout_forks_need_int(job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(
reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'timeout': '', 'job_slice_count': '', 'forks': ''}, admin_user, expect=400
)
assert 'forks' in response.data and response.data['forks'][0] == 'A valid integer is required.'
assert 'job_slice_count' in response.data and response.data['job_slice_count'][0] == 'A valid integer is required.'
assert 'timeout' in response.data and response.data['timeout'][0] == 'A valid integer is required.'
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_slice_count_not_supported(job_template_prompts, post, admin_user):
job_template = job_template_prompts(True)
assert job_template.inventory.hosts.count() == 0
job_template.inventory.hosts.create(name='foo')
response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_slice_count': 8}, admin_user, expect=400)
assert response.data['job_slice_count'][0] == 'Job inventory does not have enough hosts for slicing'
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.job_runtime_vars @pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker): def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):
@@ -238,10 +176,6 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null,
inventory = Inventory.objects.get(pk=runtime_data['inventory']) inventory = Inventory.objects.get(pk=runtime_data['inventory'])
inventory.use_role.members.add(rando) inventory.use_role.members.add(rando)
# Instance Groups and label can not currently easily be used by rando so we need to remove the instance groups from the runtime data
runtime_data.pop('instance_groups')
runtime_data.pop('labels')
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data) mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
@@ -309,59 +243,12 @@ def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.job_runtime_vars @pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_ig_if_ig_in_template(job_template_prompts, runtime_data, post, rando, mocker): def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando):
job_template = job_template_prompts(True)
job_template.instance_groups.add(InstanceGroup.objects.get(id=runtime_data['instance_groups'][0]))
job_template.instance_groups.add(InstanceGroup.objects.create(name='foo'))
job_template.save()
job_template.execute_role.members.add(rando)
# Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(instance_groups=runtime_data['instance_groups']), rando, expect=201)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_label_if_label_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
job_template = job_template_prompts(True)
job_template.labels.add(Label.objects.get(id=runtime_data['labels'][0]))
job_template.labels.add(Label.objects.create(name='baz', description='faz', organization=organization))
job_template.save()
job_template.execute_role.members.add(rando)
# Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(labels=runtime_data['labels']), rando, expect=201)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_ee_if_ee_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando)
# Make sure we get a 201 instead of a 403 since we are providing an override that is already in the template
post(
reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(execution_environment=runtime_data['execution_environment']), rando, expect=201
)
@pytest.mark.parametrize(
'item_type',
[
('credentials'),
('labels'),
('instance_groups'),
],
)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_access(job_template_prompts, runtime_data, post, rando, item_type):
job_template = job_template_prompts(True) job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando) job_template.execute_role.members.add(rando)
# Assure that giving a credential without access blocks the launch # Assure that giving a credential without access blocks the launch
data = {item_type: runtime_data[item_type]} post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(credentials=runtime_data['credentials']), rando, expect=403)
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), data, rando, expect=403)
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -105,30 +105,6 @@ def test_encrypted_survey_answer(post, patch, admin_user, project, inventory, su
assert decrypt_value(get_encryption_key('value', pk=None), schedule.extra_data['var1']) == 'bar' assert decrypt_value(get_encryption_key('value', pk=None), schedule.extra_data['var1']) == 'bar'
@pytest.mark.django_db
def test_survey_password_default(post, patch, admin_user, project, inventory, survey_spec_factory):
job_template = JobTemplate.objects.create(
name='test-jt',
project=project,
playbook='helloworld.yml',
inventory=inventory,
ask_variables_on_launch=False,
survey_enabled=True,
survey_spec=survey_spec_factory([{'variable': 'var1', 'question_name': 'Q1', 'type': 'password', 'required': True, 'default': 'foobar'}]),
)
# test removal of $encrypted$
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
r = post(url, {'name': 'test sch', 'rrule': RRULE_EXAMPLE, 'extra_data': '{"var1": "$encrypted$"}'}, admin_user, expect=201)
schedule = Schedule.objects.get(pk=r.data['id'])
assert schedule.extra_data == {}
assert schedule.enabled is True
# test an unrelated change
patch(schedule.get_absolute_url(), data={'enabled': False}, user=admin_user, expect=200)
patch(schedule.get_absolute_url(), data={'enabled': True}, user=admin_user, expect=200)
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize( @pytest.mark.parametrize(
'rrule, error', 'rrule, error',
@@ -147,19 +123,19 @@ def test_survey_password_default(post, patch, admin_user, project, inventory, su
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa ("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
("DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "COUNT > 999 is unsupported"), # noqa ("DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "COUNT > 999 is unsupported"), # noqa
# Individual rule test with multiple rules # Individual rule test with multiple rules
# Bad Rule: RRULE:NONSENSE ## Bad Rule: RRULE:NONSENSE
("DTSTART:20300308T050000Z RRULE:NONSENSE RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU", "INTERVAL required in rrule"), ("DTSTART:20300308T050000Z RRULE:NONSENSE RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU", "INTERVAL required in rrule"),
# Bad Rule: RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO ## Bad Rule: RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO
( (
"DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO", "DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO",
"BYDAY with numeric prefix not supported", "BYDAY with numeric prefix not supported",
), # noqa ), # noqa
# Bad Rule: RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z ## Bad Rule: RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z
( (
"DTSTART:20030925T104941Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "DTSTART:20030925T104941Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z",
"RRULE may not contain both COUNT and UNTIL", "RRULE may not contain both COUNT and UNTIL",
), # noqa ), # noqa
# Bad Rule: RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000 ## Bad Rule: RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000
( (
"DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000",
"COUNT > 999 is unsupported", "COUNT > 999 is unsupported",

View File

@@ -77,18 +77,6 @@ class TestApprovalNodes:
assert approval_node.unified_job_template.description == 'Approval Node' assert approval_node.unified_job_template.description == 'Approval Node'
assert approval_node.unified_job_template.timeout == 0 assert approval_node.unified_job_template.timeout == 0
def test_approval_node_creation_with_timeout(self, post, approval_node, admin_user):
assert approval_node.timeout is None
url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'})
post(url, {'name': 'Test', 'description': 'Approval Node', 'timeout': 10}, user=admin_user, expect=201)
approval_node = WorkflowJobTemplateNode.objects.get(pk=approval_node.pk)
approval_node.refresh_from_db()
assert approval_node.timeout is None
assert isinstance(approval_node.unified_job_template, WorkflowApprovalTemplate)
assert approval_node.unified_job_template.timeout == 10
def test_approval_node_creation_failure(self, post, approval_node, admin_user): def test_approval_node_creation_failure(self, post, approval_node, admin_user):
# This test leaves off a required param to assert that user will get a 400. # This test leaves off a required param to assert that user will get a 400.
url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'}) url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'})

View File

@@ -706,7 +706,7 @@ def jt_linked(organization, project, inventory, machine_credential, credential,
@pytest.fixture @pytest.fixture
def workflow_job_template(organization): def workflow_job_template(organization):
wjt = WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization) wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization)
wjt.save() wjt.save()
return wjt return wjt

View File

@@ -64,26 +64,3 @@ class TestSlicingModels:
inventory2 = Inventory.objects.create(organization=organization, name='fooinv') inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
[inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)] [inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)]
assert job_template.get_effective_slice_ct({'inventory': inventory2}) assert job_template.get_effective_slice_ct({'inventory': inventory2})
def test_effective_slice_count_prompt(self, job_template, inventory, organization):
job_template.inventory = inventory
# Add our prompt fields to the JT to allow overrides
job_template.ask_job_slice_count_on_launch = True
job_template.ask_inventory_on_launch = True
# Set a default value of the slice count to something low
job_template.job_slice_count = 2
# Create an inventory with 4 nodes
inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
[inventory2.hosts.create(name='foo{}'.format(i)) for i in range(4)]
# The inventory slice count will be the min of the number of nodes (4) or the job slice (2)
assert job_template.get_effective_slice_ct({'inventory': inventory2}) == 2
# Now we are going to pass in an override (like the prompt would) and as long as that is < host count we expect that back
assert job_template.get_effective_slice_ct({'inventory': inventory2, 'job_slice_count': 3}) == 3
def test_slice_count_prompt_limited_by_inventory(self, job_template, inventory, organization):
assert inventory.hosts.count() == 0
job_template.inventory = inventory
inventory.hosts.create(name='foo')
unified_job = job_template.create_unified_job(job_slice_count=2)
assert isinstance(unified_job, Job)

View File

@@ -1,8 +1,7 @@
import pytest import pytest
# AWX # AWX
from awx.main.models.jobs import JobTemplate, LaunchTimeConfigBase from awx.main.models import JobTemplate, JobLaunchConfig
from awx.main.models.execution_environments import ExecutionEnvironment
@pytest.fixture @pytest.fixture
@@ -12,6 +11,18 @@ def full_jt(inventory, project, machine_credential):
return jt return jt
@pytest.fixture
def config_factory(full_jt):
def return_config(data):
job = full_jt.create_unified_job(**data)
try:
return job.launch_config
except JobLaunchConfig.DoesNotExist:
return None
return return_config
@pytest.mark.django_db @pytest.mark.django_db
class TestConfigCreation: class TestConfigCreation:
""" """
@@ -29,73 +40,28 @@ class TestConfigCreation:
assert config.limit == 'foobar' assert config.limit == 'foobar'
assert config.char_prompts == {'limit': 'foobar'} assert config.char_prompts == {'limit': 'foobar'}
def test_added_related(self, full_jt, credential, default_instance_group, label): def test_added_credential(self, full_jt, credential):
job = full_jt.create_unified_job(credentials=[credential], instance_groups=[default_instance_group], labels=[label]) job = full_jt.create_unified_job(credentials=[credential])
config = job.launch_config config = job.launch_config
assert set(config.credentials.all()) == set([credential]) assert set(config.credentials.all()) == set([credential])
assert set(config.labels.all()) == set([label])
assert set(config.instance_groups.all()) == set([default_instance_group])
def test_survey_passwords_ignored(self, inventory_source): def test_survey_passwords_ignored(self, inventory_source):
iu = inventory_source.create_unified_job(survey_passwords={'foo': '$encrypted$'}) iu = inventory_source.create_unified_job(survey_passwords={'foo': '$encrypted$'})
assert iu.launch_config.prompts_dict() == {} assert iu.launch_config.prompts_dict() == {}
@pytest.fixture
def full_prompts_dict(inventory, credential, label, default_instance_group):
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
r = {
'limit': 'foobar',
'inventory': inventory,
'credentials': [credential],
'execution_environment': ee,
'labels': [label],
'instance_groups': [default_instance_group],
'verbosity': 3,
'scm_branch': 'non_dev',
'diff_mode': True,
'skip_tags': 'foobar',
'job_tags': 'untagged',
'forks': 26,
'job_slice_count': 2,
'timeout': 200,
'extra_vars': {'prompted_key': 'prompted_val'},
'job_type': 'check',
}
assert set(JobTemplate.get_ask_mapping().keys()) - set(r.keys()) == set() # make fixture comprehensive
return r
@pytest.mark.django_db @pytest.mark.django_db
def test_config_reversibility(full_jt, full_prompts_dict): class TestConfigReversibility:
""" """
Checks that a blob of saved prompts will be re-created in the Checks that a blob of saved prompts will be re-created in the
prompts_dict for launching new jobs prompts_dict for launching new jobs
""" """
config = full_jt.create_unified_job(**full_prompts_dict).launch_config
assert config.prompts_dict() == full_prompts_dict
def test_char_field_only(self, config_factory):
config = config_factory({'limit': 'foobar'})
assert config.prompts_dict() == {'limit': 'foobar'}
@pytest.mark.django_db def test_related_objects(self, config_factory, inventory, credential):
class TestLaunchConfigModels: prompts = {'limit': 'foobar', 'inventory': inventory, 'credentials': set([credential])}
def get_concrete_subclasses(self, cls): config = config_factory(prompts)
r = [] assert config.prompts_dict() == prompts
for c in cls.__subclasses__():
if c._meta.abstract:
r.extend(self.get_concrete_subclasses(c))
else:
r.append(c)
return r
def test_non_job_config_complete(self):
"""This performs model validation which replaces code that used run on import."""
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
assert not hasattr(LaunchTimeConfigBase, field_name)
else:
assert hasattr(LaunchTimeConfigBase, field_name)
def test_subclass_fields_complete(self):
for cls in self.get_concrete_subclasses(LaunchTimeConfigBase):
for field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
assert hasattr(cls, field_name)

View File

@@ -12,9 +12,6 @@ from awx.main.models.workflow import (
) )
from awx.main.models.jobs import JobTemplate, Job from awx.main.models.jobs import JobTemplate, Job
from awx.main.models.projects import ProjectUpdate from awx.main.models.projects import ProjectUpdate
from awx.main.models.credential import Credential, CredentialType
from awx.main.models.label import Label
from awx.main.models.ha import InstanceGroup
from awx.main.scheduler.dag_workflow import WorkflowDAG from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList
@@ -232,65 +229,6 @@ class TestWorkflowJob:
assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43} assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43}
assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43} assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43}
def test_combine_prompts_WFJT_to_node(self, project, inventory, organization):
"""
Test that complex prompts like variables, credentials, labels, etc
are properly combined from the workflow-level with the node-level
"""
jt = JobTemplate.objects.create(
project=project,
inventory=inventory,
ask_variables_on_launch=True,
ask_credential_on_launch=True,
ask_instance_groups_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
)
wj = WorkflowJob.objects.create(name='test-wf-job', extra_vars='{}')
common_ig = InstanceGroup.objects.create(name='common')
common_ct = CredentialType.objects.create(name='common')
node = WorkflowJobNode.objects.create(workflow_job=wj, unified_job_template=jt, extra_vars={'node_key': 'node_val'})
node.limit = 'node_limit'
node.save()
node_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='node'))
node_cred_conflicting = Credential.objects.create(credential_type=common_ct)
node.credentials.add(node_cred_unique, node_cred_conflicting)
node_labels = [Label.objects.create(name='node1', organization=organization), Label.objects.create(name='node2', organization=organization)]
node.labels.add(*node_labels)
node_igs = [common_ig, InstanceGroup.objects.create(name='node')]
for ig in node_igs:
node.instance_groups.add(ig)
# assertions for where node has prompts but workflow job does not
data = node.get_job_kwargs()
assert data['extra_vars'] == {'node_key': 'node_val'}
assert set(data['credentials']) == set([node_cred_conflicting, node_cred_unique])
assert data['instance_groups'] == node_igs
assert set(data['labels']) == set(node_labels)
assert data['limit'] == 'node_limit'
# add prompts to the WorkflowJob
wj.limit = 'wj_limit'
wj.extra_vars = {'wj_key': 'wj_val'}
wj.save()
wj_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='wj'))
wj_cred_conflicting = Credential.objects.create(credential_type=common_ct)
wj.credentials.add(wj_cred_unique, wj_cred_conflicting)
wj.labels.add(Label.objects.create(name='wj1', organization=organization), Label.objects.create(name='wj2', organization=organization))
wj_igs = [InstanceGroup.objects.create(name='wj'), common_ig]
for ig in wj_igs:
wj.instance_groups.add(ig)
# assertions for behavior where node and workflow jobs have prompts
data = node.get_job_kwargs()
assert data['extra_vars'] == {'node_key': 'node_val', 'wj_key': 'wj_val'}
assert set(data['credentials']) == set([wj_cred_unique, wj_cred_conflicting, node_cred_unique])
assert data['instance_groups'] == wj_igs
assert set(data['labels']) == set(node_labels) # as exception, WFJT labels not applied
assert data['limit'] == 'wj_limit'
@pytest.mark.django_db @pytest.mark.django_db
class TestWorkflowJobTemplate: class TestWorkflowJobTemplate:
@@ -349,25 +287,12 @@ class TestWorkflowJobTemplatePrompts:
@pytest.fixture @pytest.fixture
def wfjt_prompts(self): def wfjt_prompts(self):
return WorkflowJobTemplate.objects.create( return WorkflowJobTemplate.objects.create(
ask_variables_on_launch=True, ask_inventory_on_launch=True, ask_variables_on_launch=True, ask_limit_on_launch=True, ask_scm_branch_on_launch=True
ask_inventory_on_launch=True,
ask_tags_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
ask_scm_branch_on_launch=True,
ask_skip_tags_on_launch=True,
) )
@pytest.fixture @pytest.fixture
def prompts_data(self, inventory): def prompts_data(self, inventory):
return dict( return dict(inventory=inventory, extra_vars={'foo': 'bar'}, limit='webservers', scm_branch='release-3.3')
inventory=inventory,
extra_vars={'foo': 'bar'},
limit='webservers',
scm_branch='release-3.3',
job_tags='foo',
skip_tags='bar',
)
def test_apply_workflow_job_prompts(self, workflow_job_template, wfjt_prompts, prompts_data, inventory): def test_apply_workflow_job_prompts(self, workflow_job_template, wfjt_prompts, prompts_data, inventory):
# null or empty fields used # null or empty fields used
@@ -375,9 +300,6 @@ class TestWorkflowJobTemplatePrompts:
assert workflow_job.limit is None assert workflow_job.limit is None
assert workflow_job.inventory is None assert workflow_job.inventory is None
assert workflow_job.scm_branch is None assert workflow_job.scm_branch is None
assert workflow_job.job_tags is None
assert workflow_job.skip_tags is None
assert len(workflow_job.labels.all()) is 0
# fields from prompts used # fields from prompts used
workflow_job = workflow_job_template.create_unified_job(**prompts_data) workflow_job = workflow_job_template.create_unified_job(**prompts_data)
@@ -385,21 +307,15 @@ class TestWorkflowJobTemplatePrompts:
assert workflow_job.limit == 'webservers' assert workflow_job.limit == 'webservers'
assert workflow_job.inventory == inventory assert workflow_job.inventory == inventory
assert workflow_job.scm_branch == 'release-3.3' assert workflow_job.scm_branch == 'release-3.3'
assert workflow_job.job_tags == 'foo'
assert workflow_job.skip_tags == 'bar'
# non-null fields from WFJT used # non-null fields from WFJT used
workflow_job_template.inventory = inventory workflow_job_template.inventory = inventory
workflow_job_template.limit = 'fooo' workflow_job_template.limit = 'fooo'
workflow_job_template.scm_branch = 'bar' workflow_job_template.scm_branch = 'bar'
workflow_job_template.job_tags = 'baz'
workflow_job_template.skip_tags = 'dinosaur'
workflow_job = workflow_job_template.create_unified_job() workflow_job = workflow_job_template.create_unified_job()
assert workflow_job.limit == 'fooo' assert workflow_job.limit == 'fooo'
assert workflow_job.inventory == inventory assert workflow_job.inventory == inventory
assert workflow_job.scm_branch == 'bar' assert workflow_job.scm_branch == 'bar'
assert workflow_job.job_tags == 'baz'
assert workflow_job.skip_tags == 'dinosaur'
@pytest.mark.django_db @pytest.mark.django_db
def test_process_workflow_job_prompts(self, inventory, workflow_job_template, wfjt_prompts, prompts_data): def test_process_workflow_job_prompts(self, inventory, workflow_job_template, wfjt_prompts, prompts_data):
@@ -424,19 +340,12 @@ class TestWorkflowJobTemplatePrompts:
ask_limit_on_launch=True, ask_limit_on_launch=True,
scm_branch='bar', scm_branch='bar',
ask_scm_branch_on_launch=True, ask_scm_branch_on_launch=True,
job_tags='foo',
skip_tags='bar',
), ),
user=org_admin, user=org_admin,
expect=201, expect=201,
) )
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.char_prompts == { assert wfjt.char_prompts == {'limit': 'foooo', 'scm_branch': 'bar'}
'limit': 'foooo',
'scm_branch': 'bar',
'job_tags': 'foo',
'skip_tags': 'bar',
}
assert wfjt.ask_scm_branch_on_launch is True assert wfjt.ask_scm_branch_on_launch is True
assert wfjt.ask_limit_on_launch is True assert wfjt.ask_limit_on_launch is True
@@ -446,67 +355,6 @@ class TestWorkflowJobTemplatePrompts:
assert r.data['limit'] == 'prompt_limit' assert r.data['limit'] == 'prompt_limit'
assert r.data['scm_branch'] == 'prompt_branch' assert r.data['scm_branch'] == 'prompt_branch'
@pytest.mark.django_db
def test_set_all_ask_for_prompts_false_from_post(self, post, organization, inventory, org_admin):
'''
Tests default behaviour and values of ask_for_* fields on WFJT via POST
'''
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
inventory=inventory.id,
job_tags='',
skip_tags='',
),
user=org_admin,
expect=201,
)
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.ask_inventory_on_launch is False
assert wfjt.ask_labels_on_launch is False
assert wfjt.ask_limit_on_launch is False
assert wfjt.ask_scm_branch_on_launch is False
assert wfjt.ask_skip_tags_on_launch is False
assert wfjt.ask_tags_on_launch is False
assert wfjt.ask_variables_on_launch is False
@pytest.mark.django_db
def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
'''
Tests behaviour and values of ask_for_* fields on WFJT via POST
'''
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
inventory=inventory.id,
job_tags='',
skip_tags='',
ask_inventory_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
ask_scm_branch_on_launch=True,
ask_skip_tags_on_launch=True,
ask_tags_on_launch=True,
ask_variables_on_launch=True,
),
user=org_admin,
expect=201,
)
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.ask_inventory_on_launch is True
assert wfjt.ask_labels_on_launch is True
assert wfjt.ask_limit_on_launch is True
assert wfjt.ask_scm_branch_on_launch is True
assert wfjt.ask_skip_tags_on_launch is True
assert wfjt.ask_tags_on_launch is True
assert wfjt.ask_variables_on_launch is True
@pytest.mark.django_db @pytest.mark.django_db
def test_workflow_ancestors(organization): def test_workflow_ancestors(organization):

View File

@@ -6,20 +6,12 @@ from awx.main.utils import decrypt_field
from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate
from awx.main.models.jobs import JobTemplate from awx.main.models.jobs import JobTemplate
from awx.main.tasks.system import deep_copy_model_obj from awx.main.tasks.system import deep_copy_model_obj
from awx.main.models import Label, ExecutionEnvironment, InstanceGroup
@pytest.mark.django_db @pytest.mark.django_db
def test_job_template_copy( def test_job_template_copy(post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin):
post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin, organization
):
label = Label.objects.create(name="foobar", organization=organization)
ig = InstanceGroup.objects.create(name="bazbar", organization=organization)
job_template_with_survey_passwords.project = project job_template_with_survey_passwords.project = project
job_template_with_survey_passwords.inventory = inventory job_template_with_survey_passwords.inventory = inventory
job_template_with_survey_passwords.labels.add(label)
job_template_with_survey_passwords.instance_groups.add(ig)
job_template_with_survey_passwords.prevent_instance_group_fallback = True
job_template_with_survey_passwords.save() job_template_with_survey_passwords.save()
job_template_with_survey_passwords.credentials.add(credential) job_template_with_survey_passwords.credentials.add(credential)
job_template_with_survey_passwords.credentials.add(machine_credential) job_template_with_survey_passwords.credentials.add(machine_credential)
@@ -62,11 +54,6 @@ def test_job_template_copy(
assert vault_credential in jt_copy.credentials.all() assert vault_credential in jt_copy.credentials.all()
assert machine_credential in jt_copy.credentials.all() assert machine_credential in jt_copy.credentials.all()
assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec
assert jt_copy.labels.count() != 0
assert jt_copy.labels.get(pk=label.pk) == label
assert jt_copy.instance_groups.count() != 0
assert jt_copy.instance_groups.get(pk=ig.pk) == ig
assert jt_copy.prevent_instance_group_fallback == True
@pytest.mark.django_db @pytest.mark.django_db
@@ -97,8 +84,6 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
host = group_1_1.hosts.create(name='host', inventory=inventory) host = group_1_1.hosts.create(name='host', inventory=inventory)
group_2_1.hosts.add(host) group_2_1.hosts.add(host)
inventory.admin_role.members.add(alice) inventory.admin_role.members.add(alice)
inventory.prevent_instance_group_fallback = True
inventory.save()
assert get(reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200).data['can_copy'] is False assert get(reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200).data['can_copy'] is False
inventory.organization.admin_role.members.add(alice) inventory.organization.admin_role.members.add(alice)
assert get(reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200).data['can_copy'] is True assert get(reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200).data['can_copy'] is True
@@ -114,7 +99,6 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
assert inventory_copy.organization == organization assert inventory_copy.organization == organization
assert inventory_copy.created_by == alice assert inventory_copy.created_by == alice
assert inventory_copy.name == 'new inv name' assert inventory_copy.name == 'new inv name'
assert inventory_copy.prevent_instance_group_fallback == True
assert set(group_1_1_copy.parents.all()) == set() assert set(group_1_1_copy.parents.all()) == set()
assert set(group_2_1_copy.parents.all()) == set([group_1_1_copy]) assert set(group_2_1_copy.parents.all()) == set([group_1_1_copy])
assert set(group_2_2_copy.parents.all()) == set([group_1_1_copy, group_2_1_copy]) assert set(group_2_2_copy.parents.all()) == set([group_1_1_copy, group_2_1_copy])
@@ -125,22 +109,8 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
@pytest.mark.django_db @pytest.mark.django_db
def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization): def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization):
'''
Tests the FIELDS_TO_PRESERVE_AT_COPY attribute on WFJTs
'''
workflow_job_template.organization = organization workflow_job_template.organization = organization
label = Label.objects.create(name="foobar", organization=organization)
workflow_job_template.labels.add(label)
ee = ExecutionEnvironment.objects.create(name="barfoo", organization=organization)
workflow_job_template.execution_environment = ee
ig = InstanceGroup.objects.create(name="bazbar", organization=organization)
workflow_job_template.instance_groups.add(ig)
workflow_job_template.save() workflow_job_template.save()
jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)] jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)]
nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)] nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)]
nodes[0].success_nodes.add(nodes[1]) nodes[0].success_nodes.add(nodes[1])
@@ -154,16 +124,9 @@ def test_workflow_job_template_copy(workflow_job_template, post, get, admin, org
wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id) wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id)
args, kwargs = deep_copy_mock.call_args args, kwargs = deep_copy_mock.call_args
deep_copy_model_obj(*args, **kwargs) deep_copy_model_obj(*args, **kwargs)
assert wfjt_copy.organization == organization assert wfjt_copy.organization == organization
assert wfjt_copy.created_by == admin assert wfjt_copy.created_by == admin
assert wfjt_copy.name == 'new wfjt name' assert wfjt_copy.name == 'new wfjt name'
assert wfjt_copy.labels.count() != 0
assert wfjt_copy.labels.get(pk=label.pk) == label
assert wfjt_copy.execution_environment == ee
assert wfjt_copy.instance_groups.count() != 0
assert wfjt_copy.instance_groups.get(pk=ig.pk) == ig
copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()] copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()]
copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1])) copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1]))
for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]): for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]):

View File

@@ -74,8 +74,7 @@ GLqbpJyX2r3p/Rmo6mLY71SqpA==
@pytest.mark.django_db @pytest.mark.django_db
def test_default_cred_types(): def test_default_cred_types():
assert sorted(CredentialType.defaults.keys()) == sorted( assert sorted(CredentialType.defaults.keys()) == [
[
'aim', 'aim',
'aws', 'aws',
'azure_kv', 'azure_kv',
@@ -87,7 +86,6 @@ def test_default_cred_types():
'gce', 'gce',
'github_token', 'github_token',
'gitlab_token', 'gitlab_token',
'gpg_public_key',
'hashivault_kv', 'hashivault_kv',
'hashivault_ssh', 'hashivault_ssh',
'insights', 'insights',
@@ -104,7 +102,6 @@ def test_default_cred_types():
'vault', 'vault',
'vmware', 'vmware',
] ]
)
for type_ in CredentialType.defaults.values(): for type_ in CredentialType.defaults.values():
assert type_().managed is True assert type_().managed is True

View File

@@ -244,7 +244,7 @@ class TestAutoScaling:
assert not self.pool.should_grow assert not self.pool.should_grow
alive_pid = self.pool.workers[1].pid alive_pid = self.pool.workers[1].pid
self.pool.workers[0].process.terminate() self.pool.workers[0].process.terminate()
time.sleep(2) # wait a moment for sigterm time.sleep(1) # wait a moment for sigterm
# clean up and the dead worker # clean up and the dead worker
self.pool.cleanup() self.pool.cleanup()

View File

@@ -391,8 +391,6 @@ class TestInstanceGroupOrdering:
assert ad_hoc.preferred_instance_groups == [ig_org] assert ad_hoc.preferred_instance_groups == [ig_org]
inventory.instance_groups.add(ig_inv) inventory.instance_groups.add(ig_inv)
assert ad_hoc.preferred_instance_groups == [ig_inv, ig_org] assert ad_hoc.preferred_instance_groups == [ig_inv, ig_org]
inventory.prevent_instance_group_fallback = True
assert ad_hoc.preferred_instance_groups == [ig_inv]
def test_inventory_update_instance_groups(self, instance_group_factory, inventory_source, default_instance_group): def test_inventory_update_instance_groups(self, instance_group_factory, inventory_source, default_instance_group):
iu = InventoryUpdate.objects.create(inventory_source=inventory_source, source=inventory_source.source) iu = InventoryUpdate.objects.create(inventory_source=inventory_source, source=inventory_source.source)
@@ -406,8 +404,6 @@ class TestInstanceGroupOrdering:
inventory_source.instance_groups.add(ig_tmp) inventory_source.instance_groups.add(ig_tmp)
# API does not allow setting IGs on inventory source, so ignore those # API does not allow setting IGs on inventory source, so ignore those
assert iu.preferred_instance_groups == [ig_inv, ig_org] assert iu.preferred_instance_groups == [ig_inv, ig_org]
inventory_source.inventory.prevent_instance_group_fallback = True
assert iu.preferred_instance_groups == [ig_inv]
def test_job_instance_groups(self, instance_group_factory, inventory, project, default_instance_group): def test_job_instance_groups(self, instance_group_factory, inventory, project, default_instance_group):
jt = JobTemplate.objects.create(inventory=inventory, project=project) jt = JobTemplate.objects.create(inventory=inventory, project=project)
@@ -421,31 +417,3 @@ class TestInstanceGroupOrdering:
assert job.preferred_instance_groups == [ig_inv, ig_org] assert job.preferred_instance_groups == [ig_inv, ig_org]
job.job_template.instance_groups.add(ig_tmp) job.job_template.instance_groups.add(ig_tmp)
assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org] assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org]
def test_job_instance_groups_cache_default(self, instance_group_factory, inventory, project, default_instance_group):
jt = JobTemplate.objects.create(inventory=inventory, project=project)
job = jt.create_unified_job()
print(job.preferred_instance_groups_cache)
print(default_instance_group)
assert job.preferred_instance_groups_cache == [default_instance_group.id]
def test_job_instance_groups_cache_default_additional_items(self, instance_group_factory, inventory, project, default_instance_group):
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
project.organization.instance_groups.add(ig_org)
inventory.instance_groups.add(ig_inv)
jt = JobTemplate.objects.create(inventory=inventory, project=project)
jt.instance_groups.add(ig_tmp)
job = jt.create_unified_job()
assert job.preferred_instance_groups_cache == [ig_tmp.id, ig_inv.id, ig_org.id]
def test_job_instance_groups_cache_prompt(self, instance_group_factory, inventory, project, default_instance_group):
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
project.organization.instance_groups.add(ig_org)
inventory.instance_groups.add(ig_inv)
jt = JobTemplate.objects.create(inventory=inventory, project=project)
job = jt.create_unified_job(instance_groups=[ig_tmp])
assert job.preferred_instance_groups_cache == [ig_tmp.id]

View File

@@ -3,20 +3,7 @@ import pytest
from unittest import mock from unittest import mock
import json import json
from awx.main.models import ( from awx.main.models import Job, Instance, JobHostSummary, InventoryUpdate, InventorySource, Project, ProjectUpdate, SystemJob, AdHocCommand
Job,
Instance,
JobHostSummary,
InventoryUpdate,
InventorySource,
Project,
ProjectUpdate,
SystemJob,
AdHocCommand,
InstanceGroup,
Label,
ExecutionEnvironment,
)
from awx.main.tasks.system import cluster_node_heartbeat from awx.main.tasks.system import cluster_node_heartbeat
from django.test.utils import override_settings from django.test.utils import override_settings
@@ -116,88 +103,14 @@ def test_job_notification_host_data(inventory, machine_credential, project, job_
class TestLaunchConfig: class TestLaunchConfig:
def test_null_creation_from_prompts(self): def test_null_creation_from_prompts(self):
job = Job.objects.create() job = Job.objects.create()
data = { data = {"credentials": [], "extra_vars": {}, "limit": None, "job_type": None}
"credentials": [],
"extra_vars": {},
"limit": None,
"job_type": None,
"execution_environment": None,
"instance_groups": None,
"labels": None,
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data) config = job.create_config_from_prompts(data)
assert config is None assert config is None
def test_only_limit_defined(self, job_template): def test_only_limit_defined(self, job_template):
job = Job.objects.create(job_template=job_template) job = Job.objects.create(job_template=job_template)
data = { data = {"credentials": [], "extra_vars": {}, "job_tags": None, "limit": ""}
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": "",
"execution_environment": None,
"instance_groups": None,
"labels": None,
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data) config = job.create_config_from_prompts(data)
assert config.char_prompts == {"limit": ""} assert config.char_prompts == {"limit": ""}
assert not config.credentials.exists() assert not config.credentials.exists()
assert config.prompts_dict() == {"limit": ""} assert config.prompts_dict() == {"limit": ""}
def test_many_to_many_fields(self, job_template, organization):
job = Job.objects.create(job_template=job_template)
ig1 = InstanceGroup.objects.create(name='bar')
ig2 = InstanceGroup.objects.create(name='foo')
job_template.instance_groups.add(ig2)
label1 = Label.objects.create(name='foo', description='bar', organization=organization)
label2 = Label.objects.create(name='faz', description='baz', organization=organization)
# Order should matter here which is why we do 2 and then 1
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": None,
"execution_environment": None,
"instance_groups": [ig2, ig1],
"labels": [label2, label1],
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)
assert config.instance_groups.exists()
config_instance_group_ids = [item.id for item in config.instance_groups.all()]
assert config_instance_group_ids == [ig2.id, ig1.id]
assert config.labels.exists()
config_label_ids = [item.id for item in config.labels.all()]
assert config_label_ids == [label2.id, label1.id]
def test_pk_field(self, job_template, organization):
job = Job.objects.create(job_template=job_template)
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
# Order should matter here which is why we do 2 and then 1
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": None,
"execution_environment": ee,
"instance_groups": [],
"labels": [],
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)
assert config.execution_environment
# We just write the PK instead of trying to assign an item, that happens on the save
assert config.execution_environment_id == ee.id

View File

@@ -3,20 +3,7 @@ import pytest
from rest_framework.exceptions import PermissionDenied from rest_framework.exceptions import PermissionDenied
from awx.main.access import JobAccess, JobLaunchConfigAccess, AdHocCommandAccess, InventoryUpdateAccess, ProjectUpdateAccess from awx.main.access import JobAccess, JobLaunchConfigAccess, AdHocCommandAccess, InventoryUpdateAccess, ProjectUpdateAccess
from awx.main.models import ( from awx.main.models import Job, JobLaunchConfig, JobTemplate, AdHocCommand, InventoryUpdate, InventorySource, ProjectUpdate, User, Credential
Job,
JobLaunchConfig,
JobTemplate,
AdHocCommand,
InventoryUpdate,
InventorySource,
ProjectUpdate,
User,
Credential,
ExecutionEnvironment,
InstanceGroup,
Label,
)
from crum import impersonate from crum import impersonate
@@ -315,33 +302,13 @@ class TestLaunchConfigAccess:
access = JobLaunchConfigAccess(rando) access = JobLaunchConfigAccess(rando)
cred1, cred2 = self._make_two_credentials(credentialtype_ssh) cred1, cred2 = self._make_two_credentials(credentialtype_ssh)
assert access.has_obj_m2m_access(config) # has access if 0 creds assert access.has_credentials_access(config) # has access if 0 creds
config.credentials.add(cred1, cred2) config.credentials.add(cred1, cred2)
assert not access.has_obj_m2m_access(config) # lacks access to both assert not access.has_credentials_access(config) # lacks access to both
cred1.use_role.members.add(rando) cred1.use_role.members.add(rando)
assert not access.has_obj_m2m_access(config) # lacks access to 1 assert not access.has_credentials_access(config) # lacks access to 1
cred2.use_role.members.add(rando) cred2.use_role.members.add(rando)
assert access.has_obj_m2m_access(config) # has access to both assert access.has_credentials_access(config) # has access to both
def test_new_execution_environment_access(self, rando):
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
access = JobLaunchConfigAccess(rando)
assert access.can_add({'execution_environment': ee}) # can add because access to ee will be granted
def test_new_label_access(self, rando, organization):
label = Label.objects.create(name='foo', description='bar', organization=organization)
access = JobLaunchConfigAccess(rando)
assert not access.can_add({'labels': [label]}) # can't add because no access to label
# We assert in JT unit tests that the access will be granted if label is in JT
def test_new_instance_group_access(self, rando):
ig = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
access = JobLaunchConfigAccess(rando)
assert not access.can_add({'instance_groups': [ig]}) # can't add because no access to ig
# We assert in JT unit tests that the access will be granted if instance group is in JT
def test_can_use_minor(self, rando): def test_can_use_minor(self, rando):
# Config object only has flat-field overrides, no RBAC restrictions # Config object only has flat-field overrides, no RBAC restrictions

View File

@@ -6,7 +6,6 @@ from awx.main.access import (
WorkflowJobAccess, WorkflowJobAccess,
# WorkflowJobNodeAccess # WorkflowJobNodeAccess
) )
from awx.main.models import JobTemplate, WorkflowJobTemplateNode
from rest_framework.exceptions import PermissionDenied from rest_framework.exceptions import PermissionDenied
@@ -88,16 +87,6 @@ class TestWorkflowJobTemplateNodeAccess:
job_template.read_role.members.add(rando) job_template.read_role.members.add(rando)
assert not access.can_add({'workflow_job_template': wfjt, 'unified_job_template': job_template}) assert not access.can_add({'workflow_job_template': wfjt, 'unified_job_template': job_template})
def test_change_JT_no_start_perm(self, wfjt, rando):
wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando)
jt1 = JobTemplate.objects.create()
jt1.execute_role.members.add(rando)
assert access.can_add({'workflow_job_template': wfjt, 'unified_job_template': jt1})
node = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=jt1)
jt2 = JobTemplate.objects.create()
assert not access.can_change(node, {'unified_job_template': jt2.id})
def test_add_node_with_minimum_permissions(self, wfjt, job_template, inventory, rando): def test_add_node_with_minimum_permissions(self, wfjt, job_template, inventory, rando):
wfjt.admin_role.members.add(rando) wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando) access = WorkflowJobTemplateNodeAccess(rando)
@@ -112,92 +101,6 @@ class TestWorkflowJobTemplateNodeAccess:
access = WorkflowJobTemplateNodeAccess(rando) access = WorkflowJobTemplateNodeAccess(rando)
assert access.can_delete(wfjt_node) assert access.can_delete(wfjt_node)
@pytest.mark.parametrize(
"add_wfjt_admin, add_jt_admin, permission_type, expected_result, method_type",
[
(True, False, 'credentials', False, 'can_attach'),
(True, True, 'credentials', True, 'can_attach'),
(True, False, 'labels', False, 'can_attach'),
(True, True, 'labels', True, 'can_attach'),
(True, False, 'instance_groups', False, 'can_attach'),
(True, True, 'instance_groups', True, 'can_attach'),
(True, False, 'credentials', False, 'can_unattach'),
(True, True, 'credentials', True, 'can_unattach'),
(True, False, 'labels', False, 'can_unattach'),
(True, True, 'labels', True, 'can_unattach'),
(True, False, 'instance_groups', False, 'can_unattach'),
(True, True, 'instance_groups', True, 'can_unattach'),
],
)
def test_attacher_permissions(self, wfjt_node, job_template, rando, add_wfjt_admin, permission_type, add_jt_admin, expected_result, mocker, method_type):
wfjt = wfjt_node.workflow_job_template
if add_wfjt_admin:
wfjt.admin_role.members.add(rando)
wfjt.unified_job_template = job_template
if add_jt_admin:
job_template.execute_role.members.add(rando)
from awx.main.models import Credential, Label, InstanceGroup, Organization, CredentialType
if permission_type == 'credentials':
sub_obj = Credential.objects.create(credential_type=CredentialType.objects.create())
sub_obj.use_role.members.add(rando)
elif permission_type == 'labels':
sub_obj = Label.objects.create(organization=Organization.objects.create())
sub_obj.organization.member_role.members.add(rando)
elif permission_type == 'instance_groups':
sub_obj = InstanceGroup.objects.create()
org = Organization.objects.create()
org.admin_role.members.add(rando) # only admins can see IGs
org.instance_groups.add(sub_obj)
access = WorkflowJobTemplateNodeAccess(rando)
if method_type == 'can_unattach':
assert getattr(access, method_type)(wfjt_node, sub_obj, permission_type) == expected_result
else:
assert getattr(access, method_type)(wfjt_node, sub_obj, permission_type, {}) == expected_result
# The actual attachment of labels, credentials and instance groups are tested from JobLaunchConfigAccess
@pytest.mark.parametrize(
"attachment_type, expect_exception, method_type",
[
("credentials", False, 'can_attach'),
("labels", False, 'can_attach'),
("instance_groups", False, 'can_attach'),
("success_nodes", False, 'can_attach'),
("failure_nodes", False, 'can_attach'),
("always_nodes", False, 'can_attach'),
("junk", True, 'can_attach'),
("credentials", False, 'can_unattach'),
("labels", False, 'can_unattach'),
("instance_groups", False, 'can_unattach'),
("success_nodes", False, 'can_unattach'),
("failure_nodes", False, 'can_unattach'),
("always_nodes", False, 'can_unattach'),
("junk", True, 'can_unattach'),
],
)
def test_attacher_raise_not_implemented(self, wfjt_node, rando, attachment_type, expect_exception, method_type):
wfjt = wfjt_node.workflow_job_template
wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando)
if expect_exception:
with pytest.raises(NotImplementedError):
access.can_attach(wfjt_node, None, attachment_type, None)
else:
try:
getattr(access, method_type)(wfjt_node, None, attachment_type, None)
except NotImplementedError:
# We explicitly catch NotImplemented because the _nodes type will raise a different exception
assert False, "Exception was raised when it should not have been"
except Exception:
# File "/awx_devel/awx/main/access.py", line 2074, in check_same_WFJT
# raise Exception('Attaching workflow nodes only allowed for other nodes')
pass
# TODO: Implement additional tests for _nodes attachments here
@pytest.mark.django_db @pytest.mark.django_db
class TestWorkflowJobAccess: class TestWorkflowJobAccess:

View File

@@ -19,11 +19,12 @@ def scm_revision_file(tmpdir_factory):
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize('node_type', ('control. hybrid')) @pytest.mark.parametrize('node_type', ('control', 'hybrid'))
def test_no_worker_info_on_AWX_nodes(node_type): def test_no_worker_info_on_AWX_nodes(node_type):
hostname = 'us-south-3-compute.invalid' hostname = 'us-south-3-compute.invalid'
Instance.objects.create(hostname=hostname, node_type=node_type) Instance.objects.create(hostname=hostname, node_type=node_type)
assert execution_node_health_check(hostname) is None with pytest.raises(RuntimeError):
execution_node_health_check(hostname)
@pytest.fixture @pytest.fixture

View File

@@ -8,17 +8,9 @@ from rest_framework.exceptions import ValidationError
from awx.api.serializers import JobLaunchSerializer from awx.api.serializers import JobLaunchSerializer
@pytest.mark.parametrize( def test_primary_key_related_field():
"param",
[
('credentials'),
('instance_groups'),
('labels'),
],
)
def test_primary_key_related_field(param):
# We are testing if the PrimaryKeyRelatedField in this serializer can take dictionary. # We are testing if the PrimaryKeyRelatedField in this serializer can take dictionary.
# PrimaryKeyRelatedField should not be able to take dictionary as input, and should raise a ValidationError. # PrimaryKeyRelatedField should not be able to take dictionary as input, and should raise a ValidationError.
data = {param: {'1': '2', '3': '4'}} data = {'credentials': {'1': '2', '3': '4'}}
with pytest.raises(ValidationError): with pytest.raises(ValidationError):
JobLaunchSerializer(data=data) JobLaunchSerializer(data=data)

View File

@@ -11,7 +11,6 @@ from awx.api.serializers import (
from awx.main.models import Job, WorkflowJobTemplateNode, WorkflowJob, WorkflowJobNode, WorkflowJobTemplate, Project, Inventory, JobTemplate from awx.main.models import Job, WorkflowJobTemplateNode, WorkflowJob, WorkflowJobNode, WorkflowJobTemplate, Project, Inventory, JobTemplate
@pytest.mark.django_db
@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x, y: {}) @mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x, y: {})
class TestWorkflowJobTemplateSerializerGetRelated: class TestWorkflowJobTemplateSerializerGetRelated:
@pytest.fixture @pytest.fixture
@@ -27,7 +26,6 @@ class TestWorkflowJobTemplateSerializerGetRelated:
'launch', 'launch',
'workflow_nodes', 'workflow_nodes',
'webhook_key', 'webhook_key',
'labels',
], ],
) )
def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name): def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name):
@@ -60,7 +58,6 @@ class TestWorkflowNodeBaseSerializerGetRelated:
assert 'unified_job_template' not in related assert 'unified_job_template' not in related
@pytest.mark.django_db
@mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x, y: {}) @mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x, y: {})
class TestWorkflowJobTemplateNodeSerializerGetRelated: class TestWorkflowJobTemplateNodeSerializerGetRelated:
@pytest.fixture @pytest.fixture
@@ -90,8 +87,6 @@ class TestWorkflowJobTemplateNodeSerializerGetRelated:
'success_nodes', 'success_nodes',
'failure_nodes', 'failure_nodes',
'always_nodes', 'always_nodes',
'labels',
'instance_groups',
], ],
) )
def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name): def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name):
@@ -151,7 +146,6 @@ class TestWorkflowJobTemplateNodeSerializerCharPrompts:
assert WFJT_serializer.instance.limit == 'webservers' assert WFJT_serializer.instance.limit == 'webservers'
@pytest.mark.django_db
@mock.patch('awx.api.serializers.BaseSerializer.validate', lambda self, attrs: attrs) @mock.patch('awx.api.serializers.BaseSerializer.validate', lambda self, attrs: attrs)
class TestWorkflowJobTemplateNodeSerializerSurveyPasswords: class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
@pytest.fixture @pytest.fixture
@@ -168,7 +162,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
def test_set_survey_passwords_create(self, jt): def test_set_survey_passwords_create(self, jt):
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') wfjt = WorkflowJobTemplate(name='fake-wfjt')
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}})
assert 'survey_passwords' in attrs assert 'survey_passwords' in attrs
assert 'var1' in attrs['survey_passwords'] assert 'var1' in attrs['survey_passwords']
@@ -177,7 +171,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
def test_set_survey_passwords_modify(self, jt): def test_set_survey_passwords_modify(self, jt):
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') wfjt = WorkflowJobTemplate(name='fake-wfjt')
serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt) serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt)
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}})
assert 'survey_passwords' in attrs assert 'survey_passwords' in attrs
@@ -187,7 +181,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
def test_use_db_answer(self, jt, mocker): def test_use_db_answer(self, jt, mocker):
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') wfjt = WorkflowJobTemplate(name='fake-wfjt')
serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt, extra_data={'var1': '$encrypted$foooooo'}) serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt, extra_data={'var1': '$encrypted$foooooo'})
with mocker.patch('awx.main.models.mixins.decrypt_value', return_value='foo'): with mocker.patch('awx.main.models.mixins.decrypt_value', return_value='foo'):
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}})
@@ -202,7 +196,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
with that particular var omitted so on launch time the default takes effect with that particular var omitted so on launch time the default takes effect
""" """
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') wfjt = WorkflowJobTemplate(name='fake-wfjt')
jt.survey_spec['spec'][0]['default'] = '$encrypted$bar' jt.survey_spec['spec'][0]['default'] = '$encrypted$bar'
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}})
assert 'survey_passwords' in attrs assert 'survey_passwords' in attrs
@@ -236,8 +230,6 @@ class TestWorkflowJobNodeSerializerGetRelated:
'success_nodes', 'success_nodes',
'failure_nodes', 'failure_nodes',
'always_nodes', 'always_nodes',
'labels',
'instance_groups',
], ],
) )
def test_get_related(self, test_get_related, workflow_job_node, related_resource_name): def test_get_related(self, test_get_related, workflow_job_node, related_resource_name):

View File

@@ -5,8 +5,7 @@ from unittest import mock
from collections import namedtuple from collections import namedtuple
from awx.api.views.root import ApiVersionRootView from awx.api.views import ApiVersionRootView, JobTemplateLabelList, InventoryInventorySourcesUpdate, JobTemplateSurveySpec
from awx.api.views import JobTemplateLabelList, InventoryInventorySourcesUpdate, JobTemplateSurveySpec
from awx.main.views import handle_error from awx.main.views import handle_error
@@ -24,7 +23,7 @@ class TestApiRootView:
endpoints = [ endpoints = [
'ping', 'ping',
'config', 'config',
# 'settings', #'settings',
'me', 'me',
'dashboard', 'dashboard',
'organizations', 'organizations',
@@ -60,7 +59,7 @@ class TestApiRootView:
class TestJobTemplateLabelList: class TestJobTemplateLabelList:
def test_inherited_mixin_unattach(self): def test_inherited_mixin_unattach(self):
with mock.patch('awx.api.views.labels.LabelSubListCreateAttachDetachView.unattach') as mixin_unattach: with mock.patch('awx.api.generics.DeleteLastUnattachLabelMixin.unattach') as mixin_unattach:
view = JobTemplateLabelList() view = JobTemplateLabelList()
mock_request = mock.MagicMock() mock_request = mock.MagicMock()

View File

@@ -1,15 +1,9 @@
import pytest import pytest
from unittest import mock from unittest import mock
from awx.main.models import ( from awx.main.models.label import Label
Label, from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
UnifiedJobTemplate, from awx.main.models.inventory import Inventory
UnifiedJob,
Inventory,
Schedule,
WorkflowJobTemplateNode,
WorkflowJobNode,
)
mock_query_set = mock.MagicMock() mock_query_set = mock.MagicMock()
@@ -20,6 +14,12 @@ mock_objects = mock.MagicMock(filter=mock.MagicMock(return_value=mock_query_set)
@pytest.mark.django_db @pytest.mark.django_db
@mock.patch('awx.main.models.label.Label.objects', mock_objects) @mock.patch('awx.main.models.label.Label.objects', mock_objects)
class TestLabelFilterMocked: class TestLabelFilterMocked:
def test_get_orphaned_labels(self, mocker):
ret = Label.get_orphaned_labels()
assert mock_query_set == ret
Label.objects.filter.assert_called_with(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
def test_is_detached(self, mocker): def test_is_detached(self, mocker):
mock_query_set.exists.return_value = True mock_query_set.exists.return_value = True
@@ -27,15 +27,7 @@ class TestLabelFilterMocked:
ret = label.is_detached() ret = label.is_detached()
assert ret is True assert ret is True
Label.objects.filter.assert_called_with( Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
id=37,
unifiedjob_labels__isnull=True,
unifiedjobtemplate_labels__isnull=True,
inventory_labels__isnull=True,
schedule_labels__isnull=True,
workflowjobtemplatenode_labels__isnull=True,
workflowjobnode_labels__isnull=True,
)
mock_query_set.exists.assert_called_with() mock_query_set.exists.assert_called_with()
def test_is_detached_not(self, mocker): def test_is_detached_not(self, mocker):
@@ -45,102 +37,39 @@ class TestLabelFilterMocked:
ret = label.is_detached() ret = label.is_detached()
assert ret is False assert ret is False
Label.objects.filter.assert_called_with( Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
id=37,
unifiedjob_labels__isnull=True,
unifiedjobtemplate_labels__isnull=True,
inventory_labels__isnull=True,
schedule_labels__isnull=True,
workflowjobtemplatenode_labels__isnull=True,
workflowjobnode_labels__isnull=True,
)
mock_query_set.exists.assert_called_with() mock_query_set.exists.assert_called_with()
@pytest.mark.parametrize( @pytest.mark.parametrize(
"jt_count,j_count,inv_count,sched_count,wfnode_count,wfnodej_count,expected", "jt_count,j_count,inv_count,expected",
[ [
(1, 0, 0, 0, 0, 0, True), (1, 0, 0, True),
(0, 1, 0, 0, 0, 0, True), (0, 1, 0, True),
(1, 1, 0, 0, 0, 0, False), (0, 0, 1, True),
(0, 0, 1, 0, 0, 0, True), (1, 1, 1, False),
(1, 0, 1, 0, 0, 0, False),
(0, 1, 1, 0, 0, 0, False),
(1, 1, 1, 0, 0, 0, False),
(0, 0, 0, 1, 0, 0, True),
(1, 0, 0, 1, 0, 0, False),
(0, 1, 0, 1, 0, 0, False),
(1, 1, 0, 1, 0, 0, False),
(0, 0, 1, 1, 0, 0, False),
(1, 0, 1, 1, 0, 0, False),
(0, 1, 1, 1, 0, 0, False),
(1, 1, 1, 1, 0, 0, False),
(0, 0, 0, 0, 1, 0, True),
(1, 0, 0, 0, 1, 0, False),
(0, 1, 0, 0, 1, 0, False),
(1, 1, 0, 0, 1, 0, False),
(0, 0, 1, 0, 1, 0, False),
(1, 0, 1, 0, 1, 0, False),
(0, 1, 1, 0, 1, 0, False),
(1, 1, 1, 0, 1, 0, False),
(0, 0, 0, 1, 1, 0, False),
(1, 0, 0, 1, 1, 0, False),
(0, 1, 0, 1, 1, 0, False),
(1, 1, 0, 1, 1, 0, False),
(0, 0, 1, 1, 1, 0, False),
(1, 0, 1, 1, 1, 0, False),
(0, 1, 1, 1, 1, 0, False),
(1, 1, 1, 1, 1, 0, False),
(0, 0, 0, 0, 0, 1, True),
(1, 0, 0, 0, 0, 1, False),
(0, 1, 0, 0, 0, 1, False),
(1, 1, 0, 0, 0, 1, False),
(0, 0, 1, 0, 0, 1, False),
(1, 0, 1, 0, 0, 1, False),
(0, 1, 1, 0, 0, 1, False),
(1, 1, 1, 0, 0, 1, False),
(0, 0, 0, 1, 0, 1, False),
(1, 0, 0, 1, 0, 1, False),
(0, 1, 0, 1, 0, 1, False),
(1, 1, 0, 1, 0, 1, False),
(0, 0, 1, 1, 0, 1, False),
(1, 0, 1, 1, 0, 1, False),
(0, 1, 1, 1, 0, 1, False),
(1, 1, 1, 1, 0, 1, False),
(0, 0, 0, 0, 1, 1, False),
(1, 0, 0, 0, 1, 1, False),
(0, 1, 0, 0, 1, 1, False),
(1, 1, 0, 0, 1, 1, False),
(0, 0, 1, 0, 1, 1, False),
(1, 0, 1, 0, 1, 1, False),
(0, 1, 1, 0, 1, 1, False),
(1, 1, 1, 0, 1, 1, False),
(0, 0, 0, 1, 1, 1, False),
(1, 0, 0, 1, 1, 1, False),
(0, 1, 0, 1, 1, 1, False),
(1, 1, 0, 1, 1, 1, False),
(0, 0, 1, 1, 1, 1, False),
(1, 0, 1, 1, 1, 1, False),
(0, 1, 1, 1, 1, 1, False),
(1, 1, 1, 1, 1, 1, False),
], ],
) )
def test_is_candidate_for_detach(self, mocker, jt_count, j_count, inv_count, sched_count, wfnode_count, wfnodej_count, expected): def test_is_candidate_for_detach(self, mocker, jt_count, j_count, inv_count, expected):
counts = [jt_count, j_count, inv_count, sched_count, wfnode_count, wfnodej_count] mock_job_qs = mocker.MagicMock()
models = [UnifiedJobTemplate, UnifiedJob, Inventory, Schedule, WorkflowJobTemplateNode, WorkflowJobNode] mock_job_qs.count = mocker.MagicMock(return_value=j_count)
mockers = [] mocker.patch.object(UnifiedJob, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_job_qs)))
for index in range(0, len(models)):
a_mocker = mocker.MagicMock() mock_jt_qs = mocker.MagicMock()
a_mocker.count = mocker.MagicMock(return_value=counts[index]) mock_jt_qs.count = mocker.MagicMock(return_value=jt_count)
mocker.patch.object(models[index], 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=a_mocker))) mocker.patch.object(UnifiedJobTemplate, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_jt_qs)))
mockers.append(a_mocker)
mock_inv_qs = mocker.MagicMock()
mock_inv_qs.count = mocker.MagicMock(return_value=inv_count)
mocker.patch.object(Inventory, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_inv_qs)))
label = Label(id=37) label = Label(id=37)
ret = label.is_candidate_for_detach() ret = label.is_candidate_for_detach()
for index in range(0, len(models)): UnifiedJob.objects.filter.assert_called_with(labels__in=[label.id])
models[index].objects.filter.assert_called_with(labels__in=[label.id]) UnifiedJobTemplate.objects.filter.assert_called_with(labels__in=[label.id])
for index in range(0, len(mockers)): Inventory.objects.filter.assert_called_with(labels__in=[label.id])
mockers[index].count.assert_called_with() mock_job_qs.count.assert_called_with()
mock_jt_qs.count.assert_called_with()
mock_inv_qs.count.assert_called_with()
assert ret is expected assert ret is expected

View File

@@ -259,14 +259,13 @@ def test_survey_encryption_defaults(survey_spec_factory, question_type, default,
@pytest.mark.survey @pytest.mark.survey
@pytest.mark.django_db
class TestWorkflowSurveys: class TestWorkflowSurveys:
def test_update_kwargs_survey_defaults(self, survey_spec_factory): def test_update_kwargs_survey_defaults(self, survey_spec_factory):
"Assure that the survey default over-rides a JT variable" "Assure that the survey default over-rides a JT variable"
spec = survey_spec_factory('var1') spec = survey_spec_factory('var1')
spec['spec'][0]['default'] = 3 spec['spec'][0]['default'] = 3
spec['spec'][0]['required'] = False spec['spec'][0]['required'] = False
wfjt = WorkflowJobTemplate.objects.create(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="var1: 5") wfjt = WorkflowJobTemplate(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="var1: 5")
updated_extra_vars = wfjt._update_unified_job_kwargs({}, {}) updated_extra_vars = wfjt._update_unified_job_kwargs({}, {})
assert 'extra_vars' in updated_extra_vars assert 'extra_vars' in updated_extra_vars
assert json.loads(updated_extra_vars['extra_vars'])['var1'] == 3 assert json.loads(updated_extra_vars['extra_vars'])['var1'] == 3
@@ -278,7 +277,7 @@ class TestWorkflowSurveys:
spec['spec'][0]['required'] = False spec['spec'][0]['required'] = False
spec['spec'][1]['required'] = True spec['spec'][1]['required'] = True
spec['spec'][2]['required'] = False spec['spec'][2]['required'] = False
wfjt = WorkflowJobTemplate.objects.create(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="question2: hiworld") wfjt = WorkflowJobTemplate(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="question2: hiworld")
assert wfjt.variables_needed_to_start == ['question2'] assert wfjt.variables_needed_to_start == ['question2']
assert not wfjt.can_start_without_user_input() assert not wfjt.can_start_without_user_input()
@@ -312,6 +311,6 @@ class TestExtraVarsNoPrompt:
self.process_vars_and_assert(jt, provided_vars, valid) self.process_vars_and_assert(jt, provided_vars, valid)
def test_wfjt_extra_vars_counting(self, provided_vars, valid): def test_wfjt_extra_vars_counting(self, provided_vars, valid):
wfjt = WorkflowJobTemplate.objects.create(name='foo', extra_vars={'tmpl_var': 'bar'}) wfjt = WorkflowJobTemplate(name='foo', extra_vars={'tmpl_var': 'bar'})
prompted_fields, ignored_fields, errors = wfjt._accept_or_ignore_job_kwargs(extra_vars=provided_vars) prompted_fields, ignored_fields, errors = wfjt._accept_or_ignore_job_kwargs(extra_vars=provided_vars)
self.process_vars_and_assert(wfjt, provided_vars, valid) self.process_vars_and_assert(wfjt, provided_vars, valid)

View File

@@ -22,10 +22,6 @@ def test_unified_job_workflow_attributes():
assert job.workflow_job_id == 1 assert job.workflow_job_id == 1
def mock_on_commit(f):
f()
@pytest.fixture @pytest.fixture
def unified_job(mocker): def unified_job(mocker):
mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True) mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)
@@ -34,13 +30,11 @@ def unified_job(mocker):
j.cancel_flag = None j.cancel_flag = None
j.save = mocker.MagicMock() j.save = mocker.MagicMock()
j.websocket_emit_status = mocker.MagicMock() j.websocket_emit_status = mocker.MagicMock()
j.fallback_cancel = mocker.MagicMock()
return j return j
def test_cancel(unified_job): def test_cancel(unified_job):
with mock.patch('awx.main.models.unified_jobs.connection.on_commit', wraps=mock_on_commit):
unified_job.cancel() unified_job.cancel()
assert unified_job.cancel_flag is True assert unified_job.cancel_flag is True
@@ -50,23 +44,16 @@ def test_cancel(unified_job):
# Some more thought may want to go into only emitting canceled if/when the job record # Some more thought may want to go into only emitting canceled if/when the job record
# status is changed to canceled. Unlike, currently, where it's emitted unconditionally. # status is changed to canceled. Unlike, currently, where it's emitted unconditionally.
unified_job.websocket_emit_status.assert_called_with("canceled") unified_job.websocket_emit_status.assert_called_with("canceled")
assert [(args, kwargs) for args, kwargs in unified_job.save.call_args_list] == [ unified_job.save.assert_called_with(update_fields=['cancel_flag', 'start_args', 'status'])
((), {'update_fields': ['cancel_flag', 'start_args']}),
((), {'update_fields': ['status']}),
]
def test_cancel_job_explanation(unified_job): def test_cancel_job_explanation(unified_job):
job_explanation = 'giggity giggity' job_explanation = 'giggity giggity'
with mock.patch('awx.main.models.unified_jobs.connection.on_commit'):
unified_job.cancel(job_explanation=job_explanation) unified_job.cancel(job_explanation=job_explanation)
assert unified_job.job_explanation == job_explanation assert unified_job.job_explanation == job_explanation
assert [(args, kwargs) for args, kwargs in unified_job.save.call_args_list] == [ unified_job.save.assert_called_with(update_fields=['cancel_flag', 'start_args', 'status', 'job_explanation'])
((), {'update_fields': ['cancel_flag', 'start_args', 'job_explanation']}),
((), {'update_fields': ['status']}),
]
def test_organization_copy_to_jobs(): def test_organization_copy_to_jobs():

View File

@@ -94,7 +94,7 @@ def workflow_job_unit():
@pytest.fixture @pytest.fixture
def workflow_job_template_unit(): def workflow_job_template_unit():
return WorkflowJobTemplate.objects.create(name='workflow') return WorkflowJobTemplate(name='workflow')
@pytest.fixture @pytest.fixture
@@ -151,7 +151,6 @@ def test_node_getter_and_setters():
assert node.job_type == 'check' assert node.job_type == 'check'
@pytest.mark.django_db
class TestWorkflowJobCreate: class TestWorkflowJobCreate:
def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker): def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker):
mock_create = mocker.MagicMock() mock_create = mocker.MagicMock()
@@ -166,7 +165,6 @@ class TestWorkflowJobCreate:
unified_job_template=wfjt_node_no_prompts.unified_job_template, unified_job_template=wfjt_node_no_prompts.unified_job_template,
workflow_job=workflow_job_unit, workflow_job=workflow_job_unit,
identifier=mocker.ANY, identifier=mocker.ANY,
execution_environment=None,
) )
def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, credential, mocker): def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, credential, mocker):
@@ -182,11 +180,9 @@ class TestWorkflowJobCreate:
unified_job_template=wfjt_node_with_prompts.unified_job_template, unified_job_template=wfjt_node_with_prompts.unified_job_template,
workflow_job=workflow_job_unit, workflow_job=workflow_job_unit,
identifier=mocker.ANY, identifier=mocker.ANY,
execution_environment=None,
) )
@pytest.mark.django_db
@mock.patch('awx.main.models.workflow.WorkflowNodeBase.get_parent_nodes', lambda self: []) @mock.patch('awx.main.models.workflow.WorkflowNodeBase.get_parent_nodes', lambda self: [])
class TestWorkflowJobNodeJobKWARGS: class TestWorkflowJobNodeJobKWARGS:
""" """
@@ -235,12 +231,4 @@ class TestWorkflowJobNodeJobKWARGS:
def test_get_ask_mapping_integrity(): def test_get_ask_mapping_integrity():
assert list(WorkflowJobTemplate.get_ask_mapping().keys()) == [ assert list(WorkflowJobTemplate.get_ask_mapping().keys()) == ['extra_vars', 'inventory', 'limit', 'scm_branch']
'inventory',
'limit',
'scm_branch',
'labels',
'job_tags',
'skip_tags',
'extra_vars',
]

View File

@@ -196,7 +196,6 @@ def test_jt_can_add_bad_data(user_unit):
assert not access.can_add({'asdf': 'asdf'}) assert not access.can_add({'asdf': 'asdf'})
@pytest.mark.django_db
class TestWorkflowAccessMethods: class TestWorkflowAccessMethods:
@pytest.fixture @pytest.fixture
def workflow(self, workflow_job_template_factory): def workflow(self, workflow_job_template_factory):

View File

@@ -3,7 +3,6 @@
# Copyright (c) 2017 Ansible, Inc. # Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
import os import os
import re
import pytest import pytest
from uuid import uuid4 from uuid import uuid4
import json import json
@@ -13,13 +12,9 @@ from unittest import mock
from rest_framework.exceptions import ParseError from rest_framework.exceptions import ParseError
from awx.main.utils import common from awx.main.utils import common
from awx.api.validators import HostnameRegexValidator
from awx.main.models import Job, AdHocCommand, InventoryUpdate, ProjectUpdate, SystemJob, WorkflowJob, Inventory, JobTemplate, UnifiedJobTemplate, UnifiedJob from awx.main.models import Job, AdHocCommand, InventoryUpdate, ProjectUpdate, SystemJob, WorkflowJob, Inventory, JobTemplate, UnifiedJobTemplate, UnifiedJob
from django.core.exceptions import ValidationError
from django.utils.regex_helper import _lazy_re_compile
@pytest.mark.parametrize( @pytest.mark.parametrize(
'input_, output', 'input_, output',
@@ -199,136 +194,3 @@ def test_extract_ansible_vars():
redacted, var_list = common.extract_ansible_vars(json.dumps(my_dict)) redacted, var_list = common.extract_ansible_vars(json.dumps(my_dict))
assert var_list == set(['ansible_connetion_setting']) assert var_list == set(['ansible_connetion_setting'])
assert redacted == {"foobar": "baz"} assert redacted == {"foobar": "baz"}
@pytest.mark.parametrize(
'scm_type, url, username, password, check_special_cases, scp_format, expected',
[
# General/random cases
('git', '', True, True, True, False, ''),
('git', 'git://example.com/foo.git', True, True, True, False, 'git://example.com/foo.git'),
('git', 'http://example.com/foo.git', True, True, True, False, 'http://example.com/foo.git'),
('git', 'example.com:bar.git', True, True, True, False, 'git+ssh://example.com/bar.git'),
('git', 'user@example.com:bar.git', True, True, True, False, 'git+ssh://user@example.com/bar.git'),
('git', '127.0.0.1:bar.git', True, True, True, False, 'git+ssh://127.0.0.1/bar.git'),
('git', 'git+ssh://127.0.0.1/bar.git', True, True, True, True, '127.0.0.1:bar.git'),
('git', 'ssh://127.0.0.1:22/bar.git', True, True, True, False, 'ssh://127.0.0.1:22/bar.git'),
('git', 'ssh://root@127.0.0.1:22/bar.git', True, True, True, False, 'ssh://root@127.0.0.1:22/bar.git'),
('git', 'some/path', True, True, True, False, 'file:///some/path'),
('git', '/some/path', True, True, True, False, 'file:///some/path'),
# Invalid URLs - ensure we error properly
('cvs', 'anything', True, True, True, False, ValueError('Unsupported SCM type "cvs"')),
('svn', 'anything-without-colon-slash-slash', True, True, True, False, ValueError('Invalid svn URL')),
('git', 'http://example.com:123invalidport/foo.git', True, True, True, False, ValueError('Invalid git URL')),
('git', 'git+ssh://127.0.0.1/bar.git', True, True, True, False, ValueError('Unsupported git URL')),
('git', 'git@example.com:3000:/git/repo.git', True, True, True, False, ValueError('Invalid git URL')),
('insights', 'git://example.com/foo.git', True, True, True, False, ValueError('Unsupported insights URL')),
('svn', 'file://example/path', True, True, True, False, ValueError('Unsupported host "example" for file:// URL')),
('svn', 'svn:///example', True, True, True, False, ValueError('Host is required for svn URL')),
# Username/password cases
('git', 'https://example@example.com/bar.git', False, True, True, False, 'https://example.com/bar.git'),
('git', 'https://example@example.com/bar.git', 'user', True, True, False, 'https://user@example.com/bar.git'),
('git', 'https://example@example.com/bar.git', 'user:pw', True, True, False, 'https://user%3Apw@example.com/bar.git'),
('git', 'https://example@example.com/bar.git', False, 'pw', True, False, 'https://example.com/bar.git'),
('git', 'https://some:example@example.com/bar.git', True, False, True, False, 'https://some@example.com/bar.git'),
('git', 'https://some:example@example.com/bar.git', False, False, True, False, 'https://example.com/bar.git'),
('git', 'https://example.com/bar.git', 'user', 'pw', True, False, 'https://user:pw@example.com/bar.git'),
('git', 'https://example@example.com/bar.git', False, 'something', True, False, 'https://example.com/bar.git'),
# Special github/bitbucket cases
('git', 'notgit@github.com:ansible/awx.git', True, True, True, False, ValueError('Username must be "git" for SSH access to github.com.')),
(
'git',
'notgit@bitbucket.org:does-not-exist/example.git',
True,
True,
True,
False,
ValueError('Username must be "git" for SSH access to bitbucket.org.'),
),
(
'git',
'notgit@altssh.bitbucket.org:does-not-exist/example.git',
True,
True,
True,
False,
ValueError('Username must be "git" for SSH access to altssh.bitbucket.org.'),
),
('git', 'git:password@github.com:ansible/awx.git', True, True, True, False, 'git+ssh://git@github.com/ansible/awx.git'),
# Disabling the special handling should not raise an error
('git', 'notgit@github.com:ansible/awx.git', True, True, False, False, 'git+ssh://notgit@github.com/ansible/awx.git'),
('git', 'notgit@bitbucket.org:does-not-exist/example.git', True, True, False, False, 'git+ssh://notgit@bitbucket.org/does-not-exist/example.git'),
(
'git',
'notgit@altssh.bitbucket.org:does-not-exist/example.git',
True,
True,
False,
False,
'git+ssh://notgit@altssh.bitbucket.org/does-not-exist/example.git',
),
# awx#12992 - IPv6
('git', 'http://[fd00:1234:2345:6789::11]:3000/foo.git', True, True, True, False, 'http://[fd00:1234:2345:6789::11]:3000/foo.git'),
('git', 'http://foo:bar@[fd00:1234:2345:6789::11]:3000/foo.git', True, True, True, False, 'http://foo:bar@[fd00:1234:2345:6789::11]:3000/foo.git'),
('git', 'example@[fd00:1234:2345:6789::11]:example/foo.git', True, True, True, False, 'git+ssh://example@[fd00:1234:2345:6789::11]/example/foo.git'),
],
)
def test_update_scm_url(scm_type, url, username, password, check_special_cases, scp_format, expected):
if isinstance(expected, Exception):
with pytest.raises(type(expected)) as excinfo:
common.update_scm_url(scm_type, url, username, password, check_special_cases, scp_format)
assert str(excinfo.value) == str(expected)
else:
assert common.update_scm_url(scm_type, url, username, password, check_special_cases, scp_format) == expected
class TestHostnameRegexValidator:
@pytest.fixture
def regex_expr(self):
return '^[a-z0-9][-a-z0-9]*$|^([a-z0-9][-a-z0-9]{0,62}[.])*[a-z0-9][-a-z0-9]{1,62}$'
@pytest.fixture
def re_flags(self):
return re.IGNORECASE
@pytest.fixture
def custom_err_message(self):
return "foobar"
def test_hostame_regex_validator_constructor_with_args(self, regex_expr, re_flags, custom_err_message):
h = HostnameRegexValidator(regex=regex_expr, flags=re_flags, message=custom_err_message)
assert h.regex == _lazy_re_compile(regex_expr, re_flags)
assert h.message == 'foobar'
assert h.code == 'invalid'
assert h.inverse_match == False
assert h.flags == re_flags
def test_hostame_regex_validator_default_constructor(self, regex_expr, re_flags):
h = HostnameRegexValidator()
assert h.regex == _lazy_re_compile(regex_expr, re_flags)
assert h.message == 'Enter a valid value.'
assert h.code == 'invalid'
assert h.inverse_match == False
assert h.flags == re_flags
def test_good_call(self, regex_expr, re_flags):
h = HostnameRegexValidator(regex=regex_expr, flags=re_flags)
assert (h("192.168.56.101"), None)
def test_bad_call(self, regex_expr, re_flags):
h = HostnameRegexValidator(regex=regex_expr, flags=re_flags)
try:
h("@#$%)$#(TUFAS_DG")
except ValidationError as e:
assert e.message is not None
def test_good_call_with_inverse(self, regex_expr, re_flags, inverse_match=True):
h = HostnameRegexValidator(regex=regex_expr, flags=re_flags, inverse_match=inverse_match)
try:
h("1.2.3.4")
except ValidationError as e:
assert e.message is not None
def test_bad_call_with_inverse(self, regex_expr, re_flags, inverse_match=True):
h = HostnameRegexValidator(regex=regex_expr, flags=re_flags, inverse_match=inverse_match)
assert (h("@#$%)$#(TUFAS_DG"), None)

View File

@@ -264,14 +264,8 @@ def update_scm_url(scm_type, url, username=True, password=True, check_special_ca
userpass, hostpath = url.split('@', 1) userpass, hostpath = url.split('@', 1)
else: else:
userpass, hostpath = '', url userpass, hostpath = '', url
# Handle IPv6 here. In this case, we might have hostpath of: if hostpath.count(':') > 1:
# [fd00:1234:2345:6789::11]:example/foo.git
if hostpath.startswith('[') and ']:' in hostpath:
host, path = hostpath.split(']:', 1)
host = host + ']'
elif hostpath.count(':') > 1:
raise ValueError(_('Invalid %s URL') % scm_type) raise ValueError(_('Invalid %s URL') % scm_type)
else:
host, path = hostpath.split(':', 1) host, path = hostpath.split(':', 1)
# if not path.startswith('/') and not path.startswith('~/'): # if not path.startswith('/') and not path.startswith('~/'):
# path = '~/%s' % path # path = '~/%s' % path
@@ -331,11 +325,7 @@ def update_scm_url(scm_type, url, username=True, password=True, check_special_ca
netloc = u':'.join([urllib.parse.quote(x, safe='') for x in (netloc_username, netloc_password) if x]) netloc = u':'.join([urllib.parse.quote(x, safe='') for x in (netloc_username, netloc_password) if x])
else: else:
netloc = u'' netloc = u''
# urllib.parse strips brackets from IPv6 addresses, so we need to add them back in netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
hostname = parts.hostname
if hostname and ':' in hostname and '[' in url and ']' in url:
hostname = f'[{hostname}]'
netloc = u'@'.join(filter(None, [netloc, hostname]))
if parts.port: if parts.port:
netloc = u':'.join([netloc, str(parts.port)]) netloc = u':'.join([netloc, str(parts.port)])
new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path, parts.query, parts.fragment]) new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path, parts.query, parts.fragment])
@@ -542,10 +532,6 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
if kwargs and field_name in kwargs: if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name] override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)): if isinstance(override_field_val, (set, list, QuerySet)):
# Labels are additive so we are going to add any src labels in addition to the override labels
if field_name == 'labels':
for jt_label in src_field_value.all():
getattr(obj2, field_name).add(jt_label.id)
getattr(obj2, field_name).add(*override_field_val) getattr(obj2, field_name).add(*override_field_val)
continue continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager': if override_field_val.__class__.__name__ == 'ManyRelatedManager':

View File

@@ -76,7 +76,7 @@ class SpecialInventoryHandler(logging.Handler):
def emit(self, record): def emit(self, record):
# check cancel and timeout status regardless of log level # check cancel and timeout status regardless of log level
this_time = now() this_time = now()
if (this_time - self.last_check).total_seconds() > 0.1: if (this_time - self.last_check).total_seconds() > 0.5: # cancel callback is expensive
self.last_check = this_time self.last_check = this_time
if self.cancel_callback(): if self.cancel_callback():
raise PostRunError('Inventory update has been canceled', status='canceled') raise PostRunError('Inventory update has been canceled', status='canceled')

View File

@@ -35,7 +35,7 @@ def unwrap_broadcast_msg(payload: dict):
def get_broadcast_hosts(): def get_broadcast_hosts():
Instance = apps.get_model('main', 'Instance') Instance = apps.get_model('main', 'Instance')
instances = ( instances = (
Instance.objects.exclude(hostname=Instance.objects.my_hostname()) Instance.objects.exclude(hostname=Instance.objects.me().hostname)
.exclude(node_type='execution') .exclude(node_type='execution')
.exclude(node_type='hop') .exclude(node_type='hop')
.order_by('hostname') .order_by('hostname')
@@ -47,7 +47,7 @@ def get_broadcast_hosts():
def get_local_host(): def get_local_host():
Instance = apps.get_model('main', 'Instance') Instance = apps.get_model('main', 'Instance')
return Instance.objects.my_hostname() return Instance.objects.me().hostname
class WebsocketTask: class WebsocketTask:

View File

@@ -1,114 +0,0 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import gnupg
import os
import tempfile
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible_sign.checksum import (
ChecksumFile,
ChecksumMismatch,
InvalidChecksumLine,
)
from ansible_sign.checksum.differ import DistlibManifestChecksumFileExistenceDiffer
from ansible_sign.signing import GPGVerifier
display = Display()
VALIDATION_TYPES = (
"checksum_manifest",
"gpg",
)
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
super(ActionModule, self).run(tmp, task_vars)
self.params = self._task.args
self.project_path = self.params.get("project_path")
if self.project_path is None:
return {
"failed": True,
"msg": "No project path (project_path) was supplied.",
}
validation_type = self.params.get("validation_type")
if validation_type is None or validation_type not in VALIDATION_TYPES:
return {"failed": True, "msg": "validation_type must be one of: " + ', '.join(VALIDATION_TYPES)}
validation_method = getattr(self, f"validate_{validation_type}")
return validation_method()
def validate_gpg(self):
gpg_pubkey = self.params.get("gpg_pubkey")
if gpg_pubkey is None:
return {
"failed": True,
"msg": "No GPG public key (gpg_pubkey) was supplied.",
}
signature_file = os.path.join(self.project_path, ".ansible-sign", "sha256sum.txt.sig")
manifest_file = os.path.join(self.project_path, ".ansible-sign", "sha256sum.txt")
for path in (signature_file, manifest_file):
if not os.path.exists(path):
return {
"failed": True,
"msg": f"Expected file not found: {path}",
}
with tempfile.TemporaryDirectory() as gpg_home:
gpg = gnupg.GPG(gnupghome=gpg_home)
gpg.import_keys(gpg_pubkey)
verifier = GPGVerifier(
manifest_path=manifest_file,
detached_signature_path=signature_file,
gpg_home=gpg_home,
)
result = verifier.verify()
return {
"failed": not result.success,
"msg": result.summary,
"gpg_details": result.extra_information,
}
def validate_checksum_manifest(self):
checksum = ChecksumFile(self.project_path, differ=DistlibManifestChecksumFileExistenceDiffer)
manifest_file = os.path.join(self.project_path, ".ansible-sign", "sha256sum.txt")
if not os.path.exists(manifest_file):
return {
"failed": True,
"msg": f"Expected file not found: {manifest_file}",
}
checksum_file_contents = open(manifest_file, "r").read()
try:
manifest = checksum.parse(checksum_file_contents)
except InvalidChecksumLine as e:
return {
"failed": True,
"msg": f"Invalid line in checksum manifest: {e}",
}
try:
checksum.verify(manifest)
except ChecksumMismatch as e:
return {
"failed": True,
"msg": str(e),
}
return {
"failed": False,
"msg": "Checksum manifest is valid.",
}

View File

@@ -1,65 +0,0 @@
ANSIBLE_METADATA = {"metadata_version": "1.0", "status": ["stableinterface"], "supported_by": "community"}
DOCUMENTATION = """
---
module: playbook_integrity
short_description: verify that files within a project have not been tampered with.
description:
- Makes use of the 'ansible-sign' project as a library for ensuring that an
Ansible project has not been tampered with.
- There are multiple types of validation that this action plugin supports,
currently: GPG public/private key signing of a checksum manifest file, and
checking the checksum manifest file itself against the checksum of each file
that is being verified.
- In the future, other types of validation may be supported.
options:
project_path:
description:
- Directory of the project being verified. Expected to contain a
C(.ansible-sign) directory with a generated checksum manifest file and a
detached signature for it. These files are produced by the
C(ansible-sign) command-line utility.
required: true
validation_type:
description:
- Describes the kind of validation to perform on the project.
- I(validation_type=gpg) means that a GPG Public Key credential is being
used to verify the integrity of the checksum manifest (and therefore the
project).
- 'checksum_manifest' means that the signed checksum manifest is validated
against all files in the project listed by its MANIFEST.in file. Just
running this plugin with I(validation_type=checksum_manifest) is
typically B(NOT) enough. It should also be run with a I(validation_type)
that ensures that the manifest file itself has not changed, such as
I(validation_type=gpg).
required: true
choices:
- gpg
- checksum_manifest
gpg_pubkey:
description:
- The public key to validate a checksum manifest against. Must match the
detached signature in the project's C(.ansible-sign) directory.
- Required when I(validation_type=gpg).
author:
- Ansible AWX Team
"""
EXAMPLES = """
- name: Verify project content using GPG signature
playbook_integrity:
project_path: /srv/projects/example
validation_type: gpg
gpg_pubkey: |
-----BEING PGP PUBLIC KEY BLOCK-----
mWINAFXMtjsACADIf/zJS0V3UO3c+KAUcpVAcChpliM31ICDWydfIfF3dzMzLcCd
Cj2kk1mPWtP/JHfk1V5czcWWWWGC2Tw4g4IS+LokAAuwk7VKTlI34eeMl8SiZCAI
[...]
- name: Verify project content against checksum manifest
playbook_integrity:
project_path: /srv/projects/example
validation_type: checksum_manifest
"""

View File

@@ -18,7 +18,6 @@
# galaxy_task_env: environment variables to use specifically for ansible-galaxy commands # galaxy_task_env: environment variables to use specifically for ansible-galaxy commands
# awx_version: Current running version of the awx or tower as a string # awx_version: Current running version of the awx or tower as a string
# awx_license_type: "open" for AWX; else presume Tower # awx_license_type: "open" for AWX; else presume Tower
# gpg_pubkey: the GPG public key to use for validation, when enabled
- hosts: localhost - hosts: localhost
gather_facts: false gather_facts: false
@@ -154,26 +153,6 @@
- update_insights - update_insights
- update_archive - update_archive
- hosts: localhost
gather_facts: false
connection: local
name: Perform project signature/checksum verification
tasks:
- name: Verify project content using GPG signature
verify_project:
project_path: "{{ project_path | quote }}"
validation_type: gpg
gpg_pubkey: "{{ gpg_pubkey }}"
tags:
- validation_gpg_public_key
- name: Verify project content against checksum manifest
verify_project:
project_path: "{{ project_path | quote }}"
validation_type: checksum_manifest
tags:
- validation_checksum_manifest
- hosts: localhost - hosts: localhost
gather_facts: false gather_facts: false
connection: local connection: local

Some files were not shown because too many files have changed in this diff Show More