Compare commits

1 Commit

Author: Alex Corey
SHA1: bea8b1a754
Message: Adds an Instance Group component that renders IGs as a PF Label
Date: 2022-10-26 13:36:14 -04:00
504 changed files with 15450 additions and 17591 deletions

View File

@@ -53,16 +53,6 @@ https://github.com/ansible/awx/#get-involved \
 Thank you once again for this and your interest in AWX!
-### Red Hat Support Team
-- Hi! \
-\
-It appears that you are using an RPM build for RHEL. Please reach out to the Red Hat support team and submit a ticket. \
-\
-Here is the link to do so: \
-\
-https://access.redhat.com/support \
-\
-Thank you for your submission and for supporting AWX!
 ## Common

View File

@@ -2,7 +2,6 @@
 name: CI
 env:
 BRANCH: ${{ github.base_ref || 'devel' }}
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
 pull_request:
 jobs:
@@ -28,9 +27,6 @@ jobs:
 - name: awx-collection
 command: /start_tests.sh test_collection_all
 label: Run Collection Tests
-- name: awx-collection-sanity
-command: /start_tests.sh test_collection_sanity
-label: Run Ansible core Collection Sanity tests
 - name: api-schema
 label: Check API Schema
 command: /start_tests.sh detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{ github.event.pull_request.base.ref }}

View File

@@ -1,7 +1,5 @@
 ---
 name: Build/Push Development Images
-env:
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
 push:
 branches:

View File

@@ -1,8 +1,5 @@
 ---
 name: E2E Tests
-env:
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
 pull_request_target:
 types: [labeled]

View File

@@ -1,26 +0,0 @@
----
-name: Feature branch deletion cleanup
-env:
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
-on:
-delete:
-branches:
-- feature_**
-jobs:
-push:
-runs-on: ubuntu-latest
-permissions:
-packages: write
-contents: read
-steps:
-- name: Delete API Schema
-env:
-AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
-AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
-AWS_REGION: 'us-east-1'
-run: |
-ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
-ansible localhost -c local -m aws_s3 \
--a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

View File

@@ -13,13 +13,21 @@ jobs:
 packages: write
 contents: read
 steps:
-- name: Check for each of the lines
+- name: Write PR body to a file
 env:
 PR_BODY: ${{ github.event.pull_request.body }}
 run: |
-echo $PR_BODY | grep "Bug, Docs Fix or other nominal change" > Z
-echo $PR_BODY | grep "New or Enhanced Feature" > Y
-echo $PR_BODY | grep "Breaking Change" > X
+cat >> pr.body << __SOME_RANDOM_PR_EOF__
+${{ github.event.pull_request.body }}
+__SOME_RANDOM_PR_EOF__
+- name: Display the received body for troubleshooting
+run: cat pr.body
+# We want to write these out individually just incase the options were joined on a single line
+- name: Check for each of the lines
+run: |
+grep "Bug, Docs Fix or other nominal change" pr.body > Z
+grep "New or Enhanced Feature" pr.body > Y
+grep "Breaking Change" pr.body > X
 exit 0
 # We exit 0 and set the shell to prevent the returns from the greps from failing this step
 # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference

View File

@@ -1,9 +1,5 @@
 ---
 name: Promote Release
-env:
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
 release:
 types: [published]

View File

@@ -1,9 +1,5 @@
 ---
 name: Stage Release
-env:
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
 workflow_dispatch:
 inputs:

View File

@@ -1,15 +1,10 @@
 ---
 name: Upload API Schema
-env:
-LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
 push:
 branches:
 - devel
 - release_**
-- feature_**
 jobs:
 push:
 runs-on: ubuntu-latest

View File

@@ -12,7 +12,7 @@ recursive-include awx/plugins *.ps1
 recursive-include requirements *.txt
 recursive-include requirements *.yml
 recursive-include config *
-recursive-include licenses *
+recursive-include docs/licenses *
 recursive-exclude awx devonly.py*
 recursive-exclude awx/api/tests *
 recursive-exclude awx/main/tests *

View File

@@ -6,9 +6,7 @@ CHROMIUM_BIN=/tmp/chrome-linux/chrome
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 MANAGEMENT_COMMAND ?= awx-manage
 VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
-# ansible-test requires semver compatable version, so we allow overrides to hack it
-COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
+COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
 # NOTE: This defaults the container image version to the branch that's active
 COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -36,7 +34,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
 SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
 # These should be upgraded in the AWX and Ansible venv before attempting
 # to install the actual requirements
-VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
+VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==58.2.0 setuptools_scm[toml]==6.4.2 wheel==0.36.2
 NAME ?= awx
@@ -87,7 +85,6 @@ clean: clean-ui clean-api clean-awxkit clean-dist
 clean-api:
 rm -rf build $(NAME)-$(VERSION) *.egg-info
-rm -rf .tox
 find . -type f -regex ".*\.py[co]$$" -delete
 find . -type d -name "__pycache__" -delete
 rm -f awx/awx_test.sqlite3*
@@ -184,7 +181,7 @@ collectstatic:
 @if [ "$(VENV_BASE)" ]; then \
 . $(VENV_BASE)/awx/bin/activate; \
 fi; \
-$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
+mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
 DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
@@ -302,8 +299,7 @@ test_collection:
 if [ "$(VENV_BASE)" ]; then \
 . $(VENV_BASE)/awx/bin/activate; \
 fi && \
-if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
-ansible --version
+pip install ansible-core && \
 py.test $(COLLECTION_TEST_DIRS) -v
 # The python path needs to be modified so that the tests can find Ansible within the container
 # First we will use anything expility set as PYTHONPATH
@@ -333,13 +329,8 @@ install_collection: build_collection
 rm -rf $(COLLECTION_INSTALL)
 ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(COLLECTION_VERSION).tar.gz
-test_collection_sanity:
-rm -rf awx_collection_build/
-rm -rf $(COLLECTION_INSTALL)
-if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
-ansible --version
-COLLECTION_VERSION=1.0.0 make install_collection
-cd $(COLLECTION_INSTALL) && ansible-test sanity --exclude=plugins/modules/export.py
+test_collection_sanity: install_collection
+cd $(COLLECTION_INSTALL) && ansible-test sanity
 test_collection_integration: install_collection
 cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
@@ -386,8 +377,6 @@ clean-ui:
 rm -rf awx/ui/build
 rm -rf awx/ui/src/locales/_build
 rm -rf $(UI_BUILD_FLAG_FILE)
-# the collectstatic command doesn't like it if this dir doesn't exist.
-mkdir -p awx/ui/build/static
 awx/ui/node_modules:
 NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci
@@ -397,18 +386,20 @@ $(UI_BUILD_FLAG_FILE):
 $(PYTHON) tools/scripts/compilemessages.py
 $(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
 $(NPM_BIN) --prefix awx/ui --loglevel warn run build
-mkdir -p awx/public/static/css
-mkdir -p awx/public/static/js
-mkdir -p awx/public/static/media
-cp -r awx/ui/build/static/css/* awx/public/static/css
-cp -r awx/ui/build/static/js/* awx/public/static/js
-cp -r awx/ui/build/static/media/* awx/public/static/media
 touch $@
 ui-release: $(UI_BUILD_FLAG_FILE)
 ui-devel: awx/ui/node_modules
 @$(MAKE) -B $(UI_BUILD_FLAG_FILE)
+mkdir -p /var/lib/awx/public/static/css
+mkdir -p /var/lib/awx/public/static/js
+mkdir -p /var/lib/awx/public/static/media
+cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
+cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
+cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
 ui-devel-instrumented: awx/ui/node_modules
 $(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -460,9 +451,8 @@ awx/projects:
 COMPOSE_UP_OPTS ?=
 COMPOSE_OPTS ?=
 CONTROL_PLANE_NODE_COUNT ?= 1
-EXECUTION_NODE_COUNT ?= 0
+EXECUTION_NODE_COUNT ?= 2
 MINIKUBE_CONTAINER_GROUP ?= false
-MINIKUBE_SETUP ?= false # if false, run minikube separately
 EXTRA_SOURCES_ANSIBLE_OPTS ?=
 ifneq ($(ADMIN_PASSWORD),)
@@ -471,7 +461,7 @@ endif
 docker-compose-sources: .git/hooks/pre-commit
 @if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
-ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
+ansible-playbook -i tools/docker-compose/inventory tools/docker-compose-minikube/deploy.yml; \
 fi;
 ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
@@ -601,12 +591,13 @@ pot: $(UI_BUILD_FLAG_FILE)
 po: $(UI_BUILD_FLAG_FILE)
 $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
+LANG = "en_us"
 ## generate API django .pot .po
 messages:
 @if [ "$(VENV_BASE)" ]; then \
 . $(VENV_BASE)/awx/bin/activate; \
 fi; \
-$(PYTHON) manage.py makemessages -l en_us --keep-pot
+$(PYTHON) manage.py makemessages -l $(LANG) --keep-pot
 print-%:
 @echo $($*)

View File

@@ -113,7 +113,7 @@ from awx.main.utils import (
 )
 from awx.main.utils.filters import SmartFilter
 from awx.main.utils.named_url_graph import reset_counters
-from awx.main.scheduler.task_manager_models import TaskManagerModels
+from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
 from awx.main.redact import UriCleaner, REPLACE_STR
 from awx.main.validators import vars_validate_or_raise
@@ -2221,15 +2221,6 @@ class InventorySourceUpdateSerializer(InventorySourceSerializer):
 class Meta:
 fields = ('can_update',)
-def validate(self, attrs):
-project = self.instance.source_project
-if project:
-failed_reason = project.get_reason_if_failed()
-if failed_reason:
-raise serializers.ValidationError(failed_reason)
-return super(InventorySourceUpdateSerializer, self).validate(attrs)
 class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
@@ -4281,10 +4272,17 @@
 # Basic validation - cannot run a playbook without a playbook
 if not template.project:
 errors['project'] = _("A project is required to run a job.")
-else:
-failure_reason = template.project.get_reason_if_failed()
-if failure_reason:
-errors['playbook'] = failure_reason
+elif template.project.status in ('error', 'failed'):
+errors['playbook'] = _("Missing a revision to run due to failed project update.")
+latest_update = template.project.project_updates.last()
+if latest_update is not None and latest_update.failed:
+failed_validation_tasks = latest_update.project_update_events.filter(
+event='runner_on_failed',
+play="Perform project signature/checksum verification",
+)
+if failed_validation_tasks:
+errors['playbook'] = _("Last project update failed due to signature validation failure.")
 # cannot run a playbook without an inventory
 if template.inventory and template.inventory.pending_deletion is True:
@@ -4954,7 +4952,7 @@ class InstanceSerializer(BaseSerializer):
 res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
 res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
 if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
-if obj.node_type == 'execution':
+if obj.node_type != 'hop':
 res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
 return res
@@ -5040,10 +5038,12 @@ class InstanceHealthCheckSerializer(BaseSerializer):
 class InstanceGroupSerializer(BaseSerializer):
 show_capabilities = ['edit', 'delete']
-capacity = serializers.SerializerMethodField()
 consumed_capacity = serializers.SerializerMethodField()
 percent_capacity_remaining = serializers.SerializerMethodField()
-jobs_running = serializers.SerializerMethodField()
+jobs_running = serializers.IntegerField(
+help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance group'), read_only=True
+)
 jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True)
 instances = serializers.SerializerMethodField()
 is_container_group = serializers.BooleanField(
@@ -5069,22 +5069,6 @@
 label=_('Policy Instance Minimum'),
 help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
 )
-max_concurrent_jobs = serializers.IntegerField(
-default=0,
-min_value=0,
-required=False,
-initial=0,
-label=_('Max Concurrent Jobs'),
-help_text=_("Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced."),
-)
-max_forks = serializers.IntegerField(
-default=0,
-min_value=0,
-required=False,
-initial=0,
-label=_('Max Forks'),
-help_text=_("Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced."),
-)
 policy_instance_list = serializers.ListField(
 child=serializers.CharField(),
 required=False,
@@ -5106,8 +5090,6 @@
 "consumed_capacity",
 "percent_capacity_remaining",
 "jobs_running",
-"max_concurrent_jobs",
-"max_forks",
 "jobs_total",
 "instances",
 "is_container_group",
@@ -5189,39 +5171,28 @@
 # Store capacity values (globally computed) in the context
 if 'task_manager_igs' not in self.context:
 instance_groups_queryset = None
+jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
 if self.parent: # Is ListView:
 instance_groups_queryset = self.parent.instance
-tm_models = TaskManagerModels.init_with_consumed_capacity(
-instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'],
-instance_groups_queryset=instance_groups_queryset,
-)
-self.context['task_manager_igs'] = tm_models.instance_groups
+instances = TaskManagerInstances(jobs_qs)
+instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)
+self.context['task_manager_igs'] = instance_groups
 return self.context['task_manager_igs']
 def get_consumed_capacity(self, obj):
 ig_mgr = self.get_ig_mgr()
 return ig_mgr.get_consumed_capacity(obj.name)
-def get_capacity(self, obj):
-ig_mgr = self.get_ig_mgr()
-return ig_mgr.get_capacity(obj.name)
 def get_percent_capacity_remaining(self, obj):
-capacity = self.get_capacity(obj)
-if not capacity:
+if not obj.capacity:
 return 0.0
-consumed_capacity = self.get_consumed_capacity(obj)
-return float("{0:.2f}".format(((float(capacity) - float(consumed_capacity)) / (float(capacity))) * 100))
+ig_mgr = self.get_ig_mgr()
+return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))
 def get_instances(self, obj):
-ig_mgr = self.get_ig_mgr()
-return len(ig_mgr.get_instances(obj.name))
-def get_jobs_running(self, obj):
-ig_mgr = self.get_ig_mgr()
-return ig_mgr.get_jobs_running(obj.name)
+return obj.instances.count()
 class ActivityStreamSerializer(BaseSerializer):

View File

@@ -1,5 +1,5 @@
 Launch a Job Template:
-{% ifmeth GET %}
 Make a GET request to this resource to determine if the job_template can be
 launched and whether any passwords are required to launch the job_template.
 The response will include the following fields:
@@ -29,8 +29,8 @@ The response will include the following fields:
 * `inventory_needed_to_start`: Flag indicating the presence of an inventory
 associated with the job template. If not then one should be supplied when
 launching the job (boolean, read-only)
-{% endifmeth %}
-{% ifmeth POST %}Make a POST request to this resource to launch the job_template. If any
+Make a POST request to this resource to launch the job_template. If any
 passwords, inventory, or extra variables (extra_vars) are required, they must
 be passed via POST data, with extra_vars given as a YAML or JSON string and
 escaped parentheses. If the `inventory_needed_to_start` is `True` then the
@@ -41,4 +41,3 @@ are not provided, a 400 status code will be returned. If the job cannot be
 launched, a 405 status code will be returned. If the provided credential or
 inventory are not allowed to be used by the user, then a 403 status code will
 be returned.
-{% endifmeth %}

View File

@@ -5,7 +5,6 @@
 import dateutil
 import functools
 import html
-import itertools
 import logging
 import re
 import requests
@@ -21,10 +20,9 @@ from urllib3.exceptions import ConnectTimeoutError
 # Django
 from django.conf import settings
 from django.core.exceptions import FieldError, ObjectDoesNotExist
-from django.db.models import Q, Sum, Count
+from django.db.models import Q, Sum
 from django.db import IntegrityError, ProgrammingError, transaction, connection
 from django.db.models.fields.related import ManyToManyField, ForeignKey
-from django.db.models.functions import Trunc
 from django.shortcuts import get_object_or_404
 from django.utils.safestring import mark_safe
 from django.utils.timezone import now
@@ -49,6 +47,9 @@ from rest_framework import status
 from rest_framework_yaml.parsers import YAMLParser
 from rest_framework_yaml.renderers import YAMLRenderer
+# QSStats
+import qsstats
 # ANSIConv
 import ansiconv
@@ -282,50 +283,30 @@ class DashboardJobsGraphView(APIView):
 success_query = success_query.filter(instance_of=models.ProjectUpdate)
 failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
-end = now()
-interval = 'day'
+success_qss = qsstats.QuerySetStats(success_query, 'finished')
+failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
+start_date = now()
 if period == 'month':
-start = end - dateutil.relativedelta.relativedelta(months=1)
+end_date = start_date - dateutil.relativedelta.relativedelta(months=1)
+interval = 'days'
 elif period == 'two_weeks':
-start = end - dateutil.relativedelta.relativedelta(weeks=2)
+end_date = start_date - dateutil.relativedelta.relativedelta(weeks=2)
+interval = 'days'
 elif period == 'week':
-start = end - dateutil.relativedelta.relativedelta(weeks=1)
+end_date = start_date - dateutil.relativedelta.relativedelta(weeks=1)
+interval = 'days'
 elif period == 'day':
-start = end - dateutil.relativedelta.relativedelta(days=1)
-interval = 'hour'
+end_date = start_date - dateutil.relativedelta.relativedelta(days=1)
+interval = 'hours'
 else:
 return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
 dashboard_data = {"jobs": {"successful": [], "failed": []}}
-succ_list = dashboard_data['jobs']['successful']
-fail_list = dashboard_data['jobs']['failed']
-qs_s = (
-success_query.filter(finished__range=(start, end))
-.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
-.order_by()
-.values('d')
-.annotate(agg=Count('id', distinct=True))
-)
-data_s = {item['d']: item['agg'] for item in qs_s}
-qs_f = (
-failed_query.filter(finished__range=(start, end))
-.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
-.order_by()
-.values('d')
-.annotate(agg=Count('id', distinct=True))
-)
-data_f = {item['d']: item['agg'] for item in qs_f}
-start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
-for d in itertools.count():
-date = start_date + dateutil.relativedelta.relativedelta(days=d)
-if date > end:
-break
-succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
-fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
+for element in success_qss.time_series(end_date, start_date, interval=interval):
+dashboard_data['jobs']['successful'].append([time.mktime(element[0].timetuple()), element[1]])
+for element in failed_qss.time_series(end_date, start_date, interval=interval):
+dashboard_data['jobs']['failed'].append([time.mktime(element[0].timetuple()), element[1]])
 return Response(dashboard_data)
@@ -344,13 +325,6 @@ class InstanceDetail(RetrieveUpdateAPIView):
 model = models.Instance
 serializer_class = serializers.InstanceSerializer
-def update_raw_data(self, data):
-# these fields are only valid on creation of an instance, so they unwanted on detail view
-data.pop('listener_port', None)
-data.pop('node_type', None)
-data.pop('hostname', None)
-return super(InstanceDetail, self).update_raw_data(data)
 def update(self, request, *args, **kwargs):
 r = super(InstanceDetail, self).update(request, *args, **kwargs)
 if status.is_success(r.status_code):
@@ -418,8 +392,8 @@ class InstanceHealthCheck(GenericAPIView):
 permission_classes = (IsSystemAdminOrAuditor,)
 def get_queryset(self):
-return super().get_queryset().filter(node_type='execution')
+# FIXME: For now, we don't have a good way of checking the health of a hop node.
+return super().get_queryset().exclude(node_type='hop')
 def get(self, request, *args, **kwargs):
 obj = self.get_object()
@@ -439,10 +413,9 @@ class InstanceHealthCheck(GenericAPIView):
 execution_node_health_check.apply_async([obj.hostname])
 else:
-return Response(
-{"error": f"Cannot run a health check on instances of type {obj.node_type}. Health checks can only be run on execution nodes."},
-status=status.HTTP_400_BAD_REQUEST,
-)
+from awx.main.tasks.system import cluster_node_health_check
+cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
 return Response({'msg': f"Health check is running for {obj.hostname}."}, status=status.HTTP_200_OK)
@@ -2247,8 +2220,6 @@
 def post(self, request, *args, **kwargs):
 obj = self.get_object()
-serializer = self.get_serializer(instance=obj, data=request.data)
-serializer.is_valid(raise_exception=True)
 if obj.can_update:
 update = obj.update()
 if not update:

View File

@@ -16,7 +16,7 @@ from rest_framework import status
 from awx.main.constants import ACTIVE_STATES
 from awx.main.utils import get_object_or_400
-from awx.main.models.ha import Instance, InstanceGroup, schedule_policy_task
+from awx.main.models.ha import Instance, InstanceGroup
 from awx.main.models.organization import Team
 from awx.main.models.projects import Project
 from awx.main.models.inventory import Inventory
@@ -107,11 +107,6 @@ class InstanceGroupMembershipMixin(object):
 if inst_name in ig_obj.policy_instance_list:
 ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
 ig_obj.save(update_fields=['policy_instance_list'])
-# sometimes removing an instance has a non-obvious consequence
-# this is almost always true if policy_instance_percentage or _minimum is non-zero
-# after removing a single instance, the other memberships need to be re-balanced
-schedule_policy_task()
 return response

View File

@@ -6238,4 +6238,3 @@ msgstr "%s se está actualizando."
 #: awx/ui/urls.py:24
 msgid "This page will refresh when complete."
 msgstr "Esta página se actualizará cuando se complete."

View File

@@ -721,7 +721,7 @@ msgstr "DTSTART valide obligatoire dans rrule. La valeur doit commencer par : DT
 #: awx/api/serializers.py:4657
 msgid ""
 "DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ."
-msgstr "DTSTART ne peut correspondre à une date-heure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
+msgstr "DTSTART ne peut correspondre à une DateHeure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
 #: awx/api/serializers.py:4659
 msgid "Multiple DTSTART is not supported."
@@ -6240,4 +6240,3 @@ msgstr "%s est en cours de mise à niveau."
 #: awx/ui/urls.py:24
 msgid "This page will refresh when complete."
 msgstr "Cette page sera rafraîchie une fois terminée."

View File

@@ -6238,4 +6238,3 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."
 #: awx/ui/urls.py:24
 msgid "This page will refresh when complete."
 msgstr "Deze pagina wordt vernieuwd als hij klaar is."

View File

@@ -2697,66 +2697,46 @@ class ActivityStreamAccess(BaseAccess):
 # 'job_template', 'job', 'project', 'project_update', 'workflow_job',
 # 'inventory_source', 'workflow_job_template'
-q = Q(user=self.user)
-inventory_set = Inventory.accessible_pk_qs(self.user, 'read_role')
-if inventory_set:
-q |= (
-Q(ad_hoc_command__inventory__in=inventory_set)
-| Q(inventory__in=inventory_set)
-| Q(host__inventory__in=inventory_set)
-| Q(group__inventory__in=inventory_set)
-| Q(inventory_source__inventory__in=inventory_set)
-| Q(inventory_update__inventory_source__inventory__in=inventory_set)
-)
-credential_set = Credential.accessible_pk_qs(self.user, 'read_role')
-if credential_set:
-q |= Q(credential__in=credential_set)
+inventory_set = Inventory.accessible_objects(self.user, 'read_role')
+credential_set = Credential.accessible_objects(self.user, 'read_role')
 auditing_orgs = (
 (Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role'))
 .distinct()
 .values_list('id', flat=True)
 )
-if auditing_orgs:
-q |= (
-Q(user__in=auditing_orgs.values('member_role__members'))
-| Q(organization__in=auditing_orgs)
-| Q(notification_template__organization__in=auditing_orgs)
-| Q(notification__notification_template__organization__in=auditing_orgs)
-| Q(label__organization__in=auditing_orgs)
-| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
-)
-project_set = Project.accessible_pk_qs(self.user, 'read_role')
-if project_set:
-q |= Q(project__in=project_set) | Q(project_update__project__in=project_set)
-jt_set = JobTemplate.accessible_pk_qs(self.user, 'read_role')
-if jt_set:
-q |= Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set)
-wfjt_set = WorkflowJobTemplate.accessible_pk_qs(self.user, 'read_role')
-if wfjt_set:
-q |= (
-Q(workflow_job_template__in=wfjt_set)
-| Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
-| Q(workflow_job__workflow_job_template__in=wfjt_set)
-)
-team_set = Team.accessible_pk_qs(self.user, 'read_role')
-if team_set:
-q |= Q(team__in=team_set)
+project_set = Project.accessible_objects(self.user, 'read_role')
+jt_set = JobTemplate.accessible_objects(self.user, 'read_role')
+team_set = Team.accessible_objects(self.user, 'read_role')
+wfjt_set = WorkflowJobTemplate.accessible_objects(self.user, 'read_role')
 app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
-if app_set:
-q |= Q(o_auth2_application__in=app_set)
 token_set = OAuth2TokenAccess(self.user).filtered_queryset()
-if token_set:
-q |= Q(o_auth2_access_token__in=token_set)
-return qs.filter(q).distinct()
+return qs.filter(
+Q(ad_hoc_command__inventory__in=inventory_set)
+| Q(o_auth2_application__in=app_set)
+| Q(o_auth2_access_token__in=token_set)
+| Q(user__in=auditing_orgs.values('member_role__members'))
+| Q(user=self.user)
+| Q(organization__in=auditing_orgs)
+| Q(inventory__in=inventory_set)
+| Q(host__inventory__in=inventory_set)
+| Q(group__inventory__in=inventory_set)
+| Q(inventory_source__inventory__in=inventory_set)
+| Q(inventory_update__inventory_source__inventory__in=inventory_set)
+| Q(credential__in=credential_set)
+| Q(team__in=team_set)
+| Q(project__in=project_set)
+| Q(project_update__project__in=project_set)
+| Q(job_template__in=jt_set)
+| Q(job__job_template__in=jt_set)
+| Q(workflow_job_template__in=wfjt_set)
+| Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
+| Q(workflow_job__workflow_job_template__in=wfjt_set)
+| Q(notification_template__organization__in=auditing_orgs)
+| Q(notification__notification_template__organization__in=auditing_orgs)
+| Q(label__organization__in=auditing_orgs)
+| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
+).distinct()
 def can_add(self, data):
 return False

View File

@@ -1,8 +1,8 @@
 import datetime
 import asyncio
 import logging
+import aioredis
 import redis
-import redis.asyncio
 import re
 from prometheus_client import (
@@ -82,7 +82,7 @@ class BroadcastWebsocketStatsManager:
 async def run_loop(self):
 try:
-redis_conn = await redis.asyncio.Redis.from_url(settings.BROKER_URL)
+redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
 while True:
 stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
 await redis_conn.set(self._redis_key, stats_data_str)
@@ -122,8 +122,8 @@ class BroadcastWebsocketStats:
 'Number of messages received, to be forwarded, by the broadcast websocket system',
 registry=self._registry,
 )
-self._messages_received_current_conn = Gauge(
-f'awx_{self.remote_name}_messages_received_currrent_conn',
+self._messages_received = Gauge(
+f'awx_{self.remote_name}_messages_received',
 'Number forwarded messages received by the broadcast websocket system, for the duration of the current connection',
 registry=self._registry,
 )
@@ -144,13 +144,13 @@ class BroadcastWebsocketStats:
 def record_message_received(self):
 self._internal_messages_received_per_minute.record()
-self._messages_received_current_conn.inc()
+self._messages_received.inc()
 self._messages_received_total.inc()
 def record_connection_established(self):
 self._connection.state('connected')
 self._connection_start.set_to_current_time()
-self._messages_received_current_conn.set(0)
+self._messages_received.set(0)
 def record_connection_lost(self):
 self._connection.state('disconnected')

View File

@@ -16,7 +16,7 @@ from awx.conf.license import get_license
 from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
 from awx.main import models
 from awx.main.analytics import register
-from awx.main.scheduler.task_manager_models import TaskManagerModels
+from awx.main.scheduler.task_manager_models import TaskManagerInstances
 """
 This module is used to define metrics collected by awx.main.analytics.gather()
@@ -237,8 +237,9 @@ def projects_by_scm_type(since, **kwargs):
 def instance_info(since, include_hostnames=False, **kwargs):
 info = {}
 # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
-tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
-for tm_instance in tm_models.instances.instances_by_hostname.values():
+active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
+tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
+for tm_instance in tm_instances.instances_by_hostname.values():
 instance = tm_instance.obj
 instance_info = {
 'uuid': instance.uuid,
@@ -250,7 +251,6 @@ def instance_info(since, include_hostnames=False, **kwargs):
 'enabled': instance.enabled,
 'consumed_capacity': tm_instance.consumed_capacity,
 'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
-'node_type': instance.node_type,
 }
 if include_hostnames is True:
 instance_info['hostname'] = instance.hostname

View File

@@ -57,7 +57,6 @@ def metrics():
 [
 'hostname',
 'instance_uuid',
-'node_type',
 ],
 registry=REGISTRY,
 )
@@ -85,7 +84,6 @@ def metrics():
 [
 'hostname',
 'instance_uuid',
-'node_type',
 ],
 registry=REGISTRY,
 )
@@ -113,7 +111,6 @@ def metrics():
 [
 'hostname',
 'instance_uuid',
-'node_type',
 ],
 registry=REGISTRY,
 )
@@ -123,7 +120,6 @@ def metrics():
 [
 'hostname',
 'instance_uuid',
-'node_type',
 ],
 registry=REGISTRY,
 )
@@ -184,13 +180,12 @@ def metrics():
 instance_data = instance_info(None, include_hostnames=True)
 for uuid, info in instance_data.items():
 hostname = info['hostname']
-node_type = info['node_type']
-INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['capacity'])
+INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
 INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
 INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
-INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['consumed_capacity'])
-INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['remaining_capacity'])
-INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).info(
+INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
+INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
+INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
 {
 'enabled': str(instance_data[uuid]['enabled']),
 'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),

View File

@@ -5,9 +5,7 @@ import logging
 from django.conf import settings
 from django.apps import apps
 from awx.main.consumers import emit_channel_notification
-from awx.main.utils import is_testing
 root_key = 'awx_metrics'
 logger = logging.getLogger('awx.main.analytics')
@@ -165,7 +163,7 @@ class Metrics:
 Instance = apps.get_model('main', 'Instance')
 if instance_name:
 self.instance_name = instance_name
-elif is_testing():
+elif settings.IS_TESTING():
 self.instance_name = "awx_testing"
 else:
 self.instance_name = Instance.objects.my_hostname()

View File

@@ -569,7 +569,7 @@ register(
 register(
 'LOG_AGGREGATOR_LOGGERS',
 field_class=fields.StringListField,
-default=['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket'],
+default=['awx', 'activity_stream', 'job_events', 'system_tracking'],
 label=_('Loggers Sending Data to Log Aggregator Form'),
 help_text=_(
 'List of loggers that will send HTTP logs to the collector, these can '
@@ -577,8 +577,7 @@ register(
 'awx - service logs\n'
 'activity_stream - activity stream records\n'
 'job_events - callback data from Ansible job events\n'
-'system_tracking - facts gathered from scan jobs\n'
-'broadcast_websocket - errors pertaining to websockets broadcast metrics\n'
+'system_tracking - facts gathered from scan jobs.'
 ),
 category=_('Logging'),
 category_slug='logging',

View File

@@ -9,16 +9,10 @@ aim_inputs = {
 'fields': [
 {
 'id': 'url',
-'label': _('CyberArk CCP URL'),
+'label': _('CyberArk AIM URL'),
 'type': 'string',
 'format': 'url',
 },
-{
-'id': 'webservice_id',
-'label': _('Web Service ID'),
-'type': 'string',
-'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
-},
 {
 'id': 'app_id',
 'label': _('Application ID'),
@@ -70,13 +64,10 @@ def aim_backend(**kwargs):
 client_cert = kwargs.get('client_cert', None)
 client_key = kwargs.get('client_key', None)
 verify = kwargs['verify']
-webservice_id = kwargs['webservice_id']
 app_id = kwargs['app_id']
 object_query = kwargs['object_query']
 object_query_format = kwargs['object_query_format']
 reason = kwargs.get('reason', None)
-if webservice_id == '':
-webservice_id = 'AIMWebService'
 query_params = {
 'AppId': app_id,
@@ -87,7 +78,7 @@ def aim_backend(**kwargs):
 query_params['reason'] = reason
 request_qs = '?' + urlencode(query_params, quote_via=quote)
-request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
+request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
 with CertFiles(client_cert, client_key) as cert:
 res = requests.get(
@@ -101,4 +92,4 @@ def aim_backend(**kwargs):
 return res.json()['Content']
-aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
+aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)

View File

@@ -1,5 +1,6 @@
 from .plugin import CredentialPlugin, CertFiles, raise_for_status
+import base64
 from urllib.parse import urljoin, quote
 from django.utils.translation import gettext_lazy as _
@@ -60,7 +61,7 @@ def conjur_backend(**kwargs):
 cacert = kwargs.get('cacert', None)
 auth_kwargs = {
-'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
+'headers': {'Content-Type': 'text/plain'},
 'data': api_key,
 'allow_redirects': False,
 }
@@ -68,9 +69,9 @@
 with CertFiles(cacert) as cert:
 # https://www.conjur.org/api.html#authentication-authenticate-post
 auth_kwargs['verify'] = cert
-resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
+resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
 raise_for_status(resp)
-token = resp.content.decode('utf-8')
+token = base64.b64encode(resp.content).decode('utf-8')
 lookup_kwargs = {
 'headers': {'Authorization': 'Token token="{}"'.format(token)},
@@ -78,10 +79,9 @@
 }
 # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
-path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
+path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
 if version:
-ver = "version={}".format(version)
-path = '?'.join([path, ver])
+path = '?'.join([path, version])
 with CertFiles(cacert) as cert:
 lookup_kwargs['verify'] = cert
@@ -90,4 +90,4 @@
 return resp.text
-conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
+conjur_plugin = CredentialPlugin('CyberArk Conjur Secret Lookup', inputs=conjur_inputs, backend=conjur_backend)

View File

@@ -466,7 +466,7 @@ class AutoscalePool(WorkerPool):
 task_name = 'unknown'
 if isinstance(body, dict):
 task_name = body.get('task')
-logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
+logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
 return super(AutoscalePool, self).write(preferred_queue, body)
 except Exception:
 for conn in connections.all():

View File

@@ -1,13 +1,14 @@
 import inspect
 import logging
+import sys
 import json
 import time
 from uuid import uuid4
+from django.conf import settings
 from django_guid import get_guid
 from . import pg_bus_conn
-from awx.main.utils import is_testing
 logger = logging.getLogger('awx.main.dispatch')
@@ -92,7 +93,7 @@ class task:
 obj.update(**kw)
 if callable(queue):
 queue = queue()
-if not is_testing():
+if not settings.IS_TESTING(sys.argv):
 with pg_bus_conn() as conn:
 conn.notify(queue, json.dumps(obj))
 return (obj, queue)

View File

@@ -38,14 +38,7 @@ class Command(BaseCommand):
 (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
 RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
 RegisterQueue(
-settings.DEFAULT_EXECUTION_QUEUE_NAME,
-100,
-0,
-[],
-is_container_group=True,
-pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE,
-max_forks=settings.DEFAULT_EXECUTION_QUEUE_MAX_FORKS,
-max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
+settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
 ).register()
 else:
 (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)

View File

@@ -32,14 +32,8 @@ class Command(BaseCommand):
 def handle(self, **options):
 self.old_key = settings.SECRET_KEY
 custom_key = os.environ.get("TOWER_SECRET_KEY")
-if options.get("use_custom_key"):
-if custom_key:
-self.new_key = custom_key
-else:
-print("Use custom key was specified but the env var TOWER_SECRET_KEY was not available")
-import sys
-sys.exit(1)
+if options.get("use_custom_key") and custom_key:
+self.new_key = custom_key
 else:
 self.new_key = base64.encodebytes(os.urandom(33)).decode().rstrip()
 self._notification_templates()
View File

@@ -17,9 +17,7 @@ class InstanceNotFound(Exception):
 class RegisterQueue:
-def __init__(
-self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None, max_forks=None, max_concurrent_jobs=None
-):
+def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None):
 self.instance_not_found_err = None
 self.queuename = queuename
 self.instance_percent = instance_percent
@@ -27,8 +25,6 @@ class RegisterQueue:
 self.hostname_list = hostname_list
 self.is_container_group = is_container_group
 self.pod_spec_override = pod_spec_override
-self.max_forks = max_forks
-self.max_concurrent_jobs = max_concurrent_jobs
 def get_create_update_instance_group(self):
 created = False
@@ -49,14 +45,6 @@ class RegisterQueue:
 ig.pod_spec_override = self.pod_spec_override
 changed = True
-if self.max_forks and (ig.max_forks != self.max_forks):
-ig.max_forks = self.max_forks
-changed = True
-if self.max_concurrent_jobs and (ig.max_concurrent_jobs != self.max_concurrent_jobs):
-ig.max_concurrent_jobs = self.max_concurrent_jobs
-changed = True
 if changed:
 ig.save()

View File

@@ -158,11 +158,7 @@ class InstanceManager(models.Manager):
 return (False, instance)
 # Create new instance, and fill in default values
-create_defaults = {
-'node_state': Instance.States.INSTALLED,
-'capacity': 0,
-'listener_port': 27199,
-}
+create_defaults = {'node_state': Instance.States.INSTALLED, 'capacity': 0}
 if defaults is not None:
 create_defaults.update(defaults)
 uuid_option = {}

View File

@@ -1,14 +1,24 @@
# Generated by Django 3.2.13 on 2022-06-21 21:29 # Generated by Django 3.2.13 on 2022-06-21 21:29
from django.db import migrations from django.db import migrations
import logging
logger = logging.getLogger("awx")
def forwards(apps, schema_editor): def forwards(apps, schema_editor):
InventorySource = apps.get_model('main', 'InventorySource') InventorySource = apps.get_model('main', 'InventorySource')
InventorySource.objects.filter(update_on_project_update=True).update(update_on_launch=True) sources = InventorySource.objects.filter(update_on_project_update=True)
for src in sources:
Project = apps.get_model('main', 'Project') if src.update_on_launch == False:
Project.objects.filter(scm_inventory_sources__update_on_project_update=True).update(scm_update_on_launch=True) src.update_on_launch = True
src.save(update_fields=['update_on_launch'])
logger.info(f"Setting update_on_launch to True for {src}")
proj = src.source_project
if proj and proj.scm_update_on_launch is False:
proj.scm_update_on_launch = True
proj.save(update_fields=['scm_update_on_launch'])
logger.warning(f"Setting scm_update_on_launch to True for {proj}")
class Migration(migrations.Migration): class Migration(migrations.Migration):
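
For reference, a minimal sketch of the forwards-only data-migration shape used in the hunk above; the dependency tuple here is a placeholder, and a real migration must point at the app's actual previous migration.

```python
from django.db import migrations


def forwards(apps, schema_editor):
    # Always use the historical model via apps.get_model(), never a direct import.
    InventorySource = apps.get_model('main', 'InventorySource')
    InventorySource.objects.filter(update_on_project_update=True).update(update_on_launch=True)


class Migration(migrations.Migration):
    dependencies = [('main', '0001_initial')]  # placeholder dependency

    operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]
```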

View File

@@ -1,23 +0,0 @@
# Generated by Django 3.2.13 on 2022-10-24 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0172_prevent_instance_fallback'),
]
operations = [
migrations.AddField(
model_name='instancegroup',
name='max_concurrent_jobs',
field=models.IntegerField(default=0, help_text='Maximum number of concurrent jobs to run on this group. Zero means no limit.'),
),
migrations.AddField(
model_name='instancegroup',
name='max_forks',
field=models.IntegerField(default=0, help_text='Max forks to execute on this group. Zero means no limit.'),
),
]

View File

@@ -1,18 +0,0 @@
# Generated by Django 3.2.16 on 2022-12-07 21:11
from django.db import migrations
from awx.main.migrations import _rbac as rbac
from awx.main.migrations import _migration_utils as migration_utils
class Migration(migrations.Migration):
dependencies = [
('main', '0173_instancegroup_max_limits'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(rbac.create_roles),
]

View File

@@ -15,7 +15,6 @@ def aws(cred, env, private_data_dir):
if cred.has_input('security_token'): if cred.has_input('security_token'):
env['AWS_SECURITY_TOKEN'] = cred.get_input('security_token', default='') env['AWS_SECURITY_TOKEN'] = cred.get_input('security_token', default='')
env['AWS_SESSION_TOKEN'] = env['AWS_SECURITY_TOKEN']
def gce(cred, env, private_data_dir): def gce(cred, env, private_data_dir):

View File

@@ -233,12 +233,11 @@ class Instance(HasPolicyEditsMixin, BaseModel):
if not isinstance(vargs.get('grace_period'), int): if not isinstance(vargs.get('grace_period'), int):
vargs['grace_period'] = 60 # grace period of 60 minutes, need to set because CLI default will not take effect vargs['grace_period'] = 60 # grace period of 60 minutes, need to set because CLI default will not take effect
if 'exclude_strings' not in vargs and vargs.get('file_pattern'): if 'exclude_strings' not in vargs and vargs.get('file_pattern'):
active_job_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting')) active_pks = list(
if self.node_type == 'execution': UnifiedJob.objects.filter(
active_job_qs = active_job_qs.filter(execution_node=self.hostname) (models.Q(execution_node=self.hostname) | models.Q(controller_node=self.hostname)) & models.Q(status__in=('running', 'waiting'))
else: ).values_list('pk', flat=True)
active_job_qs = active_job_qs.filter(controller_node=self.hostname) )
active_pks = list(active_job_qs.values_list('pk', flat=True))
if active_pks: if active_pks:
vargs['exclude_strings'] = [JOB_FOLDER_PREFIX % job_id for job_id in active_pks] vargs['exclude_strings'] = [JOB_FOLDER_PREFIX % job_id for job_id in active_pks]
if 'remove_images' in vargs or 'image_prune' in vargs: if 'remove_images' in vargs or 'image_prune' in vargs:
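
A standalone sketch of how the exclusion list above is built: active jobs on this node become work-folder patterns the cleanup should skip. The JOB_FOLDER_PREFIX value and the dict shape are assumptions for illustration.

```python
JOB_FOLDER_PREFIX = 'awx_%s_*'  # assumed format, for illustration only


def cleanup_exclude_strings(node_type, hostname, active_jobs):
    """active_jobs: an iterable of dicts with 'pk', 'execution_node' and 'controller_node' keys."""
    if node_type == 'execution':
        relevant = [j for j in active_jobs if j['execution_node'] == hostname]
    else:
        relevant = [j for j in active_jobs if j['controller_node'] == hostname]
    return [JOB_FOLDER_PREFIX % j['pk'] for j in relevant]


jobs = [
    {'pk': 7, 'execution_node': 'node1', 'controller_node': 'ctl1'},
    {'pk': 8, 'execution_node': 'node2', 'controller_node': 'ctl1'},
]
assert cleanup_exclude_strings('execution', 'node1', jobs) == ['awx_7_*']
assert cleanup_exclude_strings('control', 'ctl1', jobs) == ['awx_7_*', 'awx_8_*']
```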
@@ -379,8 +378,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
default='', default='',
) )
) )
max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group")) policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
policy_instance_minimum = models.IntegerField(default=0, help_text=_("Static minimum number of Instances to automatically assign to this group")) policy_instance_minimum = models.IntegerField(default=0, help_text=_("Static minimum number of Instances to automatically assign to this group"))
policy_instance_list = JSONBlob( policy_instance_list = JSONBlob(
@@ -394,8 +391,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
@property @property
def capacity(self): def capacity(self):
if self.is_container_group:
return self.max_forks
return sum(inst.capacity for inst in self.instances.all()) return sum(inst.capacity for inst in self.instances.all())
@property @property
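
A toy model (not the Django model) of the capacity semantics in this hunk: a container group reports its configured max_forks as capacity, while an ordinary group reports the sum of its members' capacities.

```python
class ToyInstanceGroup:
    def __init__(self, is_container_group, max_forks=0, instance_capacities=()):
        self.is_container_group = is_container_group
        self.max_forks = max_forks
        self.instance_capacities = list(instance_capacities)

    @property
    def capacity(self):
        # Container groups have no instances to sum, so max_forks stands in for capacity.
        if self.is_container_group:
            return self.max_forks
        return sum(self.instance_capacities)


assert ToyInstanceGroup(True, max_forks=50).capacity == 50
assert ToyInstanceGroup(False, instance_capacities=[20, 30]).capacity == 50
```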

View File

@@ -247,19 +247,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
return (number, step) return (number, step)
def get_sliced_hosts(self, host_queryset, slice_number, slice_count): def get_sliced_hosts(self, host_queryset, slice_number, slice_count):
"""
Returns a slice of Hosts given a slice number and total slice count, or
the original queryset if slicing is not requested.
NOTE: If slicing is performed, this will return a List[Host] with the
resulting slice. If slicing is not performed it will return the
original queryset (not evaluating it or forcing it to a list). This
puts the burden on the caller to check the resulting type. This is
non-ideal because it's easy to get wrong, but I think the only way
around it is to force the queryset, which has memory implications for
large inventories.
"""
if slice_count > 1 and slice_number > 0: if slice_count > 1 and slice_number > 0:
offset = slice_number - 1 offset = slice_number - 1
host_queryset = host_queryset[offset::slice_count] host_queryset = host_queryset[offset::slice_count]
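
The slicing rule described in the docstring above, shown on a plain list rather than a queryset: slice N of M takes every M-th host starting at offset N-1, so the slices partition the inventory without overlap.

```python
def sliced_hosts(hosts, slice_number, slice_count):
    if slice_count > 1 and slice_number > 0:
        offset = slice_number - 1
        return hosts[offset::slice_count]
    return hosts  # slicing not requested; return the input unchanged


hosts = ['h1', 'h2', 'h3', 'h4', 'h5']
assert sliced_hosts(hosts, 1, 2) == ['h1', 'h3', 'h5']
assert sliced_hosts(hosts, 2, 2) == ['h2', 'h4']
assert sliced_hosts(hosts, 1, 1) == hosts
```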
@@ -567,6 +554,17 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
# Use .job_host_summaries.all() to get jobs affecting this host. # Use .job_host_summaries.all() to get jobs affecting this host.
# Use .job_events.all() to get events affecting this host. # Use .job_events.all() to get events affecting this host.
'''
We don't use timestamp, but we may in the future.
'''
def update_ansible_facts(self, module, facts, timestamp=None):
if module == "ansible":
self.ansible_facts.update(facts)
else:
self.ansible_facts[module] = facts
self.save()
def get_effective_host_name(self): def get_effective_host_name(self):
""" """
Return the name of the host that will be used in actual ansible Return the name of the host that will be used in actual ansible

View File

@@ -15,7 +15,6 @@ from urllib.parse import urljoin
from django.conf import settings from django.conf import settings
from django.core.exceptions import ValidationError from django.core.exceptions import ValidationError
from django.db import models from django.db import models
from django.db.models.query import QuerySet
# from django.core.cache import cache # from django.core.cache import cache
from django.utils.encoding import smart_str from django.utils.encoding import smart_str
@@ -44,7 +43,7 @@ from awx.main.models.notifications import (
NotificationTemplate, NotificationTemplate,
JobNotificationMixin, JobNotificationMixin,
) )
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
@@ -845,35 +844,22 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
def get_notification_friendly_name(self): def get_notification_friendly_name(self):
return "Job" return "Job"
def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters): def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
"""Return value is an iterable for the relevant hosts for this job"""
if not self.inventory: if not self.inventory:
return [] return []
host_queryset = self.inventory.hosts.only(*only) host_queryset = self.inventory.hosts.only(*only)
if filters: return self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
host_queryset = host_queryset.filter(**filters)
host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
if isinstance(host_queryset, QuerySet):
return host_queryset.iterator()
return host_queryset
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True) def start_job_fact_cache(self, destination, modification_times, timeout=None):
def start_job_fact_cache(self, destination, log_data, timeout=None):
self.log_lifecycle("start_job_fact_cache") self.log_lifecycle("start_job_fact_cache")
log_data['job_id'] = self.id
log_data['written_ct'] = 0
os.makedirs(destination, mode=0o700) os.makedirs(destination, mode=0o700)
hosts = self._get_inventory_hosts()
if timeout is None: if timeout is None:
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
if timeout > 0: if timeout > 0:
# exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT` seconds # exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT` seconds
timeout = now() - datetime.timedelta(seconds=timeout) timeout = now() - datetime.timedelta(seconds=timeout)
hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout) hosts = hosts.filter(ansible_facts_modified__gte=timeout)
else:
hosts = self._get_inventory_hosts()
last_filepath_written = None
for host in hosts: for host in hosts:
filepath = os.sep.join(map(str, [destination, host.name])) filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination): if not os.path.realpath(filepath).startswith(destination):
@@ -883,38 +869,23 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
with codecs.open(filepath, 'w', encoding='utf-8') as f: with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600) os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f) json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1
last_filepath_written = filepath
except IOError: except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
# make note of the time we wrote the last file so we can check if any file changed later # make note of the time we wrote the file so we can check if it changed later
if last_filepath_written: modification_times[filepath] = os.path.getmtime(filepath)
return os.path.getmtime(last_filepath_written)
return None
@log_excess_runtime( def finish_job_fact_cache(self, destination, modification_times):
logger,
debug_cutoff=0.01,
msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True,
)
def finish_job_fact_cache(self, destination, facts_write_time, log_data):
self.log_lifecycle("finish_job_fact_cache") self.log_lifecycle("finish_job_fact_cache")
log_data['job_id'] = self.id
log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0
hosts_to_update = []
for host in self._get_inventory_hosts(): for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name])) filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination): if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
if os.path.exists(filepath): if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run... # If the file changed since we wrote it pre-playbook run...
modified = os.path.getmtime(filepath) modified = os.path.getmtime(filepath)
if (not facts_write_time) or modified > facts_write_time: if modified > modification_times.get(filepath, 0):
with codecs.open(filepath, 'r', encoding='utf-8') as f: with codecs.open(filepath, 'r', encoding='utf-8') as f:
try: try:
ansible_facts = json.load(f) ansible_facts = json.load(f)
@@ -922,7 +893,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
continue continue
host.ansible_facts = ansible_facts host.ansible_facts = ansible_facts
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host) host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
system_tracking_logger.info( system_tracking_logger.info(
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)), 'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict( extra=dict(
@@ -933,21 +904,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
job_id=self.id, job_id=self.id,
), ),
) )
log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else: else:
# if the file goes missing, ansible removed it (likely via clear_facts) # if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {} host.ansible_facts = {}
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name))) system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1 host.save()
if len(hosts_to_update) > 100:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = []
if hosts_to_update:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
class LaunchTimeConfigBase(BaseModel): class LaunchTimeConfigBase(BaseModel):
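
A self-contained sketch of the timestamp trick the fact-cache code above relies on: remember when the cache files were written before the playbook run, then afterwards treat any file with a newer mtime as facts Ansible updated and any missing file as facts that were cleared. File layout and function names here are illustrative only.

```python
import json
import os
import tempfile
import time


def write_fact_cache(destination, facts_by_host):
    os.makedirs(destination, exist_ok=True)
    last_write = None
    for hostname, facts in facts_by_host.items():
        path = os.path.join(destination, hostname)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(facts, f)
        last_write = os.path.getmtime(path)
    return last_write  # None when the inventory had no hosts


def collect_changed_facts(destination, hostnames, facts_write_time):
    changed = {}
    for hostname in hostnames:
        path = os.path.join(destination, hostname)
        if not os.path.exists(path):
            changed[hostname] = {}  # file removed, e.g. facts were cleared
            continue
        if facts_write_time is None or os.path.getmtime(path) > facts_write_time:
            with open(path, 'r', encoding='utf-8') as f:
                changed[hostname] = json.load(f)
    return changed


cache = tempfile.mkdtemp()
written_at = write_fact_cache(cache, {'web1': {'os': 'linux'}})
time.sleep(0.05)  # make a later mtime observable
with open(os.path.join(cache, 'web1'), 'w', encoding='utf-8') as f:
    json.dump({'os': 'linux', 'cpus': 4}, f)  # simulate Ansible updating the cache
print(collect_changed_facts(cache, ['web1'], written_at))
```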

View File

@@ -471,29 +471,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:project_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:project_detail', kwargs={'pk': self.pk}, request=request)
def get_reason_if_failed(self):
"""
If the project is in a failed or errored state, return a human-readable
error message explaining why. Otherwise return None.
This is used during validation in the serializer and also by
RunProjectUpdate/RunInventoryUpdate.
"""
if self.status not in ('error', 'failed'):
return None
latest_update = self.project_updates.last()
if latest_update is not None and latest_update.failed:
failed_validation_tasks = latest_update.project_update_events.filter(
event='runner_on_failed',
play="Perform project signature/checksum verification",
)
if failed_validation_tasks:
return _("Last project update failed due to signature validation failure.")
return _("Missing a revision to run due to failed project update.")
''' '''
RelatedJobsMixin RelatedJobsMixin
''' '''

View File

@@ -1351,12 +1351,12 @@ class UnifiedJob(
if required in defined_fields and not credential.has_input(required): if required in defined_fields and not credential.has_input(required):
missing_credential_inputs.append(required) missing_credential_inputs.append(required)
if missing_credential_inputs: if missing_credential_inputs:
self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format( self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs)) self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
) )
self.save(update_fields=['job_explanation']) self.save(update_fields=['job_explanation'])
return (False, None) return (False, None)
needed = self.get_passwords_needed_to_start() needed = self.get_passwords_needed_to_start()
try: try:

View File

@@ -5,6 +5,9 @@ import json
import logging import logging
import requests import requests
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
from awx.main.notifications.base import AWXBaseEmailBackend from awx.main.notifications.base import AWXBaseEmailBackend
from awx.main.utils import get_awx_http_client_headers from awx.main.utils import get_awx_http_client_headers
from awx.main.notifications.custom_notification_base import CustomNotificationBase from awx.main.notifications.custom_notification_base import CustomNotificationBase
@@ -14,8 +17,6 @@ logger = logging.getLogger('awx.main.notifications.webhook_backend')
class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase): class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
MAX_RETRIES = 5
init_parameters = { init_parameters = {
"url": {"label": "Target URL", "type": "string"}, "url": {"label": "Target URL", "type": "string"},
"http_method": {"label": "HTTP Method", "type": "string", "default": "POST"}, "http_method": {"label": "HTTP Method", "type": "string", "default": "POST"},
@@ -63,67 +64,20 @@ class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
if self.http_method.lower() not in ['put', 'post']: if self.http_method.lower() not in ['put', 'post']:
raise ValueError("HTTP method must be either 'POST' or 'PUT'.") raise ValueError("HTTP method must be either 'POST' or 'PUT'.")
chosen_method = getattr(requests, self.http_method.lower(), None) chosen_method = getattr(requests, self.http_method.lower(), None)
for m in messages: for m in messages:
auth = None auth = None
if self.username or self.password: if self.username or self.password:
auth = (self.username, self.password) auth = (self.username, self.password)
r = chosen_method(
# the constructor for EmailMessage - https://docs.djangoproject.com/en/4.1/_modules/django/core/mail/message will turn an empty dictionary into an empty string "{}".format(m.recipients()[0]),
# sometimes an empty dict is intentional and we added this conditional to enforce that auth=auth,
if not m.body: data=json.dumps(m.body, ensure_ascii=False).encode('utf-8'),
m.body = {} headers=dict(list(get_awx_http_client_headers().items()) + list((self.headers or {}).items())),
verify=(not self.disable_ssl_verification),
url = str(m.recipients()[0]) )
data = json.dumps(m.body, ensure_ascii=False).encode('utf-8') if r.status_code >= 400:
headers = {**(get_awx_http_client_headers()), **(self.headers or {})} logger.error(smart_str(_("Error sending notification webhook: {}").format(r.status_code)))
err = None
for retries in range(self.MAX_RETRIES):
# Sometimes we hit redirect URLs. We must account for this: extract the redirect URL from the response headers and try again. Max retries == 5
resp = chosen_method(
url=url,
auth=auth,
data=data,
headers=headers,
verify=(not self.disable_ssl_verification),
allow_redirects=False, # override default behaviour for redirects
)
# either success or error reached if this conditional fires
if resp.status_code not in [301, 307]:
break
# we've hit a redirect. extract the redirect URL out of the first response header and try again
logger.warning(
f"Received a {resp.status_code} from {url}, trying to reach redirect url {resp.headers.get('Location', None)}; attempt #{retries+1}"
)
# take the first redirect URL in the response header and try that
url = resp.headers.get("Location", None)
if url is None:
err = f"Webhook notification received redirect to a blank URL from {url}. Response headers={resp.headers}"
break
else:
# no break condition in the loop encountered; therefore we have hit the maximum number of retries
err = f"Webhook notification max number of retries [{self.MAX_RETRIES}] exceeded. Failed to send webhook notification to {url}"
if resp.status_code >= 400:
err = f"Error sending webhook notification: {resp.status_code}"
# log error message
if err:
logger.error(err)
if not self.fail_silently: if not self.fail_silently:
raise Exception(err) raise Exception(smart_str(_("Error sending notification webhook: {}").format(r.status_code)))
sent_messages += 1
# no errors were encountered therefore we successfully sent off the notification webhook
if resp.status_code in range(200, 299):
logger.debug(f"Notification webhook successfully sent to {url}. Received {resp.status_code}")
sent_messages += 1
return sent_messages return sent_messages
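
A minimal, generic sketch of the manual redirect handling added above, assuming only the requests library; the real backend layers auth, custom headers, SSL-verification settings and notification bookkeeping on top of this.

```python
import json

import requests

MAX_RETRIES = 5


def post_with_redirects(url, body):
    data = json.dumps(body).encode('utf-8')
    err = None
    resp = None
    for _attempt in range(MAX_RETRIES):
        resp = requests.post(url, data=data, allow_redirects=False)
        if resp.status_code not in (301, 307):
            break  # either success or a hard error; stop following redirects
        next_url = resp.headers.get('Location')
        if next_url is None:
            err = f"redirected away from {url} without a Location header"
            break
        url = next_url
    else:
        err = f"gave up after {MAX_RETRIES} redirects"
    if err is None and resp.status_code >= 400:
        err = f"webhook returned {resp.status_code}"
    if err:
        raise RuntimeError(err)
    return resp
```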

View File

@@ -3,8 +3,6 @@
from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed
from taggit.managers import TaggableManager
class ActivityStreamRegistrar(object): class ActivityStreamRegistrar(object):
def __init__(self): def __init__(self):
@@ -21,8 +19,6 @@ class ActivityStreamRegistrar(object):
pre_delete.connect(activity_stream_delete, sender=model, dispatch_uid=str(self.__class__) + str(model) + "_delete") pre_delete.connect(activity_stream_delete, sender=model, dispatch_uid=str(self.__class__) + str(model) + "_delete")
for m2mfield in model._meta.many_to_many: for m2mfield in model._meta.many_to_many:
if isinstance(m2mfield, TaggableManager):
continue # Special case for taggit app
try: try:
m2m_attr = getattr(model, m2mfield.name) m2m_attr = getattr(model, m2mfield.name)
m2m_changed.connect( m2m_changed.connect(

View File

@@ -27,8 +27,8 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
websocket_urlpatterns = [ websocket_urlpatterns = [
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), re_path(r'websocket/$', consumers.EventConsumer),
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()), re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),
] ]
application = AWXProtocolTypeRouter( application = AWXProtocolTypeRouter(
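
The routing hunk above reflects the Channels 3 requirement that routes point at Consumer.as_asgi() rather than the consumer class itself. A minimal sketch assuming a Channels 3 installation; the consumer here is a placeholder, not AWX's EventConsumer.

```python
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path


class EchoConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        await self.accept()

    async def receive_json(self, content, **kwargs):
        await self.send_json(content)


websocket_urlpatterns = [
    re_path(r'websocket/$', EchoConsumer.as_asgi()),
]

application = ProtocolTypeRouter({'websocket': URLRouter(websocket_urlpatterns)})
```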

View File

@@ -39,11 +39,12 @@ from awx.main.utils import (
ScheduleTaskManager, ScheduleTaskManager,
ScheduleWorkflowManager, ScheduleWorkflowManager,
) )
from awx.main.utils.common import task_manager_bulk_reschedule, is_testing from awx.main.utils.common import task_manager_bulk_reschedule
from awx.main.signals import disable_activity_stream from awx.main.signals import disable_activity_stream
from awx.main.constants import ACTIVE_STATES from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dependency_graph import DependencyGraph from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.scheduler.task_manager_models import TaskManagerModels from awx.main.scheduler.task_manager_models import TaskManagerInstances
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
import awx.main.analytics.subsystem_metrics as s_metrics import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.utils import decrypt_field from awx.main.utils import decrypt_field
@@ -70,12 +71,7 @@ class TaskBase:
# is called later. # is called later.
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False) self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
self.start_time = time.time() self.start_time = time.time()
# We want to avoid calling settings in loops, so cache these settings at init time
self.start_task_limit = settings.START_TASK_LIMIT self.start_task_limit = settings.START_TASK_LIMIT
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT
self.control_task_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
for m in self.subsystem_metrics.METRICS: for m in self.subsystem_metrics.METRICS:
if m.startswith(self.prefix): if m.startswith(self.prefix):
self.subsystem_metrics.set(m, 0) self.subsystem_metrics.set(m, 0)
@@ -83,7 +79,7 @@ class TaskBase:
def timed_out(self): def timed_out(self):
"""Return True/False if we have met or exceeded the timeout for the task manager.""" """Return True/False if we have met or exceeded the timeout for the task manager."""
elapsed = time.time() - self.start_time elapsed = time.time() - self.start_time
if elapsed >= self.task_manager_timeout: if elapsed >= settings.TASK_MANAGER_TIMEOUT:
logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.") logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.")
return True return True
return False return False
@@ -101,7 +97,7 @@ class TaskBase:
self.all_tasks = [t for t in qs] self.all_tasks = [t for t in qs]
def record_aggregate_metrics(self, *args): def record_aggregate_metrics(self, *args):
if not is_testing(): if not settings.IS_TESTING():
# increment task_manager_schedule_calls regardless of whether the other # increment task_manager_schedule_calls regardless of whether the other
# metrics are recorded # metrics are recorded
s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1) s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
@@ -475,8 +471,9 @@ class TaskManager(TaskBase):
Init AFTER we know this instance of the task manager will run because the lock is acquired. Init AFTER we know this instance of the task manager will run because the lock is acquired.
""" """
self.dependency_graph = DependencyGraph() self.dependency_graph = DependencyGraph()
self.tm_models = TaskManagerModels() self.instances = TaskManagerInstances(self.all_tasks)
self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
self.controlplane_ig = self.instance_groups.controlplane_ig
def job_blocked_by(self, task): def job_blocked_by(self, task):
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
@@ -507,16 +504,8 @@ class TaskManager(TaskBase):
return None return None
@timeit @timeit
def start_task(self, task, instance_group, instance=None): def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
# Just like for process_running_tasks, add the job to the dependency graph and
# ask the TaskManagerInstanceGroups object to update consumed capacity on all
# implicated instances and container groups.
self.dependency_graph.add_job(task) self.dependency_graph.add_job(task)
if instance_group is not None:
task.instance_group = instance_group
# We need the instance group assigned to correctly account for container group max_concurrent_jobs and max_forks
self.tm_models.consume_capacity(task)
self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1) self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
self.start_task_limit -= 1 self.start_task_limit -= 1
if self.start_task_limit == 0: if self.start_task_limit == 0:
@@ -524,6 +513,20 @@ class TaskManager(TaskBase):
ScheduleTaskManager().schedule() ScheduleTaskManager().schedule()
from awx.main.tasks.system import handle_work_error, handle_work_success from awx.main.tasks.system import handle_work_error, handle_work_success
# update capacity for control node and execution node
if task.controller_node:
self.instances[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
if task.execution_node:
self.instances[task.execution_node].consume_capacity(task.task_impact)
dependent_tasks = dependent_tasks or []
task_actual = {
'type': get_type_for_model(type(task)),
'id': task.id,
}
dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
task.status = 'waiting' task.status = 'waiting'
(start_status, opts) = task.pre_start() (start_status, opts) = task.pre_start()
@@ -543,6 +546,7 @@ class TaskManager(TaskBase):
ScheduleWorkflowManager().schedule() ScheduleWorkflowManager().schedule()
# at this point we already have control/execution nodes selected for the following cases # at this point we already have control/execution nodes selected for the following cases
else: else:
task.instance_group = instance_group
execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else '' execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
logger.debug( logger.debug(
f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.' f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
@@ -555,7 +559,6 @@ class TaskManager(TaskBase):
# apply_async does a NOTIFY to the channel dispatcher is listening to # apply_async does a NOTIFY to the channel dispatcher is listening to
# postgres will treat this as part of the transaction, which is what we want # postgres will treat this as part of the transaction, which is what we want
if task.status != 'failed' and type(task) is not WorkflowJob: if task.status != 'failed' and type(task) is not WorkflowJob:
task_actual = {'type': get_type_for_model(type(task)), 'id': task.id}
task_cls = task._get_task_class() task_cls = task._get_task_class()
task_cls.apply_async( task_cls.apply_async(
[task.pk], [task.pk],
@@ -563,7 +566,7 @@ class TaskManager(TaskBase):
queue=task.get_queue_name(), queue=task.get_queue_name(),
uuid=task.celery_task_id, uuid=task.celery_task_id,
callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}], callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
errbacks=[{'task': handle_work_error.name, 'kwargs': {'task_actual': task_actual}}], errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
) )
# In exception cases, like a job failing pre-start checks, we send the websocket status message # In exception cases, like a job failing pre-start checks, we send the websocket status message
@@ -577,7 +580,6 @@ class TaskManager(TaskBase):
if type(task) is WorkflowJob: if type(task) is WorkflowJob:
ScheduleWorkflowManager().schedule() ScheduleWorkflowManager().schedule()
self.dependency_graph.add_job(task) self.dependency_graph.add_job(task)
self.tm_models.consume_capacity(task)
@timeit @timeit
def process_pending_tasks(self, pending_tasks): def process_pending_tasks(self, pending_tasks):
@@ -602,18 +604,18 @@ class TaskManager(TaskBase):
if isinstance(task, WorkflowJob): if isinstance(task, WorkflowJob):
# Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph. # Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph.
# Double check that using just the DependencyGraph works for Workflows and Sliced Jobs. # Double check that using just the DependencyGraph works for Workflows and Sliced Jobs.
self.start_task(task, None, None) self.start_task(task, None, task.get_jobs_fail_chain(), None)
continue continue
found_acceptable_queue = False found_acceptable_queue = False
# Determine if there is control capacity for the task # Determine if there is control capacity for the task
if task.capacity_type == 'control': if task.capacity_type == 'control':
control_impact = task.task_impact + self.control_task_impact control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
else: else:
control_impact = self.control_task_impact control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
control_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance( control_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
task, instance_group_name=self.controlplane_ig.name, impact=control_impact, capacity_type='control' task, instance_group_name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, impact=control_impact, capacity_type='control'
) )
if not control_instance: if not control_instance:
self.task_needs_capacity(task, tasks_to_update_job_explanation) self.task_needs_capacity(task, tasks_to_update_job_explanation)
@@ -624,29 +626,25 @@ class TaskManager(TaskBase):
# All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
if task.capacity_type == 'control': if task.capacity_type == 'control':
if not self.tm_models.instance_groups[self.controlplane_ig.name].has_remaining_capacity(control_impact=True):
continue
task.execution_node = control_instance.hostname task.execution_node = control_instance.hostname
execution_instance = self.tm_models.instances[control_instance.hostname].obj execution_instance = self.instances[control_instance.hostname].obj
task.log_lifecycle("controller_node_chosen") task.log_lifecycle("controller_node_chosen")
task.log_lifecycle("execution_node_chosen") task.log_lifecycle("execution_node_chosen")
self.start_task(task, self.controlplane_ig, execution_instance) self.start_task(task, self.controlplane_ig, task.get_jobs_fail_chain(), execution_instance)
found_acceptable_queue = True found_acceptable_queue = True
continue continue
for instance_group in self.tm_models.instance_groups.get_instance_groups_from_task_cache(task): for instance_group in self.instance_groups.get_instance_groups_from_task_cache(task):
if not self.tm_models.instance_groups[instance_group.name].has_remaining_capacity(task):
continue
if instance_group.is_container_group: if instance_group.is_container_group:
self.start_task(task, instance_group, None) self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
found_acceptable_queue = True found_acceptable_queue = True
break break
# at this point we know the instance group is NOT a container group # at this point we know the instance group is NOT a container group
# because if it was, it would have started the task and broke out of the loop. # because if it was, it would have started the task and broke out of the loop.
execution_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance( execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
task, instance_group_name=instance_group.name, add_hybrid_control_cost=True task, instance_group_name=instance_group.name, add_hybrid_control_cost=True
) or self.tm_models.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type) ) or self.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)
if execution_instance: if execution_instance:
task.execution_node = execution_instance.hostname task.execution_node = execution_instance.hostname
@@ -662,8 +660,8 @@ class TaskManager(TaskBase):
task.log_format, instance_group.name, execution_instance.hostname, execution_instance.remaining_capacity task.log_format, instance_group.name, execution_instance.hostname, execution_instance.remaining_capacity
) )
) )
execution_instance = self.tm_models.instances[execution_instance.hostname].obj execution_instance = self.instances[execution_instance.hostname].obj
self.start_task(task, instance_group, execution_instance) self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
found_acceptable_queue = True found_acceptable_queue = True
break break
else: else:
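
A simplified, standalone sketch of the "fit to the instance with the most remaining capacity" selection the task manager performs above; nodes are plain tuples here rather than TaskManagerInstance wrappers, and the control-impact constant is an assumed value.

```python
from collections import namedtuple

Node = namedtuple('Node', 'hostname node_type capacity consumed')

CONTROL_TASK_IMPACT = 5  # assumed value; configurable in the real system


def fit_task_to_most_remaining_capacity_instance(nodes, impact, capacity_type='execution'):
    best, best_remaining = None, -1
    for node in nodes:
        if node.node_type not in (capacity_type, 'hybrid'):
            continue
        would_be_remaining = (node.capacity - node.consumed) - impact
        if node.node_type == 'hybrid':
            would_be_remaining -= CONTROL_TASK_IMPACT  # hybrid nodes also control their own task
        if would_be_remaining >= 0 and would_be_remaining > best_remaining:
            best, best_remaining = node, would_be_remaining
    return best


nodes = [Node('exec1', 'execution', 100, 90), Node('hybrid1', 'hybrid', 100, 20)]
assert fit_task_to_most_remaining_capacity_instance(nodes, impact=30).hostname == 'hybrid1'
```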

View File

@@ -15,18 +15,15 @@ logger = logging.getLogger('awx.main.scheduler')
class TaskManagerInstance: class TaskManagerInstance:
"""A class representing minimal data the task manager needs to represent an Instance.""" """A class representing minimal data the task manager needs to represent an Instance."""
def __init__(self, obj, **kwargs): def __init__(self, obj):
self.obj = obj self.obj = obj
self.node_type = obj.node_type self.node_type = obj.node_type
self.consumed_capacity = 0 self.consumed_capacity = 0
self.capacity = obj.capacity self.capacity = obj.capacity
self.hostname = obj.hostname self.hostname = obj.hostname
self.jobs_running = 0
def consume_capacity(self, impact, job_impact=False): def consume_capacity(self, impact):
self.consumed_capacity += impact self.consumed_capacity += impact
if job_impact:
self.jobs_running += 1
@property @property
def remaining_capacity(self): def remaining_capacity(self):
@@ -36,106 +33,9 @@ class TaskManagerInstance:
return remaining return remaining
class TaskManagerInstanceGroup:
"""A class representing minimal data the task manager needs to represent an InstanceGroup."""
def __init__(self, obj, task_manager_instances=None, **kwargs):
self.name = obj.name
self.is_container_group = obj.is_container_group
self.container_group_jobs = 0
self.container_group_consumed_forks = 0
_instances = obj.instances.all()
# We want the list of TaskManagerInstance objects because these are shared across the TaskManagerInstanceGroup objects.
# This way when we consume capacity on an instance that is in multiple groups, we tabulate across all the groups correctly.
self.instances = [task_manager_instances[instance.hostname] for instance in _instances if instance.hostname in task_manager_instances]
self.instance_hostnames = tuple([instance.hostname for instance in _instances if instance.hostname in task_manager_instances])
self.max_concurrent_jobs = obj.max_concurrent_jobs
self.max_forks = obj.max_forks
self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
def consume_capacity(self, task):
"""We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level."""
if self.is_container_group:
self.container_group_jobs += 1
self.container_group_consumed_forks += task.task_impact
else:
raise RuntimeError("We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances.")
def get_remaining_instance_capacity(self):
return sum(inst.remaining_capacity for inst in self.instances)
def get_instance_capacity(self):
return sum(inst.capacity for inst in self.instances)
def get_consumed_instance_capacity(self):
return sum(inst.consumed_capacity for inst in self.instances)
def get_instance_jobs_running(self):
return sum(inst.jobs_running for inst in self.instances)
def get_jobs_running(self):
if self.is_container_group:
return self.container_group_jobs
return sum(inst.jobs_running for inst in self.instances)
def get_capacity(self):
"""This reports any type of capacity, including that of container group jobs.
Container groups don't really have capacity, but if they have max_forks set,
we can interpret that as how much capacity the user has defined them to have.
"""
if self.is_container_group:
return self.max_forks
return self.get_instance_capacity()
def get_consumed_capacity(self):
if self.is_container_group:
return self.container_group_consumed_forks
return self.get_consumed_instance_capacity()
def get_remaining_capacity(self):
return self.get_capacity() - self.get_consumed_capacity()
def has_remaining_capacity(self, task=None, control_impact=False):
"""Pass either a task or control_impact=True to determine if the IG has capacity to run the control task or job task."""
task_impact = self.control_task_impact if control_impact else task.task_impact
job_impact = 0 if control_impact else 1
task_string = f"task {task.log_format} with impact of {task_impact}" if task else f"control task with impact of {task_impact}"
# We only enforce a concurrent-job limit if self.max_concurrent_jobs is set
if self.max_concurrent_jobs == 0:
# Skip the calculation; when max_concurrent_jobs == 0 we don't enforce any job-count max
remaining_jobs = 0
else:
remaining_jobs = self.max_concurrent_jobs - self.get_jobs_running() - job_impact
# We only enforce a fork limit if self.max_forks is set
if self.max_forks == 0:
# Skip the calculation; when max_forks == 0 we don't enforce any fork max
remaining_forks = 0
else:
remaining_forks = self.max_forks - self.get_consumed_capacity() - task_impact
if remaining_jobs < 0 or remaining_forks < 0:
# A value less than zero means the task will not fit on the group
if remaining_jobs < 0:
logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_jobs} remaining jobs")
if remaining_forks < 0:
logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_forks} remaining forks")
return False
# Returning True means there is enough remaining capacity on the group to run the task (or no instance-group-level limits are set)
logger.debug(f"{task_string} can fit on instance group {self.name} with {remaining_forks} remaining forks and {remaining_jobs} remaining jobs")
return True
class TaskManagerInstances: class TaskManagerInstances:
def __init__(self, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled'), **kwargs): def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
self.instances_by_hostname = dict() self.instances_by_hostname = dict()
self.instance_groups_container_group_jobs = dict()
self.instance_groups_container_group_consumed_forks = dict()
self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
if instances is None: if instances is None:
instances = ( instances = (
Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True) Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True)
@@ -143,15 +43,18 @@ class TaskManagerInstances:
.only('node_type', 'node_state', 'capacity', 'hostname', 'enabled') .only('node_type', 'node_state', 'capacity', 'hostname', 'enabled')
) )
for instance in instances: for instance in instances:
self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance, **kwargs) self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
def consume_capacity(self, task): # initialize remaining capacity based on currently waiting and running tasks
control_instance = self.instances_by_hostname.get(task.controller_node, '') for task in active_tasks:
execution_instance = self.instances_by_hostname.get(task.execution_node, '') if task.status not in ['waiting', 'running']:
if execution_instance and execution_instance.node_type in ('hybrid', 'execution'): continue
self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact, job_impact=True) control_instance = self.instances_by_hostname.get(task.controller_node, '')
if control_instance and control_instance.node_type in ('hybrid', 'control'): execution_instance = self.instances_by_hostname.get(task.execution_node, '')
self.instances_by_hostname[task.controller_node].consume_capacity(self.control_task_impact) if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact)
if control_instance and control_instance.node_type in ('hybrid', 'control'):
self.instances_by_hostname[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
def __getitem__(self, hostname): def __getitem__(self, hostname):
return self.instances_by_hostname.get(hostname) return self.instances_by_hostname.get(hostname)
@@ -161,57 +64,42 @@ class TaskManagerInstances:
class TaskManagerInstanceGroups: class TaskManagerInstanceGroups:
"""A class representing minimal data the task manager needs to represent all the InstanceGroups.""" """A class representing minimal data the task manager needs to represent an InstanceGroup."""
def __init__(self, task_manager_instances=None, instance_groups=None, instance_groups_queryset=None, **kwargs): def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
self.instance_groups = dict() self.instance_groups = dict()
self.task_manager_instances = task_manager_instances if task_manager_instances is not None else TaskManagerInstances()
self.controlplane_ig = None self.controlplane_ig = None
self.pk_ig_map = dict() self.pk_ig_map = dict()
self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
self.controlplane_ig_name = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
if instance_groups is not None: # for testing if instance_groups is not None: # for testing
self.instance_groups = {ig.name: TaskManagerInstanceGroup(ig, self.task_manager_instances, **kwargs) for ig in instance_groups} self.instance_groups = instance_groups
self.pk_ig_map = {ig.pk: ig for ig in instance_groups}
else: else:
if instance_groups_queryset is None: if instance_groups_queryset is None:
instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only( instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only('name', 'instances')
'name', 'instances', 'max_concurrent_jobs', 'max_forks', 'is_container_group'
)
for instance_group in instance_groups_queryset: for instance_group in instance_groups_queryset:
if instance_group.name == self.controlplane_ig_name: if instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
self.controlplane_ig = instance_group self.controlplane_ig = instance_group
self.instance_groups[instance_group.name] = TaskManagerInstanceGroup(instance_group, self.task_manager_instances, **kwargs) self.instance_groups[instance_group.name] = dict(
instances=[
instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
],
)
self.pk_ig_map[instance_group.pk] = instance_group self.pk_ig_map[instance_group.pk] = instance_group
def __getitem__(self, ig_name):
return self.instance_groups.get(ig_name)
def __contains__(self, ig_name):
return ig_name in self.instance_groups
def get_remaining_capacity(self, group_name): def get_remaining_capacity(self, group_name):
return self.instance_groups[group_name].get_remaining_instance_capacity() instances = self.instance_groups[group_name]['instances']
return sum(inst.remaining_capacity for inst in instances)
def get_consumed_capacity(self, group_name): def get_consumed_capacity(self, group_name):
return self.instance_groups[group_name].get_consumed_capacity() instances = self.instance_groups[group_name]['instances']
return sum(inst.consumed_capacity for inst in instances)
def get_jobs_running(self, group_name):
return self.instance_groups[group_name].get_jobs_running()
def get_capacity(self, group_name):
return self.instance_groups[group_name].get_capacity()
def get_instances(self, group_name):
return self.instance_groups[group_name].instances
def fit_task_to_most_remaining_capacity_instance(self, task, instance_group_name, impact=None, capacity_type=None, add_hybrid_control_cost=False): def fit_task_to_most_remaining_capacity_instance(self, task, instance_group_name, impact=None, capacity_type=None, add_hybrid_control_cost=False):
impact = impact if impact else task.task_impact impact = impact if impact else task.task_impact
capacity_type = capacity_type if capacity_type else task.capacity_type capacity_type = capacity_type if capacity_type else task.capacity_type
instance_most_capacity = None instance_most_capacity = None
most_remaining_capacity = -1 most_remaining_capacity = -1
instances = self.instance_groups[instance_group_name].instances instances = self.instance_groups[instance_group_name]['instances']
for i in instances: for i in instances:
if i.node_type not in (capacity_type, 'hybrid'): if i.node_type not in (capacity_type, 'hybrid'):
@@ -219,7 +107,7 @@ class TaskManagerInstanceGroups:
would_be_remaining = i.remaining_capacity - impact would_be_remaining = i.remaining_capacity - impact
# hybrid nodes _always_ control their own tasks # hybrid nodes _always_ control their own tasks
if add_hybrid_control_cost and i.node_type == 'hybrid': if add_hybrid_control_cost and i.node_type == 'hybrid':
would_be_remaining -= self.control_task_impact would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity): if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
instance_most_capacity = i instance_most_capacity = i
most_remaining_capacity = would_be_remaining most_remaining_capacity = would_be_remaining
@@ -227,13 +115,10 @@ class TaskManagerInstanceGroups:
def find_largest_idle_instance(self, instance_group_name, capacity_type='execution'): def find_largest_idle_instance(self, instance_group_name, capacity_type='execution'):
largest_instance = None largest_instance = None
instances = self.instance_groups[instance_group_name].instances instances = self.instance_groups[instance_group_name]['instances']
for i in instances: for i in instances:
if i.node_type not in (capacity_type, 'hybrid'): if i.node_type not in (capacity_type, 'hybrid'):
continue continue
if i.capacity <= 0:
# We don't want to select an idle instance with 0 capacity
continue
if (hasattr(i, 'jobs_running') and i.jobs_running == 0) or i.remaining_capacity == i.capacity: if (hasattr(i, 'jobs_running') and i.jobs_running == 0) or i.remaining_capacity == i.capacity:
if largest_instance is None: if largest_instance is None:
largest_instance = i largest_instance = i
@@ -254,56 +139,3 @@ class TaskManagerInstanceGroups:
logger.warn(f"No instance groups in cache exist, defaulting to global instance groups for task {task}") logger.warn(f"No instance groups in cache exist, defaulting to global instance groups for task {task}")
return task.global_instance_groups return task.global_instance_groups
return igs return igs
class TaskManagerModels:
def __init__(self, **kwargs):
# We want to avoid calls to settings over and over in loops, so cache this information here
kwargs['control_task_impact'] = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
kwargs['controlplane_ig_name'] = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
self.instances = TaskManagerInstances(**kwargs)
self.instance_groups = TaskManagerInstanceGroups(task_manager_instances=self.instances, **kwargs)
@classmethod
def init_with_consumed_capacity(cls, **kwargs):
tmm = cls(**kwargs)
tasks = kwargs.get('tasks', None)
if tasks is None:
instance_group_queryset = kwargs.get('instance_groups_queryset', None)
# No tasks were provided, so we will fetch them from the database
task_status_filter_list = kwargs.get('task_status_filter_list', ['running', 'waiting'])
task_fields = kwargs.get('task_fields', ('task_impact', 'controller_node', 'execution_node', 'instance_group'))
from awx.main.models import UnifiedJob
if instance_group_queryset is not None:
logger.debug("******************INSTANCE GROUP QUERYSET PASSED -- FILTERING TASKS ****************************")
# Sometimes callers such as the serializer pass a queryset that does not include all instance groups. In this case,
# we also need to filter the tasks we look at
tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list, instance_group__in=[ig.id for ig in instance_group_queryset]).only(
*task_fields
)
else:
# No instance group queryset was given, so look at all tasks in the whole system
tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list).only(*task_fields)
for task in tasks:
tmm.consume_capacity(task)
return tmm
def consume_capacity(self, task):
# Consume capacity on instances, which bubbles up to instance groups they are a member of
self.instances.consume_capacity(task)
# For container group jobs, additionally we must account for capacity consumed since
# The container groups have no instances to look at to track how many jobs/forks are consumed
if task.instance_group_id:
if task.instance_group_id not in self.instance_groups.pk_ig_map:
logger.warn(
f"Task {task.log_format} assigned {task.instance_group_id} but this instance group not present in map of instance groups{self.instance_groups.pk_ig_map.keys()}"
)
else:
ig = self.instance_groups.pk_ig_map[task.instance_group_id]
if ig.is_container_group:
self.instance_groups[ig.name].consume_capacity(task)
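
A standalone sketch of the instance-group gate this file introduces: max_forks and max_concurrent_jobs are enforced only when non-zero, and a task fits only if neither limit would be exceeded.

```python
def container_group_has_capacity(max_concurrent_jobs, max_forks, jobs_running, forks_consumed, task_impact):
    if max_concurrent_jobs and (max_concurrent_jobs - jobs_running - 1) < 0:
        return False  # one more job would exceed the job-count limit
    if max_forks and (max_forks - forks_consumed - task_impact) < 0:
        return False  # this task's forks would exceed the fork limit
    return True  # zero limits mean "no limit"


assert container_group_has_capacity(0, 0, 100, 1000, 50) is True   # limits disabled
assert container_group_has_capacity(2, 0, 2, 0, 1) is False        # job-count limit hit
assert container_group_has_capacity(0, 100, 0, 80, 30) is False    # fork limit hit
```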

View File

@@ -2,6 +2,8 @@ import json
import time import time
import logging import logging
from collections import deque from collections import deque
import os
import stat
# Django # Django
from django.conf import settings from django.conf import settings
@@ -204,6 +206,21 @@ class RunnerCallback:
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env) self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
# We opened a connection just for that save, close it here now # We opened a connection just for that save, close it here now
connections.close_all() connections.close_all()
elif status_data['status'] == 'failed':
# For encrypted ssh_key_data, the ansible-runner worker will open and write the
# ssh_key_data to a named pipe. Then, once the podman container starts, ssh-agent will
# read from this named pipe so that the key can be used in ansible-playbook.
# Once the podman container exits, the named pipe is deleted.
# However, if the podman container fails to start in the first place, e.g. the image
# name is incorrect, then this pipe is not cleaned up. Eventually the ansible-runner
# processor will attempt to write artifacts to the private data dir via unstream_dir, requiring
# that it open this named pipe. This leads to a hang. Thus, before any artifacts
# are written by the processor, it's important to remove this ssh_key_data pipe.
private_data_dir = self.instance.job_env.get('AWX_PRIVATE_DATA_DIR', None)
if private_data_dir:
key_data_file = os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'ssh_key_data')
if os.path.exists(key_data_file) and stat.S_ISFIFO(os.stat(key_data_file).st_mode):
os.remove(key_data_file)
elif status_data['status'] == 'error': elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None) result_traceback = status_data.get('result_traceback', None)
if result_traceback: if result_traceback:
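
A self-contained sketch of the named-pipe cleanup described in the comment above: detect that the path is a FIFO and remove it so a later reader cannot block on it. The directory layout mirrors the hunk, but the function and demo are illustrative only (POSIX-only, since it uses os.mkfifo).

```python
import os
import stat
import tempfile


def remove_stale_ssh_key_pipe(private_data_dir, job_id):
    key_data_file = os.path.join(private_data_dir, 'artifacts', str(job_id), 'ssh_key_data')
    if os.path.exists(key_data_file) and stat.S_ISFIFO(os.stat(key_data_file).st_mode):
        os.remove(key_data_file)
        return True
    return False


# Tiny demo: create a FIFO the way ansible-runner would, then clean it up.
tmp = tempfile.mkdtemp()
os.makedirs(os.path.join(tmp, 'artifacts', '42'))
os.mkfifo(os.path.join(tmp, 'artifacts', '42', 'ssh_key_data'))
assert remove_stale_ssh_key_pipe(tmp, 42) is True
```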

View File

@@ -426,7 +426,7 @@ class BaseTask(object):
""" """
instance.log_lifecycle("post_run") instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir): def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
""" """
Hook for any steps to run after job/task is marked as complete. Hook for any steps to run after job/task is marked as complete.
""" """
@@ -469,6 +469,7 @@ class BaseTask(object):
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running") self.instance.websocket_emit_status("running")
status, rc = 'error', None status, rc = 'error', None
fact_modification_times = {}
self.runner_callback.event_ct = 0 self.runner_callback.event_ct = 0
''' '''
@@ -497,6 +498,14 @@ class BaseTask(object):
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH): if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH) raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value # May have to serialize the value
private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir) private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs) passwords = self.build_passwords(self.instance, kwargs)
@@ -637,7 +646,7 @@ class BaseTask(object):
self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed') self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')
try: try:
self.final_run_hook(self.instance, status, private_data_dir) self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception: except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format)) logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
@@ -758,10 +767,6 @@ class SourceControlMixin(BaseTask):
try: try:
original_branch = None original_branch = None
failed_reason = project.get_reason_if_failed()
if failed_reason:
self.update_model(self.instance.pk, status='failed', job_explanation=failed_reason)
raise RuntimeError(failed_reason)
project_path = project.get_project_path(check_if_exists=False) project_path = project.get_project_path(check_if_exists=False)
if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch): if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):
if os.path.exists(project_path): if os.path.exists(project_path):
@@ -1051,25 +1056,22 @@ class RunJob(SourceControlMixin, BaseTask):
error = _('Job could not start because no Execution Environment could be found.') error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error) self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error) raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
if job.inventory.kind == 'smart': if job.inventory.kind == 'smart':
# cache smart inventory memberships so that the host_filter query is not # cache smart inventory memberships so that the host_filter query is not
# ran inside of the event saving code # ran inside of the event saving code
update_smart_memberships_for_inventory(job.inventory) update_smart_memberships_for_inventory(job.inventory)
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if job.use_fact_cache:
self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
def build_project_dir(self, job, private_data_dir): def build_project_dir(self, job, private_data_dir):
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch) self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
def post_run_hook(self, job, status): def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).post_run_hook(job, status) super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
job.refresh_from_db(fields=['job_env']) if not private_data_dir:
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
# If there's no private data dir, that means we didn't get into the # If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in # actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method # the pre_run_hook method
@@ -1077,11 +1079,9 @@ class RunJob(SourceControlMixin, BaseTask):
if job.use_fact_cache: if job.use_fact_cache:
job.finish_job_fact_cache( job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
self.facts_write_time, fact_modification_times,
) )
def final_run_hook(self, job, status, private_data_dir):
super(RunJob, self).final_run_hook(job, status, private_data_dir)
try: try:
inventory = job.inventory inventory = job.inventory
except Inventory.DoesNotExist: except Inventory.DoesNotExist:
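
Both variants of the fact-cache flow in this hunk rely on the same idea: record when each host's fact file is written at job start, then persist only the hosts whose file changed or disappeared afterwards. A hedged standalone sketch of that comparison; plain dicts and a /tmp path stand in for Host rows and the private data dir.

import json
import os
import time

def start_fact_cache(fact_cache_dir, hosts):
    # Write one JSON file per host and remember each file's mtime.
    os.makedirs(fact_cache_dir, exist_ok=True)
    modification_times = {}
    for name, facts in hosts.items():
        path = os.path.join(fact_cache_dir, name)
        with open(path, 'w') as f:
            f.write(json.dumps(facts))
        modification_times[path] = os.path.getmtime(path)
    return modification_times

def finish_fact_cache(fact_cache_dir, hosts, modification_times):
    # Only hosts whose cache file was rewritten (newer mtime) or removed change.
    changed = {}
    for name in hosts:
        path = os.path.join(fact_cache_dir, name)
        if not os.path.exists(path):
            changed[name] = {}  # playbook cleared this host's cache
            continue
        if os.path.getmtime(path) > modification_times.get(path, 0):
            with open(path) as f:
                changed[name] = json.loads(f.read())
    return changed

hosts = {'h1': {'a': 1}, 'h2': {'b': 2}}
cache_dir = '/tmp/example_fact_cache'
times = start_fact_cache(cache_dir, hosts)
path = os.path.join(cache_dir, 'h2')
with open(path, 'w') as f:
    f.write(json.dumps({'b': 3}))
future = time.time() + 3600
os.utime(path, (future, future))
print(finish_fact_cache(cache_dir, hosts, times))  # only h2 reported as changed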

View File

@@ -61,15 +61,10 @@ def read_receptor_config():
return yaml.safe_load(f) return yaml.safe_load(f)
def work_signing_enabled(config_data): def get_receptor_sockfile():
for section in config_data: data = read_receptor_config()
if 'work-signing' in section:
return True
return False
for section in data:
def get_receptor_sockfile(config_data):
for section in config_data:
for entry_name, entry_data in section.items(): for entry_name, entry_data in section.items():
if entry_name == 'control-service': if entry_name == 'control-service':
if 'filename' in entry_data: if 'filename' in entry_data:
@@ -80,11 +75,12 @@ def get_receptor_sockfile(config_data):
raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile') raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')
def get_tls_client(config_data, use_stream_tls=None): def get_tls_client(use_stream_tls=None):
if not use_stream_tls: if not use_stream_tls:
return None return None
for section in config_data: data = read_receptor_config()
for section in data:
for entry_name, entry_data in section.items(): for entry_name, entry_data in section.items():
if entry_name == 'tls-client': if entry_name == 'tls-client':
if 'name' in entry_data: if 'name' in entry_data:
@@ -92,12 +88,10 @@ def get_tls_client(config_data, use_stream_tls=None):
return None return None
def get_receptor_ctl(config_data=None): def get_receptor_ctl():
if config_data is None: receptor_sockfile = get_receptor_sockfile()
config_data = read_receptor_config()
receptor_sockfile = get_receptor_sockfile(config_data)
try: try:
return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True)) return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(True))
except RuntimeError: except RuntimeError:
return ReceptorControl(receptor_sockfile) return ReceptorControl(receptor_sockfile)
@@ -165,18 +159,15 @@ def run_until_complete(node, timing_data=None, **kwargs):
""" """
Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout. Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout.
""" """
config_data = read_receptor_config() receptor_ctl = get_receptor_ctl()
receptor_ctl = get_receptor_ctl(config_data)
use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS" use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS"
kwargs.setdefault('tlsclient', get_tls_client(config_data, use_stream_tls)) kwargs.setdefault('tlsclient', get_tls_client(use_stream_tls))
kwargs.setdefault('ttl', '20s') kwargs.setdefault('ttl', '20s')
kwargs.setdefault('payload', '') kwargs.setdefault('payload', '')
if work_signing_enabled(config_data):
kwargs['signwork'] = True
transmit_start = time.time() transmit_start = time.time()
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, **kwargs) result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=True, **kwargs)
unit_id = result['unitid'] unit_id = result['unitid']
run_start = time.time() run_start = time.time()
@@ -217,10 +208,7 @@ def run_until_complete(node, timing_data=None, **kwargs):
if state_name.lower() == 'failed': if state_name.lower() == 'failed':
work_detail = status.get('Detail', '') work_detail = status.get('Detail', '')
if work_detail: if work_detail:
if stdout: raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}')
raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}\nstdout:\n{stdout}')
else:
raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}')
else: else:
raise RemoteJobError(f'Unknown ansible-runner error on node {node}, stdout:\n{stdout}') raise RemoteJobError(f'Unknown ansible-runner error on node {node}, stdout:\n{stdout}')
@@ -311,8 +299,7 @@ class AWXReceptorJob:
def run(self): def run(self):
# We establish a connection to the Receptor socket # We establish a connection to the Receptor socket
self.config_data = read_receptor_config() receptor_ctl = get_receptor_ctl()
receptor_ctl = get_receptor_ctl(self.config_data)
res = None res = None
try: try:
@@ -337,7 +324,7 @@ class AWXReceptorJob:
if self.work_type == 'ansible-runner': if self.work_type == 'ansible-runner':
work_submit_kw['node'] = self.task.instance.execution_node work_submit_kw['node'] = self.task.instance.execution_node
use_stream_tls = get_conn_type(work_submit_kw['node'], receptor_ctl).name == "STREAMTLS" use_stream_tls = get_conn_type(work_submit_kw['node'], receptor_ctl).name == "STREAMTLS"
work_submit_kw['tlsclient'] = get_tls_client(self.config_data, use_stream_tls) work_submit_kw['tlsclient'] = get_tls_client(use_stream_tls)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
transmitter_future = executor.submit(self.transmit, sockin) transmitter_future = executor.submit(self.transmit, sockin)
@@ -487,9 +474,7 @@ class AWXReceptorJob:
@property @property
def sign_work(self): def sign_work(self):
if self.work_type in ('ansible-runner', 'local'): return True if self.work_type in ('ansible-runner', 'local') else False
return work_signing_enabled(self.config_data)
return False
@property @property
def work_type(self): def work_type(self):
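
The two sides of this hunk differ mainly in whether receptor.conf is parsed once and passed around, and whether work signing is enabled based on a work-signing section actually being present in that config. A minimal sketch of scanning such a parsed config (a list of single-key mappings); the sample config below is invented for illustration and requires PyYAML.

import yaml

SAMPLE_RECEPTOR_CONF = """
- node:
    id: awx-1
- control-service:
    service: control
    filename: /var/run/receptor/receptor.sock
- work-signing:
    privatekey: /etc/receptor/signing/work-private-key.pem
"""

def work_signing_enabled(config_data):
    # True if any section of the config is a work-signing entry.
    return any('work-signing' in section for section in config_data)

def get_sockfile(config_data):
    for section in config_data:
        for entry_name, entry_data in section.items():
            if entry_name == 'control-service' and 'filename' in entry_data:
                return entry_data['filename']
    raise RuntimeError('no control-service entry with a filename')

config_data = yaml.safe_load(SAMPLE_RECEPTOR_CONF)
print(work_signing_enabled(config_data))  # True
print(get_sockfile(config_data))          # /var/run/receptor/receptor.sock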

View File

@@ -52,7 +52,6 @@ from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import ( from awx.main.utils.common import (
get_type_for_model,
ignore_inventory_computed_fields, ignore_inventory_computed_fields,
ignore_inventory_group_removal, ignore_inventory_group_removal,
ScheduleWorkflowManager, ScheduleWorkflowManager,
@@ -721,43 +720,45 @@ def handle_work_success(task_actual):
@task(queue=get_local_queuename) @task(queue=get_local_queuename)
def handle_work_error(task_actual): def handle_work_error(task_id, *args, **kwargs):
try: subtasks = kwargs.get('subtasks', None)
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
except ObjectDoesNotExist: first_instance = None
logger.warning('Missing {} `{}` in error callback.'.format(task_actual['type'], task_actual['id'])) first_instance_type = ''
return if subtasks is not None:
if not instance: for each_task in subtasks:
return try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warning("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
subtasks = instance.get_jobs_fail_chain() # reverse of dependent_jobs mostly if first_instance is None:
logger.debug(f'Executing error task id {task_actual["id"]}, subtasks: {[subtask.id for subtask in subtasks]}') first_instance = instance
first_instance_type = each_task['type']
deps_of_deps = {} if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status in ('successful', 'failed'):
instance.status = 'failed'
for subtask in subtasks: instance.failed = True
if subtask.celery_task_id != instance.celery_task_id and not subtask.cancel_flag and not subtask.status in ('successful', 'failed'): if not instance.job_explanation:
# If there are multiple in the dependency chain, A->B->C, and this was called for A, blame B for clarity instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
blame_job = deps_of_deps.get(subtask.id, instance) first_instance_type,
subtask.status = 'failed' first_instance.name,
subtask.failed = True first_instance.id,
if not subtask.job_explanation: )
subtask.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % ( instance.save()
get_type_for_model(type(blame_job)), instance.websocket_emit_status("failed")
blame_job.name,
blame_job.id,
)
subtask.save()
subtask.websocket_emit_status("failed")
for sub_subtask in subtask.get_jobs_fail_chain():
deps_of_deps[sub_subtask.id] = subtask
# We only send 1 job complete message since all the job completion message # We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of # handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a # what the job complete message handler does then we may want to send a
# completion event for each job here. # completion event for each job here.
schedule_manager_success_or_error(instance) if first_instance:
schedule_manager_success_or_error(first_instance)
@task(queue=get_local_queuename) @task(queue=get_local_queuename)
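
One side of this hunk reworks handle_work_error to walk the failed task's dependency chain and blame each dependent on its nearest upstream dependency (for A->B->C, C is blamed on B rather than A). A toy sketch of that bookkeeping, using an in-memory job class instead of UnifiedJob.

class FakeJob:
    # Hypothetical stand-in for a unified job with a dependency fail chain.
    def __init__(self, pk, name):
        self.id = pk
        self.name = name
        self.status = 'pending'
        self.job_explanation = ''
        self.dependents = []

    def get_jobs_fail_chain(self):
        return self.dependents


def fail_dependent_chain(root):
    # Blame each failed dependent on its closest upstream dependency.
    deps_of_deps = {}
    for subtask in root.get_jobs_fail_chain():
        blame = deps_of_deps.get(subtask.id, root)
        subtask.status = 'failed'
        if not subtask.job_explanation:
            subtask.job_explanation = f'Previous Task Failed: "{blame.name}" (id {blame.id})'
        for sub_subtask in subtask.get_jobs_fail_chain():
            deps_of_deps[sub_subtask.id] = subtask


# A -> B -> C: C should be blamed on B, not on the root failure A.
a, b, c = FakeJob(1, 'project_update'), FakeJob(2, 'inventory_update'), FakeJob(3, 'job')
b.dependents = [c]
a.dependents = [b, c]
fail_dependent_chain(a)
print(b.job_explanation)  # blames project_update
print(c.job_explanation)  # blames inventory_update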

View File

@@ -3,6 +3,5 @@
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never", "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AWS_ACCESS_KEY_ID": "fooo", "AWS_ACCESS_KEY_ID": "fooo",
"AWS_SECRET_ACCESS_KEY": "fooo", "AWS_SECRET_ACCESS_KEY": "fooo",
"AWS_SECURITY_TOKEN": "fooo", "AWS_SECURITY_TOKEN": "fooo"
"AWS_SESSION_TOKEN": "fooo"
} }

View File

@@ -7,7 +7,7 @@ from awx.main.models.ha import Instance
from django.test.utils import override_settings from django.test.utils import override_settings
INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, node_type='execution', memory=36000000000, cpu_capacity=6, mem_capacity=42) INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, memory=36000000000, cpu_capacity=6, mem_capacity=42)
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -171,17 +171,13 @@ class TestKeyRegeneration:
def test_use_custom_key_with_empty_tower_secret_key_env_var(self): def test_use_custom_key_with_empty_tower_secret_key_env_var(self):
os.environ['TOWER_SECRET_KEY'] = '' os.environ['TOWER_SECRET_KEY'] = ''
with pytest.raises(SystemExit) as e: new_key = call_command('regenerate_secret_key', '--use-custom-key')
call_command('regenerate_secret_key', '--use-custom-key') assert settings.SECRET_KEY != new_key
assert e.type == SystemExit
assert e.value.code == 1
def test_use_custom_key_with_no_tower_secret_key_env_var(self): def test_use_custom_key_with_no_tower_secret_key_env_var(self):
os.environ.pop('TOWER_SECRET_KEY', None) os.environ.pop('TOWER_SECRET_KEY', None)
with pytest.raises(SystemExit) as e: new_key = call_command('regenerate_secret_key', '--use-custom-key')
call_command('regenerate_secret_key', '--use-custom-key') assert settings.SECRET_KEY != new_key
assert e.type == SystemExit
assert e.value.code == 1
def test_with_tower_secret_key_env_var(self): def test_with_tower_secret_key_env_var(self):
custom_key = 'MXSq9uqcwezBOChl/UfmbW1k4op+bC+FQtwPqgJ1u9XV' custom_key = 'MXSq9uqcwezBOChl/UfmbW1k4op+bC+FQtwPqgJ1u9XV'

View File

@@ -4,7 +4,7 @@ from awx.main.models import (
Instance, Instance,
InstanceGroup, InstanceGroup,
) )
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
class TestInstanceGroupInstanceMapping(TransactionTestCase): class TestInstanceGroupInstanceMapping(TransactionTestCase):
@@ -23,10 +23,11 @@ class TestInstanceGroupInstanceMapping(TransactionTestCase):
def test_mapping(self): def test_mapping(self):
self.sample_cluster() self.sample_cluster()
with self.assertNumQueries(3): with self.assertNumQueries(3):
instance_groups = TaskManagerInstanceGroups() instances = TaskManagerInstances([]) # empty task list
instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances)
ig_instance_map = instance_groups.instance_groups ig_instance_map = instance_groups.instance_groups
assert set(i.hostname for i in ig_instance_map['ig_small'].instances) == set(['i1']) assert set(i.hostname for i in ig_instance_map['ig_small']['instances']) == set(['i1'])
assert set(i.hostname for i in ig_instance_map['ig_large'].instances) == set(['i2', 'i3']) assert set(i.hostname for i in ig_instance_map['ig_large']['instances']) == set(['i2', 'i3'])
assert set(i.hostname for i in ig_instance_map['default'].instances) == set(['i2']) assert set(i.hostname for i in ig_instance_map['default']['instances']) == set(['i2'])

View File

@@ -10,10 +10,6 @@ from awx.main.utils import (
create_temporary_fifo, create_temporary_fifo,
) )
from awx.main.scheduler import TaskManager
from . import create_job
@pytest.fixture @pytest.fixture
def containerized_job(default_instance_group, kube_credential, job_template_factory): def containerized_job(default_instance_group, kube_credential, job_template_factory):
@@ -38,50 +34,6 @@ def test_containerized_job(containerized_job):
assert containerized_job.instance_group.credential.kubernetes assert containerized_job.instance_group.credential.kubernetes
@pytest.mark.django_db
def test_max_concurrent_jobs_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
"""Construct a scenario where only 1 job will fit within the max_concurrent_jobs of the container group.
Since max_concurrent_jobs is set to 1, even though 2 jobs are in pending
and would be launched into the container group, only one will be started.
"""
containerized_job.unified_job_template.allow_simultaneous = True
containerized_job.unified_job_template.save()
default_instance_group = containerized_job.instance_group
default_instance_group.max_concurrent_jobs = 1
default_instance_group.save()
task_impact = 1
# Create a second job that should not be scheduled at first, blocked by the other
create_job(containerized_job.unified_job_template)
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
@pytest.mark.django_db
def test_max_forks_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
"""Construct a scenario where only 1 job will fit within the max_forks of the container group.
In this case, we set the container_group max_forks to 10, and make the task_impact of a job 6.
Therefore, only 1 job will fit within the max of 10.
"""
containerized_job.unified_job_template.allow_simultaneous = True
containerized_job.unified_job_template.save()
default_instance_group = containerized_job.instance_group
default_instance_group.max_forks = 10
# Create a second job that should not be scheduled
create_job(containerized_job.unified_job_template)
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = 6
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
tm.schedule()
tm.start_task.assert_called_once()
@pytest.mark.django_db @pytest.mark.django_db
def test_kubectl_ssl_verification(containerized_job, default_job_execution_environment): def test_kubectl_ssl_verification(containerized_job, default_job_execution_environment):
containerized_job.execution_environment = default_job_execution_environment containerized_job.execution_environment = default_job_execution_environment
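
The removed tests describe the scheduler rule that a container group stops admitting work once its max_concurrent_jobs or max_forks budget is used up. A compact sketch of that admission check; the dict fields below are invented stand-ins for the task-manager state, not the AWX InstanceGroup model.

def group_has_room(group, task_impact):
    # Either limit being exceeded blocks the task; a limit of 0 means unlimited.
    if group['max_concurrent_jobs'] and group['jobs_running'] + 1 > group['max_concurrent_jobs']:
        return False
    if group['max_forks'] and group['consumed_forks'] + task_impact > group['max_forks']:
        return False
    return True


group = {'max_concurrent_jobs': 1, 'max_forks': 10, 'jobs_running': 0, 'consumed_forks': 0}
print(group_has_room(group, 6))   # True: the first job fits
group['jobs_running'], group['consumed_forks'] = 1, 6
print(group_has_room(group, 6))   # False: blocked by both limits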

View File

@@ -23,7 +23,7 @@ def test_multi_group_basic_job_launch(instance_factory, controlplane_instance_gr
mock_task_impact.return_value = 500 mock_task_impact.return_value = 500
with mocker.patch("awx.main.scheduler.TaskManager.start_task"): with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_has_calls([mock.call(j1, ig1, i1), mock.call(j2, ig2, i2)]) TaskManager.start_task.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j2, ig2, [], i2)])
@pytest.mark.django_db @pytest.mark.django_db
@@ -54,7 +54,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
DependencyManager().schedule() DependencyManager().schedule()
TaskManager().schedule() TaskManager().schedule()
pu = p.project_updates.first() pu = p.project_updates.first()
TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, controlplane_instance_group.instances.all()[0]) TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, [j1, j2], controlplane_instance_group.instances.all()[0])
pu.finished = pu.created + timedelta(seconds=1) pu.finished = pu.created + timedelta(seconds=1)
pu.status = "successful" pu.status = "successful"
pu.save() pu.save()
@@ -62,8 +62,8 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
DependencyManager().schedule() DependencyManager().schedule()
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_any_call(j1, ig1, i1) TaskManager.start_task.assert_any_call(j1, ig1, [], i1)
TaskManager.start_task.assert_any_call(j2, ig2, i2) TaskManager.start_task.assert_any_call(j2, ig2, [], i2)
assert TaskManager.start_task.call_count == 2 assert TaskManager.start_task.call_count == 2
@@ -75,7 +75,7 @@ def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlpla
wfj.save() wfj.save()
with mocker.patch("awx.main.scheduler.TaskManager.start_task"): with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(wfj, None, None) TaskManager.start_task.assert_called_once_with(wfj, None, [], None)
assert wfj.instance_group is None assert wfj.instance_group is None
@@ -150,7 +150,7 @@ def test_failover_group_run(instance_factory, controlplane_instance_group, mocke
mock_task_impact.return_value = 500 mock_task_impact.return_value = 500
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job: with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule() tm.schedule()
mock_job.assert_has_calls([mock.call(j1, ig1, i1), mock.call(j1_1, ig2, i2)]) mock_job.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j1_1, ig2, [], i2)])
assert mock_job.call_count == 2 assert mock_job.call_count == 2

View File

@@ -18,7 +18,7 @@ def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_grou
j = create_job(objects.job_template) j = create_job(objects.job_template)
with mocker.patch("awx.main.scheduler.TaskManager.start_task"): with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
@pytest.mark.django_db @pytest.mark.django_db
@@ -240,82 +240,12 @@ def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocke
mock_task_impact.return_value = 505 mock_task_impact.return_value = 505
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job: with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule() tm.schedule()
mock_job.assert_called_once_with(j1, controlplane_instance_group, instance) mock_job.assert_called_once_with(j1, controlplane_instance_group, [], instance)
j1.status = "successful" j1.status = "successful"
j1.save() j1.save()
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job: with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule() tm.schedule()
mock_job.assert_called_once_with(j2, controlplane_instance_group, instance) mock_job.assert_called_once_with(j2, controlplane_instance_group, [], instance)
@pytest.mark.django_db
def test_max_concurrent_jobs_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
"""When max_concurrent_jobs of an instance group is more restrictive than capacity of instances, enforce max_concurrent_jobs."""
instance = hybrid_instance
controlplane_instance_group = instance.rampart_groups.first()
# We will expect only 1 job to be started
controlplane_instance_group.max_concurrent_jobs = 1
controlplane_instance_group.save()
num_jobs = 3
jobs = []
for i in range(num_jobs):
jobs.append(
create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
)
tm = TaskManager()
task_impact = 1
# Sanity check that multiple jobs would run if not for the max_concurrent_jobs setting.
assert task_impact * num_jobs < controlplane_instance_group.capacity
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
jobs[0].status = 'running'
jobs[0].controller_node = instance.hostname
jobs[0].execution_node = instance.hostname
jobs[0].instance_group = controlplane_instance_group
jobs[0].save()
# while that job is running, we should not start another job
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_not_called()
# now job is done, we should start one of the two other jobs
jobs[0].status = 'successful'
jobs[0].save()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
@pytest.mark.django_db
def test_max_forks_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
"""When max_forks of an instance group is less than the capacity of instances, enforce max_forks."""
instance = hybrid_instance
controlplane_instance_group = instance.rampart_groups.first()
controlplane_instance_group.max_forks = 15
controlplane_instance_group.save()
task_impact = 10
num_jobs = 2
# Sanity check that 2 jobs would run if not for the max_forks setting.
assert controlplane_instance_group.max_forks < controlplane_instance_group.capacity
assert task_impact * num_jobs > controlplane_instance_group.max_forks
assert task_impact * num_jobs < controlplane_instance_group.capacity
for i in range(num_jobs):
create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
tm = TaskManager()
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
mock_task_impact.return_value = task_impact
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
tm.schedule()
mock_job.assert_called_once()
@pytest.mark.django_db @pytest.mark.django_db
@@ -337,12 +267,12 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
pu = [x for x in p.project_updates.all()] pu = [x for x in p.project_updates.all()]
assert len(pu) == 1 assert len(pu) == 1
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, [j], instance)
pu[0].status = "successful" pu[0].status = "successful"
pu[0].save() pu[0].save()
with mock.patch("awx.main.scheduler.TaskManager.start_task"): with mock.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
@pytest.mark.django_db @pytest.mark.django_db
@@ -365,12 +295,12 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
iu = [x for x in ii.inventory_updates.all()] iu = [x for x in ii.inventory_updates.all()]
assert len(iu) == 1 assert len(iu) == 1
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, [j], instance)
iu[0].status = "successful" iu[0].status = "successful"
iu[0].save() iu[0].save()
with mock.patch("awx.main.scheduler.TaskManager.start_task"): with mock.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
@pytest.mark.django_db @pytest.mark.django_db
@@ -412,7 +342,7 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
mock_iu.assert_not_called() mock_iu.assert_not_called()
with mock.patch("awx.main.scheduler.TaskManager.start_task"): with mock.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
@pytest.mark.django_db @pytest.mark.django_db
@@ -442,7 +372,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
TaskManager().schedule() TaskManager().schedule()
pu = p.project_updates.first() pu = p.project_updates.first()
iu = ii.inventory_updates.first() iu = ii.inventory_updates.first()
TaskManager.start_task.assert_has_calls([mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)]) TaskManager.start_task.assert_has_calls(
[mock.call(iu, controlplane_instance_group, [j1, j2], instance), mock.call(pu, controlplane_instance_group, [j1, j2], instance)]
)
pu.status = "successful" pu.status = "successful"
pu.finished = pu.created + timedelta(seconds=1) pu.finished = pu.created + timedelta(seconds=1)
pu.save() pu.save()
@@ -451,7 +383,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
iu.save() iu.save()
with mock.patch("awx.main.scheduler.TaskManager.start_task"): with mock.patch("awx.main.scheduler.TaskManager.start_task"):
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_has_calls([mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)]) TaskManager.start_task.assert_has_calls(
[mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
)
pu = [x for x in p.project_updates.all()] pu = [x for x in p.project_updates.all()]
iu = [x for x in ii.inventory_updates.all()] iu = [x for x in ii.inventory_updates.all()]
assert len(pu) == 1 assert len(pu) == 1
@@ -475,7 +409,7 @@ def test_job_not_blocking_project_update(controlplane_instance_group, job_templa
project_update.status = "pending" project_update.status = "pending"
project_update.save() project_update.save()
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, [], instance)
@pytest.mark.django_db @pytest.mark.django_db
@@ -499,7 +433,7 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp
DependencyManager().schedule() DependencyManager().schedule()
TaskManager().schedule() TaskManager().schedule()
TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, instance) TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, [], instance)
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from unittest import mock from unittest import mock
from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate, Job from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate
from awx.main.models.activity_stream import ActivityStream from awx.main.models.activity_stream import ActivityStream
from awx.main.models.ha import Instance, InstanceGroup from awx.main.models.ha import Instance, InstanceGroup
from awx.main.tasks.system import apply_cluster_membership_policies from awx.main.tasks.system import apply_cluster_membership_policies
@@ -15,24 +15,6 @@ def test_default_tower_instance_group(default_instance_group, job_factory):
assert default_instance_group in job_factory().preferred_instance_groups assert default_instance_group in job_factory().preferred_instance_groups
@pytest.mark.django_db
@pytest.mark.parametrize('node_type', ('execution', 'control'))
@pytest.mark.parametrize('active', (True, False))
def test_get_cleanup_task_kwargs_active_jobs(node_type, active):
instance = Instance.objects.create(hostname='foobar', node_type=node_type)
job_kwargs = dict()
job_kwargs['controller_node' if node_type == 'control' else 'execution_node'] = instance.hostname
job_kwargs['status'] = 'running' if active else 'successful'
job = Job.objects.create(**job_kwargs)
kwargs = instance.get_cleanup_task_kwargs()
if active:
assert kwargs['exclude_strings'] == [f'awx_{job.pk}_']
else:
assert 'exclude_strings' not in kwargs
@pytest.mark.django_db @pytest.mark.django_db
class TestPolicyTaskScheduling: class TestPolicyTaskScheduling:
"""Tests make assertions about when the policy task gets scheduled""" """Tests make assertions about when the policy task gets scheduled"""

View File

@@ -121,8 +121,8 @@ def test_python_and_js_licenses():
return errors return errors
base_dir = settings.BASE_DIR base_dir = settings.BASE_DIR
api_licenses = index_licenses('%s/../licenses' % base_dir) api_licenses = index_licenses('%s/../docs/licenses' % base_dir)
ui_licenses = index_licenses('%s/../licenses/ui' % base_dir) ui_licenses = index_licenses('%s/../docs/licenses/ui' % base_dir)
api_requirements = read_api_requirements('%s/../requirements' % base_dir) api_requirements = read_api_requirements('%s/../requirements' % base_dir)
ui_requirements = read_ui_requirements('%s/ui' % base_dir) ui_requirements = read_ui_requirements('%s/ui' % base_dir)

View File

@@ -75,7 +75,6 @@ def test_encrypted_subfields(get, post, user, organization):
url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']}) url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']})
response = get(url, u) response = get(url, u)
assert response.data['notification_configuration']['account_token'] == "$encrypted$" assert response.data['notification_configuration']['account_token'] == "$encrypted$"
with mock.patch.object(notification_template_actual.notification_class, "send_messages", assert_send): with mock.patch.object(notification_template_actual.notification_class, "send_messages", assert_send):
notification_template_actual.send("Test", {'body': "Test"}) notification_template_actual.send("Test", {'body': "Test"})
@@ -176,46 +175,3 @@ def test_custom_environment_injection(post, user, organization):
fake_send.side_effect = _send_side_effect fake_send.side_effect = _send_side_effect
template.send('subject', 'message') template.send('subject', 'message')
def mock_post(*args, **kwargs):
class MockGoodResponse:
def __init__(self):
self.status_code = 200
class MockRedirectResponse:
def __init__(self):
self.status_code = 301
self.headers = {"Location": "http://goodendpoint"}
if kwargs['url'] == "http://goodendpoint":
return MockGoodResponse()
else:
return MockRedirectResponse()
@pytest.mark.django_db
@mock.patch('requests.post', side_effect=mock_post)
def test_webhook_notification_pointed_to_a_redirect_launch_endpoint(post, admin, organization):
n1 = NotificationTemplate.objects.create(
name="test-webhook",
description="test webhook",
organization=organization,
notification_type="webhook",
notification_configuration=dict(
url="http://some.fake.url",
disable_ssl_verification=True,
http_method="POST",
headers={
"Content-Type": "application/json",
},
username=admin.username,
password=admin.password,
),
messages={
"success": {"message": "", "body": "{}"},
},
)
assert n1.send("", n1.messages.get("success").get("body")) == 1
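
The removed test stubs requests.post so the first call returns a 301 with a Location header and expects the webhook notification to still be delivered to the redirected endpoint. A rough sketch of following one redirect manually when automatic redirects are disabled; the URLs and helper name are placeholders, not the actual backend code. (With allow_redirects left on, requests would convert a redirected POST into a GET for 301/302, dropping the body.)

import json
import requests

def post_with_manual_redirect(url, payload, max_hops=3, **kwargs):
    # Re-POST to the Location header ourselves instead of letting requests
    # follow the redirect and change the method.
    resp = None
    for _ in range(max_hops):
        resp = requests.post(url=url, data=json.dumps(payload).encode('utf-8'),
                             allow_redirects=False, **kwargs)
        if resp.status_code in (301, 302, 307, 308) and 'Location' in resp.headers:
            url = resp.headers['Location']
            continue
        return resp
    return resp

# Hypothetical usage:
# post_with_manual_redirect('http://some.fake.url', {'body': '{}'},
#                           headers={'Content-Type': 'application/json'}, verify=False)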

View File

@@ -5,8 +5,8 @@ import tempfile
import shutil import shutil
from awx.main.tasks.jobs import RunJob from awx.main.tasks.jobs import RunJob
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files, handle_work_error from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
from awx.main.models import Instance, Job, InventoryUpdate, ProjectUpdate from awx.main.models import Instance, Job
@pytest.fixture @pytest.fixture
@@ -74,17 +74,3 @@ def test_does_not_run_reaped_job(mocker, mock_me):
job.refresh_from_db() job.refresh_from_db()
assert job.status == 'failed' assert job.status == 'failed'
mock_run.assert_not_called() mock_run.assert_not_called()
@pytest.mark.django_db
def test_handle_work_error_nested(project, inventory_source):
pu = ProjectUpdate.objects.create(status='failed', project=project, celery_task_id='1234')
iu = InventoryUpdate.objects.create(status='pending', inventory_source=inventory_source, source='scm')
job = Job.objects.create(status='pending')
iu.dependent_jobs.add(pu)
job.dependent_jobs.add(pu, iu)
handle_work_error({'type': 'project_update', 'id': pu.id})
iu.refresh_from_db()
job.refresh_from_db()
assert iu.job_explanation == f'Previous Task Failed: {{"job_type": "project_update", "job_name": "", "job_id": "{pu.id}"}}'
assert job.job_explanation == f'Previous Task Failed: {{"job_type": "inventory_update", "job_name": "", "job_id": "{iu.id}"}}'

View File

@@ -1,7 +1,10 @@
import pytest import pytest
from unittest import mock
from unittest.mock import Mock
from decimal import Decimal from decimal import Decimal
from awx.main.models import Instance from awx.main.models import InstanceGroup, Instance
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
@pytest.mark.parametrize('capacity_adjustment', [0.0, 0.25, 0.5, 0.75, 1, 1.5, 3]) @pytest.mark.parametrize('capacity_adjustment', [0.0, 0.25, 0.5, 0.75, 1, 1.5, 3])
@@ -14,6 +17,83 @@ def test_capacity_adjustment_no_save(capacity_adjustment):
assert inst.capacity == (float(inst.capacity_adjustment) * abs(inst.mem_capacity - inst.cpu_capacity) + min(inst.mem_capacity, inst.cpu_capacity)) assert inst.capacity == (float(inst.capacity_adjustment) * abs(inst.mem_capacity - inst.cpu_capacity) + min(inst.mem_capacity, inst.cpu_capacity))
def T(impact):
j = mock.Mock(spec_set=['task_impact', 'capacity_type'])
j.task_impact = impact
j.capacity_type = 'execution'
return j
def Is(param):
"""
param:
[remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
[(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
"""
instances = []
if isinstance(param[0], tuple):
for (jobs_running, capacity) in param:
inst = Mock()
inst.capacity = capacity
inst.jobs_running = jobs_running
inst.node_type = 'execution'
instances.append(inst)
else:
for i in param:
inst = Mock()
inst.remaining_capacity = i
inst.node_type = 'execution'
instances.append(inst)
return instances
class TestInstanceGroup(object):
@pytest.mark.parametrize(
'task,instances,instance_fit_index,reason',
[
(T(100), Is([100]), 0, "Only one, pick it"),
(T(100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
(T(100), Is([50, 100]), 1, "First instance not as good as second instance"),
(T(100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
(T(100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
],
)
def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
InstanceGroup(id=10)
tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances}})
instance_picked = tm_igs.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')
if instance_fit_index is None:
assert instance_picked is None, reason
else:
assert instance_picked == instances[instance_fit_index], reason
@pytest.mark.parametrize(
'instances,instance_fit_index,reason',
[
(Is([(0, 100)]), 0, "One idle instance, pick it"),
(Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
(Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
(Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
(Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
],
)
def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
def filter_offline_instances(*args):
return filter(lambda i: i.capacity > 0, instances)
InstanceGroup(id=10)
instances_online_only = filter_offline_instances(instances)
tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances_online_only}})
if instance_fit_index is None:
assert tm_igs.find_largest_idle_instance('controlplane') is None, reason
else:
assert tm_igs.find_largest_idle_instance('controlplane') == instances[instance_fit_index], reason
def test_cleanup_params_defaults(): def test_cleanup_params_defaults():
inst = Instance(hostname='foobar') inst = Instance(hostname='foobar')
assert inst.get_cleanup_task_kwargs(exclude_strings=['awx_423_']) == {'exclude_strings': ['awx_423_'], 'file_pattern': '/tmp/awx_*_*', 'grace_period': 60} assert inst.get_cleanup_task_kwargs(exclude_strings=['awx_423_']) == {'exclude_strings': ['awx_423_'], 'file_pattern': '/tmp/awx_*_*', 'grace_period': 60}
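
The parametrized cases above encode two selection policies: place a task on the instance with the most remaining capacity that can still hold it, and pick the largest fully idle, online instance. A standalone sketch of both policies over simple records, not tied to the AWX task-manager classes.

from collections import namedtuple

Inst = namedtuple('Inst', 'hostname remaining_capacity jobs_running capacity')

def fit_task_to_most_remaining_capacity_instance(task_impact, instances):
    # Among instances that can hold the task, take the one with the most
    # remaining capacity; ties go to the first listed.
    best = None
    for inst in instances:
        if inst.remaining_capacity < task_impact:
            continue
        if best is None or inst.remaining_capacity > best.remaining_capacity:
            best = inst
    return best

def find_largest_idle_instance(instances):
    # Largest instance that is online (capacity > 0) and running nothing.
    best = None
    for inst in instances:
        if inst.capacity <= 0 or inst.jobs_running:
            continue
        if best is None or inst.capacity > best.capacity:
            best = inst
    return best

instances = [
    Inst('i1', 50, 0, 100),
    Inst('i2', 0, 1, 500),
    Inst('i3', 120, 0, 700),
]
print(fit_task_to_most_remaining_capacity_instance(100, instances).hostname)  # i3
print(find_largest_idle_instance(instances).hostname)                         # i3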

View File

@@ -36,14 +36,15 @@ def job(mocker, hosts, inventory):
def test_start_job_fact_cache(hosts, job, inventory, tmpdir): def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
for host in hosts: for host in hosts:
filepath = os.path.join(fact_cache, host.name) filepath = os.path.join(fact_cache, host.name)
assert os.path.exists(filepath) assert os.path.exists(filepath)
with open(filepath, 'r') as f: with open(filepath, 'r') as f:
assert f.read() == json.dumps(host.ansible_facts) assert f.read() == json.dumps(host.ansible_facts)
assert os.path.getmtime(filepath) <= last_modified assert filepath in modified_times
def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker): def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
@@ -57,16 +58,18 @@ def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
) )
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
job.start_job_fact_cache(fact_cache, timeout=0) job.start_job_fact_cache(fact_cache, {}, 0)
# a file called "foo" should _not_ be written outside the facts dir # a file called "foo" should _not_ be written outside the facts dir
assert os.listdir(os.path.join(fact_cache, '..')) == ['facts'] assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']
def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir): def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') for h in hosts:
h.save = mocker.Mock()
ansible_facts_new = {"foo": "bar"} ansible_facts_new = {"foo": "bar"}
filepath = os.path.join(fact_cache, hosts[1].name) filepath = os.path.join(fact_cache, hosts[1].name)
@@ -80,20 +83,23 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
new_modification_time = time.time() + 3600 new_modification_time = time.time() + 3600
os.utime(filepath, (new_modification_time, new_modification_time)) os.utime(filepath, (new_modification_time, new_modification_time))
job.finish_job_fact_cache(fact_cache, last_modified) job.finish_job_fact_cache(fact_cache, modified_times)
for host in (hosts[0], hosts[2], hosts[3]): for host in (hosts[0], hosts[2], hosts[3]):
host.save.assert_not_called()
assert host.ansible_facts == {"a": 1, "b": 2} assert host.ansible_facts == {"a": 1, "b": 2}
assert host.ansible_facts_modified is None assert host.ansible_facts_modified is None
assert hosts[1].ansible_facts == ansible_facts_new assert hosts[1].ansible_facts == ansible_facts_new
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified']) hosts[1].save.assert_called_once_with(update_fields=['ansible_facts', 'ansible_facts_modified'])
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir): def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') for h in hosts:
h.save = mocker.Mock()
for h in hosts: for h in hosts:
filepath = os.path.join(fact_cache, h.name) filepath = os.path.join(fact_cache, h.name)
@@ -103,22 +109,26 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
new_modification_time = time.time() + 3600 new_modification_time = time.time() + 3600
os.utime(filepath, (new_modification_time, new_modification_time)) os.utime(filepath, (new_modification_time, new_modification_time))
job.finish_job_fact_cache(fact_cache, last_modified) job.finish_job_fact_cache(fact_cache, modified_times)
bulk_update.assert_not_called() for h in hosts:
h.save.assert_not_called()
def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir): def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified = job.start_job_fact_cache(fact_cache, timeout=0) modified_times = {}
job.start_job_fact_cache(fact_cache, modified_times, 0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') for h in hosts:
h.save = mocker.Mock()
os.remove(os.path.join(fact_cache, hosts[1].name)) os.remove(os.path.join(fact_cache, hosts[1].name))
job.finish_job_fact_cache(fact_cache, last_modified) job.finish_job_fact_cache(fact_cache, modified_times)
for host in (hosts[0], hosts[2], hosts[3]): for host in (hosts[0], hosts[2], hosts[3]):
host.save.assert_not_called()
assert host.ansible_facts == {"a": 1, "b": 2} assert host.ansible_facts == {"a": 1, "b": 2}
assert host.ansible_facts_modified is None assert host.ansible_facts_modified is None
assert hosts[1].ansible_facts == {} assert hosts[1].ansible_facts == {}
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified']) hosts[1].save.assert_called_once_with()
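
One variant of these tests expects changed hosts to be written back with a single QuerySet.bulk_update call rather than one save() per host. A hedged sketch of that pattern against a generic Django model; the Host class and field names are assumptions for illustration, not the exact AWX code, and a configured Django project is assumed.

from django.utils import timezone

def persist_changed_facts(Host, hosts, changed_facts):
    # hosts: loaded Host instances; changed_facts: {hostname: new facts dict}.
    hosts_to_update = []
    for host in hosts:
        new_facts = changed_facts.get(host.name)
        if new_facts is None or new_facts == host.ansible_facts:
            continue
        host.ansible_facts = new_facts
        host.ansible_facts_modified = timezone.now()
        hosts_to_update.append(host)
    if hosts_to_update:
        # One batched UPDATE instead of one save() per changed host.
        Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
    return hosts_to_update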

View File

@@ -27,12 +27,11 @@ def test_send_messages_as_POST():
] ]
) )
requests_mock.post.assert_called_once_with( requests_mock.post.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=None, auth=None,
data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'}, headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
verify=True, verify=True,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1
@@ -58,12 +57,11 @@ def test_send_messages_as_PUT():
] ]
) )
requests_mock.put.assert_called_once_with( requests_mock.put.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=None, auth=None,
data=json.dumps({'text': 'test body 2'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body 2'}, ensure_ascii=False).encode('utf-8'),
headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'}, headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
verify=True, verify=True,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1
@@ -89,12 +87,11 @@ def test_send_messages_with_username():
] ]
) )
requests_mock.post.assert_called_once_with( requests_mock.post.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=('userstring', None), auth=('userstring', None),
data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'}, headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
verify=True, verify=True,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1
@@ -120,12 +117,11 @@ def test_send_messages_with_password():
] ]
) )
requests_mock.post.assert_called_once_with( requests_mock.post.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=(None, 'passwordstring'), auth=(None, 'passwordstring'),
data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'}, headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
verify=True, verify=True,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1
@@ -151,12 +147,11 @@ def test_send_messages_with_username_and_password():
] ]
) )
requests_mock.post.assert_called_once_with( requests_mock.post.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=('userstring', 'passwordstring'), auth=('userstring', 'passwordstring'),
data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'}, headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
verify=True, verify=True,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1
@@ -182,12 +177,11 @@ def test_send_messages_with_no_verify_ssl():
] ]
) )
requests_mock.post.assert_called_once_with( requests_mock.post.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=None, auth=None,
data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'}, headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
verify=False, verify=False,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1
@@ -213,7 +207,7 @@ def test_send_messages_with_additional_headers():
] ]
) )
requests_mock.post.assert_called_once_with( requests_mock.post.assert_called_once_with(
url='http://example.com', 'http://example.com',
auth=None, auth=None,
data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'), data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
headers={ headers={
@@ -223,6 +217,5 @@ def test_send_messages_with_additional_headers():
'X-Test-Header2': 'test-content-2', 'X-Test-Header2': 'test-content-2',
}, },
verify=True, verify=True,
allow_redirects=False,
) )
assert sent_messages == 1 assert sent_messages == 1

View File

@@ -1,6 +1,6 @@
import pytest import pytest
from awx.main.scheduler.task_manager_models import TaskManagerModels from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
class FakeMeta(object): class FakeMeta(object):
@@ -16,64 +16,38 @@ class FakeObject(object):
class Job(FakeObject): class Job(FakeObject):
def __init__(self, **kwargs): task_impact = 43
self.task_impact = kwargs.get('task_impact', 43) is_container_group_task = False
self.is_container_group_task = kwargs.get('is_container_group_task', False) controller_node = ''
self.controller_node = kwargs.get('controller_node', '') execution_node = ''
self.execution_node = kwargs.get('execution_node', '')
self.instance_group = kwargs.get('instance_group', None)
self.instance_group_id = self.instance_group.id if self.instance_group else None
self.capacity_type = kwargs.get('capacity_type', 'execution')
def log_format(self): def log_format(self):
return 'job 382 (fake)' return 'job 382 (fake)'
class Instances(FakeObject):
def add(self, *args):
for instance in args:
self.obj.instance_list.append(instance)
def all(self):
return self.obj.instance_list
class InstanceGroup(FakeObject):
def __init__(self, **kwargs):
super(InstanceGroup, self).__init__(**kwargs)
self.instance_list = []
self.pk = self.id = kwargs.get('id', 1)
@property
def instances(self):
mgr = Instances(obj=self)
return mgr
@property
def is_container_group(self):
return False
@property
def max_concurrent_jobs(self):
return 0
@property
def max_forks(self):
return 0
class Instance(FakeObject):
def __init__(self, **kwargs):
self.node_type = kwargs.get('node_type', 'hybrid')
self.capacity = kwargs.get('capacity', 0)
self.hostname = kwargs.get('hostname', 'fakehostname')
self.consumed_capacity = 0
self.jobs_running = 0
@pytest.fixture @pytest.fixture
def sample_cluster(): def sample_cluster():
def stand_up_cluster(): def stand_up_cluster():
class Instances(FakeObject):
def add(self, *args):
for instance in args:
self.obj.instance_list.append(instance)
def all(self):
return self.obj.instance_list
class InstanceGroup(FakeObject):
def __init__(self, **kwargs):
super(InstanceGroup, self).__init__(**kwargs)
self.instance_list = []
@property
def instances(self):
mgr = Instances(obj=self)
return mgr
class Instance(FakeObject):
pass
ig_small = InstanceGroup(name='ig_small') ig_small = InstanceGroup(name='ig_small')
ig_large = InstanceGroup(name='ig_large') ig_large = InstanceGroup(name='ig_large')
@@ -92,12 +66,14 @@ def sample_cluster():
@pytest.fixture @pytest.fixture
def create_ig_manager(): def create_ig_manager():
def _rf(ig_list, tasks): def _rf(ig_list, tasks):
tm_models = TaskManagerModels.init_with_consumed_capacity( instances = TaskManagerInstances(tasks, instances=set(inst for ig in ig_list for inst in ig.instance_list))
tasks=tasks,
instances=set(inst for ig in ig_list for inst in ig.instance_list), seed_igs = {}
instance_groups=ig_list, for ig in ig_list:
) seed_igs[ig.name] = {'instances': [instances[inst.hostname] for inst in ig.instance_list]}
return tm_models.instance_groups
instance_groups = TaskManagerInstanceGroups(instance_groups=seed_igs)
return instance_groups
return _rf return _rf
@@ -150,75 +126,3 @@ def test_RBAC_reduced_filter(sample_cluster, create_ig_manager):
# Cross-links between groups not visible to current user, # Cross-links between groups not visible to current user,
# so a naive accounting of capacities is returned instead # so a naive accounting of capacities is returned instead
assert instance_groups_mgr.get_consumed_capacity('default') == 43 assert instance_groups_mgr.get_consumed_capacity('default') == 43
def Is(param):
"""
param:
[remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
[(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
"""
instances = []
if isinstance(param[0], tuple):
for index, (jobs_running, capacity) in enumerate(param):
inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
inst.jobs_running = jobs_running
instances.append(inst)
else:
for index, capacity in enumerate(param):
inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
inst.node_type = 'execution'
instances.append(inst)
return instances
class TestSelectBestInstanceForTask(object):
@pytest.mark.parametrize(
'task,instances,instance_fit_index,reason',
[
(Job(task_impact=100), Is([100]), 0, "Only one, pick it"),
(Job(task_impact=100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
(Job(task_impact=100), Is([50, 100]), 1, "First instance not as good as second instance"),
(Job(task_impact=100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
(Job(task_impact=100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
],
)
def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
ig = InstanceGroup(id=10, name='controlplane')
tasks = []
for instance in instances:
ig.instances.add(instance)
for _ in range(instance.jobs_running):
tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
instance_picked = tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')
if instance_fit_index is None:
assert instance_picked is None, reason
else:
assert instance_picked.hostname == instances[instance_fit_index].hostname, reason
@pytest.mark.parametrize(
'instances,instance_fit_index,reason',
[
(Is([(0, 100)]), 0, "One idle instance, pick it"),
(Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
(Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
(Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
(Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
],
)
def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
ig = InstanceGroup(id=10, name='controlplane')
tasks = []
for instance in instances:
ig.instances.add(instance)
for _ in range(instance.jobs_running):
tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
if instance_fit_index is None:
assert tm_models.instance_groups.find_largest_idle_instance('controlplane') is None, reason
else:
assert tm_models.instance_groups.find_largest_idle_instance('controlplane').hostname == instances[instance_fit_index].hostname, reason
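Taken together, the parametrized reasons above describe the selection rule that fit_task_to_most_remaining_capacity_instance is expected to follow. A simplified restatement under that assumption (illustrative only, not the production implementation): among instances whose remaining capacity fits the task, pick the one with the most remaining capacity, with the earliest index winning ties.

def pick_instance_index(task_impact, remaining_capacities):
    # Illustrative restatement of the rule the cases above exercise.
    best = None
    for index, remaining in enumerate(remaining_capacities):
        if remaining < task_impact:
            continue
        if best is None or remaining > remaining_capacities[best]:
            best = index
    return best

# Matches the parametrized expectations above:
assert pick_instance_index(100, [50, 100]) == 1
assert pick_instance_index(100, [50, 0, 20, 100, 100, 100, 30, 20]) == 3
assert pick_instance_index(100, [50, 0, 20, 99, 11, 1, 5, 99]) is None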

View File

@@ -11,12 +11,11 @@ import os
import subprocess import subprocess
import re import re
import stat import stat
import sys
import urllib.parse import urllib.parse
import threading import threading
import contextlib import contextlib
import tempfile import tempfile
import functools from functools import reduce, wraps
# Django # Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
@@ -74,7 +73,6 @@ __all__ = [
'NullablePromptPseudoField', 'NullablePromptPseudoField',
'model_instance_diff', 'model_instance_diff',
'parse_yaml_or_json', 'parse_yaml_or_json',
'is_testing',
'RequireDebugTrueOrTest', 'RequireDebugTrueOrTest',
'has_model_field_prefetched', 'has_model_field_prefetched',
'set_environ', 'set_environ',
@@ -90,7 +88,6 @@ __all__ = [
'deepmerge', 'deepmerge',
'get_event_partition_epoch', 'get_event_partition_epoch',
'cleanup_new_process', 'cleanup_new_process',
'log_excess_runtime',
] ]
@@ -147,19 +144,6 @@ def underscore_to_camelcase(s):
return ''.join(x.capitalize() or '_' for x in s.split('_')) return ''.join(x.capitalize() or '_' for x in s.split('_'))
@functools.cache
def is_testing(argv=None):
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
class RequireDebugTrueOrTest(logging.Filter): class RequireDebugTrueOrTest(logging.Filter):
""" """
Logging filter to output when in DEBUG mode or running tests. Logging filter to output when in DEBUG mode or running tests.
@@ -168,7 +152,7 @@ class RequireDebugTrueOrTest(logging.Filter):
def filter(self, record): def filter(self, record):
from django.conf import settings from django.conf import settings
return settings.DEBUG or is_testing() return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError): class IllegalArgumentError(ValueError):
@@ -190,7 +174,7 @@ def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
cache = cache or get_memoize_cache() cache = cache or get_memoize_cache()
def memoize_decorator(f): def memoize_decorator(f):
@functools.wraps(f) @wraps(f)
def _memoizer(*args, **kwargs): def _memoizer(*args, **kwargs):
if track_function: if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs)) cache_dict_key = slugify('%r %r' % (args, kwargs))
@@ -1008,7 +992,7 @@ def getattrd(obj, name, default=NoDefaultProvided):
""" """
try: try:
return functools.reduce(getattr, name.split("."), obj) return reduce(getattr, name.split("."), obj)
except AttributeError: except AttributeError:
if default != NoDefaultProvided: if default != NoDefaultProvided:
return default return default
@@ -1204,7 +1188,7 @@ def cleanup_new_process(func):
Cleanup django connection, cache connection, before executing new thread or processes entry point, func. Cleanup django connection, cache connection, before executing new thread or processes entry point, func.
""" """
@functools.wraps(func) @wraps(func)
def wrapper_cleanup_new_process(*args, **kwargs): def wrapper_cleanup_new_process(*args, **kwargs):
from awx.conf.settings import SettingsWrapper # noqa from awx.conf.settings import SettingsWrapper # noqa
@@ -1216,30 +1200,15 @@ def cleanup_new_process(func):
return wrapper_cleanup_new_process return wrapper_cleanup_new_process
def log_excess_runtime(func_logger, cutoff=5.0, debug_cutoff=5.0, msg=None, add_log_data=False): def log_excess_runtime(func_logger, cutoff=5.0):
def log_excess_runtime_decorator(func): def log_excess_runtime_decorator(func):
@functools.wraps(func) @wraps(func)
def _new_func(*args, **kwargs): def _new_func(*args, **kwargs):
start_time = time.time() start_time = time.time()
log_data = {'name': repr(func.__name__)} return_value = func(*args, **kwargs)
delta = time.time() - start_time
if add_log_data: if delta > cutoff:
return_value = func(*args, log_data=log_data, **kwargs) logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
else:
return_value = func(*args, **kwargs)
log_data['delta'] = time.time() - start_time
if isinstance(return_value, dict):
log_data.update(return_value)
if msg is None:
record_msg = 'Running {name} took {delta:.2f}s'
else:
record_msg = msg
if log_data['delta'] > cutoff:
func_logger.info(record_msg.format(**log_data))
elif log_data['delta'] > debug_cutoff:
func_logger.debug(record_msg.format(**log_data))
return return_value return return_value
return _new_func return _new_func
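The richer variant of log_excess_runtime in this hunk adds a debug_cutoff, a custom msg template, and an add_log_data flag that passes a mutable log_data dict into the wrapped function so it can contribute fields to the log line. A small usage sketch under those assumptions (the decorated function, its logger, and the field names are illustrative, not from this diff; log_excess_runtime itself is assumed importable from the utils module edited above):

import logging
import time

logger = logging.getLogger('awx.analytics.performance')

@log_excess_runtime(
    logger,
    cutoff=2.0,
    debug_cutoff=0.5,
    msg='Refresh of {name} took {delta:.2f}s, updated {updated} records',
    add_log_data=True,
)
def refresh_cache(log_data=None):
    # The decorator injects log_data; the function adds the fields msg refers to.
    time.sleep(1.0)  # simulate work that exceeds debug_cutoff but not cutoff
    log_data['updated'] = 12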

View File

@@ -110,7 +110,7 @@ if settings.COLOR_LOGS is True:
# logs rendered with cyan text # logs rendered with cyan text
previous_level_map = self.level_map.copy() previous_level_map = self.level_map.copy()
if record.name == "awx.analytics.job_lifecycle": if record.name == "awx.analytics.job_lifecycle":
self.level_map[logging.INFO] = (None, 'cyan', True) self.level_map[logging.DEBUG] = (None, 'cyan', True)
msg = super(ColorHandler, self).colorize(line, record) msg = super(ColorHandler, self).colorize(line, record)
self.level_map = previous_level_map self.level_map = previous_level_map
return msg return msg

View File

@@ -118,7 +118,7 @@ class WebsocketTask:
logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.") logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
except Exception as e: except Exception as e:
# Early on, this is our canary. I'm not sure what exceptions we can really encounter. # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
logger.exception(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.") logger.warning(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
else: else:
logger.warning(f"Connection from {self.name} to {self.remote_host} list.") logger.warning(f"Connection from {self.name} to {self.remote_host} list.")

View File

@@ -10,6 +10,28 @@ import socket
from datetime import timedelta from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def is_testing(argv=None):
import sys
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
def IS_TESTING(argv=None):
return is_testing(argv)
if "pytest" in sys.modules: if "pytest" in sys.modules:
from unittest import mock from unittest import mock
@@ -18,13 +40,9 @@ if "pytest" in sys.modules:
else: else:
import ldap import ldap
DEBUG = True DEBUG = True
SQL_DEBUG = DEBUG SQL_DEBUG = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# FIXME: it would be nice to cycle back around and allow this to be # FIXME: it would be nice to cycle back around and allow this to be
# BigAutoField going forward, but we'd have to be explicit about our # BigAutoField going forward, but we'd have to be explicit about our
# existing models. # existing models.
@@ -83,7 +101,7 @@ USE_L10N = True
USE_TZ = True USE_TZ = True
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'ui', 'build', 'static'), os.path.join(BASE_DIR, 'static')] STATICFILES_DIRS = (os.path.join(BASE_DIR, 'ui', 'build', 'static'), os.path.join(BASE_DIR, 'static'))
# Absolute filesystem path to the directory where static file are collected via # Absolute filesystem path to the directory where static file are collected via
# the collectstatic command. # the collectstatic command.
@@ -236,14 +254,6 @@ START_TASK_LIMIT = 100
TASK_MANAGER_TIMEOUT = 300 TASK_MANAGER_TIMEOUT = 300
TASK_MANAGER_TIMEOUT_GRACE_PERIOD = 60 TASK_MANAGER_TIMEOUT_GRACE_PERIOD = 60
# Number of seconds _in addition to_ the task manager timeout a job can stay
# in waiting without being reaped
JOB_WAITING_GRACE_PERIOD = 60
# Number of seconds after a container group job finished time to wait
# before the awx_k8s_reaper task will tear down the pods
K8S_POD_REAPER_GRACE_PERIOD = 60
# Disallow sending session cookies over insecure connections # Disallow sending session cookies over insecure connections
SESSION_COOKIE_SECURE = True SESSION_COOKIE_SECURE = True
@@ -304,13 +314,11 @@ INSTALLED_APPS = [
'django.contrib.messages', 'django.contrib.messages',
'django.contrib.sessions', 'django.contrib.sessions',
'django.contrib.sites', 'django.contrib.sites',
# daphne has to be installed before django.contrib.staticfiles for the app to startup
# According to channels 4.0 docs you install daphne instead of channels now
'daphne',
'django.contrib.staticfiles', 'django.contrib.staticfiles',
'oauth2_provider', 'oauth2_provider',
'rest_framework', 'rest_framework',
'django_extensions', 'django_extensions',
'channels',
'polymorphic', 'polymorphic',
'taggit', 'taggit',
'social_django', 'social_django',
@@ -853,7 +861,6 @@ LOGGING = {
'awx.main.signals': {'level': 'INFO'}, # very verbose debug-level logs 'awx.main.signals': {'level': 'INFO'}, # very verbose debug-level logs
'awx.api.permissions': {'level': 'INFO'}, # very verbose debug-level logs 'awx.api.permissions': {'level': 'INFO'}, # very verbose debug-level logs
'awx.analytics': {'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False}, 'awx.analytics': {'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False},
'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsbroadcast', 'external_logger'], 'level': 'INFO', 'propagate': False},
'awx.analytics.performance': {'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', 'propagate': False}, 'awx.analytics.performance': {'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', 'propagate': False},
'awx.analytics.job_lifecycle': {'handlers': ['console', 'job_lifecycle'], 'level': 'DEBUG', 'propagate': False}, 'awx.analytics.job_lifecycle': {'handlers': ['console', 'job_lifecycle'], 'level': 'DEBUG', 'propagate': False},
'django_auth_ldap': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG'}, 'django_auth_ldap': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG'},
@@ -986,13 +993,6 @@ DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}
DEFAULT_EXECUTION_QUEUE_NAME = 'default' DEFAULT_EXECUTION_QUEUE_NAME = 'default'
# pod spec used when the default execution queue is a container group, e.g. when deploying on k8s/ocp with the operator # pod spec used when the default execution queue is a container group, e.g. when deploying on k8s/ocp with the operator
DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE = '' DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE = ''
# Max number of concurrently consumed forks for the default execution queue
# Zero means no limit
DEFAULT_EXECUTION_QUEUE_MAX_FORKS = 0
# Max number of concurrently running jobs for the default execution queue
# Zero means no limit
DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS = 0
# Name of the default controlplane queue # Name of the default controlplane queue
DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane' DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane'
@@ -1004,5 +1004,16 @@ DEFAULT_CONTAINER_RUN_OPTIONS = ['--network', 'slirp4netns:enable_ipv6=true']
# Mount exposed paths as hostPath resource in k8s/ocp # Mount exposed paths as hostPath resource in k8s/ocp
AWX_MOUNT_ISOLATED_PATHS_ON_K8S = False AWX_MOUNT_ISOLATED_PATHS_ON_K8S = False
# Time out task managers if they take longer than this many seconds
TASK_MANAGER_TIMEOUT = 300
# Number of seconds _in addition to_ the task manager timeout a job can stay
# in waiting without being reaped
JOB_WAITING_GRACE_PERIOD = 60
# Number of seconds after a container group job finished time to wait
# before the awx_k8s_reaper task will tear down the pods
K8S_POD_REAPER_GRACE_PERIOD = 60
# This is overridden downstream via /etc/tower/conf.d/cluster_host_id.py # This is overridden downstream via /etc/tower/conf.d/cluster_host_id.py
CLUSTER_HOST_ID = socket.gethostname() CLUSTER_HOST_ID = socket.gethostname()

View File

@@ -114,7 +114,7 @@ if 'sqlite3' not in DATABASES['default']['ENGINE']: # noqa
# this needs to stay at the bottom of this file # this needs to stay at the bottom of this file
try: try:
if os.getenv('AWX_KUBE_DEVEL', False): if os.getenv('AWX_KUBE_DEVEL', False):
include(optional('development_kube.py'), scope=locals()) include(optional('minikube.py'), scope=locals())
else: else:
include(optional('local_*.py'), scope=locals()) include(optional('local_*.py'), scope=locals())
except ImportError: except ImportError:

View File

@@ -1,4 +1,4 @@
BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖' BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖'
BROADCAST_WEBSOCKET_PORT = 8052 BROADCAST_WEBSOCKET_PORT = 8013
BROADCAST_WEBSOCKET_VERIFY_CERT = False BROADCAST_WEBSOCKET_VERIFY_CERT = False
BROADCAST_WEBSOCKET_PROTOCOL = 'http' BROADCAST_WEBSOCKET_PROTOCOL = 'http'

115 awx/ui/package-lock.json (generated)
View File

@@ -7,9 +7,9 @@
"name": "ui", "name": "ui",
"dependencies": { "dependencies": {
"@lingui/react": "3.14.0", "@lingui/react": "3.14.0",
"@patternfly/patternfly": "4.217.1", "@patternfly/patternfly": "4.210.2",
"@patternfly/react-core": "^4.264.0", "@patternfly/react-core": "^4.239.0",
"@patternfly/react-icons": "4.92.10", "@patternfly/react-icons": "4.90.0",
"@patternfly/react-table": "4.108.0", "@patternfly/react-table": "4.108.0",
"ace-builds": "^1.10.1", "ace-builds": "^1.10.1",
"ansi-to-html": "0.7.2", "ansi-to-html": "0.7.2",
@@ -22,7 +22,7 @@
"has-ansi": "5.0.1", "has-ansi": "5.0.1",
"html-entities": "2.3.2", "html-entities": "2.3.2",
"js-yaml": "4.1.0", "js-yaml": "4.1.0",
"luxon": "^3.1.1", "luxon": "^3.0.3",
"prop-types": "^15.8.1", "prop-types": "^15.8.1",
"react": "17.0.2", "react": "17.0.2",
"react-ace": "^10.1.0", "react-ace": "^10.1.0",
@@ -3747,35 +3747,26 @@
"dev": true "dev": true
}, },
"node_modules/@patternfly/patternfly": { "node_modules/@patternfly/patternfly": {
"version": "4.217.1", "version": "4.210.2",
"resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.217.1.tgz", "resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.210.2.tgz",
"integrity": "sha512-uN7JgfQsyR16YHkuGRCTIcBcnyKIqKjGkB2SGk9x1XXH3yYGenL83kpAavX9Xtozqp17KppOlybJuzcKvZMrgw==" "integrity": "sha512-aZiW24Bxi6uVmk5RyNTp+6q6ThtlJZotNRJfWVeGuwu1UlbBuV4DFa1bpjA6jfTZpfEpX2YL5+R+4ZVSCFAVdw=="
}, },
"node_modules/@patternfly/react-core": { "node_modules/@patternfly/react-core": {
"version": "4.264.0", "version": "4.239.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.264.0.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.239.0.tgz",
"integrity": "sha512-tK0BMWxw8nhukev40HZ6q6d02pDnjX7oyA91vHa18aakJUKBWMaerqpG4NZVMoh0tPKX3aLNj+zyCwDALFAZZw==", "integrity": "sha512-6CmYABCJLUXTlzCk6C3WouMNZpS0BCT+aHU8CvYpFQ/NrpYp3MJaDsYbqgCRWV42rmIO5iXun/4WhXBJzJEoQg==",
"dependencies": { "dependencies": {
"@patternfly/react-icons": "^4.93.0", "@patternfly/react-icons": "^4.90.0",
"@patternfly/react-styles": "^4.92.0", "@patternfly/react-styles": "^4.89.0",
"@patternfly/react-tokens": "^4.94.0", "@patternfly/react-tokens": "^4.91.0",
"focus-trap": "6.9.2", "focus-trap": "6.9.2",
"react-dropzone": "9.0.0", "react-dropzone": "9.0.0",
"tippy.js": "5.1.2", "tippy.js": "5.1.2",
"tslib": "^2.0.0" "tslib": "^2.0.0"
}, },
"peerDependencies": { "peerDependencies": {
"react": "^16.8 || ^17 || ^18", "react": "^16.8.0 || ^17.0.0",
"react-dom": "^16.8 || ^17 || ^18" "react-dom": "^16.8.0 || ^17.0.0"
}
},
"node_modules/@patternfly/react-core/node_modules/@patternfly/react-icons": {
"version": "4.93.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.93.0.tgz",
"integrity": "sha512-OH0vORVioL+HLWMEog8/3u8jsiMCeJ0pFpvRKRhy5Uk4CdAe40k1SOBvXJP6opr+O8TLbz0q3bm8Jsh/bPaCuQ==",
"peerDependencies": {
"react": "^16.8 || ^17 || ^18",
"react-dom": "^16.8 || ^17 || ^18"
} }
}, },
"node_modules/@patternfly/react-core/node_modules/tslib": { "node_modules/@patternfly/react-core/node_modules/tslib": {
@@ -3784,18 +3775,18 @@
"integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw==" "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw=="
}, },
"node_modules/@patternfly/react-icons": { "node_modules/@patternfly/react-icons": {
"version": "4.92.10", "version": "4.90.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.92.10.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.90.0.tgz",
"integrity": "sha512-vwCy7b+OyyuvLDSLqLUG2DkJZgMDogjld8tJTdAaG8HiEhC1sJPZac+5wD7AuS3ym/sQolS4vYtNiVDnMEORxA==", "integrity": "sha512-qEnQKbxbUgyiosiKSkeKEBwmhgJwWEqniIAFyoxj+kpzAdeu7ueWe5iBbqo06mvDOedecFiM5mIE1N0MXwk8Yw==",
"peerDependencies": { "peerDependencies": {
"react": "^16.8 || ^17 || ^18", "react": "^16.8.0 || ^17.0.0",
"react-dom": "^16.8 || ^17 || ^18" "react-dom": "^16.8.0 || ^17.0.0"
} }
}, },
"node_modules/@patternfly/react-styles": { "node_modules/@patternfly/react-styles": {
"version": "4.92.0", "version": "4.89.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.92.0.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.89.0.tgz",
"integrity": "sha512-B/f6iyu8UEN1+wRxdC4sLIhvJeyL8SqInDXZmwOIqK8uPJ8Lze7qrbVhkkVzbMF37/oDPVa6dZH8qZFq062LEA==" "integrity": "sha512-SkT+qx3Xqu70T5s+i/AUT2hI2sKAPDX4ffeiJIUDu/oyWiFdk+/9DEivnLSyJMruroXXN33zKibvzb5rH7DKTQ=="
}, },
"node_modules/@patternfly/react-table": { "node_modules/@patternfly/react-table": {
"version": "4.108.0", "version": "4.108.0",
@@ -3820,9 +3811,9 @@
"integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
}, },
"node_modules/@patternfly/react-tokens": { "node_modules/@patternfly/react-tokens": {
"version": "4.94.0", "version": "4.91.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.94.0.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.91.0.tgz",
"integrity": "sha512-fYXxUJZnzpn89K2zzHF0cSncZZVGKrohdb5f5T1wzxwU2NZPVGpvr88xhm+V2Y/fSrrTPwXcP3IIdtNOOtJdZw==" "integrity": "sha512-QeQCy8o8E/16fAr8mxqXIYRmpTsjCHJXi5p5jmgEDFmYMesN6Pqfv6N5D0FHb+CIaNOZWRps7GkWvlIMIE81sw=="
}, },
"node_modules/@pmmmwh/react-refresh-webpack-plugin": { "node_modules/@pmmmwh/react-refresh-webpack-plugin": {
"version": "0.5.4", "version": "0.5.4",
@@ -15477,9 +15468,9 @@
} }
}, },
"node_modules/luxon": { "node_modules/luxon": {
"version": "3.1.1", "version": "3.0.3",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.1.1.tgz", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz",
"integrity": "sha512-Ah6DloGmvseB/pX1cAmjbFvyU/pKuwQMQqz7d0yvuDlVYLTs2WeDHQMpC8tGjm1da+BriHROW/OEIT/KfYg6xw==", "integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w==",
"engines": { "engines": {
"node": ">=12" "node": ">=12"
} }
@@ -25098,30 +25089,24 @@
"dev": true "dev": true
}, },
"@patternfly/patternfly": { "@patternfly/patternfly": {
"version": "4.217.1", "version": "4.210.2",
"resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.217.1.tgz", "resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.210.2.tgz",
"integrity": "sha512-uN7JgfQsyR16YHkuGRCTIcBcnyKIqKjGkB2SGk9x1XXH3yYGenL83kpAavX9Xtozqp17KppOlybJuzcKvZMrgw==" "integrity": "sha512-aZiW24Bxi6uVmk5RyNTp+6q6ThtlJZotNRJfWVeGuwu1UlbBuV4DFa1bpjA6jfTZpfEpX2YL5+R+4ZVSCFAVdw=="
}, },
"@patternfly/react-core": { "@patternfly/react-core": {
"version": "4.264.0", "version": "4.239.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.264.0.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.239.0.tgz",
"integrity": "sha512-tK0BMWxw8nhukev40HZ6q6d02pDnjX7oyA91vHa18aakJUKBWMaerqpG4NZVMoh0tPKX3aLNj+zyCwDALFAZZw==", "integrity": "sha512-6CmYABCJLUXTlzCk6C3WouMNZpS0BCT+aHU8CvYpFQ/NrpYp3MJaDsYbqgCRWV42rmIO5iXun/4WhXBJzJEoQg==",
"requires": { "requires": {
"@patternfly/react-icons": "^4.93.0", "@patternfly/react-icons": "^4.90.0",
"@patternfly/react-styles": "^4.92.0", "@patternfly/react-styles": "^4.89.0",
"@patternfly/react-tokens": "^4.94.0", "@patternfly/react-tokens": "^4.91.0",
"focus-trap": "6.9.2", "focus-trap": "6.9.2",
"react-dropzone": "9.0.0", "react-dropzone": "9.0.0",
"tippy.js": "5.1.2", "tippy.js": "5.1.2",
"tslib": "^2.0.0" "tslib": "^2.0.0"
}, },
"dependencies": { "dependencies": {
"@patternfly/react-icons": {
"version": "4.93.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.93.0.tgz",
"integrity": "sha512-OH0vORVioL+HLWMEog8/3u8jsiMCeJ0pFpvRKRhy5Uk4CdAe40k1SOBvXJP6opr+O8TLbz0q3bm8Jsh/bPaCuQ==",
"requires": {}
},
"tslib": { "tslib": {
"version": "2.3.1", "version": "2.3.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz",
@@ -25130,15 +25115,15 @@
} }
}, },
"@patternfly/react-icons": { "@patternfly/react-icons": {
"version": "4.92.10", "version": "4.90.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.92.10.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.90.0.tgz",
"integrity": "sha512-vwCy7b+OyyuvLDSLqLUG2DkJZgMDogjld8tJTdAaG8HiEhC1sJPZac+5wD7AuS3ym/sQolS4vYtNiVDnMEORxA==", "integrity": "sha512-qEnQKbxbUgyiosiKSkeKEBwmhgJwWEqniIAFyoxj+kpzAdeu7ueWe5iBbqo06mvDOedecFiM5mIE1N0MXwk8Yw==",
"requires": {} "requires": {}
}, },
"@patternfly/react-styles": { "@patternfly/react-styles": {
"version": "4.92.0", "version": "4.89.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.92.0.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.89.0.tgz",
"integrity": "sha512-B/f6iyu8UEN1+wRxdC4sLIhvJeyL8SqInDXZmwOIqK8uPJ8Lze7qrbVhkkVzbMF37/oDPVa6dZH8qZFq062LEA==" "integrity": "sha512-SkT+qx3Xqu70T5s+i/AUT2hI2sKAPDX4ffeiJIUDu/oyWiFdk+/9DEivnLSyJMruroXXN33zKibvzb5rH7DKTQ=="
}, },
"@patternfly/react-table": { "@patternfly/react-table": {
"version": "4.108.0", "version": "4.108.0",
@@ -25161,9 +25146,9 @@
} }
}, },
"@patternfly/react-tokens": { "@patternfly/react-tokens": {
"version": "4.94.0", "version": "4.91.0",
"resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.94.0.tgz", "resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.91.0.tgz",
"integrity": "sha512-fYXxUJZnzpn89K2zzHF0cSncZZVGKrohdb5f5T1wzxwU2NZPVGpvr88xhm+V2Y/fSrrTPwXcP3IIdtNOOtJdZw==" "integrity": "sha512-QeQCy8o8E/16fAr8mxqXIYRmpTsjCHJXi5p5jmgEDFmYMesN6Pqfv6N5D0FHb+CIaNOZWRps7GkWvlIMIE81sw=="
}, },
"@pmmmwh/react-refresh-webpack-plugin": { "@pmmmwh/react-refresh-webpack-plugin": {
"version": "0.5.4", "version": "0.5.4",
@@ -34225,9 +34210,9 @@
} }
}, },
"luxon": { "luxon": {
"version": "3.1.1", "version": "3.0.3",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.1.1.tgz", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz",
"integrity": "sha512-Ah6DloGmvseB/pX1cAmjbFvyU/pKuwQMQqz7d0yvuDlVYLTs2WeDHQMpC8tGjm1da+BriHROW/OEIT/KfYg6xw==" "integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w=="
}, },
"lz-string": { "lz-string": {
"version": "1.4.4", "version": "1.4.4",

View File

@@ -7,9 +7,9 @@
}, },
"dependencies": { "dependencies": {
"@lingui/react": "3.14.0", "@lingui/react": "3.14.0",
"@patternfly/patternfly": "4.217.1", "@patternfly/patternfly": "4.210.2",
"@patternfly/react-core": "^4.264.0", "@patternfly/react-core": "^4.239.0",
"@patternfly/react-icons": "4.92.10", "@patternfly/react-icons": "4.90.0",
"@patternfly/react-table": "4.108.0", "@patternfly/react-table": "4.108.0",
"ace-builds": "^1.10.1", "ace-builds": "^1.10.1",
"ansi-to-html": "0.7.2", "ansi-to-html": "0.7.2",
@@ -22,7 +22,7 @@
"has-ansi": "5.0.1", "has-ansi": "5.0.1",
"html-entities": "2.3.2", "html-entities": "2.3.2",
"js-yaml": "4.1.0", "js-yaml": "4.1.0",
"luxon": "^3.1.1", "luxon": "^3.0.3",
"prop-types": "^15.8.1", "prop-types": "^15.8.1",
"react": "17.0.2", "react": "17.0.2",
"react-ace": "^10.1.0", "react-ace": "^10.1.0",

View File

@@ -20,10 +20,6 @@ class Hosts extends Base {
return this.http.get(`${this.baseUrl}${id}/all_groups/`, { params }); return this.http.get(`${this.baseUrl}${id}/all_groups/`, { params });
} }
readGroups(id, params) {
return this.http.get(`${this.baseUrl}${id}/groups/`, { params });
}
readGroupsOptions(id) { readGroupsOptions(id) {
return this.http.options(`${this.baseUrl}${id}/groups/`); return this.http.options(`${this.baseUrl}${id}/groups/`);
} }

View File

@@ -153,10 +153,6 @@ function CredentialsStep({
}))} }))}
value={selectedType && selectedType.id} value={selectedType && selectedType.id}
onChange={(e, id) => { onChange={(e, id) => {
// Reset query params when the category of credentials is changed
history.replace({
search: '',
});
setSelectedType(types.find((o) => o.id === parseInt(id, 10))); setSelectedType(types.find((o) => o.id === parseInt(id, 10)));
}} }}
/> />

View File

@@ -3,7 +3,6 @@ import { act } from 'react-dom/test-utils';
import { Formik } from 'formik'; import { Formik } from 'formik';
import { CredentialsAPI, CredentialTypesAPI } from 'api'; import { CredentialsAPI, CredentialTypesAPI } from 'api';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import { createMemoryHistory } from 'history';
import CredentialsStep from './CredentialsStep'; import CredentialsStep from './CredentialsStep';
jest.mock('../../../api/models/CredentialTypes'); jest.mock('../../../api/models/CredentialTypes');
@@ -165,41 +164,6 @@ describe('CredentialsStep', () => {
}); });
}); });
test('should reset query params (credential.page) when selected credential type is changed', async () => {
let wrapper;
const history = createMemoryHistory({
initialEntries: ['?credential.page=2'],
});
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<CredentialsStep allowCredentialsWithPasswords />
</Formik>,
{
context: { router: { history } },
}
);
});
wrapper.update();
expect(CredentialsAPI.read).toHaveBeenCalledWith({
credential_type: 1,
order_by: 'name',
page: 2,
page_size: 5,
});
await act(async () => {
wrapper.find('AnsibleSelect').invoke('onChange')({}, 3);
});
expect(CredentialsAPI.read).toHaveBeenCalledWith({
credential_type: 3,
order_by: 'name',
page: 1,
page_size: 5,
});
});
test("error should be shown when a credential that prompts for passwords is selected on a step that doesn't allow it", async () => { test("error should be shown when a credential that prompts for passwords is selected on a step that doesn't allow it", async () => {
let wrapper; let wrapper;
await act(async () => { await act(async () => {

View File

@@ -173,10 +173,6 @@ function MultiCredentialsLookup({
}))} }))}
value={selectedType && selectedType.id} value={selectedType && selectedType.id}
onChange={(e, id) => { onChange={(e, id) => {
// Reset query params when the category of credentials is changed
history.replace({
search: '',
});
setSelectedType( setSelectedType(
credentialTypes.find((o) => o.id === parseInt(id, 10)) credentialTypes.find((o) => o.id === parseInt(id, 10))
); );

View File

@@ -6,7 +6,6 @@ import {
mountWithContexts, mountWithContexts,
waitForElement, waitForElement,
} from '../../../testUtils/enzymeHelpers'; } from '../../../testUtils/enzymeHelpers';
import { createMemoryHistory } from 'history';
import MultiCredentialsLookup from './MultiCredentialsLookup'; import MultiCredentialsLookup from './MultiCredentialsLookup';
jest.mock('../../api'); jest.mock('../../api');
@@ -229,53 +228,6 @@ describe('<Formik><MultiCredentialsLookup /></Formik>', () => {
]); ]);
}); });
test('should reset query params (credentials.page) when selected credential type is changed', async () => {
const history = createMemoryHistory({
initialEntries: ['?credentials.page=2'],
});
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<MultiCredentialsLookup
value={credentials}
tooltip="This is credentials look up"
onChange={() => {}}
onError={() => {}}
/>
</Formik>,
{
context: { router: { history } },
}
);
});
const searchButton = await waitForElement(
wrapper,
'Button[aria-label="Search"]'
);
await act(async () => {
searchButton.invoke('onClick')();
});
expect(CredentialsAPI.read).toHaveBeenCalledWith({
credential_type: 400,
order_by: 'name',
page: 2,
page_size: 5,
});
const select = await waitForElement(wrapper, 'AnsibleSelect');
await act(async () => {
select.invoke('onChange')({}, 500);
});
wrapper.update();
expect(CredentialsAPI.read).toHaveBeenCalledWith({
credential_type: 500,
order_by: 'name',
page: 1,
page_size: 5,
});
});
test('should only add 1 credential per credential type except vault(see below)', async () => { test('should only add 1 credential per credential type except vault(see below)', async () => {
const onChange = jest.fn(); const onChange = jest.fn();
await act(async () => { await act(async () => {

View File

@@ -416,14 +416,8 @@ function ScheduleForm({
if (options.end === 'onDate') { if (options.end === 'onDate') {
if ( if (
DateTime.fromFormat( DateTime.fromISO(values.startDate) >=
`${values.startDate} ${values.startTime}`, DateTime.fromISO(options.endDate)
'yyyy-LL-dd h:mm a'
).toMillis() >=
DateTime.fromFormat(
`${options.endDate} ${options.endTime}`,
'yyyy-LL-dd h:mm a'
).toMillis()
) { ) {
freqErrors.endDate = t`Please select an end date/time that comes after the start date/time.`; freqErrors.endDate = t`Please select an end date/time that comes after the start date/time.`;
} }

View File

@@ -900,36 +900,6 @@ describe('<ScheduleForm />', () => {
); );
}); });
test('should create schedule with the same start and end date provided that the end date is at a later time', async () => {
const today = DateTime.now().toFormat('yyyy-LL-dd');
const laterTime = DateTime.now().plus({ hours: 1 }).toFormat('h:mm a');
await act(async () => {
wrapper.find('DatePicker[aria-label="End date"]').prop('onChange')(
today,
new Date(today)
);
});
wrapper.update();
expect(
wrapper
.find('FormGroup[data-cy="schedule-End date/time"]')
.prop('helperTextInvalid')
).toBe(
'Please select an end date/time that comes after the start date/time.'
);
await act(async () => {
wrapper.find('TimePicker[aria-label="End time"]').prop('onChange')(
laterTime
);
});
wrapper.update();
expect(
wrapper
.find('FormGroup[data-cy="schedule-End date/time"]')
.prop('helperTextInvalid')
).toBe(undefined);
});
test('error shown when on day number is not between 1 and 31', async () => { test('error shown when on day number is not between 1 and 31', async () => {
await act(async () => { await act(async () => {
wrapper.find('FrequencySelect#schedule-frequency').invoke('onChange')([ wrapper.find('FrequencySelect#schedule-frequency').invoke('onChange')([

View File

@@ -24,10 +24,12 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
const { id } = useParams(); const { id } = useParams();
const relevantResults = relatedJobs.filter( const relevantResults = relatedJobs.filter(
({ job: jobId, summary_fields }) => ({
jobId && job: jobId,
`${jobId}` !== id && summary_fields: {
summary_fields.job.type !== 'workflow_approval' unified_job_template: { unified_job_type },
},
}) => jobId && `${jobId}` !== id && unified_job_type !== 'workflow_approval'
); );
const [isOpen, setIsOpen] = useState(false); const [isOpen, setIsOpen] = useState(false);
@@ -99,14 +101,16 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
{sortedJobs?.map((node) => ( {sortedJobs?.map((node) => (
<SelectOption <SelectOption
key={node.id} key={node.id}
to={`/jobs/${JOB_URL_SEGMENT_MAP[node.summary_fields.job.type]}/${ to={`/jobs/${
node.summary_fields.job?.id JOB_URL_SEGMENT_MAP[
}/output`} node.summary_fields.unified_job_template.unified_job_type
]
}/${node.summary_fields.job?.id}/output`}
component={Link} component={Link}
value={node.summary_fields.job.name} value={node.summary_fields.unified_job_template.name}
> >
{stringIsUUID(node.identifier) {stringIsUUID(node.identifier)
? node.summary_fields.job.name ? node.summary_fields.unified_job_template.name
: node.identifier} : node.identifier}
</SelectOption> </SelectOption>
))} ))}

View File

@@ -1,85 +0,0 @@
import React from 'react';
import { within, render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import WorkflowOutputNavigation from './WorkflowOutputNavigation';
import { createMemoryHistory } from 'history';
import { I18nProvider } from '@lingui/react';
import { i18n } from '@lingui/core';
import { en } from 'make-plural/plurals';
import english from '../../../src/locales/en/messages';
import { Router } from 'react-router-dom';
jest.mock('react-router-dom', () => ({
...jest.requireActual('react-router-dom'),
useParams: () => ({
id: 1,
}),
}));
const jobs = [
{
id: 1,
summary_fields: {
job: {
name: 'Ansible',
type: 'project_update',
id: 1,
status: 'successful',
},
},
job: 4,
},
{
id: 2,
summary_fields: {
job: {
name: 'Durham',
type: 'job',
id: 2,
status: 'successful',
},
},
job: 3,
},
{
id: 3,
summary_fields: {
job: {
name: 'Red hat',
type: 'job',
id: 3,
status: 'successful',
},
},
job: 2,
},
];
describe('<WorkflowOuputNavigation/>', () => {
test('Should open modal and deprovision node', async () => {
i18n.loadLocaleData({ en: { plurals: en } });
i18n.load({ en: english });
i18n.activate('en');
const user = userEvent.setup();
const ref = jest
.spyOn(React, 'useRef')
.mockReturnValueOnce({ current: 'div' });
const history = createMemoryHistory({
initialEntries: ['jobs/playbook/2/output'],
});
render(
<I18nProvider i18n={i18n}>
<Router history={history}>
<WorkflowOutputNavigation relatedJobs={jobs} parentRef={ref} />
</Router>
</I18nProvider>
);
const button = screen.getByRole('button');
await user.click(button);
await waitFor(() => screen.getByText('Workflow Nodes'));
await waitFor(() => screen.getByText('Red hat'));
await waitFor(() => screen.getByText('Durham'));
await waitFor(() => screen.getByText('Ansible'));
});
});

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -282,7 +282,7 @@ const mockInputSources = {
summary_fields: { summary_fields: {
source_credential: { source_credential: {
id: 20, id: 20,
name: 'CyberArk Conjur Secrets Manager Lookup', name: 'CyberArk Conjur Secret Lookup',
description: '', description: '',
kind: 'conjur', kind: 'conjur',
cloud: false, cloud: false,
@@ -301,7 +301,7 @@ const mockInputSources = {
summary_fields: { summary_fields: {
source_credential: { source_credential: {
id: 20, id: 20,
name: 'CyberArk Conjur Secrets Manager Lookup', name: 'CyberArk Conjur Secret Lookup',
description: '', description: '',
kind: 'conjur', kind: 'conjur',
cloud: false, cloud: false,

View File

@@ -36,14 +36,14 @@ const mockCredentialTypeDetail = {
url: '/api/v2/credential_types/20/', url: '/api/v2/credential_types/20/',
related: { related: {
named_url: named_url:
'/api/v2/credential_types/CyberArk Conjur Secrets Manager Lookup+external/', '/api/v2/credential_types/CyberArk Conjur Secret Lookup+external/',
credentials: '/api/v2/credential_types/20/credentials/', credentials: '/api/v2/credential_types/20/credentials/',
activity_stream: '/api/v2/credential_types/20/activity_stream/', activity_stream: '/api/v2/credential_types/20/activity_stream/',
}, },
summary_fields: { user_capabilities: { edit: false, delete: false } }, summary_fields: { user_capabilities: { edit: false, delete: false } },
created: '2020-05-18T21:53:35.398260Z', created: '2020-05-18T21:53:35.398260Z',
modified: '2020-05-18T21:54:05.451444Z', modified: '2020-05-18T21:54:05.451444Z',
name: 'CyberArk Conjur Secrets Manager Lookup', name: 'CyberArk Conjur Secret Lookup',
description: '', description: '',
kind: 'external', kind: 'external',
namespace: 'conjur', namespace: 'conjur',

View File

@@ -465,7 +465,7 @@
}, },
"created": "2020-05-18T21:53:35.370730Z", "created": "2020-05-18T21:53:35.370730Z",
"modified": "2020-05-18T21:54:05.436400Z", "modified": "2020-05-18T21:54:05.436400Z",
"name": "CyberArk Central Credential Provider Lookup", "name": "CyberArk AIM Central Credential Provider Lookup",
"description": "", "description": "",
"kind": "external", "kind": "external",
"namespace": "aim", "namespace": "aim",
@@ -546,7 +546,7 @@
}, },
"created": "2020-05-18T21:53:35.398260Z", "created": "2020-05-18T21:53:35.398260Z",
"modified": "2020-05-18T21:54:05.451444Z", "modified": "2020-05-18T21:54:05.451444Z",
"name": "CyberArk Conjur Secrets Manager Lookup", "name": "CyberArk Conjur Secret Lookup",
"description": "", "description": "",
"kind": "external", "kind": "external",
"namespace": "conjur", "namespace": "conjur",

View File

@@ -3,7 +3,7 @@
"type": "credential", "type": "credential",
"url": "/api/v2/credentials/1/", "url": "/api/v2/credentials/1/",
"related": { "related": {
"named_url": "/api/v2/credentials/CyberArk Conjur Secrets Manager Lookup+external++/", "named_url": "/api/v2/credentials/CyberArk Conjur Secret Lookup++CyberArk Conjur Secret Lookup+external++/",
"created_by": "/api/v2/users/1/", "created_by": "/api/v2/users/1/",
"modified_by": "/api/v2/users/1/", "modified_by": "/api/v2/users/1/",
"activity_stream": "/api/v2/credentials/1/activity_stream/", "activity_stream": "/api/v2/credentials/1/activity_stream/",
@@ -19,7 +19,7 @@
"summary_fields": { "summary_fields": {
"credential_type": { "credential_type": {
"id": 20, "id": 20,
"name": "CyberArk Conjur Secrets Manager Lookup", "name": "CyberArk Conjur Secret Lookup",
"description": "" "description": ""
}, },
"created_by": { "created_by": {
@@ -69,7 +69,7 @@
}, },
"created": "2020-05-19T12:51:36.956029Z", "created": "2020-05-19T12:51:36.956029Z",
"modified": "2020-05-19T12:51:36.956086Z", "modified": "2020-05-19T12:51:36.956086Z",
"name": "CyberArk Conjur Secrets Manager Lookup", "name": "CyberArk Conjur Secret Lookup",
"description": "", "description": "",
"organization": null, "organization": null,
"credential_type": 20, "credential_type": 20,

View File

@@ -29,10 +29,6 @@ function ContainerGroupAdd() {
try { try {
const { data: response } = await InstanceGroupsAPI.create({ const { data: response } = await InstanceGroupsAPI.create({
name: values.name, name: values.name,
max_forks: values.max_forks ? values.max_forks : 0,
max_concurrent_jobs: values.max_concurrent_jobs
? values.max_concurrent_jobs
: 0,
credential: values?.credential?.id, credential: values?.credential?.id,
pod_spec_override: values.override pod_spec_override: values.override
? getPodSpecValue(values.pod_spec_override) ? getPodSpecValue(values.pod_spec_override)

View File

@@ -33,8 +33,6 @@ const initialPodSpec = {
const instanceGroupCreateData = { const instanceGroupCreateData = {
name: 'Fuz', name: 'Fuz',
credential: { id: 71, name: 'CG' }, credential: { id: 71, name: 'CG' },
max_concurrent_jobs: 0,
max_forks: 0,
pod_spec_override: pod_spec_override:
'apiVersion: v1\nkind: Pod\nmetadata:\n namespace: default\nspec:\n containers:\n - image: ansible/ansible-runner\n tty: true\n stdin: true\n imagePullPolicy: Always\n args:\n - sleep\n - infinity\n - test', 'apiVersion: v1\nkind: Pod\nmetadata:\n namespace: default\nspec:\n containers:\n - image: ansible/ansible-runner\n tty: true\n stdin: true\n imagePullPolicy: Always\n args:\n - sleep\n - infinity\n - test',
}; };

View File

@@ -9,12 +9,7 @@ import AlertModal from 'components/AlertModal';
import ErrorDetail from 'components/ErrorDetail'; import ErrorDetail from 'components/ErrorDetail';
import { CardBody, CardActionsRow } from 'components/Card'; import { CardBody, CardActionsRow } from 'components/Card';
import DeleteButton from 'components/DeleteButton'; import DeleteButton from 'components/DeleteButton';
import { import { Detail, DetailList, UserDateDetail } from 'components/DetailList';
Detail,
DetailList,
UserDateDetail,
DetailBadge,
} from 'components/DetailList';
import useRequest, { useDismissableError } from 'hooks/useRequest'; import useRequest, { useDismissableError } from 'hooks/useRequest';
import { jsonToYaml, isJsonString } from 'util/yaml'; import { jsonToYaml, isJsonString } from 'util/yaml';
import { InstanceGroupsAPI } from 'api'; import { InstanceGroupsAPI } from 'api';
@@ -52,20 +47,6 @@ function ContainerGroupDetails({ instanceGroup }) {
value={t`Container group`} value={t`Container group`}
dataCy="container-group-type" dataCy="container-group-type"
/> />
<DetailBadge
label={t`Max concurrent jobs`}
dataCy="instance-group-max-concurrent-jobs"
helpText={t`Maximum number of jobs to run concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_concurrent_jobs}
/>
<DetailBadge
label={t`Max forks`}
dataCy="instance-group-max-forks"
helpText={t`Maximum number of forks to allow across all jobs running concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_forks}
/>
{instanceGroup.summary_fields.credential && ( {instanceGroup.summary_fields.credential && (
<Detail <Detail
label={t`Credential`} label={t`Credential`}

View File

@@ -23,8 +23,6 @@ const instanceGroup = {
created: '2020-09-03T18:26:47.113934Z', created: '2020-09-03T18:26:47.113934Z',
modified: '2020-09-03T19:34:23.244694Z', modified: '2020-09-03T19:34:23.244694Z',
capacity: 0, capacity: 0,
max_concurrent_jobs: 0,
max_forks: 0,
committed_capacity: 0, committed_capacity: 0,
consumed_capacity: 0, consumed_capacity: 0,
percent_capacity_remaining: 0.0, percent_capacity_remaining: 0.0,

View File

@@ -39,10 +39,6 @@ function ContainerGroupEdit({ instanceGroup }) {
name: values.name, name: values.name,
credential: values.credential ? values.credential.id : null, credential: values.credential ? values.credential.id : null,
pod_spec_override: values.override ? values.pod_spec_override : null, pod_spec_override: values.override ? values.pod_spec_override : null,
max_forks: values.max_forks ? values.max_forks : 0,
max_concurrent_jobs: values.max_concurrent_jobs
? values.max_concurrent_jobs
: 0,
is_container_group: true, is_container_group: true,
}); });
history.push(detailsIUrl); history.push(detailsIUrl);

View File

@@ -34,8 +34,6 @@ const instanceGroup = {
policy_instance_percentage: 0, policy_instance_percentage: 0,
policy_instance_minimum: 0, policy_instance_minimum: 0,
policy_instance_list: [], policy_instance_list: [],
max_concurrent_jobs: 0,
max_forks: 0,
pod_spec_override: '', pod_spec_override: '',
summary_fields: { summary_fields: {
credential: { credential: {
@@ -146,8 +144,6 @@ describe('<ContainerGroupEdit/>', () => {
...updatedInstanceGroup, ...updatedInstanceGroup,
credential: 12, credential: 12,
pod_spec_override: null, pod_spec_override: null,
max_concurrent_jobs: 0,
max_forks: 0,
is_container_group: true, is_container_group: true,
}); });
expect(history.location.pathname).toEqual( expect(history.location.pathname).toEqual(

View File

@@ -42,8 +42,6 @@ const instanceGroup = {
credential: null, credential: null,
policy_instance_percentage: 100, policy_instance_percentage: 100,
policy_instance_minimum: 0, policy_instance_minimum: 0,
max_concurrent_jobs: 0,
max_forks: 0,
policy_instance_list: ['receptor-1', 'receptor-2'], policy_instance_list: ['receptor-1', 'receptor-2'],
pod_spec_override: '', pod_spec_override: '',
summary_fields: { summary_fields: {

View File

@@ -73,20 +73,6 @@ function InstanceGroupDetails({ instanceGroup }) {
dataCy="instance-group-policy-instance-percentage" dataCy="instance-group-policy-instance-percentage"
content={`${instanceGroup.policy_instance_percentage} %`} content={`${instanceGroup.policy_instance_percentage} %`}
/> />
<DetailBadge
label={t`Max concurrent jobs`}
dataCy="instance-group-max-concurrent-jobs"
helpText={t`Maximum number of jobs to run concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_concurrent_jobs}
/>
<DetailBadge
label={t`Max forks`}
dataCy="instance-group-max-forks"
helpText={t`Maximum number of forks to allow across all jobs running concurrently on this group.
Zero means no limit will be enforced.`}
content={instanceGroup.max_forks}
/>
{instanceGroup.capacity ? ( {instanceGroup.capacity ? (
<DetailBadge <DetailBadge
label={t`Used capacity`} label={t`Used capacity`}

Some files were not shown because too many files have changed in this diff.