Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 11:34:43 -03:30)

Compare commits: UI-Feature ... 21.11.0 (183 commits)
183 commits (SHA1):
ea9c52aca6, a7ebce1fef, 5de9cf748d, ebea78943d, 1e33bc4020, e9ad01e806,
8a4059d266, 01a7076267, 32b6aec66b, 884ab424d5, 7e55305c45, e9a1582b70,
51ef1e808d, 11fbfc2063, f6395c69dd, ca07bc85cb, b87dd6dc56, f8d46d5e71,
ce0a456ecc, 5775ff1422, 82e8bcd2bb, d73cc501d5, 7e40a4daed, 47e824dd11,
4643b816fe, 79d9329cfa, 6492c03965, 98107301a5, 4810099158, 1aca9929ab,
2aa58bc17d, b99a434dee, 6cee99a9f9, ee509aea56, b5452a48f8, 68e555824d,
0c980fa7d5, e34ce8c795, 58bad6cfa9, 3543644e0e, 36c0d07b30, 03b0281fde,
6f6f04a071, 239827a9cf, ac9871b36f, f739908ccf, cf1ec07eab, d968b648de,
5dd0eab806, 41f3f381ec, ac8cff75ce, 94b34b801c, 8f6849fc22, 821b1701bf,
b7f2825909, e87e041a2a, cc336e791c, c2a3c3b285, 7b8dcc98e7, d5011492bf,
e363ddf470, 987709cdb3, f04ac3c798, 71a6baccdb, d07076b686, 7129f3e8cd,
df61a5cea1, a4b950f79b, 8be739d255, ca54195099, f0fcfdde39, 80b1ba4a35,
51f8e362dc, 737d6d8c8b, beaf6b6058, aad1fbcef8, 0b96d617ac, fe768a159b,
c1ebea858b, da9b8135e8, 76cecf3f6b, 7b2938f515, 916b5642d2, e524d3df3e,
01e9a611ea, ef29589940, cec2d2dfb9, 15b7ad3570, 36ff9cbc6d, ed74d80ecb,
a0b8215c06, f88b993b18, 4a7f4d0ed4, 6e08c3567f, adbcb5c5e4, 8054c6aedc,
58734a33c4, 2832f28014, e5057691ee, a0cfd8501c, 99b643bd77, 305b39d8e5,
bb047baeba, 9637aad37e, fbc06ec623, 57430afc55, 7aae7e8ed4, a67d107a58,
642003e207, ec7e2284df, ff7facdfa2, 6df4e62132, 6289bfb639, 95e4b2064f,
48eba60be4, c7efa8b4e0, 657b5cb1aa, 06daebbecf, fb37f22bf4, 71f326b705,
6508ab4a33, bf871bd427, e403c603d6, 4b7b3c7c7d, 1cdd2cad67, 86856f242a,
65c3db8cb8, 7fa9dcbc2a, 7cfb957de3, d0d467e863, eaccf32aa3, a8fdb22ab3,
ae79f94a48, 40499a4084, b36fa93005, 8839b4e90b, 7866135d6c, fe48dc412f,
3a25c4221f, 7e1be3ef94, b2f8ca09ba, c7692f5c56, 3b24afa7f2, 2b3f3e2043,
68614b83c0, a1edc75c11, 4b0e7a5cde, 01c6ac1b14, f0481d0a60, fd2a8b8531,
239959a4c9, 84f2b91105, 9d7b249b20, 5bd15dd48d, d03348c6e4, 5faeff6bec,
b94a126c02, eedd146643, d30c5ca9cd, a3b21b261c, d1d60c9ef1, 925e055bb3,
9f40d7a05c, d34f6af830, 163ccfd410, 968c316c0c, 2fdce43f9e, fa305a7bfa,
8b9db837ca, 1106367962, f9bb26ad33, 271613b86d, ac57f5cb28, b269ed48ee,
c39172f516, 9b047c2af6, f0d6bc0dc8, 7590301ae7, 0db75fdbfd, 878035c13b,
2cc971a43f, 9d77c54612, ef651a3a21
10  .github/triage_replies.md (vendored)
@@ -53,6 +53,16 @@ https://github.com/ansible/awx/#get-involved \
Thank you once again for this and your interest in AWX!

### Red Hat Support Team
- Hi! \
\
It appears that you are using an RPM build for RHEL. Please reach out to the Red Hat support team and submit a ticket. \
\
Here is the link to do so: \
\
https://access.redhat.com/support \
\
Thank you for your submission and for supporting AWX!

## Common
20  .github/workflows/ci.yml (vendored)
@@ -2,6 +2,7 @@
name: CI
env:
  BRANCH: ${{ github.base_ref || 'devel' }}
  LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
  pull_request:
jobs:
@@ -144,3 +145,22 @@ jobs:
      env:
        AWX_TEST_IMAGE: awx
        AWX_TEST_VERSION: ci

  collection-sanity:
    name: awx_collection sanity
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v2

      # The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
      - name: Upgrade ansible-core
        run: python3 -m pip install --upgrade ansible-core

      - name: Run sanity tests
        run: make test_collection_sanity
        env:
          # needed due to cgroupsv2. This is fixed, but a stable release
          # with the fix has not been made yet.
          ANSIBLE_TEST_PREFER_PODMAN: 1
2  .github/workflows/devel_images.yml (vendored)
@@ -1,5 +1,7 @@
---
name: Build/Push Development Images
env:
  LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
  push:
    branches:
7  .github/workflows/e2e_test.yml (vendored)
@@ -1,9 +1,12 @@
---
name: E2E Tests
env:
  LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting

on:
  pull_request_target:
    types: [labeled]
jobs:
  e2e-test:
    if: contains(github.event.pull_request.labels.*.name, 'qe:e2e')
    runs-on: ubuntu-latest
@@ -104,5 +107,3 @@ jobs:
        with:
          name: AWX-logs-${{ matrix.job }}
          path: make-docker-compose-output.log
@@ -1,5 +1,7 @@
---
name: Feature branch deletion cleanup
env:
  LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
  delete:
    branches:
19  .github/workflows/promote.yml (vendored)
@@ -1,5 +1,9 @@
---
name: Promote Release

env:
  LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting

on:
  release:
    types: [published]
@@ -34,9 +38,13 @@ jobs:
      - name: Build collection and publish to galaxy
        run: |
          COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
          ansible-galaxy collection publish \
            --token=${{ secrets.GALAXY_TOKEN }} \
            awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz
          if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
            echo "Galaxy release already done"; \
          else \
            ansible-galaxy collection publish \
              --token=${{ secrets.GALAXY_TOKEN }} \
              awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz; \
          fi

      - name: Set official pypi info
        run: echo pypi_repo=pypi >> $GITHUB_ENV
@@ -48,6 +56,7 @@ jobs:

      - name: Build awxkit and upload to pypi
        run: |
          git reset --hard
          cd awxkit && python3 setup.py bdist_wheel
          twine upload \
            -r ${{ env.pypi_repo }} \
@@ -70,4 +79,6 @@ jobs:
          docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
          docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
          docker push quay.io/${{ github.repository }}:latest

          docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
          docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
          docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
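The guarded publish step above makes the Galaxy release idempotent: it probes the collection's download URL and skips publishing when Galaxy already answers with a 302 redirect. A minimal sketch of the same check in Python, assuming the `requests` library; the namespace and version values are placeholders, not taken from this diff:

```python
import requests

def galaxy_release_exists(namespace: str, version: str) -> bool:
    """Return True if the collection tarball is already downloadable from Galaxy.

    Mirrors the curl --head check in promote.yml: Galaxy answers the download
    URL with a 302 redirect once the artifact has been published.
    """
    url = f"https://galaxy.ansible.com/download/{namespace}-awx-{version}.tar.gz"
    # Do not follow the redirect; the 302 status itself is the signal we want.
    response = requests.head(url, allow_redirects=False, timeout=30)
    return response.status_code == 302

if __name__ == "__main__":
    # Placeholder values for illustration only.
    if galaxy_release_exists("awx", "21.11.0"):
        print("Galaxy release already done")
    else:
        print("Would run: ansible-galaxy collection publish ...")
```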
19  .github/workflows/stage.yml (vendored)
@@ -1,5 +1,9 @@
|
||||
---
|
||||
name: Stage Release
|
||||
|
||||
env:
|
||||
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
@@ -80,6 +84,20 @@ jobs:
|
||||
-e push=yes \
|
||||
-e awx_official=yes
|
||||
|
||||
- name: Log in to GHCR
|
||||
run: |
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Log in to Quay
|
||||
run: |
|
||||
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
|
||||
|
||||
- name: tag awx-ee:latest with version input
|
||||
run: |
|
||||
docker pull quay.io/ansible/awx-ee:latest
|
||||
docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
|
||||
docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
|
||||
|
||||
- name: Build and stage awx-operator
|
||||
working-directory: awx-operator
|
||||
run: |
|
||||
@@ -99,6 +117,7 @@ jobs:
|
||||
env:
|
||||
AWX_TEST_IMAGE: ${{ github.repository }}
|
||||
AWX_TEST_VERSION: ${{ github.event.inputs.version }}
|
||||
AWX_EE_TEST_IMAGE: ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
|
||||
|
||||
- name: Create draft release for AWX
|
||||
working-directory: awx
|
||||
|
||||
4  .github/workflows/upload_schema.yml (vendored)
@@ -1,5 +1,9 @@
---
name: Upload API Schema

env:
  LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting

on:
  push:
    branches:
@@ -12,7 +12,7 @@ recursive-include awx/plugins *.ps1
recursive-include requirements *.txt
recursive-include requirements *.yml
recursive-include config *
recursive-include docs/licenses *
recursive-include licenses *
recursive-exclude awx devonly.py*
recursive-exclude awx/api/tests *
recursive-exclude awx/main/tests *
55  Makefile
@@ -6,7 +6,20 @@ CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
|
||||
COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
|
||||
|
||||
# ansible-test requires semver compatable version, so we allow overrides to hack it
|
||||
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
|
||||
# args for the ansible-test sanity command
|
||||
COLLECTION_SANITY_ARGS ?= --docker
|
||||
# collection unit testing directories
|
||||
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
|
||||
# collection integration test directories (defaults to all)
|
||||
COLLECTION_TEST_TARGET ?=
|
||||
# args for collection install
|
||||
COLLECTION_PACKAGE ?= awx
|
||||
COLLECTION_NAMESPACE ?= awx
|
||||
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
|
||||
COLLECTION_TEMPLATE_VERSION ?= false
|
||||
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
@@ -34,7 +47,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||
# to install the actual requirements
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==58.2.0 setuptools_scm[toml]==6.4.2 wheel==0.36.2
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
|
||||
|
||||
NAME ?= awx
|
||||
|
||||
@@ -118,7 +131,7 @@ virtualenv_awx:
|
||||
fi; \
|
||||
fi
|
||||
|
||||
## Install third-party requirements needed for AWX's environment.
|
||||
## Install third-party requirements needed for AWX's environment.
|
||||
# this does not use system site packages intentionally
|
||||
requirements_awx: virtualenv_awx
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
@@ -288,19 +301,13 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||
|
||||
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
|
||||
COLLECTION_TEST_TARGET ?=
|
||||
COLLECTION_PACKAGE ?= awx
|
||||
COLLECTION_NAMESPACE ?= awx
|
||||
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
|
||||
COLLECTION_TEMPLATE_VERSION ?= false
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi && \
|
||||
pip install ansible-core && \
|
||||
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
|
||||
ansible --version
|
||||
py.test $(COLLECTION_TEST_DIRS) -v
|
||||
# The python path needs to be modified so that the tests can find Ansible within the container
|
||||
# First we will use anything expility set as PYTHONPATH
|
||||
@@ -330,8 +337,13 @@ install_collection: build_collection
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(COLLECTION_VERSION).tar.gz
|
||||
|
||||
test_collection_sanity: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity
|
||||
test_collection_sanity:
|
||||
rm -rf awx_collection_build/
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
|
||||
ansible --version
|
||||
COLLECTION_VERSION=1.0.0 make install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||
|
||||
test_collection_integration: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
||||
@@ -389,18 +401,18 @@ $(UI_BUILD_FLAG_FILE):
|
||||
$(PYTHON) tools/scripts/compilemessages.py
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run build
|
||||
mkdir -p /var/lib/awx/public/static/css
|
||||
mkdir -p /var/lib/awx/public/static/js
|
||||
mkdir -p /var/lib/awx/public/static/media
|
||||
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
|
||||
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
|
||||
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
|
||||
touch $@
|
||||
|
||||
ui-release: $(UI_BUILD_FLAG_FILE)
|
||||
|
||||
ui-devel: awx/ui/node_modules
|
||||
@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
|
||||
mkdir -p /var/lib/awx/public/static/css
|
||||
mkdir -p /var/lib/awx/public/static/js
|
||||
mkdir -p /var/lib/awx/public/static/media
|
||||
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
|
||||
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
|
||||
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
|
||||
|
||||
ui-devel-instrumented: awx/ui/node_modules
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
|
||||
@@ -452,7 +464,7 @@ awx/projects:
|
||||
COMPOSE_UP_OPTS ?=
|
||||
COMPOSE_OPTS ?=
|
||||
CONTROL_PLANE_NODE_COUNT ?= 1
|
||||
EXECUTION_NODE_COUNT ?= 2
|
||||
EXECUTION_NODE_COUNT ?= 0
|
||||
MINIKUBE_CONTAINER_GROUP ?= false
|
||||
MINIKUBE_SETUP ?= false # if false, run minikube separately
|
||||
EXTRA_SOURCES_ANSIBLE_OPTS ?=
|
||||
@@ -593,13 +605,12 @@ pot: $(UI_BUILD_FLAG_FILE)
|
||||
po: $(UI_BUILD_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
|
||||
|
||||
LANG = "en_us"
|
||||
## generate API django .pot .po
|
||||
messages:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py makemessages -l $(LANG) --keep-pot
|
||||
$(PYTHON) manage.py makemessages -l en_us --keep-pot
|
||||
|
||||
print-%:
|
||||
@echo $($*)
|
||||
|
||||
@@ -96,6 +96,15 @@ register(
    category=_('Authentication'),
    category_slug='authentication',
)
register(
    'ALLOW_METRICS_FOR_ANONYMOUS_USERS',
    field_class=fields.BooleanField,
    default=False,
    label=_('Allow anonymous users to poll metrics'),
    help_text=_('If true, anonymous users are allowed to poll metrics.'),
    category=_('Authentication'),
    category_slug='authentication',
)


def authentication_validate(serializer, attrs):
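ALLOW_METRICS_FOR_ANONYMOUS_USERS is registered as an ordinary AWX setting in the 'authentication' category, so it should be toggleable at runtime through the settings API rather than only via a settings file. A hedged sketch, assuming the conventional /api/v2/settings/<category_slug>/ endpoint and a placeholder admin token:

```python
import requests

# Placeholder AWX URL and admin OAuth2 token; neither value comes from this diff.
AWX_URL = "https://awx.example.com"
TOKEN = "REPLACE_WITH_ADMIN_TOKEN"

# The setting is registered under the 'authentication' category slug, so it is
# expected to surface on the matching settings endpoint.
resp = requests.patch(
    f"{AWX_URL}/api/v2/settings/authentication/",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={"ALLOW_METRICS_FOR_ANONYMOUS_USERS": True},
    timeout=30,
)
resp.raise_for_status()
print(resp.json().get("ALLOW_METRICS_FOR_ANONYMOUS_USERS"))
```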
@@ -113,7 +113,7 @@ from awx.main.utils import (
|
||||
)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.named_url_graph import reset_counters
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
|
||||
from awx.main.validators import vars_validate_or_raise
|
||||
@@ -5040,12 +5040,10 @@ class InstanceHealthCheckSerializer(BaseSerializer):
|
||||
class InstanceGroupSerializer(BaseSerializer):
|
||||
|
||||
show_capabilities = ['edit', 'delete']
|
||||
|
||||
capacity = serializers.SerializerMethodField()
|
||||
consumed_capacity = serializers.SerializerMethodField()
|
||||
percent_capacity_remaining = serializers.SerializerMethodField()
|
||||
jobs_running = serializers.IntegerField(
|
||||
help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance group'), read_only=True
|
||||
)
|
||||
jobs_running = serializers.SerializerMethodField()
|
||||
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True)
|
||||
instances = serializers.SerializerMethodField()
|
||||
is_container_group = serializers.BooleanField(
|
||||
@@ -5071,6 +5069,22 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
label=_('Policy Instance Minimum'),
|
||||
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
|
||||
)
|
||||
max_concurrent_jobs = serializers.IntegerField(
|
||||
default=0,
|
||||
min_value=0,
|
||||
required=False,
|
||||
initial=0,
|
||||
label=_('Max Concurrent Jobs'),
|
||||
help_text=_("Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced."),
|
||||
)
|
||||
max_forks = serializers.IntegerField(
|
||||
default=0,
|
||||
min_value=0,
|
||||
required=False,
|
||||
initial=0,
|
||||
label=_('Max Forks'),
|
||||
help_text=_("Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced."),
|
||||
)
|
||||
policy_instance_list = serializers.ListField(
|
||||
child=serializers.CharField(),
|
||||
required=False,
|
||||
@@ -5092,6 +5106,8 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
"consumed_capacity",
|
||||
"percent_capacity_remaining",
|
||||
"jobs_running",
|
||||
"max_concurrent_jobs",
|
||||
"max_forks",
|
||||
"jobs_total",
|
||||
"instances",
|
||||
"is_container_group",
|
||||
@@ -5173,28 +5189,39 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
# Store capacity values (globally computed) in the context
|
||||
if 'task_manager_igs' not in self.context:
|
||||
instance_groups_queryset = None
|
||||
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
|
||||
if self.parent: # Is ListView:
|
||||
instance_groups_queryset = self.parent.instance
|
||||
|
||||
instances = TaskManagerInstances(jobs_qs)
|
||||
instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(
|
||||
instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'],
|
||||
instance_groups_queryset=instance_groups_queryset,
|
||||
)
|
||||
|
||||
self.context['task_manager_igs'] = instance_groups
|
||||
self.context['task_manager_igs'] = tm_models.instance_groups
|
||||
return self.context['task_manager_igs']
|
||||
|
||||
def get_consumed_capacity(self, obj):
|
||||
ig_mgr = self.get_ig_mgr()
|
||||
return ig_mgr.get_consumed_capacity(obj.name)
|
||||
|
||||
def get_percent_capacity_remaining(self, obj):
|
||||
if not obj.capacity:
|
||||
return 0.0
|
||||
def get_capacity(self, obj):
|
||||
ig_mgr = self.get_ig_mgr()
|
||||
return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))
|
||||
return ig_mgr.get_capacity(obj.name)
|
||||
|
||||
def get_percent_capacity_remaining(self, obj):
|
||||
capacity = self.get_capacity(obj)
|
||||
if not capacity:
|
||||
return 0.0
|
||||
consumed_capacity = self.get_consumed_capacity(obj)
|
||||
return float("{0:.2f}".format(((float(capacity) - float(consumed_capacity)) / (float(capacity))) * 100))
|
||||
|
||||
def get_instances(self, obj):
|
||||
return obj.instances.count()
|
||||
ig_mgr = self.get_ig_mgr()
|
||||
return len(ig_mgr.get_instances(obj.name))
|
||||
|
||||
def get_jobs_running(self, obj):
|
||||
ig_mgr = self.get_ig_mgr()
|
||||
return ig_mgr.get_jobs_running(obj.name)
|
||||
|
||||
|
||||
class ActivityStreamSerializer(BaseSerializer):
|
||||
|
||||
@@ -344,6 +344,13 @@ class InstanceDetail(RetrieveUpdateAPIView):
|
||||
model = models.Instance
|
||||
serializer_class = serializers.InstanceSerializer
|
||||
|
||||
def update_raw_data(self, data):
|
||||
# these fields are only valid on creation of an instance, so they unwanted on detail view
|
||||
data.pop('listener_port', None)
|
||||
data.pop('node_type', None)
|
||||
data.pop('hostname', None)
|
||||
return super(InstanceDetail, self).update_raw_data(data)
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
r = super(InstanceDetail, self).update(request, *args, **kwargs)
|
||||
if status.is_success(r.status_code):
|
||||
|
||||
@@ -5,9 +5,11 @@
import logging

# Django
from django.conf import settings
from django.utils.translation import gettext_lazy as _

# Django REST Framework
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied

@@ -31,9 +33,14 @@ class MetricsView(APIView):

    renderer_classes = [renderers.PlainTextRenderer, renderers.PrometheusJSONRenderer, renderers.BrowsableAPIRenderer]

    def initialize_request(self, request, *args, **kwargs):
        if settings.ALLOW_METRICS_FOR_ANONYMOUS_USERS:
            self.permission_classes = (AllowAny,)
        return super(APIView, self).initialize_request(request, *args, **kwargs)

    def get(self, request):
        '''Show Metrics Details'''
        if request.user.is_superuser or request.user.is_system_auditor:
        if settings.ALLOW_METRICS_FOR_ANONYMOUS_USERS or request.user.is_superuser or request.user.is_system_auditor:
            metrics_to_show = ''
            if not request.query_params.get('subsystemonly', "0") == "1":
                metrics_to_show += metrics().decode('UTF-8')
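With the flag enabled, the metrics view grants AllowAny, so the endpoint can be scraped without credentials. A minimal sketch of an anonymous poll, assuming the `requests` library and a placeholder AWX URL:

```python
import requests

# Placeholder base URL; adjust to your AWX deployment.
AWX_URL = "https://awx.example.com"

# No auth header: this only works when ALLOW_METRICS_FOR_ANONYMOUS_USERS is true;
# otherwise the view still requires a superuser or system auditor.
response = requests.get(
    f"{AWX_URL}/api/v2/metrics/",
    headers={"Accept": "text/plain"},
    timeout=30,
)
response.raise_for_status()
print(response.text[:500])  # Prometheus-format metrics
```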
@@ -16,7 +16,7 @@ from rest_framework import status
|
||||
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.utils import get_object_or_400
|
||||
from awx.main.models.ha import Instance, InstanceGroup
|
||||
from awx.main.models.ha import Instance, InstanceGroup, schedule_policy_task
|
||||
from awx.main.models.organization import Team
|
||||
from awx.main.models.projects import Project
|
||||
from awx.main.models.inventory import Inventory
|
||||
@@ -107,6 +107,11 @@ class InstanceGroupMembershipMixin(object):
|
||||
if inst_name in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
|
||||
# sometimes removing an instance has a non-obvious consequence
|
||||
# this is almost always true if policy_instance_percentage or _minimum is non-zero
|
||||
# after removing a single instance, the other memberships need to be re-balanced
|
||||
schedule_policy_task()
|
||||
return response
|
||||
|
||||
|
||||
|
||||
@@ -6237,4 +6237,5 @@ msgstr "%s se está actualizando."

#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Esta página se actualizará cuando se complete."
msgstr "Esta página se actualizará cuando se complete."

@@ -721,7 +721,7 @@ msgstr "DTSTART valide obligatoire dans rrule. La valeur doit commencer par : DT
#: awx/api/serializers.py:4657
msgid ""
"DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ."
msgstr "DTSTART ne peut correspondre à une DateHeure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
msgstr "DTSTART ne peut correspondre à une date-heure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."

#: awx/api/serializers.py:4659
msgid "Multiple DTSTART is not supported."
@@ -6239,4 +6239,5 @@ msgstr "%s est en cours de mise à niveau."

#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Cette page sera rafraîchie une fois terminée."
msgstr "Cette page sera rafraîchie une fois terminée."

@@ -6237,4 +6237,5 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."

#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
12 file diffs suppressed because they are too large.
@@ -2697,46 +2697,66 @@ class ActivityStreamAccess(BaseAccess):
|
||||
# 'job_template', 'job', 'project', 'project_update', 'workflow_job',
|
||||
# 'inventory_source', 'workflow_job_template'
|
||||
|
||||
inventory_set = Inventory.accessible_objects(self.user, 'read_role')
|
||||
credential_set = Credential.accessible_objects(self.user, 'read_role')
|
||||
q = Q(user=self.user)
|
||||
inventory_set = Inventory.accessible_pk_qs(self.user, 'read_role')
|
||||
if inventory_set:
|
||||
q |= (
|
||||
Q(ad_hoc_command__inventory__in=inventory_set)
|
||||
| Q(inventory__in=inventory_set)
|
||||
| Q(host__inventory__in=inventory_set)
|
||||
| Q(group__inventory__in=inventory_set)
|
||||
| Q(inventory_source__inventory__in=inventory_set)
|
||||
| Q(inventory_update__inventory_source__inventory__in=inventory_set)
|
||||
)
|
||||
|
||||
credential_set = Credential.accessible_pk_qs(self.user, 'read_role')
|
||||
if credential_set:
|
||||
q |= Q(credential__in=credential_set)
|
||||
|
||||
auditing_orgs = (
|
||||
(Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role'))
|
||||
.distinct()
|
||||
.values_list('id', flat=True)
|
||||
)
|
||||
project_set = Project.accessible_objects(self.user, 'read_role')
|
||||
jt_set = JobTemplate.accessible_objects(self.user, 'read_role')
|
||||
team_set = Team.accessible_objects(self.user, 'read_role')
|
||||
wfjt_set = WorkflowJobTemplate.accessible_objects(self.user, 'read_role')
|
||||
app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
|
||||
token_set = OAuth2TokenAccess(self.user).filtered_queryset()
|
||||
if auditing_orgs:
|
||||
q |= (
|
||||
Q(user__in=auditing_orgs.values('member_role__members'))
|
||||
| Q(organization__in=auditing_orgs)
|
||||
| Q(notification_template__organization__in=auditing_orgs)
|
||||
| Q(notification__notification_template__organization__in=auditing_orgs)
|
||||
| Q(label__organization__in=auditing_orgs)
|
||||
| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
|
||||
)
|
||||
|
||||
return qs.filter(
|
||||
Q(ad_hoc_command__inventory__in=inventory_set)
|
||||
| Q(o_auth2_application__in=app_set)
|
||||
| Q(o_auth2_access_token__in=token_set)
|
||||
| Q(user__in=auditing_orgs.values('member_role__members'))
|
||||
| Q(user=self.user)
|
||||
| Q(organization__in=auditing_orgs)
|
||||
| Q(inventory__in=inventory_set)
|
||||
| Q(host__inventory__in=inventory_set)
|
||||
| Q(group__inventory__in=inventory_set)
|
||||
| Q(inventory_source__inventory__in=inventory_set)
|
||||
| Q(inventory_update__inventory_source__inventory__in=inventory_set)
|
||||
| Q(credential__in=credential_set)
|
||||
| Q(team__in=team_set)
|
||||
| Q(project__in=project_set)
|
||||
| Q(project_update__project__in=project_set)
|
||||
| Q(job_template__in=jt_set)
|
||||
| Q(job__job_template__in=jt_set)
|
||||
| Q(workflow_job_template__in=wfjt_set)
|
||||
| Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
|
||||
| Q(workflow_job__workflow_job_template__in=wfjt_set)
|
||||
| Q(notification_template__organization__in=auditing_orgs)
|
||||
| Q(notification__notification_template__organization__in=auditing_orgs)
|
||||
| Q(label__organization__in=auditing_orgs)
|
||||
| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
|
||||
).distinct()
|
||||
project_set = Project.accessible_pk_qs(self.user, 'read_role')
|
||||
if project_set:
|
||||
q |= Q(project__in=project_set) | Q(project_update__project__in=project_set)
|
||||
|
||||
jt_set = JobTemplate.accessible_pk_qs(self.user, 'read_role')
|
||||
if jt_set:
|
||||
q |= Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set)
|
||||
|
||||
wfjt_set = WorkflowJobTemplate.accessible_pk_qs(self.user, 'read_role')
|
||||
if wfjt_set:
|
||||
q |= (
|
||||
Q(workflow_job_template__in=wfjt_set)
|
||||
| Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
|
||||
| Q(workflow_job__workflow_job_template__in=wfjt_set)
|
||||
)
|
||||
|
||||
team_set = Team.accessible_pk_qs(self.user, 'read_role')
|
||||
if team_set:
|
||||
q |= Q(team__in=team_set)
|
||||
|
||||
app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
|
||||
if app_set:
|
||||
q |= Q(o_auth2_application__in=app_set)
|
||||
|
||||
token_set = OAuth2TokenAccess(self.user).filtered_queryset()
|
||||
if token_set:
|
||||
q |= Q(o_auth2_access_token__in=token_set)
|
||||
|
||||
return qs.filter(q).distinct()
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import datetime
|
||||
import asyncio
|
||||
import logging
|
||||
import aioredis
|
||||
import redis
|
||||
import redis.asyncio
|
||||
import re
|
||||
|
||||
from prometheus_client import (
|
||||
@@ -82,7 +82,7 @@ class BroadcastWebsocketStatsManager:
|
||||
|
||||
async def run_loop(self):
|
||||
try:
|
||||
redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
|
||||
redis_conn = await redis.asyncio.Redis.from_url(settings.BROKER_URL)
|
||||
while True:
|
||||
stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
|
||||
await redis_conn.set(self._redis_key, stats_data_str)
|
||||
@@ -122,8 +122,8 @@ class BroadcastWebsocketStats:
|
||||
'Number of messages received, to be forwarded, by the broadcast websocket system',
|
||||
registry=self._registry,
|
||||
)
|
||||
self._messages_received = Gauge(
|
||||
f'awx_{self.remote_name}_messages_received',
|
||||
self._messages_received_current_conn = Gauge(
|
||||
f'awx_{self.remote_name}_messages_received_currrent_conn',
|
||||
'Number forwarded messages received by the broadcast websocket system, for the duration of the current connection',
|
||||
registry=self._registry,
|
||||
)
|
||||
@@ -144,13 +144,13 @@ class BroadcastWebsocketStats:
|
||||
|
||||
def record_message_received(self):
|
||||
self._internal_messages_received_per_minute.record()
|
||||
self._messages_received.inc()
|
||||
self._messages_received_current_conn.inc()
|
||||
self._messages_received_total.inc()
|
||||
|
||||
def record_connection_established(self):
|
||||
self._connection.state('connected')
|
||||
self._connection_start.set_to_current_time()
|
||||
self._messages_received.set(0)
|
||||
self._messages_received_current_conn.set(0)
|
||||
|
||||
def record_connection_lost(self):
|
||||
self._connection.state('disconnected')
|
||||
|
||||
@@ -16,7 +16,7 @@ from awx.conf.license import get_license
|
||||
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
|
||||
from awx.main import models
|
||||
from awx.main.analytics import register
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstances
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||
|
||||
"""
|
||||
This module is used to define metrics collected by awx.main.analytics.gather()
|
||||
@@ -237,11 +237,8 @@ def projects_by_scm_type(since, **kwargs):
|
||||
def instance_info(since, include_hostnames=False, **kwargs):
|
||||
info = {}
|
||||
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
|
||||
active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
|
||||
tm_instances = TaskManagerInstances(
|
||||
active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled', 'node_type']
|
||||
)
|
||||
for tm_instance in tm_instances.instances_by_hostname.values():
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
|
||||
for tm_instance in tm_models.instances.instances_by_hostname.values():
|
||||
instance = tm_instance.obj
|
||||
instance_info = {
|
||||
'uuid': instance.uuid,
|
||||
|
||||
@@ -569,7 +569,7 @@ register(
|
||||
register(
|
||||
'LOG_AGGREGATOR_LOGGERS',
|
||||
field_class=fields.StringListField,
|
||||
default=['awx', 'activity_stream', 'job_events', 'system_tracking'],
|
||||
default=['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket'],
|
||||
label=_('Loggers Sending Data to Log Aggregator Form'),
|
||||
help_text=_(
|
||||
'List of loggers that will send HTTP logs to the collector, these can '
|
||||
@@ -577,7 +577,8 @@ register(
|
||||
'awx - service logs\n'
|
||||
'activity_stream - activity stream records\n'
|
||||
'job_events - callback data from Ansible job events\n'
|
||||
'system_tracking - facts gathered from scan jobs.'
|
||||
'system_tracking - facts gathered from scan jobs\n'
|
||||
'broadcast_websocket - errors pertaining to websockets broadcast metrics\n'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
|
||||
@@ -9,10 +9,16 @@ aim_inputs = {
|
||||
'fields': [
|
||||
{
|
||||
'id': 'url',
|
||||
'label': _('CyberArk AIM URL'),
|
||||
'label': _('CyberArk CCP URL'),
|
||||
'type': 'string',
|
||||
'format': 'url',
|
||||
},
|
||||
{
|
||||
'id': 'webservice_id',
|
||||
'label': _('Web Service ID'),
|
||||
'type': 'string',
|
||||
'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
|
||||
},
|
||||
{
|
||||
'id': 'app_id',
|
||||
'label': _('Application ID'),
|
||||
@@ -64,10 +70,13 @@ def aim_backend(**kwargs):
|
||||
client_cert = kwargs.get('client_cert', None)
|
||||
client_key = kwargs.get('client_key', None)
|
||||
verify = kwargs['verify']
|
||||
webservice_id = kwargs['webservice_id']
|
||||
app_id = kwargs['app_id']
|
||||
object_query = kwargs['object_query']
|
||||
object_query_format = kwargs['object_query_format']
|
||||
reason = kwargs.get('reason', None)
|
||||
if webservice_id == '':
|
||||
webservice_id = 'AIMWebService'
|
||||
|
||||
query_params = {
|
||||
'AppId': app_id,
|
||||
@@ -78,7 +87,7 @@ def aim_backend(**kwargs):
|
||||
query_params['reason'] = reason
|
||||
|
||||
request_qs = '?' + urlencode(query_params, quote_via=quote)
|
||||
request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
|
||||
request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
|
||||
|
||||
with CertFiles(client_cert, client_key) as cert:
|
||||
res = requests.get(
|
||||
@@ -92,4 +101,4 @@ def aim_backend(**kwargs):
|
||||
return res.json()['Content']
|
||||
|
||||
|
||||
aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
|
||||
aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
|
||||
|
||||
@@ -1,6 +1,7 @@
import copy
import os
import pathlib
import time
from urllib.parse import urljoin

from .plugin import CredentialPlugin, CertFiles, raise_for_status
@@ -247,7 +248,15 @@ def kv_backend(**kwargs):
    request_url = urljoin(url, '/'.join(['v1'] + path_segments)).rstrip('/')
    with CertFiles(cacert) as cert:
        request_kwargs['verify'] = cert
        response = sess.get(request_url, **request_kwargs)
        request_retries = 0
        while request_retries < 5:
            response = sess.get(request_url, **request_kwargs)
            # https://developer.hashicorp.com/vault/docs/enterprise/consistency
            if response.status_code == 412:
                request_retries += 1
                time.sleep(1)
            else:
                break
    raise_for_status(response)

    json = response.json()
@@ -289,8 +298,15 @@ def ssh_backend(**kwargs):

    with CertFiles(cacert) as cert:
        request_kwargs['verify'] = cert
        resp = sess.post(request_url, **request_kwargs)

        request_retries = 0
        while request_retries < 5:
            resp = sess.post(request_url, **request_kwargs)
            # https://developer.hashicorp.com/vault/docs/enterprise/consistency
            if resp.status_code == 412:
                request_retries += 1
                time.sleep(1)
            else:
                break
    raise_for_status(resp)
    return resp.json()['data']['signed_key']
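Both Vault backends now retry on HTTP 412, which Vault Enterprise returns while a standby node has not yet caught up with the primary's index (see the linked consistency docs). The same pattern can be factored into a small helper; this is an illustrative sketch, not code from the diff, and the helper name and parameters are invented:

```python
import time
import requests

def request_with_consistency_retries(session: requests.Session, method: str, url: str,
                                     retries: int = 5, delay: float = 1.0,
                                     **kwargs) -> requests.Response:
    """Retry a Vault request while it answers 412 (index not yet consistent).

    See https://developer.hashicorp.com/vault/docs/enterprise/consistency
    """
    response = session.request(method, url, **kwargs)
    attempt = 0
    while response.status_code == 412 and attempt < retries:
        attempt += 1
        time.sleep(delay)
        response = session.request(method, url, **kwargs)
    return response

# Example usage (placeholders): response = request_with_consistency_retries(
#     requests.Session(), "GET", "https://vault.example.com/v1/secret/data/app")
```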
@@ -3,14 +3,12 @@ import logging
|
||||
import os
|
||||
import signal
|
||||
import time
|
||||
import traceback
|
||||
import datetime
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.db import DatabaseError, OperationalError, transaction, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django.db import transaction, connection as django_connection
|
||||
from django_guid import set_guid
|
||||
|
||||
import psutil
|
||||
@@ -64,6 +62,7 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
"""
|
||||
|
||||
MAX_RETRIES = 2
|
||||
INDIVIDUAL_EVENT_RETRIES = 3
|
||||
last_stats = time.time()
|
||||
last_flush = time.time()
|
||||
total = 0
|
||||
@@ -164,38 +163,48 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
else: # only calculate the seconds if the created time already has been set
|
||||
metrics_total_job_event_processing_seconds += e.modified - e.created
|
||||
metrics_duration_to_save = time.perf_counter()
|
||||
saved_events = []
|
||||
try:
|
||||
cls.objects.bulk_create(events)
|
||||
metrics_bulk_events_saved += len(events)
|
||||
saved_events = events
|
||||
self.buff[cls] = []
|
||||
except Exception as exc:
|
||||
logger.warning(f'Error in events bulk_create, will try indiviually up to 5 errors, error {str(exc)}')
|
||||
# If the database is flaking, let ensure_connection throw a general exception
|
||||
# will be caught by the outer loop, which goes into a proper sleep and retry loop
|
||||
django_connection.ensure_connection()
|
||||
logger.warning(f'Error in events bulk_create, will try indiviually, error: {str(exc)}')
|
||||
# if an exception occurs, we should re-attempt to save the
|
||||
# events one-by-one, because something in the list is
|
||||
# broken/stale
|
||||
consecutive_errors = 0
|
||||
events_saved = 0
|
||||
metrics_events_batch_save_errors += 1
|
||||
for e in events:
|
||||
for e in events.copy():
|
||||
try:
|
||||
e.save()
|
||||
events_saved += 1
|
||||
consecutive_errors = 0
|
||||
metrics_singular_events_saved += 1
|
||||
events.remove(e)
|
||||
saved_events.append(e) # Importantly, remove successfully saved events from the buffer
|
||||
except Exception as exc_indv:
|
||||
consecutive_errors += 1
|
||||
logger.info(f'Database Error Saving individual Job Event, error {str(exc_indv)}')
|
||||
if consecutive_errors >= 5:
|
||||
raise
|
||||
metrics_singular_events_saved += events_saved
|
||||
if events_saved == 0:
|
||||
raise
|
||||
retry_count = getattr(e, '_retry_count', 0) + 1
|
||||
e._retry_count = retry_count
|
||||
|
||||
# special sanitization logic for postgres treatment of NUL 0x00 char
|
||||
if (retry_count == 1) and isinstance(exc_indv, ValueError) and ("\x00" in e.stdout):
|
||||
e.stdout = e.stdout.replace("\x00", "")
|
||||
|
||||
if retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
|
||||
logger.error(f'Hit max retries ({retry_count}) saving individual Event error: {str(exc_indv)}\ndata:\n{e.__dict__}')
|
||||
events.remove(e)
|
||||
else:
|
||||
logger.info(f'Database Error Saving individual Event uuid={e.uuid} try={retry_count}, error: {str(exc_indv)}')
|
||||
|
||||
metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
|
||||
for e in events:
|
||||
for e in saved_events:
|
||||
if not getattr(e, '_skip_websocket_message', False):
|
||||
metrics_events_broadcast += 1
|
||||
emit_event_detail(e)
|
||||
if getattr(e, '_notification_trigger_event', False):
|
||||
job_stats_wrapup(getattr(e, e.JOB_REFERENCE), event=e)
|
||||
self.buff = {}
|
||||
self.last_flush = time.time()
|
||||
# only update metrics if we saved events
|
||||
if (metrics_bulk_events_saved + metrics_singular_events_saved) > 0:
|
||||
@@ -267,20 +276,16 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
try:
|
||||
self.flush(force=flush)
|
||||
break
|
||||
except (OperationalError, InterfaceError, InternalError) as exc:
|
||||
except Exception as exc:
|
||||
# Aside form bugs, exceptions here are assumed to be due to database flake
|
||||
if retries >= self.MAX_RETRIES:
|
||||
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
|
||||
self.buff = {}
|
||||
return
|
||||
delay = 60 * retries
|
||||
logger.warning(f'Database Error Flushing Job Events, retry #{retries + 1} in {delay} seconds: {str(exc)}')
|
||||
django_connection.close()
|
||||
time.sleep(delay)
|
||||
retries += 1
|
||||
except DatabaseError:
|
||||
logger.exception('Database Error Flushing Job Events')
|
||||
django_connection.close()
|
||||
break
|
||||
except Exception as exc:
|
||||
tb = traceback.format_exc()
|
||||
logger.error('Callback Task Processor Raised Exception: %r', exc)
|
||||
logger.error('Detail: {}'.format(tb))
|
||||
except Exception:
|
||||
logger.exception(f'Callback Task Processor Raised Unexpected Exception processing event data:\n{body}')
|
||||
|
||||
@@ -38,7 +38,14 @@ class Command(BaseCommand):
|
||||
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
|
||||
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||
RegisterQueue(
|
||||
settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
|
||||
settings.DEFAULT_EXECUTION_QUEUE_NAME,
|
||||
100,
|
||||
0,
|
||||
[],
|
||||
is_container_group=True,
|
||||
pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE,
|
||||
max_forks=settings.DEFAULT_EXECUTION_QUEUE_MAX_FORKS,
|
||||
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
|
||||
).register()
|
||||
else:
|
||||
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
|
||||
|
||||
@@ -32,8 +32,14 @@ class Command(BaseCommand):
|
||||
def handle(self, **options):
|
||||
self.old_key = settings.SECRET_KEY
|
||||
custom_key = os.environ.get("TOWER_SECRET_KEY")
|
||||
if options.get("use_custom_key") and custom_key:
|
||||
self.new_key = custom_key
|
||||
if options.get("use_custom_key"):
|
||||
if custom_key:
|
||||
self.new_key = custom_key
|
||||
else:
|
||||
print("Use custom key was specified but the env var TOWER_SECRET_KEY was not available")
|
||||
import sys
|
||||
|
||||
sys.exit(1)
|
||||
else:
|
||||
self.new_key = base64.encodebytes(os.urandom(33)).decode().rstrip()
|
||||
self._notification_templates()
|
||||
|
||||
@@ -17,7 +17,9 @@ class InstanceNotFound(Exception):
|
||||
|
||||
|
||||
class RegisterQueue:
|
||||
def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None):
|
||||
def __init__(
|
||||
self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None, max_forks=None, max_concurrent_jobs=None
|
||||
):
|
||||
self.instance_not_found_err = None
|
||||
self.queuename = queuename
|
||||
self.instance_percent = instance_percent
|
||||
@@ -25,6 +27,8 @@ class RegisterQueue:
|
||||
self.hostname_list = hostname_list
|
||||
self.is_container_group = is_container_group
|
||||
self.pod_spec_override = pod_spec_override
|
||||
self.max_forks = max_forks
|
||||
self.max_concurrent_jobs = max_concurrent_jobs
|
||||
|
||||
def get_create_update_instance_group(self):
|
||||
created = False
|
||||
@@ -45,6 +49,14 @@ class RegisterQueue:
|
||||
ig.pod_spec_override = self.pod_spec_override
|
||||
changed = True
|
||||
|
||||
if self.max_forks and (ig.max_forks != self.max_forks):
|
||||
ig.max_forks = self.max_forks
|
||||
changed = True
|
||||
|
||||
if self.max_concurrent_jobs and (ig.max_concurrent_jobs != self.max_concurrent_jobs):
|
||||
ig.max_concurrent_jobs = self.max_concurrent_jobs
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
ig.save()
|
||||
|
||||
|
||||
@@ -158,7 +158,11 @@ class InstanceManager(models.Manager):
|
||||
return (False, instance)
|
||||
|
||||
# Create new instance, and fill in default values
|
||||
create_defaults = {'node_state': Instance.States.INSTALLED, 'capacity': 0}
|
||||
create_defaults = {
|
||||
'node_state': Instance.States.INSTALLED,
|
||||
'capacity': 0,
|
||||
'listener_port': 27199,
|
||||
}
|
||||
if defaults is not None:
|
||||
create_defaults.update(defaults)
|
||||
uuid_option = {}
|
||||
|
||||
@@ -1,24 +1,14 @@
|
||||
# Generated by Django 3.2.13 on 2022-06-21 21:29
|
||||
|
||||
from django.db import migrations
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger("awx")
|
||||
|
||||
|
||||
def forwards(apps, schema_editor):
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
sources = InventorySource.objects.filter(update_on_project_update=True)
|
||||
for src in sources:
|
||||
if src.update_on_launch == False:
|
||||
src.update_on_launch = True
|
||||
src.save(update_fields=['update_on_launch'])
|
||||
logger.info(f"Setting update_on_launch to True for {src}")
|
||||
proj = src.source_project
|
||||
if proj and proj.scm_update_on_launch is False:
|
||||
proj.scm_update_on_launch = True
|
||||
proj.save(update_fields=['scm_update_on_launch'])
|
||||
logger.warning(f"Setting scm_update_on_launch to True for {proj}")
|
||||
InventorySource.objects.filter(update_on_project_update=True).update(update_on_launch=True)
|
||||
|
||||
Project = apps.get_model('main', 'Project')
|
||||
Project.objects.filter(scm_inventory_sources__update_on_project_update=True).update(scm_update_on_launch=True)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
23  awx/main/migrations/0173_instancegroup_max_limits.py (new file)
@@ -0,0 +1,23 @@
|
||||
# Generated by Django 3.2.13 on 2022-10-24 18:22
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0172_prevent_instance_fallback'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='instancegroup',
|
||||
name='max_concurrent_jobs',
|
||||
field=models.IntegerField(default=0, help_text='Maximum number of concurrent jobs to run on this group. Zero means no limit.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='instancegroup',
|
||||
name='max_forks',
|
||||
field=models.IntegerField(default=0, help_text='Max forks to execute on this group. Zero means no limit.'),
|
||||
),
|
||||
]
|
||||
18  awx/main/migrations/0174_ensure_org_ee_admin_roles.py (new file)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.16 on 2022-12-07 21:11
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from awx.main.migrations import _rbac as rbac
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0173_instancegroup_max_limits'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(rbac.create_roles),
|
||||
]
|
||||
@@ -15,6 +15,7 @@ def aws(cred, env, private_data_dir):
|
||||
|
||||
if cred.has_input('security_token'):
|
||||
env['AWS_SECURITY_TOKEN'] = cred.get_input('security_token', default='')
|
||||
env['AWS_SESSION_TOKEN'] = env['AWS_SECURITY_TOKEN']
|
||||
|
||||
|
||||
def gce(cred, env, private_data_dir):
|
||||
|
||||
@@ -379,6 +379,8 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
|
||||
default='',
|
||||
)
|
||||
)
|
||||
max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
|
||||
max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
|
||||
policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
|
||||
policy_instance_minimum = models.IntegerField(default=0, help_text=_("Static minimum number of Instances to automatically assign to this group"))
|
||||
policy_instance_list = JSONBlob(
|
||||
@@ -392,6 +394,8 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
|
||||
|
||||
@property
|
||||
def capacity(self):
|
||||
if self.is_container_group:
|
||||
return self.max_forks
|
||||
return sum(inst.capacity for inst in self.instances.all())
|
||||
|
||||
@property
|
||||
|
||||
@@ -567,17 +567,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
# Use .job_host_summaries.all() to get jobs affecting this host.
|
||||
# Use .job_events.all() to get events affecting this host.
|
||||
|
||||
'''
|
||||
We don't use timestamp, but we may in the future.
|
||||
'''
|
||||
|
||||
def update_ansible_facts(self, module, facts, timestamp=None):
|
||||
if module == "ansible":
|
||||
self.ansible_facts.update(facts)
|
||||
else:
|
||||
self.ansible_facts[module] = facts
|
||||
self.save()
|
||||
|
||||
def get_effective_host_name(self):
|
||||
"""
|
||||
Return the name of the host that will be used in actual ansible
|
||||
|
||||
@@ -44,7 +44,7 @@ from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
|
||||
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
|
||||
from awx.main.models.mixins import (
|
||||
ResourceMixin,
|
||||
@@ -857,8 +857,11 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
return host_queryset.iterator()
|
||||
return host_queryset
|
||||
|
||||
def start_job_fact_cache(self, destination, modification_times, timeout=None):
|
||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||
def start_job_fact_cache(self, destination, log_data, timeout=None):
|
||||
self.log_lifecycle("start_job_fact_cache")
|
||||
log_data['job_id'] = self.id
|
||||
log_data['written_ct'] = 0
|
||||
os.makedirs(destination, mode=0o700)
|
||||
|
||||
if timeout is None:
|
||||
@@ -869,6 +872,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
            hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
        else:
            hosts = self._get_inventory_hosts()

        last_filepath_written = None
        for host in hosts:
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
@@ -878,23 +883,38 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                with codecs.open(filepath, 'w', encoding='utf-8') as f:
                    os.chmod(f.name, 0o600)
                    json.dump(host.ansible_facts, f)
                    log_data['written_ct'] += 1
                    last_filepath_written = filepath
            except IOError:
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
            # make note of the time we wrote the file so we can check if it changed later
            modification_times[filepath] = os.path.getmtime(filepath)
        # make note of the time we wrote the last file so we can check if any file changed later
        if last_filepath_written:
            return os.path.getmtime(last_filepath_written)
        return None

    def finish_job_fact_cache(self, destination, modification_times):
    @log_excess_runtime(
        logger,
        debug_cutoff=0.01,
        msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
        add_log_data=True,
    )
    def finish_job_fact_cache(self, destination, facts_write_time, log_data):
        self.log_lifecycle("finish_job_fact_cache")
        log_data['job_id'] = self.id
        log_data['updated_ct'] = 0
        log_data['unmodified_ct'] = 0
        log_data['cleared_ct'] = 0
        hosts_to_update = []
        for host in self._get_inventory_hosts():
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
            if os.path.exists(filepath):
                # If the file changed since we wrote it pre-playbook run...
                # If the file changed since we wrote the last facts file, pre-playbook run...
                modified = os.path.getmtime(filepath)
                if modified > modification_times.get(filepath, 0):
                if (not facts_write_time) or modified > facts_write_time:
                    with codecs.open(filepath, 'r', encoding='utf-8') as f:
                        try:
                            ansible_facts = json.load(f)
@@ -902,7 +922,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                            continue
                    host.ansible_facts = ansible_facts
                    host.ansible_facts_modified = now()
                    host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
                    hosts_to_update.append(host)
                    system_tracking_logger.info(
                        'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                        extra=dict(
@@ -913,12 +933,21 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                            job_id=self.id,
                        ),
                    )
                    log_data['updated_ct'] += 1
                else:
                    log_data['unmodified_ct'] += 1
            else:
                # if the file goes missing, ansible removed it (likely via clear_facts)
                host.ansible_facts = {}
                host.ansible_facts_modified = now()
                hosts_to_update.append(host)
                system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
                host.save()
                log_data['cleared_ct'] += 1
            if len(hosts_to_update) > 100:
                self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
                hosts_to_update = []
        if hosts_to_update:
            self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])


class LaunchTimeConfigBase(BaseModel):

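The effect of the fact-cache change above is that the per-file `modification_times` dict is replaced by a single `facts_write_time` value: the mtime of the last cache file written before the playbook ran. A small illustrative sketch of the new comparison (not the exact helper, just the rule the code applies):

```python
# Illustrative sketch: any fact file whose mtime is newer than the time the last
# cache file was written before the run is treated as updated by Ansible.
import os


def host_facts_changed(filepath, facts_write_time):
    if facts_write_time is None:
        # nothing was written before the playbook ran, so an existing file must be new
        return True
    return os.path.getmtime(filepath) > facts_write_time
```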
@@ -1351,12 +1351,12 @@ class UnifiedJob(
                if required in defined_fields and not credential.has_input(required):
                    missing_credential_inputs.append(required)

            if missing_credential_inputs:
                self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
                    self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
                )
                self.save(update_fields=['job_explanation'])
                return (False, None)
            if missing_credential_inputs:
                self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
                    self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
                )
                self.save(update_fields=['job_explanation'])
                return (False, None)

        needed = self.get_passwords_needed_to_start()
        try:

@@ -27,8 +27,8 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):


websocket_urlpatterns = [
    re_path(r'websocket/$', consumers.EventConsumer),
    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),
    re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
]

application = AWXProtocolTypeRouter(

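The routing change above follows the Channels 3 convention: a consumer class is no longer passed to a route directly, it is turned into an ASGI application with `as_asgi()`. A minimal sketch of that pattern, assuming `EventConsumer` is the websocket consumer defined in `awx.main.consumers`:

```python
# Minimal sketch of the Channels 3 registration style used above.
from django.urls import re_path
from awx.main import consumers

websocket_urlpatterns = [
    re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
]
```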
@@ -43,8 +43,7 @@ from awx.main.utils.common import task_manager_bulk_reschedule, is_testing
from awx.main.signals import disable_activity_stream
from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.scheduler.task_manager_models import TaskManagerInstances
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
from awx.main.scheduler.task_manager_models import TaskManagerModels
import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.utils import decrypt_field

@@ -71,7 +70,12 @@ class TaskBase:
        # is called later.
        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
        self.start_time = time.time()

        # We want to avoid calling settings in loops, so cache these settings at init time
        self.start_task_limit = settings.START_TASK_LIMIT
        self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT
        self.control_task_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT

        for m in self.subsystem_metrics.METRICS:
            if m.startswith(self.prefix):
                self.subsystem_metrics.set(m, 0)
@@ -79,7 +83,7 @@ class TaskBase:
    def timed_out(self):
        """Return True/False if we have met or exceeded the timeout for the task manager."""
        elapsed = time.time() - self.start_time
        if elapsed >= settings.TASK_MANAGER_TIMEOUT:
        if elapsed >= self.task_manager_timeout:
            logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.")
            return True
        return False
@@ -471,9 +475,8 @@ class TaskManager(TaskBase):
        Init AFTER we know this instance of the task manager will run because the lock is acquired.
        """
        self.dependency_graph = DependencyGraph()
        self.instances = TaskManagerInstances(self.all_tasks)
        self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
        self.controlplane_ig = self.instance_groups.controlplane_ig
        self.tm_models = TaskManagerModels()
        self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig

    def job_blocked_by(self, task):
        # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
@@ -504,8 +507,16 @@ class TaskManager(TaskBase):
        return None

    @timeit
    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
    def start_task(self, task, instance_group, instance=None):
        # Just like for process_running_tasks, add the job to the dependency graph and
        # ask the TaskManagerInstanceGroups object to update consumed capacity on all
        # implicated instances and container groups.
        self.dependency_graph.add_job(task)
        if instance_group is not None:
            task.instance_group = instance_group
            # We need the instance group assigned to correctly account for container group max_concurrent_jobs and max_forks
            self.tm_models.consume_capacity(task)

        self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
        self.start_task_limit -= 1
        if self.start_task_limit == 0:
@@ -513,20 +524,6 @@ class TaskManager(TaskBase):
            ScheduleTaskManager().schedule()
        from awx.main.tasks.system import handle_work_error, handle_work_success

        # update capacity for control node and execution node
        if task.controller_node:
            self.instances[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
        if task.execution_node:
            self.instances[task.execution_node].consume_capacity(task.task_impact)

        dependent_tasks = dependent_tasks or []

        task_actual = {
            'type': get_type_for_model(type(task)),
            'id': task.id,
        }
        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]

        task.status = 'waiting'

        (start_status, opts) = task.pre_start()
@@ -546,7 +543,6 @@ class TaskManager(TaskBase):
                ScheduleWorkflowManager().schedule()
            # at this point we already have control/execution nodes selected for the following cases
        else:
            task.instance_group = instance_group
            execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
            logger.debug(
                f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
@@ -559,6 +555,7 @@ class TaskManager(TaskBase):
        # apply_async does a NOTIFY to the channel dispatcher is listening to
        # postgres will treat this as part of the transaction, which is what we want
        if task.status != 'failed' and type(task) is not WorkflowJob:
            task_actual = {'type': get_type_for_model(type(task)), 'id': task.id}
            task_cls = task._get_task_class()
            task_cls.apply_async(
                [task.pk],
@@ -566,7 +563,7 @@ class TaskManager(TaskBase):
                queue=task.get_queue_name(),
                uuid=task.celery_task_id,
                callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
                errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
                errbacks=[{'task': handle_work_error.name, 'kwargs': {'task_actual': task_actual}}],
            )

        # In exception cases, like a job failing pre-start checks, we send the websocket status message
@@ -580,6 +577,7 @@ class TaskManager(TaskBase):
        if type(task) is WorkflowJob:
            ScheduleWorkflowManager().schedule()
        self.dependency_graph.add_job(task)
        self.tm_models.consume_capacity(task)

    @timeit
    def process_pending_tasks(self, pending_tasks):
@@ -604,18 +602,18 @@ class TaskManager(TaskBase):
            if isinstance(task, WorkflowJob):
                # Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph.
                # Double check that using just the DependencyGraph works for Workflows and Sliced Jobs.
                self.start_task(task, None, task.get_jobs_fail_chain(), None)
                self.start_task(task, None, None)
                continue

            found_acceptable_queue = False

            # Determine if there is control capacity for the task
            if task.capacity_type == 'control':
                control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
                control_impact = task.task_impact + self.control_task_impact
            else:
                control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
            control_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
                task, instance_group_name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, impact=control_impact, capacity_type='control'
                control_impact = self.control_task_impact
            control_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(
                task, instance_group_name=self.controlplane_ig.name, impact=control_impact, capacity_type='control'
            )
            if not control_instance:
                self.task_needs_capacity(task, tasks_to_update_job_explanation)
@@ -626,25 +624,29 @@ class TaskManager(TaskBase):

            # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
            if task.capacity_type == 'control':
                if not self.tm_models.instance_groups[self.controlplane_ig.name].has_remaining_capacity(control_impact=True):
                    continue
                task.execution_node = control_instance.hostname
                execution_instance = self.instances[control_instance.hostname].obj
                execution_instance = self.tm_models.instances[control_instance.hostname].obj
                task.log_lifecycle("controller_node_chosen")
                task.log_lifecycle("execution_node_chosen")
                self.start_task(task, self.controlplane_ig, task.get_jobs_fail_chain(), execution_instance)
                self.start_task(task, self.controlplane_ig, execution_instance)
                found_acceptable_queue = True
                continue

            for instance_group in self.instance_groups.get_instance_groups_from_task_cache(task):
            for instance_group in self.tm_models.instance_groups.get_instance_groups_from_task_cache(task):
                if not self.tm_models.instance_groups[instance_group.name].has_remaining_capacity(task):
                    continue
                if instance_group.is_container_group:
                    self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
                    self.start_task(task, instance_group, None)
                    found_acceptable_queue = True
                    break

                # at this point we know the instance group is NOT a container group
                # because if it was, it would have started the task and broke out of the loop.
                execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
                execution_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(
                    task, instance_group_name=instance_group.name, add_hybrid_control_cost=True
                ) or self.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)
                ) or self.tm_models.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)

                if execution_instance:
                    task.execution_node = execution_instance.hostname
@@ -660,8 +662,8 @@ class TaskManager(TaskBase):
                            task.log_format, instance_group.name, execution_instance.hostname, execution_instance.remaining_capacity
                        )
                    )
                    execution_instance = self.instances[execution_instance.hostname].obj
                    self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
                    execution_instance = self.tm_models.instances[execution_instance.hostname].obj
                    self.start_task(task, instance_group, execution_instance)
                    found_acceptable_queue = True
                    break
            else:

@@ -15,15 +15,18 @@ logger = logging.getLogger('awx.main.scheduler')
class TaskManagerInstance:
    """A class representing minimal data the task manager needs to represent an Instance."""

    def __init__(self, obj):
    def __init__(self, obj, **kwargs):
        self.obj = obj
        self.node_type = obj.node_type
        self.consumed_capacity = 0
        self.capacity = obj.capacity
        self.hostname = obj.hostname
        self.jobs_running = 0

    def consume_capacity(self, impact):
    def consume_capacity(self, impact, job_impact=False):
        self.consumed_capacity += impact
        if job_impact:
            self.jobs_running += 1

    @property
    def remaining_capacity(self):
@@ -33,9 +36,106 @@ class TaskManagerInstance:
        return remaining


class TaskManagerInstanceGroup:
    """A class representing minimal data the task manager needs to represent an InstanceGroup."""

    def __init__(self, obj, task_manager_instances=None, **kwargs):
        self.name = obj.name
        self.is_container_group = obj.is_container_group
        self.container_group_jobs = 0
        self.container_group_consumed_forks = 0
        _instances = obj.instances.all()
        # We want the list of TaskManagerInstance objects because these are shared across the TaskManagerInstanceGroup objects.
        # This way when we consume capacity on an instance that is in multiple groups, we tabulate across all the groups correctly.
        self.instances = [task_manager_instances[instance.hostname] for instance in _instances if instance.hostname in task_manager_instances]
        self.instance_hostnames = tuple([instance.hostname for instance in _instances if instance.hostname in task_manager_instances])
        self.max_concurrent_jobs = obj.max_concurrent_jobs
        self.max_forks = obj.max_forks
        self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)

    def consume_capacity(self, task):
        """We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level."""
        if self.is_container_group:
            self.container_group_jobs += 1
            self.container_group_consumed_forks += task.task_impact
        else:
            raise RuntimeError("We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances.")

    def get_remaining_instance_capacity(self):
        return sum(inst.remaining_capacity for inst in self.instances)

    def get_instance_capacity(self):
        return sum(inst.capacity for inst in self.instances)

    def get_consumed_instance_capacity(self):
        return sum(inst.consumed_capacity for inst in self.instances)

    def get_instance_jobs_running(self):
        return sum(inst.jobs_running for inst in self.instances)

    def get_jobs_running(self):
        if self.is_container_group:
            return self.container_group_jobs
        return sum(inst.jobs_running for inst in self.instances)

    def get_capacity(self):
        """This reports any type of capacity, including that of container group jobs.

        Container groups don't really have capacity, but if they have max_forks set,
        we can interperet that as how much capacity the user has defined them to have.
        """
        if self.is_container_group:
            return self.max_forks
        return self.get_instance_capacity()

    def get_consumed_capacity(self):
        if self.is_container_group:
            return self.container_group_consumed_forks
        return self.get_consumed_instance_capacity()

    def get_remaining_capacity(self):
        return self.get_capacity() - self.get_consumed_capacity()

    def has_remaining_capacity(self, task=None, control_impact=False):
        """Pass either a task or control_impact=True to determine if the IG has capacity to run the control task or job task."""
        task_impact = self.control_task_impact if control_impact else task.task_impact
        job_impact = 0 if control_impact else 1
        task_string = f"task {task.log_format} with impact of {task_impact}" if task else f"control task with impact of {task_impact}"

        # We only want to loop over instances if self.max_concurrent_jobs is set
        if self.max_concurrent_jobs == 0:
            # Override the calculated remaining capacity, because when max_concurrent_jobs == 0 we don't enforce any max
            remaining_jobs = 0
        else:
            remaining_jobs = self.max_concurrent_jobs - self.get_jobs_running() - job_impact

        # We only want to loop over instances if self.max_forks is set
        if self.max_forks == 0:
            # Override the calculated remaining capacity, because when max_forks == 0 we don't enforce any max
            remaining_forks = 0
        else:
            remaining_forks = self.max_forks - self.get_consumed_capacity() - task_impact

        if remaining_jobs < 0 or remaining_forks < 0:
            # A value less than zero means the task will not fit on the group
            if remaining_jobs < 0:
                logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_jobs} remaining jobs")
            if remaining_forks < 0:
                logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_forks} remaining forks")
            return False

        # Returning true means there is enough remaining capacity on the group to run the task (or no instance group level limits are being set)
        logger.debug(f"{task_string} can fit on instance group {self.name} with {remaining_forks} remaining forks and {remaining_jobs}")
        return True


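A short worked example of the arithmetic in `has_remaining_capacity`, with assumed numbers, may help read the two limits together: a job is rejected if either the remaining job slots or the remaining forks would go negative.

```python
# Worked example with assumed numbers, mirroring the checks above.
max_concurrent_jobs = 2   # instance group limit on simultaneous jobs
max_forks = 10            # instance group limit on forks
jobs_running = 2          # jobs already running in the group
consumed_capacity = 6     # forks already consumed
task_impact = 3           # impact of the candidate job
job_impact = 1            # a job task counts as one more running job

remaining_jobs = max_concurrent_jobs - jobs_running - job_impact   # 2 - 2 - 1 = -1
remaining_forks = max_forks - consumed_capacity - task_impact      # 10 - 6 - 3 = 1

# remaining_jobs is negative, so the group is out of job slots and the task
# does not fit, even though a fork of headroom is still left.
fits = not (remaining_jobs < 0 or remaining_forks < 0)
print(fits)  # False
```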
class TaskManagerInstances:
    def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
    def __init__(self, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled'), **kwargs):
        self.instances_by_hostname = dict()
        self.instance_groups_container_group_jobs = dict()
        self.instance_groups_container_group_consumed_forks = dict()
        self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)

        if instances is None:
            instances = (
                Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True)
@@ -43,18 +143,15 @@ class TaskManagerInstances:
                .only('node_type', 'node_state', 'capacity', 'hostname', 'enabled')
            )
        for instance in instances:
            self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
            self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance, **kwargs)

        # initialize remaining capacity based on currently waiting and running tasks
        for task in active_tasks:
            if task.status not in ['waiting', 'running']:
                continue
            control_instance = self.instances_by_hostname.get(task.controller_node, '')
            execution_instance = self.instances_by_hostname.get(task.execution_node, '')
            if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
                self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact)
            if control_instance and control_instance.node_type in ('hybrid', 'control'):
                self.instances_by_hostname[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
    def consume_capacity(self, task):
        control_instance = self.instances_by_hostname.get(task.controller_node, '')
        execution_instance = self.instances_by_hostname.get(task.execution_node, '')
        if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
            self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact, job_impact=True)
        if control_instance and control_instance.node_type in ('hybrid', 'control'):
            self.instances_by_hostname[task.controller_node].consume_capacity(self.control_task_impact)

    def __getitem__(self, hostname):
        return self.instances_by_hostname.get(hostname)
@@ -64,42 +161,57 @@ class TaskManagerInstances:


class TaskManagerInstanceGroups:
    """A class representing minimal data the task manager needs to represent an InstanceGroup."""
    """A class representing minimal data the task manager needs to represent all the InstanceGroups."""

    def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
    def __init__(self, task_manager_instances=None, instance_groups=None, instance_groups_queryset=None, **kwargs):
        self.instance_groups = dict()
        self.task_manager_instances = task_manager_instances if task_manager_instances is not None else TaskManagerInstances()
        self.controlplane_ig = None
        self.pk_ig_map = dict()
        self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
        self.controlplane_ig_name = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)

        if instance_groups is not None:  # for testing
            self.instance_groups = instance_groups
            self.instance_groups = {ig.name: TaskManagerInstanceGroup(ig, self.task_manager_instances, **kwargs) for ig in instance_groups}
            self.pk_ig_map = {ig.pk: ig for ig in instance_groups}
        else:
            if instance_groups_queryset is None:
                instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only('name', 'instances')
            for instance_group in instance_groups_queryset:
                if instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
                    self.controlplane_ig = instance_group
                self.instance_groups[instance_group.name] = dict(
                    instances=[
                        instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
                    ],
                instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only(
                    'name', 'instances', 'max_concurrent_jobs', 'max_forks', 'is_container_group'
                )
            for instance_group in instance_groups_queryset:
                if instance_group.name == self.controlplane_ig_name:
                    self.controlplane_ig = instance_group
                self.instance_groups[instance_group.name] = TaskManagerInstanceGroup(instance_group, self.task_manager_instances, **kwargs)
                self.pk_ig_map[instance_group.pk] = instance_group

    def __getitem__(self, ig_name):
        return self.instance_groups.get(ig_name)

    def __contains__(self, ig_name):
        return ig_name in self.instance_groups

    def get_remaining_capacity(self, group_name):
        instances = self.instance_groups[group_name]['instances']
        return sum(inst.remaining_capacity for inst in instances)
        return self.instance_groups[group_name].get_remaining_instance_capacity()

    def get_consumed_capacity(self, group_name):
        instances = self.instance_groups[group_name]['instances']
        return sum(inst.consumed_capacity for inst in instances)
        return self.instance_groups[group_name].get_consumed_capacity()

    def get_jobs_running(self, group_name):
        return self.instance_groups[group_name].get_jobs_running()

    def get_capacity(self, group_name):
        return self.instance_groups[group_name].get_capacity()

    def get_instances(self, group_name):
        return self.instance_groups[group_name].instances

    def fit_task_to_most_remaining_capacity_instance(self, task, instance_group_name, impact=None, capacity_type=None, add_hybrid_control_cost=False):
        impact = impact if impact else task.task_impact
        capacity_type = capacity_type if capacity_type else task.capacity_type
        instance_most_capacity = None
        most_remaining_capacity = -1
        instances = self.instance_groups[instance_group_name]['instances']
        instances = self.instance_groups[instance_group_name].instances

        for i in instances:
            if i.node_type not in (capacity_type, 'hybrid'):
@@ -107,7 +219,7 @@ class TaskManagerInstanceGroups:
            would_be_remaining = i.remaining_capacity - impact
            # hybrid nodes _always_ control their own tasks
            if add_hybrid_control_cost and i.node_type == 'hybrid':
                would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
                would_be_remaining -= self.control_task_impact
            if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
                instance_most_capacity = i
                most_remaining_capacity = would_be_remaining
@@ -115,10 +227,13 @@ class TaskManagerInstanceGroups:

    def find_largest_idle_instance(self, instance_group_name, capacity_type='execution'):
        largest_instance = None
        instances = self.instance_groups[instance_group_name]['instances']
        instances = self.instance_groups[instance_group_name].instances
        for i in instances:
            if i.node_type not in (capacity_type, 'hybrid'):
                continue
            if i.capacity <= 0:
                # We don't want to select an idle instance with 0 capacity
                continue
            if (hasattr(i, 'jobs_running') and i.jobs_running == 0) or i.remaining_capacity == i.capacity:
                if largest_instance is None:
                    largest_instance = i
@@ -139,3 +254,56 @@ class TaskManagerInstanceGroups:
            logger.warn(f"No instance groups in cache exist, defaulting to global instance groups for task {task}")
            return task.global_instance_groups
        return igs


class TaskManagerModels:
    def __init__(self, **kwargs):
        # We want to avoid calls to settings over and over in loops, so cache this information here
        kwargs['control_task_impact'] = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
        kwargs['controlplane_ig_name'] = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
        self.instances = TaskManagerInstances(**kwargs)
        self.instance_groups = TaskManagerInstanceGroups(task_manager_instances=self.instances, **kwargs)

    @classmethod
    def init_with_consumed_capacity(cls, **kwargs):
        tmm = cls(**kwargs)
        tasks = kwargs.get('tasks', None)

        if tasks is None:
            instance_group_queryset = kwargs.get('instance_groups_queryset', None)
            # No tasks were provided, so we will fetch them from the database
            task_status_filter_list = kwargs.get('task_status_filter_list', ['running', 'waiting'])
            task_fields = kwargs.get('task_fields', ('task_impact', 'controller_node', 'execution_node', 'instance_group'))
            from awx.main.models import UnifiedJob

            if instance_group_queryset is not None:
                logger.debug("******************INSTANCE GROUP QUERYSET PASSED -- FILTERING TASKS ****************************")
                # Sometimes things like the serializer pass a queryset that looks at not all instance groups. in this case,
                # we also need to filter the tasks we look at
                tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list, instance_group__in=[ig.id for ig in instance_group_queryset]).only(
                    *task_fields
                )
            else:
                # No instance group query set, look at all tasks in whole system
                tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list).only(*task_fields)

        for task in tasks:
            tmm.consume_capacity(task)

        return tmm

    def consume_capacity(self, task):
        # Consume capacity on instances, which bubbles up to instance groups they are a member of
        self.instances.consume_capacity(task)

        # For container group jobs, additionally we must account for capacity consumed since
        # The container groups have no instances to look at to track how many jobs/forks are consumed
        if task.instance_group_id:
            if not task.instance_group_id in self.instance_groups.pk_ig_map.keys():
                logger.warn(
                    f"Task {task.log_format} assigned {task.instance_group_id} but this instance group not present in map of instance groups{self.instance_groups.pk_ig_map.keys()}"
                )
            else:
                ig = self.instance_groups.pk_ig_map[task.instance_group_id]
                if ig.is_container_group:
                    self.instance_groups[ig.name].consume_capacity(task)

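For callers outside the scheduling loop, the class method above bundles the whole setup: build the instance and instance-group snapshot, then charge it with everything currently running or waiting. A usage sketch with the API shown in this diff (the call site is assumed, not taken from the patch):

```python
# Usage sketch (assumed call site): snapshot instances/instance groups and
# pre-consume capacity for running and waiting jobs in one call.
from awx.main.scheduler.task_manager_models import TaskManagerModels

tm_models = TaskManagerModels.init_with_consumed_capacity()
controlplane = tm_models.instance_groups.controlplane_ig
if controlplane is not None:
    print(tm_models.instance_groups[controlplane.name].get_remaining_capacity())
```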
@@ -390,6 +390,7 @@ class BaseTask(object):
            logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
            raise

        emitted_lockfile_log = False
        start_time = time.time()
        while True:
            try:
@@ -401,6 +402,9 @@ class BaseTask(object):
                    logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                    raise
                else:
                    if not emitted_lockfile_log:
                        logger.info(f"exception acquiring lock {lock_path}: {e}")
                        emitted_lockfile_log = True
                time.sleep(1.0)
                self.instance.refresh_from_db(fields=['cancel_flag'])
                if self.instance.cancel_flag or signal_callback():
@@ -426,7 +430,7 @@ class BaseTask(object):
        """
        instance.log_lifecycle("post_run")

    def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
    def final_run_hook(self, instance, status, private_data_dir):
        """
        Hook for any steps to run after job/task is marked as complete.
        """
@@ -469,7 +473,6 @@ class BaseTask(object):
        self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
        self.instance.websocket_emit_status("running")
        status, rc = 'error', None
        fact_modification_times = {}
        self.runner_callback.event_ct = 0

        '''
@@ -498,14 +501,6 @@ class BaseTask(object):
            if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
                raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)

            # Fetch "cached" fact data from prior runs and put on the disk
            # where ansible expects to find it
            if getattr(self.instance, 'use_fact_cache', False):
                self.instance.start_job_fact_cache(
                    os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
                    fact_modification_times,
                )

            # May have to serialize the value
            private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
            passwords = self.build_passwords(self.instance, kwargs)
@@ -646,7 +641,7 @@ class BaseTask(object):
            self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')

        try:
            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
            self.final_run_hook(self.instance, status, private_data_dir)
        except Exception:
            logger.exception('{} Final run hook errored.'.format(self.instance.log_format))

@@ -1066,12 +1061,19 @@ class RunJob(SourceControlMixin, BaseTask):
            # ran inside of the event saving code
            update_smart_memberships_for_inventory(job.inventory)

        # Fetch "cached" fact data from prior runs and put on the disk
        # where ansible expects to find it
        if job.use_fact_cache:
            self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))

    def build_project_dir(self, job, private_data_dir):
        self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)

    def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
        super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
        if not private_data_dir:
    def post_run_hook(self, job, status):
        super(RunJob, self).post_run_hook(job, status)
        job.refresh_from_db(fields=['job_env'])
        private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
        if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
            # If there's no private data dir, that means we didn't get into the
            # actual `run()` call; this _usually_ means something failed in
            # the pre_run_hook method
@@ -1079,9 +1081,11 @@ class RunJob(SourceControlMixin, BaseTask):
        if job.use_fact_cache:
            job.finish_job_fact_cache(
                os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
                fact_modification_times,
                self.facts_write_time,
            )

    def final_run_hook(self, job, status, private_data_dir):
        super(RunJob, self).final_run_hook(job, status, private_data_dir)
        try:
            inventory = job.inventory
        except Inventory.DoesNotExist:

@@ -61,10 +61,15 @@ def read_receptor_config():
        return yaml.safe_load(f)


def get_receptor_sockfile():
    data = read_receptor_config()
def work_signing_enabled(config_data):
    for section in config_data:
        if 'work-signing' in section:
            return True
    return False

    for section in data:

def get_receptor_sockfile(config_data):
    for section in config_data:
        for entry_name, entry_data in section.items():
            if entry_name == 'control-service':
                if 'filename' in entry_data:
@@ -75,12 +80,11 @@ def get_receptor_sockfile():
        raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')


def get_tls_client(use_stream_tls=None):
def get_tls_client(config_data, use_stream_tls=None):
    if not use_stream_tls:
        return None

    data = read_receptor_config()
    for section in data:
    for section in config_data:
        for entry_name, entry_data in section.items():
            if entry_name == 'tls-client':
                if 'name' in entry_data:
@@ -88,10 +92,12 @@ def get_tls_client(use_stream_tls=None):
    return None


def get_receptor_ctl():
    receptor_sockfile = get_receptor_sockfile()
def get_receptor_ctl(config_data=None):
    if config_data is None:
        config_data = read_receptor_config()
    receptor_sockfile = get_receptor_sockfile(config_data)
    try:
        return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(True))
        return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))
    except RuntimeError:
        return ReceptorControl(receptor_sockfile)

@@ -159,15 +165,18 @@ def run_until_complete(node, timing_data=None, **kwargs):
    """
    Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout.
    """
    receptor_ctl = get_receptor_ctl()
    config_data = read_receptor_config()
    receptor_ctl = get_receptor_ctl(config_data)

    use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS"
    kwargs.setdefault('tlsclient', get_tls_client(use_stream_tls))
    kwargs.setdefault('tlsclient', get_tls_client(config_data, use_stream_tls))
    kwargs.setdefault('ttl', '20s')
    kwargs.setdefault('payload', '')
    if work_signing_enabled(config_data):
        kwargs['signwork'] = True

    transmit_start = time.time()
    result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=True, **kwargs)
    result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, **kwargs)

    unit_id = result['unitid']
    run_start = time.time()
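`work_signing_enabled` only checks whether a top-level `work-signing` section exists in the parsed receptor config, so work is signed when the installation is configured for it rather than unconditionally. A sketch of that check against an assumed receptor.conf shape (the YAML parses to a list of single-key mappings; the example keys and paths below are illustrative, not taken from this patch):

```python
# Sketch with an assumed receptor.conf shape.
import yaml

config_text = """
- node:
    id: awx-1
- control-service:
    service: control
    filename: /var/run/receptor/receptor.sock
- work-signing:
    privatekey: /etc/receptor/work-private-key.pem
"""

config_data = yaml.safe_load(config_text)
# mirrors work_signing_enabled(): True if any section is named 'work-signing'
print(any('work-signing' in section for section in config_data))  # True
```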
@@ -302,7 +311,8 @@ class AWXReceptorJob:

    def run(self):
        # We establish a connection to the Receptor socket
        receptor_ctl = get_receptor_ctl()
        self.config_data = read_receptor_config()
        receptor_ctl = get_receptor_ctl(self.config_data)

        res = None
        try:
@@ -327,7 +337,7 @@ class AWXReceptorJob:
        if self.work_type == 'ansible-runner':
            work_submit_kw['node'] = self.task.instance.execution_node
            use_stream_tls = get_conn_type(work_submit_kw['node'], receptor_ctl).name == "STREAMTLS"
            work_submit_kw['tlsclient'] = get_tls_client(use_stream_tls)
            work_submit_kw['tlsclient'] = get_tls_client(self.config_data, use_stream_tls)

        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            transmitter_future = executor.submit(self.transmit, sockin)
@@ -401,9 +411,11 @@ class AWXReceptorJob:
            unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
            detail = unit_status.get('Detail', None)
            state_name = unit_status.get('StateName', None)
            stdout_size = unit_status.get('StdoutSize', 0)
        except Exception:
            detail = ''
            state_name = ''
            stdout_size = 0
            logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')

        if 'exceeded quota' in detail:
@@ -414,9 +426,16 @@ class AWXReceptorJob:
            return

        try:
            resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
            lines = resultsock.readlines()
            receptor_output = b"".join(lines).decode()
            receptor_output = ''
            if state_name == 'Failed' and self.task.runner_callback.event_ct == 0:
                # if receptor work unit failed and no events were emitted, work results may
                # contain useful information about why the job failed. In case stdout is
                # massive, only ask for last 1000 bytes
                startpos = max(stdout_size - 1000, 0)
                resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
                resultsock.setblocking(False)  # this makes resultfile reads non blocking
                lines = resultfile.readlines()
                receptor_output = b"".join(lines).decode()
            if receptor_output:
                self.task.runner_callback.delay_update(result_traceback=receptor_output)
            elif detail:
@@ -477,7 +496,9 @@ class AWXReceptorJob:

    @property
    def sign_work(self):
        return True if self.work_type in ('ansible-runner', 'local') else False
        if self.work_type in ('ansible-runner', 'local'):
            return work_signing_enabled(self.config_data)
        return False

    @property
    def work_type(self):

@@ -52,6 +52,7 @@ from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
    get_type_for_model,
    ignore_inventory_computed_fields,
    ignore_inventory_group_removal,
    ScheduleWorkflowManager,
@@ -720,45 +721,43 @@ def handle_work_success(task_actual):


@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    logger.warning("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue
def handle_work_error(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in error callback.'.format(task_actual['type'], task_actual['id']))
        return
    if not instance:
        return

            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']
    subtasks = instance.get_jobs_fail_chain()  # reverse of dependent_jobs mostly
    logger.debug(f'Executing error task id {task_actual["id"]}, subtasks: {[subtask.id for subtask in subtasks]}')

            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status in ('successful', 'failed'):
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")
    deps_of_deps = {}

    for subtask in subtasks:
        if subtask.celery_task_id != instance.celery_task_id and not subtask.cancel_flag and not subtask.status in ('successful', 'failed'):
            # If there are multiple in the dependency chain, A->B->C, and this was called for A, blame B for clarity
            blame_job = deps_of_deps.get(subtask.id, instance)
            subtask.status = 'failed'
            subtask.failed = True
            if not subtask.job_explanation:
                subtask.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                    get_type_for_model(type(blame_job)),
                    blame_job.name,
                    blame_job.id,
                )
            subtask.save()
            subtask.websocket_emit_status("failed")

            for sub_subtask in subtask.get_jobs_fail_chain():
                deps_of_deps[sub_subtask.id] = subtask

    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_manager_success_or_error(first_instance)
    schedule_manager_success_or_error(instance)

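The refactored handler derives the dependency chain from the failed job itself (`get_jobs_fail_chain`) instead of the old `subtasks` payload, and uses the `deps_of_deps` map so that in a chain A -> B -> C, B is blamed on A and C is blamed on B. A hypothetical sketch of that bookkeeping with stand-in objects (names and the flattened fail chain are assumptions for illustration only):

```python
# Hypothetical sketch of the blame bookkeeping above for a chain A -> B -> C.
class FakeJob:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def get_jobs_fail_chain(self):
        return self.children


c = FakeJob('C')
b = FakeJob('B', [c])
a = FakeJob('A', [b, c])  # assumed: A's fail chain is flattened and ordered

deps_of_deps = {}
for subtask in a.get_jobs_fail_chain():
    blame = deps_of_deps.get(id(subtask), a)
    print(f'{subtask.name} failed, blamed on {blame.name}')
    for sub_subtask in subtask.get_jobs_fail_chain():
        deps_of_deps[id(sub_subtask)] = subtask
# prints: B failed, blamed on A / C failed, blamed on B
```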
@task(queue=get_local_queuename)

@@ -3,5 +3,6 @@
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo"
    "AWS_SECURITY_TOKEN": "fooo",
    "AWS_SESSION_TOKEN": "fooo"
}
@@ -1,7 +1,15 @@
import pytest
import time
from unittest import mock
from uuid import uuid4

from django.test import TransactionTestCase

from awx.main.dispatch.worker.callback import job_stats_wrapup, CallbackBrokerWorker

from awx.main.dispatch.worker.callback import job_stats_wrapup
from awx.main.models.jobs import Job
from awx.main.models.inventory import InventoryUpdate, InventorySource
from awx.main.models.events import InventoryUpdateEvent


@pytest.mark.django_db
@@ -24,3 +32,108 @@ def test_wrapup_does_send_notifications(mocker):
    job.refresh_from_db()
    assert job.host_status_counts == {}
    mock.assert_called_once_with('succeeded')


class FakeRedis:
    def keys(self, *args, **kwargs):
        return []

    def set(self):
        pass

    def get(self):
        return None

    @classmethod
    def from_url(cls, *args, **kwargs):
        return cls()

    def pipeline(self):
        return self


class TestCallbackBrokerWorker(TransactionTestCase):
    @pytest.fixture(autouse=True)
    def turn_off_websockets(self):
        with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
            yield

    def get_worker(self):
        with mock.patch('redis.Redis', new=FakeRedis):  # turn off redis stuff
            return CallbackBrokerWorker()

    def event_create_kwargs(self):
        inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
        return dict(inventory_update=inventory_update, created=inventory_update.created)

    def test_flush_with_valid_event(self):
        worker = self.get_worker()
        events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
        worker.buff = {InventoryUpdateEvent: events}
        worker.flush()
        assert worker.buff.get(InventoryUpdateEvent, []) == []
        assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1

    def test_flush_with_invalid_event(self):
        worker = self.get_worker()
        kwargs = self.event_create_kwargs()
        events = [
            InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
            InventoryUpdateEvent(uuid=str(uuid4()), stdout='bad', counter=-2, **kwargs),
            InventoryUpdateEvent(uuid=str(uuid4()), stdout='good2', **kwargs),
        ]
        worker.buff = {InventoryUpdateEvent: events.copy()}
        worker.flush()
        assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
        assert InventoryUpdateEvent.objects.filter(uuid=events[1].uuid).count() == 0
        assert InventoryUpdateEvent.objects.filter(uuid=events[2].uuid).count() == 1
        assert worker.buff == {InventoryUpdateEvent: [events[1]]}

    def test_duplicate_key_not_saved_twice(self):
        worker = self.get_worker()
        events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
        worker.buff = {InventoryUpdateEvent: events.copy()}
        worker.flush()

        # put current saved event in buffer (error case)
        worker.buff = {InventoryUpdateEvent: [InventoryUpdateEvent.objects.get(uuid=events[0].uuid)]}
        worker.last_flush = time.time() - 2.0
        # here, the bulk_create will fail with UNIQUE constraint violation, but individual saves should resolve it
        worker.flush()
        assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
        assert worker.buff.get(InventoryUpdateEvent, []) == []

    def test_give_up_on_bad_event(self):
        worker = self.get_worker()
        events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
        worker.buff = {InventoryUpdateEvent: events.copy()}

        for i in range(5):
            worker.last_flush = time.time() - 2.0
            worker.flush()

        # Could not save, should be logged, and buffer should be cleared
        assert worker.buff.get(InventoryUpdateEvent, []) == []
        assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0  # sanity

    def test_postgres_invalid_NUL_char(self):
        # In postgres, text fields reject NUL character, 0x00
        # tests use sqlite3 which will not raise an error
        # but we can still test that it is sanitized before saving
        worker = self.get_worker()
        kwargs = self.event_create_kwargs()
        events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
        assert "\x00" in events[0].stdout  # sanity
        worker.buff = {InventoryUpdateEvent: events.copy()}

        with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create', side_effect=ValueError):
            with mock.patch.object(events[0], 'save', side_effect=ValueError):
                worker.flush()

        assert "\x00" not in events[0].stdout

        worker.last_flush = time.time() - 2.0
        worker.flush()

        event = InventoryUpdateEvent.objects.get(uuid=events[0].uuid)
        assert "\x00" not in event.stdout

@@ -171,13 +171,17 @@ class TestKeyRegeneration:

    def test_use_custom_key_with_empty_tower_secret_key_env_var(self):
        os.environ['TOWER_SECRET_KEY'] = ''
        new_key = call_command('regenerate_secret_key', '--use-custom-key')
        assert settings.SECRET_KEY != new_key
        with pytest.raises(SystemExit) as e:
            call_command('regenerate_secret_key', '--use-custom-key')
        assert e.type == SystemExit
        assert e.value.code == 1

    def test_use_custom_key_with_no_tower_secret_key_env_var(self):
        os.environ.pop('TOWER_SECRET_KEY', None)
        new_key = call_command('regenerate_secret_key', '--use-custom-key')
        assert settings.SECRET_KEY != new_key
        with pytest.raises(SystemExit) as e:
            call_command('regenerate_secret_key', '--use-custom-key')
        assert e.type == SystemExit
        assert e.value.code == 1

    def test_with_tower_secret_key_env_var(self):
        custom_key = 'MXSq9uqcwezBOChl/UfmbW1k4op+bC+FQtwPqgJ1u9XV'

@@ -4,7 +4,7 @@ from awx.main.models import (
    Instance,
    InstanceGroup,
)
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups


class TestInstanceGroupInstanceMapping(TransactionTestCase):
@@ -23,11 +23,10 @@ class TestInstanceGroupInstanceMapping(TransactionTestCase):
    def test_mapping(self):
        self.sample_cluster()
        with self.assertNumQueries(3):
            instances = TaskManagerInstances([])  # empty task list
            instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances)
            instance_groups = TaskManagerInstanceGroups()

        ig_instance_map = instance_groups.instance_groups

        assert set(i.hostname for i in ig_instance_map['ig_small']['instances']) == set(['i1'])
        assert set(i.hostname for i in ig_instance_map['ig_large']['instances']) == set(['i2', 'i3'])
        assert set(i.hostname for i in ig_instance_map['default']['instances']) == set(['i2'])
        assert set(i.hostname for i in ig_instance_map['ig_small'].instances) == set(['i1'])
        assert set(i.hostname for i in ig_instance_map['ig_large'].instances) == set(['i2', 'i3'])
        assert set(i.hostname for i in ig_instance_map['default'].instances) == set(['i2'])

@@ -10,6 +10,10 @@ from awx.main.utils import (
    create_temporary_fifo,
)

from awx.main.scheduler import TaskManager

from . import create_job


@pytest.fixture
def containerized_job(default_instance_group, kube_credential, job_template_factory):
@@ -34,6 +38,50 @@ def test_containerized_job(containerized_job):
    assert containerized_job.instance_group.credential.kubernetes


@pytest.mark.django_db
def test_max_concurrent_jobs_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
    """Construct a scenario where only 1 job will fit within the max_concurrent_jobs of the container group.

    Since max_concurrent_jobs is set to 1, even though 2 jobs are in pending
    and would be launched into the container group, only one will be started.
    """
    containerized_job.unified_job_template.allow_simultaneous = True
    containerized_job.unified_job_template.save()
    default_instance_group = containerized_job.instance_group
    default_instance_group.max_concurrent_jobs = 1
    default_instance_group.save()
    task_impact = 1
    # Create a second job that should not be scheduled at first, blocked by the other
    create_job(containerized_job.unified_job_template)
    tm = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = task_impact
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_called_once()


@pytest.mark.django_db
def test_max_forks_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
    """Construct a scenario where only 1 job will fit within the max_forks of the container group.

    In this case, we set the container_group max_forks to 10, and make the task_impact of a job 6.
    Therefore, only 1 job will fit within the max of 10.
    """
    containerized_job.unified_job_template.allow_simultaneous = True
    containerized_job.unified_job_template.save()
    default_instance_group = containerized_job.instance_group
    default_instance_group.max_forks = 10
    # Create a second job that should not be scheduled
    create_job(containerized_job.unified_job_template)
    tm = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = 6
        with mock.patch("awx.main.scheduler.TaskManager.start_task"):
            tm.schedule()
            tm.start_task.assert_called_once()


@pytest.mark.django_db
def test_kubectl_ssl_verification(containerized_job, default_job_execution_environment):
    containerized_job.execution_environment = default_job_execution_environment

@@ -23,7 +23,7 @@ def test_multi_group_basic_job_launch(instance_factory, controlplane_instance_gr
        mock_task_impact.return_value = 500
        with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
            TaskManager().schedule()
            TaskManager.start_task.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j2, ig2, [], i2)])
            TaskManager.start_task.assert_has_calls([mock.call(j1, ig1, i1), mock.call(j2, ig2, i2)])


@pytest.mark.django_db
@@ -54,7 +54,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
        DependencyManager().schedule()
        TaskManager().schedule()
        pu = p.project_updates.first()
        TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, [j1, j2], controlplane_instance_group.instances.all()[0])
        TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, controlplane_instance_group.instances.all()[0])
        pu.finished = pu.created + timedelta(seconds=1)
        pu.status = "successful"
        pu.save()
@@ -62,8 +62,8 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
        DependencyManager().schedule()
        TaskManager().schedule()

        TaskManager.start_task.assert_any_call(j1, ig1, [], i1)
        TaskManager.start_task.assert_any_call(j2, ig2, [], i2)
        TaskManager.start_task.assert_any_call(j1, ig1, i1)
        TaskManager.start_task.assert_any_call(j2, ig2, i2)
        assert TaskManager.start_task.call_count == 2


@@ -75,7 +75,7 @@ def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlpla
    wfj.save()
    with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(wfj, None, [], None)
        TaskManager.start_task.assert_called_once_with(wfj, None, None)
        assert wfj.instance_group is None


@@ -150,7 +150,7 @@ def test_failover_group_run(instance_factory, controlplane_instance_group, mocke
        mock_task_impact.return_value = 500
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j1_1, ig2, [], i2)])
            mock_job.assert_has_calls([mock.call(j1, ig1, i1), mock.call(j1_1, ig2, i2)])
            assert mock_job.call_count == 2



@@ -18,7 +18,7 @@ def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_grou
|
||||
j = create_job(objects.job_template)
|
||||
with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -240,12 +240,82 @@ def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocke
|
||||
mock_task_impact.return_value = 505
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_called_once_with(j1, controlplane_instance_group, [], instance)
|
||||
mock_job.assert_called_once_with(j1, controlplane_instance_group, instance)
|
||||
j1.status = "successful"
|
||||
j1.save()
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_called_once_with(j2, controlplane_instance_group, [], instance)
|
||||
mock_job.assert_called_once_with(j2, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_max_concurrent_jobs_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
|
||||
"""When max_concurrent_jobs of an instance group is more restrictive than capacity of instances, enforce max_concurrent_jobs."""
|
||||
instance = hybrid_instance
|
||||
controlplane_instance_group = instance.rampart_groups.first()
|
||||
# We will expect only 1 job to be started
|
||||
controlplane_instance_group.max_concurrent_jobs = 1
|
||||
controlplane_instance_group.save()
|
||||
num_jobs = 3
|
||||
jobs = []
|
||||
for i in range(num_jobs):
|
||||
jobs.append(
|
||||
create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
|
||||
)
|
||||
tm = TaskManager()
|
||||
task_impact = 1
|
||||
|
||||
# Sanity check that multiple jobs would run if not for the max_concurrent_jobs setting.
|
||||
assert task_impact * num_jobs < controlplane_instance_group.capacity
|
||||
tm = TaskManager()
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = task_impact
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_called_once()
|
||||
jobs[0].status = 'running'
|
||||
jobs[0].controller_node = instance.hostname
|
||||
jobs[0].execution_node = instance.hostname
|
||||
jobs[0].instance_group = controlplane_instance_group
|
||||
jobs[0].save()
|
||||
|
||||
# while that job is running, we should not start another job
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = task_impact
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_not_called()
|
||||
# now job is done, we should start one of the two other jobs
|
||||
jobs[0].status = 'successful'
|
||||
jobs[0].save()
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = task_impact
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_called_once()
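In the same hedged spirit, a sketch of the job-count gate that max_concurrent_jobs adds on top of raw capacity; the function and its arguments are invented for illustration and do not mirror AWX's internal bookkeeping.

# Illustrative only: zero means no limit, matching the settings added in this changeset.
def under_concurrent_job_limit(max_concurrent_jobs, jobs_running_in_group):
    return max_concurrent_jobs == 0 or jobs_running_in_group < max_concurrent_jobs

assert under_concurrent_job_limit(1, 0)        # nothing running yet, one job may start
assert not under_concurrent_job_limit(1, 1)    # group saturated even if capacity remains
assert under_concurrent_job_limit(0, 50)       # unlimited when set to zero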
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_max_forks_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
|
||||
"""When max_forks of an instance group is less than the capacity of instances, enforce max_forks."""
|
||||
instance = hybrid_instance
|
||||
controlplane_instance_group = instance.rampart_groups.first()
|
||||
controlplane_instance_group.max_forks = 15
|
||||
controlplane_instance_group.save()
|
||||
task_impact = 10
|
||||
num_jobs = 2
|
||||
# Sanity check that 2 jobs would run if not for the max_forks setting.
|
||||
assert controlplane_instance_group.max_forks < controlplane_instance_group.capacity
|
||||
assert task_impact * num_jobs > controlplane_instance_group.max_forks
|
||||
assert task_impact * num_jobs < controlplane_instance_group.capacity
|
||||
for i in range(num_jobs):
|
||||
create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
|
||||
tm = TaskManager()
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = task_impact
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -267,12 +337,12 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
assert len(pu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, [j], instance)
|
||||
TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, instance)
|
||||
pu[0].status = "successful"
|
||||
pu[0].save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -295,12 +365,12 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(iu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, [j], instance)
|
||||
TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, instance)
|
||||
iu[0].status = "successful"
|
||||
iu[0].save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -342,7 +412,7 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
|
||||
mock_iu.assert_not_called()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -372,9 +442,7 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.first()
|
||||
iu = ii.inventory_updates.first()
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(iu, controlplane_instance_group, [j1, j2], instance), mock.call(pu, controlplane_instance_group, [j1, j2], instance)]
|
||||
)
|
||||
TaskManager.start_task.assert_has_calls([mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)])
|
||||
pu.status = "successful"
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.save()
|
||||
@@ -383,9 +451,7 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
iu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
|
||||
)
|
||||
TaskManager.start_task.assert_has_calls([mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)])
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(pu) == 1
|
||||
@@ -409,7 +475,7 @@ def test_job_not_blocking_project_update(controlplane_instance_group, job_templa
|
||||
project_update.status = "pending"
|
||||
project_update.save()
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -433,7 +499,7 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp
|
||||
|
||||
DependencyManager().schedule()
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -121,8 +121,8 @@ def test_python_and_js_licenses():
|
||||
return errors
|
||||
|
||||
base_dir = settings.BASE_DIR
|
||||
api_licenses = index_licenses('%s/../docs/licenses' % base_dir)
|
||||
ui_licenses = index_licenses('%s/../docs/licenses/ui' % base_dir)
|
||||
api_licenses = index_licenses('%s/../licenses' % base_dir)
|
||||
ui_licenses = index_licenses('%s/../licenses/ui' % base_dir)
|
||||
api_requirements = read_api_requirements('%s/../requirements' % base_dir)
|
||||
ui_requirements = read_ui_requirements('%s/ui' % base_dir)
|
||||
|
||||
|
||||
@@ -5,8 +5,8 @@ import tempfile
|
||||
import shutil
|
||||
|
||||
from awx.main.tasks.jobs import RunJob
|
||||
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
|
||||
from awx.main.models import Instance, Job
|
||||
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files, handle_work_error
|
||||
from awx.main.models import Instance, Job, InventoryUpdate, ProjectUpdate
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -74,3 +74,17 @@ def test_does_not_run_reaped_job(mocker, mock_me):
|
||||
job.refresh_from_db()
|
||||
assert job.status == 'failed'
|
||||
mock_run.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_handle_work_error_nested(project, inventory_source):
|
||||
pu = ProjectUpdate.objects.create(status='failed', project=project, celery_task_id='1234')
|
||||
iu = InventoryUpdate.objects.create(status='pending', inventory_source=inventory_source, source='scm')
|
||||
job = Job.objects.create(status='pending')
|
||||
iu.dependent_jobs.add(pu)
|
||||
job.dependent_jobs.add(pu, iu)
|
||||
handle_work_error({'type': 'project_update', 'id': pu.id})
|
||||
iu.refresh_from_db()
|
||||
job.refresh_from_db()
|
||||
assert iu.job_explanation == f'Previous Task Failed: {{"job_type": "project_update", "job_name": "", "job_id": "{pu.id}"}}'
|
||||
assert job.job_explanation == f'Previous Task Failed: {{"job_type": "inventory_update", "job_name": "", "job_id": "{iu.id}"}}'
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
import pytest
|
||||
from unittest import mock
|
||||
from unittest.mock import Mock
|
||||
from decimal import Decimal
|
||||
|
||||
from awx.main.models import InstanceGroup, Instance
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
|
||||
from awx.main.models import Instance
|
||||
|
||||
|
||||
@pytest.mark.parametrize('capacity_adjustment', [0.0, 0.25, 0.5, 0.75, 1, 1.5, 3])
|
||||
@@ -17,83 +14,6 @@ def test_capacity_adjustment_no_save(capacity_adjustment):
|
||||
assert inst.capacity == (float(inst.capacity_adjustment) * abs(inst.mem_capacity - inst.cpu_capacity) + min(inst.mem_capacity, inst.cpu_capacity))
|
||||
|
||||
|
||||
def T(impact):
|
||||
j = mock.Mock(spec_set=['task_impact', 'capacity_type'])
|
||||
j.task_impact = impact
|
||||
j.capacity_type = 'execution'
|
||||
return j
|
||||
|
||||
|
||||
def Is(param):
|
||||
"""
|
||||
param:
|
||||
[remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
|
||||
[(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
|
||||
"""
|
||||
|
||||
instances = []
|
||||
if isinstance(param[0], tuple):
|
||||
for (jobs_running, capacity) in param:
|
||||
inst = Mock()
|
||||
inst.capacity = capacity
|
||||
inst.jobs_running = jobs_running
|
||||
inst.node_type = 'execution'
|
||||
instances.append(inst)
|
||||
else:
|
||||
for i in param:
|
||||
inst = Mock()
|
||||
inst.remaining_capacity = i
|
||||
inst.node_type = 'execution'
|
||||
instances.append(inst)
|
||||
return instances
|
||||
|
||||
|
||||
class TestInstanceGroup(object):
|
||||
@pytest.mark.parametrize(
|
||||
'task,instances,instance_fit_index,reason',
|
||||
[
|
||||
(T(100), Is([100]), 0, "Only one, pick it"),
|
||||
(T(100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
|
||||
(T(100), Is([50, 100]), 1, "First instance not as good as second instance"),
|
||||
(T(100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
|
||||
(T(100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
|
||||
],
|
||||
)
|
||||
def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
|
||||
InstanceGroup(id=10)
|
||||
tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances}})
|
||||
|
||||
instance_picked = tm_igs.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')
|
||||
|
||||
if instance_fit_index is None:
|
||||
assert instance_picked is None, reason
|
||||
else:
|
||||
assert instance_picked == instances[instance_fit_index], reason
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'instances,instance_fit_index,reason',
|
||||
[
|
||||
(Is([(0, 100)]), 0, "One idle instance, pick it"),
|
||||
(Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
|
||||
(Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
|
||||
(Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
|
||||
(Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
|
||||
],
|
||||
)
|
||||
def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
|
||||
def filter_offline_instances(*args):
|
||||
return filter(lambda i: i.capacity > 0, instances)
|
||||
|
||||
InstanceGroup(id=10)
|
||||
instances_online_only = filter_offline_instances(instances)
|
||||
tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances_online_only}})
|
||||
|
||||
if instance_fit_index is None:
|
||||
assert tm_igs.find_largest_idle_instance('controlplane') is None, reason
|
||||
else:
|
||||
assert tm_igs.find_largest_idle_instance('controlplane') == instances[instance_fit_index], reason
|
||||
|
||||
|
||||
def test_cleanup_params_defaults():
|
||||
inst = Instance(hostname='foobar')
|
||||
assert inst.get_cleanup_task_kwargs(exclude_strings=['awx_423_']) == {'exclude_strings': ['awx_423_'], 'file_pattern': '/tmp/awx_*_*', 'grace_period': 60}
|
||||
|
||||
@@ -36,15 +36,14 @@ def job(mocker, hosts, inventory):
|
||||
|
||||
def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
job.start_job_fact_cache(fact_cache, modified_times, 0)
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
|
||||
for host in hosts:
|
||||
filepath = os.path.join(fact_cache, host.name)
|
||||
assert os.path.exists(filepath)
|
||||
with open(filepath, 'r') as f:
|
||||
assert f.read() == json.dumps(host.ansible_facts)
|
||||
assert filepath in modified_times
|
||||
assert os.path.getmtime(filepath) <= last_modified
|
||||
|
||||
|
||||
def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
|
||||
@@ -58,18 +57,16 @@ def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
|
||||
)
|
||||
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
job.start_job_fact_cache(fact_cache, {}, 0)
|
||||
job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
# a file called "foo" should _not_ be written outside the facts dir
|
||||
assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
job.start_job_fact_cache(fact_cache, modified_times, 0)
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
|
||||
for h in hosts:
|
||||
h.save = mocker.Mock()
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
ansible_facts_new = {"foo": "bar"}
|
||||
filepath = os.path.join(fact_cache, hosts[1].name)
|
||||
@@ -83,23 +80,20 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
job.finish_job_fact_cache(fact_cache, modified_times)
|
||||
job.finish_job_fact_cache(fact_cache, last_modified)
|
||||
|
||||
for host in (hosts[0], hosts[2], hosts[3]):
|
||||
host.save.assert_not_called()
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified is None
|
||||
assert hosts[1].ansible_facts == ansible_facts_new
|
||||
hosts[1].save.assert_called_once_with(update_fields=['ansible_facts', 'ansible_facts_modified'])
|
||||
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
job.start_job_fact_cache(fact_cache, modified_times, 0)
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
|
||||
for h in hosts:
|
||||
h.save = mocker.Mock()
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
for h in hosts:
|
||||
filepath = os.path.join(fact_cache, h.name)
|
||||
@@ -109,26 +103,22 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
job.finish_job_fact_cache(fact_cache, modified_times)
|
||||
job.finish_job_fact_cache(fact_cache, last_modified)
|
||||
|
||||
for h in hosts:
|
||||
h.save.assert_not_called()
|
||||
bulk_update.assert_not_called()
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
job.start_job_fact_cache(fact_cache, modified_times, 0)
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
|
||||
for h in hosts:
|
||||
h.save = mocker.Mock()
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
os.remove(os.path.join(fact_cache, hosts[1].name))
|
||||
job.finish_job_fact_cache(fact_cache, modified_times)
|
||||
job.finish_job_fact_cache(fact_cache, last_modified)
|
||||
|
||||
for host in (hosts[0], hosts[2], hosts[3]):
|
||||
host.save.assert_not_called()
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified is None
|
||||
assert hosts[1].ansible_facts == {}
|
||||
hosts[1].save.assert_called_once_with()
|
||||
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
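Read together, these fact-cache tests imply the reworked API: start_job_fact_cache now returns a single cutoff timestamp instead of filling a per-file dict, and finish_job_fact_cache takes that timestamp back to decide which hosts changed. A usage sketch under that assumption; the wrapper function, path, and job argument are placeholders rather than code from this changeset.

def run_with_fact_cache(job, facts_dir='/tmp/awx_facts_example'):
    # write one JSON fact file per host and remember the cutoff timestamp
    last_modified = job.start_job_fact_cache(facts_dir, timeout=0)
    # ... the playbook runs with its fact cache pointed at facts_dir ...
    # save back only hosts whose fact files are newer than the cutoff
    job.finish_job_fact_cache(facts_dir, last_modified)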
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||
|
||||
|
||||
class FakeMeta(object):
|
||||
@@ -16,38 +16,64 @@ class FakeObject(object):
|
||||
|
||||
|
||||
class Job(FakeObject):
|
||||
task_impact = 43
|
||||
is_container_group_task = False
|
||||
controller_node = ''
|
||||
execution_node = ''
|
||||
def __init__(self, **kwargs):
|
||||
self.task_impact = kwargs.get('task_impact', 43)
|
||||
self.is_container_group_task = kwargs.get('is_container_group_task', False)
|
||||
self.controller_node = kwargs.get('controller_node', '')
|
||||
self.execution_node = kwargs.get('execution_node', '')
|
||||
self.instance_group = kwargs.get('instance_group', None)
|
||||
self.instance_group_id = self.instance_group.id if self.instance_group else None
|
||||
self.capacity_type = kwargs.get('capacity_type', 'execution')
|
||||
|
||||
def log_format(self):
|
||||
return 'job 382 (fake)'
|
||||
|
||||
|
||||
class Instances(FakeObject):
|
||||
def add(self, *args):
|
||||
for instance in args:
|
||||
self.obj.instance_list.append(instance)
|
||||
|
||||
def all(self):
|
||||
return self.obj.instance_list
|
||||
|
||||
|
||||
class InstanceGroup(FakeObject):
|
||||
def __init__(self, **kwargs):
|
||||
super(InstanceGroup, self).__init__(**kwargs)
|
||||
self.instance_list = []
|
||||
self.pk = self.id = kwargs.get('id', 1)
|
||||
|
||||
@property
|
||||
def instances(self):
|
||||
mgr = Instances(obj=self)
|
||||
return mgr
|
||||
|
||||
@property
|
||||
def is_container_group(self):
|
||||
return False
|
||||
|
||||
@property
|
||||
def max_concurrent_jobs(self):
|
||||
return 0
|
||||
|
||||
@property
|
||||
def max_forks(self):
|
||||
return 0
|
||||
|
||||
|
||||
class Instance(FakeObject):
|
||||
def __init__(self, **kwargs):
|
||||
self.node_type = kwargs.get('node_type', 'hybrid')
|
||||
self.capacity = kwargs.get('capacity', 0)
|
||||
self.hostname = kwargs.get('hostname', 'fakehostname')
|
||||
self.consumed_capacity = 0
|
||||
self.jobs_running = 0
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_cluster():
|
||||
def stand_up_cluster():
|
||||
class Instances(FakeObject):
|
||||
def add(self, *args):
|
||||
for instance in args:
|
||||
self.obj.instance_list.append(instance)
|
||||
|
||||
def all(self):
|
||||
return self.obj.instance_list
|
||||
|
||||
class InstanceGroup(FakeObject):
|
||||
def __init__(self, **kwargs):
|
||||
super(InstanceGroup, self).__init__(**kwargs)
|
||||
self.instance_list = []
|
||||
|
||||
@property
|
||||
def instances(self):
|
||||
mgr = Instances(obj=self)
|
||||
return mgr
|
||||
|
||||
class Instance(FakeObject):
|
||||
pass
|
||||
|
||||
ig_small = InstanceGroup(name='ig_small')
|
||||
ig_large = InstanceGroup(name='ig_large')
|
||||
@@ -66,14 +92,12 @@ def sample_cluster():
|
||||
@pytest.fixture
|
||||
def create_ig_manager():
|
||||
def _rf(ig_list, tasks):
|
||||
instances = TaskManagerInstances(tasks, instances=set(inst for ig in ig_list for inst in ig.instance_list))
|
||||
|
||||
seed_igs = {}
|
||||
for ig in ig_list:
|
||||
seed_igs[ig.name] = {'instances': [instances[inst.hostname] for inst in ig.instance_list]}
|
||||
|
||||
instance_groups = TaskManagerInstanceGroups(instance_groups=seed_igs)
|
||||
return instance_groups
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(
|
||||
tasks=tasks,
|
||||
instances=set(inst for ig in ig_list for inst in ig.instance_list),
|
||||
instance_groups=ig_list,
|
||||
)
|
||||
return tm_models.instance_groups
|
||||
|
||||
return _rf
|
||||
|
||||
@@ -126,3 +150,75 @@ def test_RBAC_reduced_filter(sample_cluster, create_ig_manager):
|
||||
# Cross-links between groups not visible to current user,
|
||||
# so a naive accounting of capacities is returned instead
|
||||
assert instance_groups_mgr.get_consumed_capacity('default') == 43
|
||||
|
||||
|
||||
def Is(param):
|
||||
"""
|
||||
param:
|
||||
[remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
|
||||
[(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
|
||||
"""
|
||||
|
||||
instances = []
|
||||
if isinstance(param[0], tuple):
|
||||
for index, (jobs_running, capacity) in enumerate(param):
|
||||
inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
|
||||
inst.jobs_running = jobs_running
|
||||
instances.append(inst)
|
||||
else:
|
||||
for index, capacity in enumerate(param):
|
||||
inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
|
||||
inst.node_type = 'execution'
|
||||
instances.append(inst)
|
||||
return instances
|
||||
|
||||
|
||||
class TestSelectBestInstanceForTask(object):
|
||||
@pytest.mark.parametrize(
|
||||
'task,instances,instance_fit_index,reason',
|
||||
[
|
||||
(Job(task_impact=100), Is([100]), 0, "Only one, pick it"),
|
||||
(Job(task_impact=100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
|
||||
(Job(task_impact=100), Is([50, 100]), 1, "First instance not as good as second instance"),
|
||||
(Job(task_impact=100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
|
||||
(Job(task_impact=100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
|
||||
],
|
||||
)
|
||||
def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
|
||||
ig = InstanceGroup(id=10, name='controlplane')
|
||||
tasks = []
|
||||
for instance in instances:
|
||||
ig.instances.add(instance)
|
||||
for _ in range(instance.jobs_running):
|
||||
tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
|
||||
instance_picked = tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')
|
||||
|
||||
if instance_fit_index is None:
|
||||
assert instance_picked is None, reason
|
||||
else:
|
||||
assert instance_picked.hostname == instances[instance_fit_index].hostname, reason
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'instances,instance_fit_index,reason',
|
||||
[
|
||||
(Is([(0, 100)]), 0, "One idle instance, pick it"),
|
||||
(Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
|
||||
(Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
|
||||
(Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
|
||||
(Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
|
||||
],
|
||||
)
|
||||
def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
|
||||
ig = InstanceGroup(id=10, name='controlplane')
|
||||
tasks = []
|
||||
for instance in instances:
|
||||
ig.instances.add(instance)
|
||||
for _ in range(instance.jobs_running):
|
||||
tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
|
||||
|
||||
if instance_fit_index is None:
|
||||
assert tm_models.instance_groups.find_largest_idle_instance('controlplane') is None, reason
|
||||
else:
|
||||
assert tm_models.instance_groups.find_largest_idle_instance('controlplane').hostname == instances[instance_fit_index].hostname, reason
|
||||
|
||||
@@ -90,6 +90,7 @@ __all__ = [
|
||||
'deepmerge',
|
||||
'get_event_partition_epoch',
|
||||
'cleanup_new_process',
|
||||
'log_excess_runtime',
|
||||
]
|
||||
|
||||
|
||||
@@ -1215,15 +1216,30 @@ def cleanup_new_process(func):
|
||||
return wrapper_cleanup_new_process
|
||||
|
||||
|
||||
def log_excess_runtime(func_logger, cutoff=5.0):
|
||||
def log_excess_runtime(func_logger, cutoff=5.0, debug_cutoff=5.0, msg=None, add_log_data=False):
|
||||
def log_excess_runtime_decorator(func):
|
||||
@functools.wraps(func)
|
||||
def _new_func(*args, **kwargs):
|
||||
start_time = time.time()
|
||||
return_value = func(*args, **kwargs)
|
||||
delta = time.time() - start_time
|
||||
if delta > cutoff:
|
||||
logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
|
||||
log_data = {'name': repr(func.__name__)}
|
||||
|
||||
if add_log_data:
|
||||
return_value = func(*args, log_data=log_data, **kwargs)
|
||||
else:
|
||||
return_value = func(*args, **kwargs)
|
||||
|
||||
log_data['delta'] = time.time() - start_time
|
||||
if isinstance(return_value, dict):
|
||||
log_data.update(return_value)
|
||||
|
||||
if msg is None:
|
||||
record_msg = 'Running {name} took {delta:.2f}s'
|
||||
else:
|
||||
record_msg = msg
|
||||
if log_data['delta'] > cutoff:
|
||||
func_logger.info(record_msg.format(**log_data))
|
||||
elif log_data['delta'] > debug_cutoff:
|
||||
func_logger.debug(record_msg.format(**log_data))
|
||||
return return_value
|
||||
|
||||
return _new_func
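A usage sketch for the extended decorator; the import path is assumed from the __all__ hunk above, and the logger, message, and wrapped function are invented for illustration. With add_log_data=True the wrapped function receives a log_data dict to populate, and msg is formatted with that dict plus the automatic name and delta keys.

import logging

from awx.main.utils.common import log_excess_runtime  # module path assumed from the __all__ hunk above

example_logger = logging.getLogger('awx.analytics.performance')  # any logger works

@log_excess_runtime(
    example_logger,
    debug_cutoff=0.5,
    cutoff=5.0,
    msg='Cleanup of {name} took {delta:.2f}s, removed {removed} rows',
    add_log_data=True,
)
def cleanup_stale_rows(log_data):
    log_data['removed'] = 0
    # ... do the work, updating log_data['removed'] as rows are deleted ...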
|
||||
|
||||
@@ -103,6 +103,10 @@ ColorHandler = logging.StreamHandler
|
||||
if settings.COLOR_LOGS is True:
|
||||
try:
|
||||
from logutils.colorize import ColorizingStreamHandler
|
||||
import colorama
|
||||
|
||||
colorama.deinit()
|
||||
colorama.init(wrap=False, convert=False, strip=False)
|
||||
|
||||
class ColorHandler(ColorizingStreamHandler):
|
||||
def colorize(self, line, record):
|
||||
|
||||
@@ -118,7 +118,7 @@ class WebsocketTask:
|
||||
logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
|
||||
except Exception as e:
|
||||
# Early on, this is our canary. I'm not sure what exceptions we can really encounter.
|
||||
logger.warning(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
|
||||
logger.exception(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
|
||||
else:
|
||||
logger.warning(f"Connection from {self.name} to {self.remote_host} list.")
|
||||
|
||||
|
||||
@@ -304,11 +304,13 @@ INSTALLED_APPS = [
|
||||
'django.contrib.messages',
|
||||
'django.contrib.sessions',
|
||||
'django.contrib.sites',
|
||||
# daphne has to be installed before django.contrib.staticfiles for the app to startup
|
||||
# According to channels 4.0 docs you install daphne instead of channels now
|
||||
'daphne',
|
||||
'django.contrib.staticfiles',
|
||||
'oauth2_provider',
|
||||
'rest_framework',
|
||||
'django_extensions',
|
||||
'channels',
|
||||
'polymorphic',
|
||||
'taggit',
|
||||
'social_django',
|
||||
@@ -416,6 +418,9 @@ AUTH_BASIC_ENABLED = True
|
||||
# when trying to access a UI page that requires authentication.
|
||||
LOGIN_REDIRECT_OVERRIDE = ''
|
||||
|
||||
# Note: This setting may be overridden by database settings.
|
||||
ALLOW_METRICS_FOR_ANONYMOUS_USERS = False
|
||||
|
||||
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
|
||||
DEVSERVER_DEFAULT_PORT = '8013'
|
||||
|
||||
@@ -851,6 +856,7 @@ LOGGING = {
|
||||
'awx.main.signals': {'level': 'INFO'}, # very verbose debug-level logs
|
||||
'awx.api.permissions': {'level': 'INFO'}, # very verbose debug-level logs
|
||||
'awx.analytics': {'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False},
|
||||
'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsbroadcast', 'external_logger'], 'level': 'INFO', 'propagate': False},
|
||||
'awx.analytics.performance': {'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', 'propagate': False},
|
||||
'awx.analytics.job_lifecycle': {'handlers': ['console', 'job_lifecycle'], 'level': 'DEBUG', 'propagate': False},
|
||||
'django_auth_ldap': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG'},
|
||||
@@ -983,6 +989,13 @@ DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}
|
||||
DEFAULT_EXECUTION_QUEUE_NAME = 'default'
|
||||
# pod spec used when the default execution queue is a container group, e.g. when deploying on k8s/ocp with the operator
|
||||
DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE = ''
|
||||
# Max number of concurrently consumed forks for the default execution queue
|
||||
# Zero means no limit
|
||||
DEFAULT_EXECUTION_QUEUE_MAX_FORKS = 0
|
||||
# Max number of concurrently running jobs for the default execution queue
|
||||
# Zero means no limit
|
||||
DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS = 0
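# A deployment could tighten these in a local settings override, for example
# (hypothetical values, not defaults shipped by this changeset):
#   DEFAULT_EXECUTION_QUEUE_MAX_FORKS = 100
#   DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS = 20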
|
||||
|
||||
# Name of the default controlplane queue
|
||||
DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane'
|
||||
|
||||
|
||||
@@ -114,7 +114,7 @@ if 'sqlite3' not in DATABASES['default']['ENGINE']: # noqa
|
||||
# this needs to stay at the bottom of this file
|
||||
try:
|
||||
if os.getenv('AWX_KUBE_DEVEL', False):
|
||||
include(optional('minikube.py'), scope=locals())
|
||||
include(optional('development_kube.py'), scope=locals())
|
||||
else:
|
||||
include(optional('local_*.py'), scope=locals())
|
||||
except ImportError:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖'
|
||||
BROADCAST_WEBSOCKET_PORT = 8013
|
||||
BROADCAST_WEBSOCKET_PORT = 8052
|
||||
BROADCAST_WEBSOCKET_VERIFY_CERT = False
|
||||
BROADCAST_WEBSOCKET_PROTOCOL = 'http'
|
||||
@@ -452,7 +452,10 @@ def on_populate_user(sender, **kwargs):
|
||||
remove = bool(team_opts.get('remove', True))
|
||||
state = _update_m2m_from_groups(ldap_user, users_opts, remove)
|
||||
if state is not None:
|
||||
desired_team_states[team_name] = {'member_role': state}
|
||||
organization = team_opts['organization']
|
||||
if organization not in desired_team_states:
|
||||
desired_team_states[organization] = {}
|
||||
desired_team_states[organization][team_name] = {'member_role': state}
|
||||
|
||||
# Check if user.profile is available, otherwise force user.save()
|
||||
try:
|
||||
@@ -473,16 +476,28 @@ def on_populate_user(sender, **kwargs):
|
||||
|
||||
|
||||
def reconcile_users_org_team_mappings(user, desired_org_states, desired_team_states, source):
|
||||
#
|
||||
# Arguments:
|
||||
# user - a user object
|
||||
# desired_org_states: { '<org_name>': { '<role>': <boolean> or None } }
|
||||
# desired_team_states: { '<org_name>': { '<team name>': { '<role>': <boolean> or None } } }
|
||||
# source - a text label indicating the "authentication adapter" for debug messages
|
||||
#
|
||||
# This function will load the user's existing roles and then, based on the desired states, modify the user's roles
|
||||
# True indicates the user needs to be a member of the role
|
||||
# False indicates the user should not be a member of the role
|
||||
# None means this function should not change the user's membership of a role
|
||||
#
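# Example shapes, with made-up names purely for illustration (not taken from this changeset):
#   desired_org_states  = {'Engineering': {'member_role': True, 'admin_role': None}}
#   desired_team_states = {'Engineering': {'Net Team': {'member_role': False}}}
# Note that team states are keyed by organization name first, then team name.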
|
||||
from awx.main.models import Organization, Team
|
||||
|
||||
content_types = []
|
||||
reconcile_items = []
|
||||
if desired_org_states:
|
||||
content_types.append(ContentType.objects.get_for_model(Organization))
|
||||
reconcile_items.append(('organization', desired_org_states, Organization))
|
||||
reconcile_items.append(('organization', desired_org_states))
|
||||
if desired_team_states:
|
||||
content_types.append(ContentType.objects.get_for_model(Team))
|
||||
reconcile_items.append(('team', desired_team_states, Team))
|
||||
reconcile_items.append(('team', desired_team_states))
|
||||
|
||||
if not content_types:
|
||||
# If both desired states were empty we can simply return because there is nothing to reconcile
|
||||
@@ -491,24 +506,39 @@ def reconcile_users_org_team_mappings(user, desired_org_states, desired_team_sta
|
||||
# users_roles is a flat set of IDs
|
||||
users_roles = set(user.roles.filter(content_type__in=content_types).values_list('pk', flat=True))
|
||||
|
||||
for object_type, desired_states, model in reconcile_items:
|
||||
# Get all of the roles in the desired states for efficient DB extraction
|
||||
for object_type, desired_states in reconcile_items:
|
||||
roles = []
|
||||
for sub_dict in desired_states.values():
|
||||
for role_name in sub_dict:
|
||||
if sub_dict[role_name] is None:
|
||||
continue
|
||||
if role_name not in roles:
|
||||
roles.append(role_name)
|
||||
|
||||
# Get a set of named tuples for the org/team name plus all of the roles we got above
|
||||
model_roles = model.objects.filter(name__in=desired_states.keys()).values_list('name', *roles, named=True)
|
||||
if object_type == 'organization':
|
||||
for sub_dict in desired_states.values():
|
||||
for role_name in sub_dict:
|
||||
if sub_dict[role_name] is None:
|
||||
continue
|
||||
if role_name not in roles:
|
||||
roles.append(role_name)
|
||||
model_roles = Organization.objects.filter(name__in=desired_states.keys()).values_list('name', *roles, named=True)
|
||||
else:
|
||||
team_names = []
|
||||
for teams_dict in desired_states.values():
|
||||
team_names.extend(teams_dict.keys())
|
||||
for sub_dict in teams_dict.values():
|
||||
for role_name in sub_dict:
|
||||
if sub_dict[role_name] is None:
|
||||
continue
|
||||
if role_name not in roles:
|
||||
roles.append(role_name)
|
||||
model_roles = Team.objects.filter(name__in=team_names).values_list('name', 'organization__name', *roles, named=True)
|
||||
|
||||
for row in model_roles:
|
||||
for role_name in roles:
|
||||
desired_state = desired_states.get(row.name, {})
|
||||
if desired_state[role_name] is None:
|
||||
if object_type == 'organization':
|
||||
desired_state = desired_states.get(row.name, {})
|
||||
else:
|
||||
desired_state = desired_states.get(row.organization__name, {}).get(row.name, {})
|
||||
|
||||
if desired_state.get(role_name, None) is None:
|
||||
# The mapping was not defined for this [org/team]/role so we can just pass
|
||||
pass
|
||||
continue
|
||||
|
||||
# If somehow the auth adapter knows about an items role but that role is not defined in the DB we are going to print a pretty error
|
||||
# This is your classic safety net that we should never hit; but here you are reading this comment... good luck and Godspeed.
|
||||
|
||||
awx/ui/package-lock.json (generated, 79 changes)
@@ -8,7 +8,7 @@
|
||||
"dependencies": {
|
||||
"@lingui/react": "3.14.0",
|
||||
"@patternfly/patternfly": "4.217.1",
|
||||
"@patternfly/react-core": "^4.250.1",
|
||||
"@patternfly/react-core": "^4.264.0",
|
||||
"@patternfly/react-icons": "4.92.10",
|
||||
"@patternfly/react-table": "4.108.0",
|
||||
"ace-builds": "^1.10.1",
|
||||
@@ -22,7 +22,7 @@
|
||||
"has-ansi": "5.0.1",
|
||||
"html-entities": "2.3.2",
|
||||
"js-yaml": "4.1.0",
|
||||
"luxon": "^3.0.3",
|
||||
"luxon": "^3.1.1",
|
||||
"prop-types": "^15.8.1",
|
||||
"react": "17.0.2",
|
||||
"react-ace": "^10.1.0",
|
||||
@@ -3752,13 +3752,13 @@
|
||||
"integrity": "sha512-uN7JgfQsyR16YHkuGRCTIcBcnyKIqKjGkB2SGk9x1XXH3yYGenL83kpAavX9Xtozqp17KppOlybJuzcKvZMrgw=="
|
||||
},
|
||||
"node_modules/@patternfly/react-core": {
|
||||
"version": "4.250.1",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.250.1.tgz",
|
||||
"integrity": "sha512-vAOZPQdZzYXl/vkHnHMIt1eC3nrPDdsuuErPatkNPwmSvilXuXmWP5wxoJ36FbSNRRURkprFwx52zMmWS3iHJA==",
|
||||
"version": "4.264.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.264.0.tgz",
|
||||
"integrity": "sha512-tK0BMWxw8nhukev40HZ6q6d02pDnjX7oyA91vHa18aakJUKBWMaerqpG4NZVMoh0tPKX3aLNj+zyCwDALFAZZw==",
|
||||
"dependencies": {
|
||||
"@patternfly/react-icons": "^4.92.6",
|
||||
"@patternfly/react-styles": "^4.91.6",
|
||||
"@patternfly/react-tokens": "^4.93.6",
|
||||
"@patternfly/react-icons": "^4.93.0",
|
||||
"@patternfly/react-styles": "^4.92.0",
|
||||
"@patternfly/react-tokens": "^4.94.0",
|
||||
"focus-trap": "6.9.2",
|
||||
"react-dropzone": "9.0.0",
|
||||
"tippy.js": "5.1.2",
|
||||
@@ -3769,6 +3769,15 @@
|
||||
"react-dom": "^16.8 || ^17 || ^18"
|
||||
}
|
||||
},
|
||||
"node_modules/@patternfly/react-core/node_modules/@patternfly/react-icons": {
|
||||
"version": "4.93.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.93.0.tgz",
|
||||
"integrity": "sha512-OH0vORVioL+HLWMEog8/3u8jsiMCeJ0pFpvRKRhy5Uk4CdAe40k1SOBvXJP6opr+O8TLbz0q3bm8Jsh/bPaCuQ==",
|
||||
"peerDependencies": {
|
||||
"react": "^16.8 || ^17 || ^18",
|
||||
"react-dom": "^16.8 || ^17 || ^18"
|
||||
}
|
||||
},
|
||||
"node_modules/@patternfly/react-core/node_modules/tslib": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz",
|
||||
@@ -3784,9 +3793,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@patternfly/react-styles": {
|
||||
"version": "4.91.10",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.91.10.tgz",
|
||||
"integrity": "sha512-fAG4Vjp63ohiR92F4e/Gkw5q1DSSckHKqdnEF75KUpSSBORzYP0EKMpupSd6ItpQFJw3iWs3MJi3/KIAAfU1Jw=="
|
||||
"version": "4.92.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.92.0.tgz",
|
||||
"integrity": "sha512-B/f6iyu8UEN1+wRxdC4sLIhvJeyL8SqInDXZmwOIqK8uPJ8Lze7qrbVhkkVzbMF37/oDPVa6dZH8qZFq062LEA=="
|
||||
},
|
||||
"node_modules/@patternfly/react-table": {
|
||||
"version": "4.108.0",
|
||||
@@ -3811,9 +3820,9 @@
|
||||
"integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
|
||||
},
|
||||
"node_modules/@patternfly/react-tokens": {
|
||||
"version": "4.93.10",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.93.10.tgz",
|
||||
"integrity": "sha512-F+j1irDc9M6zvY6qNtDryhbpnHz3R8ymHRdGelNHQzPTIK88YSWEnT1c9iUI+uM/iuZol7sJmO5STtg2aPIDRQ=="
|
||||
"version": "4.94.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.94.0.tgz",
|
||||
"integrity": "sha512-fYXxUJZnzpn89K2zzHF0cSncZZVGKrohdb5f5T1wzxwU2NZPVGpvr88xhm+V2Y/fSrrTPwXcP3IIdtNOOtJdZw=="
|
||||
},
|
||||
"node_modules/@pmmmwh/react-refresh-webpack-plugin": {
|
||||
"version": "0.5.4",
|
||||
@@ -15468,9 +15477,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/luxon": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz",
|
||||
"integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w==",
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.1.1.tgz",
|
||||
"integrity": "sha512-Ah6DloGmvseB/pX1cAmjbFvyU/pKuwQMQqz7d0yvuDlVYLTs2WeDHQMpC8tGjm1da+BriHROW/OEIT/KfYg6xw==",
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
@@ -25094,19 +25103,25 @@
|
||||
"integrity": "sha512-uN7JgfQsyR16YHkuGRCTIcBcnyKIqKjGkB2SGk9x1XXH3yYGenL83kpAavX9Xtozqp17KppOlybJuzcKvZMrgw=="
|
||||
},
|
||||
"@patternfly/react-core": {
|
||||
"version": "4.250.1",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.250.1.tgz",
|
||||
"integrity": "sha512-vAOZPQdZzYXl/vkHnHMIt1eC3nrPDdsuuErPatkNPwmSvilXuXmWP5wxoJ36FbSNRRURkprFwx52zMmWS3iHJA==",
|
||||
"version": "4.264.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.264.0.tgz",
|
||||
"integrity": "sha512-tK0BMWxw8nhukev40HZ6q6d02pDnjX7oyA91vHa18aakJUKBWMaerqpG4NZVMoh0tPKX3aLNj+zyCwDALFAZZw==",
|
||||
"requires": {
|
||||
"@patternfly/react-icons": "^4.92.6",
|
||||
"@patternfly/react-styles": "^4.91.6",
|
||||
"@patternfly/react-tokens": "^4.93.6",
|
||||
"@patternfly/react-icons": "^4.93.0",
|
||||
"@patternfly/react-styles": "^4.92.0",
|
||||
"@patternfly/react-tokens": "^4.94.0",
|
||||
"focus-trap": "6.9.2",
|
||||
"react-dropzone": "9.0.0",
|
||||
"tippy.js": "5.1.2",
|
||||
"tslib": "^2.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@patternfly/react-icons": {
|
||||
"version": "4.93.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.93.0.tgz",
|
||||
"integrity": "sha512-OH0vORVioL+HLWMEog8/3u8jsiMCeJ0pFpvRKRhy5Uk4CdAe40k1SOBvXJP6opr+O8TLbz0q3bm8Jsh/bPaCuQ==",
|
||||
"requires": {}
|
||||
},
|
||||
"tslib": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz",
|
||||
@@ -25121,9 +25136,9 @@
|
||||
"requires": {}
|
||||
},
|
||||
"@patternfly/react-styles": {
|
||||
"version": "4.91.10",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.91.10.tgz",
|
||||
"integrity": "sha512-fAG4Vjp63ohiR92F4e/Gkw5q1DSSckHKqdnEF75KUpSSBORzYP0EKMpupSd6ItpQFJw3iWs3MJi3/KIAAfU1Jw=="
|
||||
"version": "4.92.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.92.0.tgz",
|
||||
"integrity": "sha512-B/f6iyu8UEN1+wRxdC4sLIhvJeyL8SqInDXZmwOIqK8uPJ8Lze7qrbVhkkVzbMF37/oDPVa6dZH8qZFq062LEA=="
|
||||
},
|
||||
"@patternfly/react-table": {
|
||||
"version": "4.108.0",
|
||||
@@ -25146,9 +25161,9 @@
|
||||
}
|
||||
},
|
||||
"@patternfly/react-tokens": {
|
||||
"version": "4.93.10",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.93.10.tgz",
|
||||
"integrity": "sha512-F+j1irDc9M6zvY6qNtDryhbpnHz3R8ymHRdGelNHQzPTIK88YSWEnT1c9iUI+uM/iuZol7sJmO5STtg2aPIDRQ=="
|
||||
"version": "4.94.0",
|
||||
"resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.94.0.tgz",
|
||||
"integrity": "sha512-fYXxUJZnzpn89K2zzHF0cSncZZVGKrohdb5f5T1wzxwU2NZPVGpvr88xhm+V2Y/fSrrTPwXcP3IIdtNOOtJdZw=="
|
||||
},
|
||||
"@pmmmwh/react-refresh-webpack-plugin": {
|
||||
"version": "0.5.4",
|
||||
@@ -34210,9 +34225,9 @@
|
||||
}
|
||||
},
|
||||
"luxon": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz",
|
||||
"integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w=="
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.1.1.tgz",
|
||||
"integrity": "sha512-Ah6DloGmvseB/pX1cAmjbFvyU/pKuwQMQqz7d0yvuDlVYLTs2WeDHQMpC8tGjm1da+BriHROW/OEIT/KfYg6xw=="
|
||||
},
|
||||
"lz-string": {
|
||||
"version": "1.4.4",
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"dependencies": {
|
||||
"@lingui/react": "3.14.0",
|
||||
"@patternfly/patternfly": "4.217.1",
|
||||
"@patternfly/react-core": "^4.250.1",
|
||||
"@patternfly/react-core": "^4.264.0",
|
||||
"@patternfly/react-icons": "4.92.10",
|
||||
"@patternfly/react-table": "4.108.0",
|
||||
"ace-builds": "^1.10.1",
|
||||
@@ -22,7 +22,7 @@
|
||||
"has-ansi": "5.0.1",
|
||||
"html-entities": "2.3.2",
|
||||
"js-yaml": "4.1.0",
|
||||
"luxon": "^3.0.3",
|
||||
"luxon": "^3.1.1",
|
||||
"prop-types": "^15.8.1",
|
||||
"react": "17.0.2",
|
||||
"react-ace": "^10.1.0",
|
||||
|
||||
@@ -20,6 +20,10 @@ class Hosts extends Base {
|
||||
return this.http.get(`${this.baseUrl}${id}/all_groups/`, { params });
|
||||
}
|
||||
|
||||
readGroups(id, params) {
|
||||
return this.http.get(`${this.baseUrl}${id}/groups/`, { params });
|
||||
}
|
||||
|
||||
readGroupsOptions(id) {
|
||||
return this.http.options(`${this.baseUrl}${id}/groups/`);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,49 @@
|
||||
import React from 'react';
|
||||
import { arrayOf, bool, number, shape, string } from 'prop-types';
|
||||
|
||||
import { Label, LabelGroup } from '@patternfly/react-core';
|
||||
import { Link } from 'react-router-dom';
|
||||
|
||||
function InstanceGroupLabels({ labels, isLinkable }) {
|
||||
const buildLinkURL = (isContainerGroup) =>
|
||||
isContainerGroup
|
||||
? '/instance_groups/container_group/'
|
||||
: '/instance_groups/';
|
||||
return (
|
||||
<LabelGroup numLabels={5}>
|
||||
{labels.map(({ id, name, is_container_group }) =>
|
||||
isLinkable ? (
|
||||
<Label
|
||||
color="blue"
|
||||
key={id}
|
||||
render={({ className, content, componentRef }) => (
|
||||
<Link
|
||||
className={className}
|
||||
innerRef={componentRef}
|
||||
to={`${buildLinkURL(is_container_group)}${id}/details`}
|
||||
>
|
||||
{content}
|
||||
</Link>
|
||||
)}
|
||||
>
|
||||
{name}
|
||||
</Label>
|
||||
) : (
|
||||
<Label color="blue" key={id}>
|
||||
{name}
|
||||
</Label>
|
||||
)
|
||||
)}
|
||||
</LabelGroup>
|
||||
);
|
||||
}
|
||||
|
||||
InstanceGroupLabels.propTypes = {
|
||||
labels: arrayOf(shape({ id: number.isRequired, name: string.isRequired }))
|
||||
.isRequired,
|
||||
isLinkable: bool,
|
||||
};
|
||||
|
||||
InstanceGroupLabels.defaultProps = { isLinkable: false };
|
||||
|
||||
export default InstanceGroupLabels;
|
||||
awx/ui/src/components/InstanceGroupLabels/index.js (new file, 1 line)
@@ -0,0 +1 @@
|
||||
export { default } from './InstanceGroupLabels';
|
||||
@@ -153,6 +153,10 @@ function CredentialsStep({
|
||||
}))}
|
||||
value={selectedType && selectedType.id}
|
||||
onChange={(e, id) => {
|
||||
// Reset query params when the category of credentials is changed
|
||||
history.replace({
|
||||
search: '',
|
||||
});
|
||||
setSelectedType(types.find((o) => o.id === parseInt(id, 10)));
|
||||
}}
|
||||
/>
|
||||
|
||||
@@ -3,6 +3,7 @@ import { act } from 'react-dom/test-utils';
|
||||
import { Formik } from 'formik';
|
||||
import { CredentialsAPI, CredentialTypesAPI } from 'api';
|
||||
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
|
||||
import { createMemoryHistory } from 'history';
|
||||
import CredentialsStep from './CredentialsStep';
|
||||
|
||||
jest.mock('../../../api/models/CredentialTypes');
|
||||
@@ -164,6 +165,41 @@ describe('CredentialsStep', () => {
|
||||
});
|
||||
});
|
||||
|
||||
test('should reset query params (credential.page) when selected credential type is changed', async () => {
|
||||
let wrapper;
|
||||
const history = createMemoryHistory({
|
||||
initialEntries: ['?credential.page=2'],
|
||||
});
|
||||
await act(async () => {
|
||||
wrapper = mountWithContexts(
|
||||
<Formik>
|
||||
<CredentialsStep allowCredentialsWithPasswords />
|
||||
</Formik>,
|
||||
{
|
||||
context: { router: { history } },
|
||||
}
|
||||
);
|
||||
});
|
||||
wrapper.update();
|
||||
|
||||
expect(CredentialsAPI.read).toHaveBeenCalledWith({
|
||||
credential_type: 1,
|
||||
order_by: 'name',
|
||||
page: 2,
|
||||
page_size: 5,
|
||||
});
|
||||
|
||||
await act(async () => {
|
||||
wrapper.find('AnsibleSelect').invoke('onChange')({}, 3);
|
||||
});
|
||||
expect(CredentialsAPI.read).toHaveBeenCalledWith({
|
||||
credential_type: 3,
|
||||
order_by: 'name',
|
||||
page: 1,
|
||||
page_size: 5,
|
||||
});
|
||||
});
|
||||
|
||||
test("error should be shown when a credential that prompts for passwords is selected on a step that doesn't allow it", async () => {
|
||||
let wrapper;
|
||||
await act(async () => {
|
||||
|
||||
@@ -173,6 +173,10 @@ function MultiCredentialsLookup({
|
||||
}))}
|
||||
value={selectedType && selectedType.id}
|
||||
onChange={(e, id) => {
|
||||
// Reset query params when the category of credentials is changed
|
||||
history.replace({
|
||||
search: '',
|
||||
});
|
||||
setSelectedType(
|
||||
credentialTypes.find((o) => o.id === parseInt(id, 10))
|
||||
);
|
||||
|
||||
@@ -6,6 +6,7 @@ import {
|
||||
mountWithContexts,
|
||||
waitForElement,
|
||||
} from '../../../testUtils/enzymeHelpers';
|
||||
import { createMemoryHistory } from 'history';
|
||||
import MultiCredentialsLookup from './MultiCredentialsLookup';
|
||||
|
||||
jest.mock('../../api');
|
||||
@@ -228,6 +229,53 @@ describe('<Formik><MultiCredentialsLookup /></Formik>', () => {
|
||||
]);
|
||||
});
|
||||
|
||||
test('should reset query params (credentials.page) when selected credential type is changed', async () => {
|
||||
const history = createMemoryHistory({
|
||||
initialEntries: ['?credentials.page=2'],
|
||||
});
|
||||
await act(async () => {
|
||||
wrapper = mountWithContexts(
|
||||
<Formik>
|
||||
<MultiCredentialsLookup
|
||||
value={credentials}
|
||||
tooltip="This is credentials look up"
|
||||
onChange={() => {}}
|
||||
onError={() => {}}
|
||||
/>
|
||||
</Formik>,
|
||||
{
|
||||
context: { router: { history } },
|
||||
}
|
||||
);
|
||||
});
|
||||
const searchButton = await waitForElement(
|
||||
wrapper,
|
||||
'Button[aria-label="Search"]'
|
||||
);
|
||||
await act(async () => {
|
||||
searchButton.invoke('onClick')();
|
||||
});
|
||||
expect(CredentialsAPI.read).toHaveBeenCalledWith({
|
||||
credential_type: 400,
|
||||
order_by: 'name',
|
||||
page: 2,
|
||||
page_size: 5,
|
||||
});
|
||||
|
||||
const select = await waitForElement(wrapper, 'AnsibleSelect');
|
||||
await act(async () => {
|
||||
select.invoke('onChange')({}, 500);
|
||||
});
|
||||
wrapper.update();
|
||||
|
||||
expect(CredentialsAPI.read).toHaveBeenCalledWith({
|
||||
credential_type: 500,
|
||||
order_by: 'name',
|
||||
page: 1,
|
||||
page_size: 5,
|
||||
});
|
||||
});
|
||||
|
||||
test('should only add 1 credential per credential type except vault(see below)', async () => {
|
||||
const onChange = jest.fn();
|
||||
await act(async () => {
|
||||
|
||||
@@ -6,6 +6,7 @@ import { Link } from 'react-router-dom';
|
||||
import styled from 'styled-components';
|
||||
import { Chip, Divider, Title } from '@patternfly/react-core';
|
||||
import { toTitleCase } from 'util/strings';
|
||||
import InstanceGroupLabels from 'components/InstanceGroupLabels';
|
||||
import CredentialChip from '../CredentialChip';
|
||||
import ChipGroup from '../ChipGroup';
|
||||
import { DetailList, Detail, UserDateDetail } from '../DetailList';
|
||||
@@ -227,21 +228,7 @@ function PromptDetail({
|
||||
label={t`Instance Groups`}
|
||||
rows={4}
|
||||
value={
|
||||
<ChipGroup
|
||||
numChips={5}
|
||||
totalChips={overrides.instance_groups.length}
|
||||
ouiaId="prompt-instance-groups-chips"
|
||||
>
|
||||
{overrides.instance_groups.map((instance_group) => (
|
||||
<Chip
|
||||
key={instance_group.id}
|
||||
ouiaId={`instance-group-${instance_group.id}-chip`}
|
||||
isReadOnly
|
||||
>
|
||||
{instance_group.name}
|
||||
</Chip>
|
||||
))}
|
||||
</ChipGroup>
|
||||
<InstanceGroupLabels labels={overrides.instance_groups} />
|
||||
}
|
||||
/>
|
||||
)}
|
||||
|
||||
@@ -34,8 +34,14 @@ const QS_CONFIG = getQSConfig('template', {
|
||||
order_by: 'name',
|
||||
});
|
||||
|
||||
function RelatedTemplateList({ searchParams, projectName = null }) {
|
||||
const { id: projectId } = useParams();
|
||||
const resources = {
|
||||
projects: 'project',
|
||||
inventories: 'inventory',
|
||||
credentials: 'credentials',
|
||||
};
|
||||
|
||||
function RelatedTemplateList({ searchParams, resourceName = null }) {
|
||||
const { id } = useParams();
|
||||
const location = useLocation();
|
||||
const { addToast, Toast, toastProps } = useToast();
|
||||
|
||||
@@ -129,12 +135,19 @@ function RelatedTemplateList({ searchParams, projectName = null }) {
|
||||
actions && Object.prototype.hasOwnProperty.call(actions, 'POST');
|
||||
|
||||
let linkTo = '';
|
||||
|
||||
if (projectName) {
|
||||
const qs = encodeQueryString({
|
||||
project_id: projectId,
|
||||
project_name: projectName,
|
||||
});
|
||||
if (resourceName) {
|
||||
const queryString = {
|
||||
resource_id: id,
|
||||
resource_name: resourceName,
|
||||
resource_type: resources[location.pathname.split('/')[1]],
|
||||
resource_kind: null,
|
||||
};
|
||||
if (Array.isArray(resourceName)) {
|
||||
const [name, kind] = resourceName;
|
||||
queryString.resource_name = name;
|
||||
queryString.resource_kind = kind;
|
||||
}
|
||||
const qs = encodeQueryString(queryString);
|
||||
linkTo = `/templates/job_template/add/?${qs}`;
|
||||
} else {
|
||||
linkTo = '/templates/job_template/add';
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
/* eslint-disable import/prefer-default-export */
|
||||
@@ -10,6 +10,7 @@ import useRequest, { useDismissableError } from 'hooks/useRequest';
import { JobTemplatesAPI, SchedulesAPI, WorkflowJobTemplatesAPI } from 'api';
import { parseVariableField, jsonToYaml } from 'util/yaml';
import { useConfig } from 'contexts/Config';
import InstanceGroupLabels from 'components/InstanceGroupLabels';
import parseRuleObj from '../shared/parseRuleObj';
import FrequencyDetails from './FrequencyDetails';
import AlertModal from '../../AlertModal';
@@ -27,11 +28,6 @@ import { VariablesDetail } from '../../CodeEditor';
import { VERBOSITY } from '../../VerbositySelectField';
import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext';

const buildLinkURL = (instance) =>
  instance.is_container_group
    ? '/instance_groups/container_group/'
    : '/instance_groups/';

const PromptDivider = styled(Divider)`
  margin-top: var(--pf-global--spacer--lg);
  margin-bottom: var(--pf-global--spacer--lg);
@@ -498,26 +494,7 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
        fullWidth
        label={t`Instance Groups`}
        value={
          <ChipGroup
            numChips={5}
            totalChips={instanceGroups.length}
            ouiaId="instance-group-chips"
          >
            {instanceGroups.map((ig) => (
              <Link
                to={`${buildLinkURL(ig)}${ig.id}/details`}
                key={ig.id}
              >
                <Chip
                  key={ig.id}
                  ouiaId={`instance-group-${ig.id}-chip`}
                  isReadOnly
                >
                  {ig.name}
                </Chip>
              </Link>
            ))}
          </ChipGroup>
          <InstanceGroupLabels labels={instanceGroups} isLinkable />
        }
        isEmpty={instanceGroups.length === 0}
      />
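The removed buildLinkURL helper and the Chip/Link markup are what InstanceGroupLabels now encapsulates. Below is a rough sketch of the kind of rendering such a component performs, assuming only the labels and isLinkable props visible in this diff; the real components/InstanceGroupLabels implementation is not shown here and may differ.

// Illustrative sketch only; not the actual components/InstanceGroupLabels source.
import React from 'react';
import { Link } from 'react-router-dom';

function InstanceGroupLabelsSketch({ labels, isLinkable = false }) {
  // Container groups live under a different URL segment than plain instance groups,
  // matching what the removed buildLinkURL helper used to decide.
  const detailsURL = (ig) =>
    ig.is_container_group
      ? `/instance_groups/container_group/${ig.id}/details`
      : `/instance_groups/${ig.id}/details`;

  return (
    <>
      {labels.map((ig) =>
        isLinkable ? (
          <Link key={ig.id} to={detailsURL(ig)}>
            {ig.name}
          </Link>
        ) : (
          <span key={ig.id}>{ig.name}</span>
        )
      )}
    </>
  );
}

export default InstanceGroupLabelsSketch;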
@@ -420,7 +420,7 @@ describe('<AdvancedSearch />', () => {
    const selectOptions = wrapper.find(
      'Select[aria-label="Related search type"] SelectOption'
    );
    expect(selectOptions).toHaveLength(2);
    expect(selectOptions).toHaveLength(3);
    expect(
      selectOptions.find('SelectOption[id="name-option-select"]').prop('value')
    ).toBe('name__icontains');
@@ -31,6 +31,12 @@ function RelatedLookupTypeInput({
        value="name__icontains"
        description={t`Fuzzy search on name field.`}
      />
      <SelectOption
        id="name-exact-option-select"
        key="name"
        value="name"
        description={t`Exact search on name field.`}
      />
      <SelectOption
        id="id-option-select"
        key="id"
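The difference between the fuzzy and exact options is just the Django-style lookup that ends up in the API query string. A hedged illustration follows; the endpoint and values are made up for the example, and only the name__icontains and name lookups come from the diff.

// 'name__icontains' -> case-insensitive substring match, e.g. GET /api/v2/job_templates/?name__icontains=demo
// 'name'            -> exact match on the name field,     e.g. GET /api/v2/job_templates/?name=Demo%20Job%20Template
const buildFilter = (lookupType, value) => `${lookupType}=${encodeURIComponent(value)}`;

buildFilter('name__icontains', 'demo'); // 'name__icontains=demo'
buildFilter('name', 'Demo Job Template'); // 'name=Demo%20Job%20Template'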
@@ -24,12 +24,10 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
  const { id } = useParams();

  const relevantResults = relatedJobs.filter(
    ({
      job: jobId,
      summary_fields: {
        unified_job_template: { unified_job_type },
      },
    }) => jobId && `${jobId}` !== id && unified_job_type !== 'workflow_approval'
    ({ job: jobId, summary_fields }) =>
      jobId &&
      `${jobId}` !== id &&
      summary_fields.job.type !== 'workflow_approval'
  );

  const [isOpen, setIsOpen] = useState(false);
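A quick sketch of what the rewritten predicate keeps, run against data shaped like the fixture in the test file further down; the current job id ('1') and the node values are assumptions for the example.

// Mirrors the new filter predicate against fixture-shaped data.
const id = '1'; // current job id from useParams(), compared as a string
const relatedJobs = [
  { job: 4, summary_fields: { job: { type: 'project_update' } } }, // kept
  { job: 1, summary_fields: { job: { type: 'job' } } }, // dropped: same id as the current job
  { job: 2, summary_fields: { job: { type: 'workflow_approval' } } }, // dropped: approval node
  { summary_fields: { job: { type: 'job' } } }, // dropped: node never produced a job
];

const relevantResults = relatedJobs.filter(
  ({ job: jobId, summary_fields }) =>
    jobId && `${jobId}` !== id && summary_fields.job.type !== 'workflow_approval'
);
// relevantResults.length === 1 (only the project_update node survives)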
@@ -101,16 +99,14 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
        {sortedJobs?.map((node) => (
          <SelectOption
            key={node.id}
            to={`/jobs/${
              JOB_URL_SEGMENT_MAP[
                node.summary_fields.unified_job_template.unified_job_type
              ]
            }/${node.summary_fields.job?.id}/output`}
            to={`/jobs/${JOB_URL_SEGMENT_MAP[node.summary_fields.job.type]}/${
              node.summary_fields.job?.id
            }/output`}
            component={Link}
            value={node.summary_fields.unified_job_template.name}
            value={node.summary_fields.job.name}
          >
            {stringIsUUID(node.identifier)
              ? node.summary_fields.unified_job_template.name
              ? node.summary_fields.job.name
              : node.identifier}
          </SelectOption>
        ))}
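JOB_URL_SEGMENT_MAP converts a job type into the URL segment used under /jobs/. The map itself is not part of this diff; the subset below is an assumption inferred from the /jobs/playbook/... route exercised in the test file that follows, so treat it as illustrative rather than authoritative.

// Assumed mapping for illustration; the real constant is defined elsewhere in the AWX UI.
const JOB_URL_SEGMENT_MAP = {
  job: 'playbook',
  project_update: 'project',
  inventory_update: 'inventory',
  workflow_job: 'workflow',
};

const node = { summary_fields: { job: { type: 'job', id: 2, name: 'Durham' } } };
const href = `/jobs/${JOB_URL_SEGMENT_MAP[node.summary_fields.job.type]}/${node.summary_fields.job.id}/output`;
// -> /jobs/playbook/2/output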
@@ -0,0 +1,85 @@
import React from 'react';
import { within, render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import WorkflowOutputNavigation from './WorkflowOutputNavigation';
import { createMemoryHistory } from 'history';
import { I18nProvider } from '@lingui/react';
import { i18n } from '@lingui/core';
import { en } from 'make-plural/plurals';
import english from '../../../src/locales/en/messages';
import { Router } from 'react-router-dom';

jest.mock('react-router-dom', () => ({
  ...jest.requireActual('react-router-dom'),
  useParams: () => ({
    id: 1,
  }),
}));
const jobs = [
  {
    id: 1,
    summary_fields: {
      job: {
        name: 'Ansible',
        type: 'project_update',
        id: 1,
        status: 'successful',
      },
    },
    job: 4,
  },
  {
    id: 2,
    summary_fields: {
      job: {
        name: 'Durham',
        type: 'job',
        id: 2,
        status: 'successful',
      },
    },
    job: 3,
  },
  {
    id: 3,
    summary_fields: {
      job: {
        name: 'Red hat',
        type: 'job',
        id: 3,
        status: 'successful',
      },
    },
    job: 2,
  },
];

describe('<WorkflowOuputNavigation/>', () => {
  test('Should open modal and deprovision node', async () => {
    i18n.loadLocaleData({ en: { plurals: en } });
    i18n.load({ en: english });
    i18n.activate('en');
    const user = userEvent.setup();
    const ref = jest
      .spyOn(React, 'useRef')
      .mockReturnValueOnce({ current: 'div' });
    const history = createMemoryHistory({
      initialEntries: ['jobs/playbook/2/output'],
    });
    render(
      <I18nProvider i18n={i18n}>
        <Router history={history}>
          <WorkflowOutputNavigation relatedJobs={jobs} parentRef={ref} />
        </Router>
      </I18nProvider>
    );

    const button = screen.getByRole('button');
    await user.click(button);

    await waitFor(() => screen.getByText('Workflow Nodes'));
    await waitFor(() => screen.getByText('Red hat'));
    await waitFor(() => screen.getByText('Durham'));
    await waitFor(() => screen.getByText('Ansible'));
  });
});
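The I18nProvider/Router wrapping above is the usual harness for rendering AWX UI components under React Testing Library. A small helper along these lines (purely illustrative; the name and location are assumptions, not part of this changeset) keeps that setup out of individual tests.

// Hypothetical helper for tests; not part of this diff.
import React from 'react';
import { render } from '@testing-library/react';
import { createMemoryHistory } from 'history';
import { Router } from 'react-router-dom';
import { I18nProvider } from '@lingui/react';
import { i18n } from '@lingui/core';

function renderWithProviders(ui, { route = '/' } = {}) {
  const history = createMemoryHistory({ initialEntries: [route] });
  return {
    history,
    ...render(
      <I18nProvider i18n={i18n}>
        <Router history={history}>{ui}</Router>
      </I18nProvider>
    ),
  };
}

// Usage:
// renderWithProviders(
//   <WorkflowOutputNavigation relatedJobs={jobs} parentRef={ref} />,
//   { route: '/jobs/playbook/2/output' }
// );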
File diff suppressed because it is too large (9 files)
Some files were not shown because too many files have changed in this diff