Mirror of https://github.com/ansible/awx.git, synced 2026-02-07 20:44:45 -03:30

Compare commits: 12824-Inst...13089-Sche (210 commits)
Commits in this range (SHA1):
26a947ed31 b99a434dee 6cee99a9f9 ee509aea56 b5452a48f8 0c980fa7d5 e34ce8c795 3543644e0e 36c0d07b30 239827a9cf
ac9871b36f f739908ccf cf1ec07eab d968b648de 5dd0eab806 41f3f381ec ac8cff75ce 94b34b801c 8f6849fc22 821b1701bf
b7f2825909 e87e041a2a cc336e791c c2a3c3b285 7b8dcc98e7 d5011492bf e363ddf470 987709cdb3 f04ac3c798 71a6baccdb
d07076b686 7129f3e8cd df61a5cea1 a4b950f79b 8be739d255 ca54195099 f0fcfdde39 80b1ba4a35 51f8e362dc 737d6d8c8b
beaf6b6058 aad1fbcef8 0b96d617ac fe768a159b c1ebea858b da9b8135e8 76cecf3f6b 7b2938f515 916b5642d2 e524d3df3e
01e9a611ea ef29589940 cec2d2dfb9 15b7ad3570 36ff9cbc6d ed74d80ecb a0b8215c06 f88b993b18 4a7f4d0ed4 6e08c3567f
adbcb5c5e4 8054c6aedc 58734a33c4 2832f28014 e5057691ee a0cfd8501c 99b643bd77 305b39d8e5 bb047baeba 9637aad37e
fbc06ec623 57430afc55 7aae7e8ed4 a67d107a58 642003e207 ec7e2284df ff7facdfa2 6df4e62132 6289bfb639 95e4b2064f
48eba60be4 c7efa8b4e0 657b5cb1aa 06daebbecf fb37f22bf4 71f326b705 6508ab4a33 bf871bd427 e403c603d6 4b7b3c7c7d
1cdd2cad67 86856f242a 65c3db8cb8 7fa9dcbc2a 7cfb957de3 d0d467e863 eaccf32aa3 a8fdb22ab3 ae79f94a48 40499a4084
b36fa93005 8839b4e90b 7866135d6c fe48dc412f 3a25c4221f 7e1be3ef94 b2f8ca09ba c7692f5c56 3b24afa7f2 2b3f3e2043
68614b83c0 a1edc75c11 4b0e7a5cde 01c6ac1b14 f0481d0a60 fd2a8b8531 239959a4c9 84f2b91105 9d7b249b20 5bd15dd48d
d03348c6e4 5faeff6bec b94a126c02 eedd146643 d30c5ca9cd a3b21b261c d1d60c9ef1 925e055bb3 9f40d7a05c d34f6af830
163ccfd410 968c316c0c 2fdce43f9e fa305a7bfa 0933a96d60 8b9db837ca 1106367962 721e19e1c8 f9bb26ad33 87363af615
332c433b6e e029cf7196 a1d34462b0 e4283841d6 477a63d1b4 4a30cc244f 271613b86d 1f939aa25e ac57f5cb28 86b0a3d4f1
b269ed48ee fe1b37afaf c39172f516 87dd8c118d d6004fd2d3 3d3e4ad150 81821fd378 9b047c2af6 f0d6bc0dc8 8e5af2b5f2
918db89dc8 7590301ae7 6e25a552d3 0db75fdbfd 83c48bb5fa 1c65339a24 75e6366c5e af6fec5592 893dba7076 d571b9bbbc
b28cc34ff3 776d39f057 61b242d194 22b81f5dd3 99e1920d42 2218fd5c25 3c656842f0 bd7635e74e 0faa999ceb 1bedf32baf
577f102e53 c5cf39abb7 6b315f39de 529a936d0a 6538d34b48 e40824bded ed318ea784 d2b69e05f6 b57ae592ed e22f887765
fc838ba44b b19aa4a88d eba24db74c 153a197fad 8f4c329c2a 368eb46f5b d6fea77082 878035c13b 2cc971a43f 9d77c54612
ef651a3a21 aaf6f5f17e 3303f7bfcf 41fd6ea37f 4808a0053f de41601f27 487efb77ce aae57378f0 cfce31419d 8e83f9b134
.github/triage_replies.md (vendored, 10 changes)
@@ -53,6 +53,16 @@ https://github.com/ansible/awx/#get-involved \
 Thank you once again for this and your interest in AWX!
 
+### Red Hat Support Team
+- Hi! \
+\
+It appears that you are using an RPM build for RHEL. Please reach out to the Red Hat support team and submit a ticket. \
+\
+Here is the link to do so: \
+\
+https://access.redhat.com/support \
+\
+Thank you for your submission and for supporting AWX!
 
 ## Common
.github/workflows/ci.yml (vendored, 20 changes)
@@ -2,6 +2,7 @@
 name: CI
 env:
   BRANCH: ${{ github.base_ref || 'devel' }}
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
   pull_request:
 jobs:
@@ -144,3 +145,22 @@ jobs:
     env:
       AWX_TEST_IMAGE: awx
       AWX_TEST_VERSION: ci
+
+  collection-sanity:
+    name: awx_collection sanity
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v2
+
+      # The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
+      - name: Upgrade ansible-core
+        run: python3 -m pip install --upgrade ansible-core
+
+      - name: Run sanity tests
+        run: make test_collection_sanity
+        env:
+          # needed due to cgroupsv2. This is fixed, but a stable release
+          # with the fix has not been made yet.
+          ANSIBLE_TEST_PREFER_PODMAN: 1
.github/workflows/devel_images.yml (vendored, 2 changes)
@@ -1,5 +1,7 @@
 ---
 name: Build/Push Development Images
+env:
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
   push:
     branches:
.github/workflows/e2e_test.yml (vendored, 7 changes)
@@ -1,9 +1,12 @@
 ---
 name: E2E Tests
+env:
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
+
 on:
   pull_request_target:
     types: [labeled]
 jobs:
   e2e-test:
     if: contains(github.event.pull_request.labels.*.name, 'qe:e2e')
     runs-on: ubuntu-latest
@@ -104,5 +107,3 @@ jobs:
       with:
         name: AWX-logs-${{ matrix.job }}
         path: make-docker-compose-output.log
-
-
.github/workflows/feature_branch_deletion.yml (vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
+---
+name: Feature branch deletion cleanup
+env:
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
+on:
+  delete:
+    branches:
+      - feature_**
+jobs:
+  push:
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      contents: read
+    steps:
+      - name: Delete API Schema
+        env:
+          AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
+          AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
+          AWS_REGION: 'us-east-1'
+        run: |
+          ansible localhost -c local -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3' }}"
+          ansible localhost -c local -m aws_s3 \
+            -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"
.github/workflows/pr_body_check.yml (vendored, 18 changes)
@@ -13,21 +13,13 @@ jobs:
       packages: write
       contents: read
     steps:
-      - name: Write PR body to a file
-        run: |
-          cat >> pr.body << __SOME_RANDOM_PR_EOF__
-          ${{ github.event.pull_request.body }}
-          __SOME_RANDOM_PR_EOF__
-
-      - name: Display the received body for troubleshooting
-        run: cat pr.body
-
      # We want to write these out individually just in case the options were joined on a single line
       - name: Check for each of the lines
+        env:
+          PR_BODY: ${{ github.event.pull_request.body }}
         run: |
-          grep "Bug, Docs Fix or other nominal change" pr.body > Z
-          grep "New or Enhanced Feature" pr.body > Y
-          grep "Breaking Change" pr.body > X
+          echo $PR_BODY | grep "Bug, Docs Fix or other nominal change" > Z
+          echo $PR_BODY | grep "New or Enhanced Feature" > Y
+          echo $PR_BODY | grep "Breaking Change" > X
           exit 0
           # We exit 0 and set the shell to prevent the returns from the greps from failing this step
           # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
.github/workflows/promote.yml (vendored, 4 changes)
@@ -1,5 +1,9 @@
 ---
 name: Promote Release
+
+env:
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
+
 on:
   release:
     types: [published]
.github/workflows/stage.yml (vendored, 4 changes)
@@ -1,5 +1,9 @@
 ---
 name: Stage Release
+
+env:
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
+
 on:
   workflow_dispatch:
     inputs:
.github/workflows/upload_schema.yml (vendored, 5 changes)
@@ -1,10 +1,15 @@
 ---
 name: Upload API Schema
+
+env:
+  LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
+
 on:
   push:
     branches:
       - devel
       - release_**
+      - feature_**
 jobs:
   push:
     runs-on: ubuntu-latest
@@ -12,7 +12,7 @@ recursive-include awx/plugins *.ps1
 recursive-include requirements *.txt
 recursive-include requirements *.yml
 recursive-include config *
-recursive-include docs/licenses *
+recursive-include licenses *
 recursive-exclude awx devonly.py*
 recursive-exclude awx/api/tests *
 recursive-exclude awx/main/tests *
Makefile (67 changes)
@@ -6,7 +6,20 @@ CHROMIUM_BIN=/tmp/chrome-linux/chrome
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 MANAGEMENT_COMMAND ?= awx-manage
 VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
-COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
+
+# ansible-test requires a semver-compatible version, so we allow overrides to hack it
+COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
+# args for the ansible-test sanity command
+COLLECTION_SANITY_ARGS ?= --docker
+# collection unit testing directories
+COLLECTION_TEST_DIRS ?= awx_collection/test/awx
+# collection integration test directories (defaults to all)
+COLLECTION_TEST_TARGET ?=
+# args for collection install
+COLLECTION_PACKAGE ?= awx
+COLLECTION_NAMESPACE ?= awx
+COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
+COLLECTION_TEMPLATE_VERSION ?= false
 
 # NOTE: This defaults the container image version to the branch that's active
 COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -34,7 +47,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
 SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
 # These should be upgraded in the AWX and Ansible venv before attempting
 # to install the actual requirements
-VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==58.2.0 setuptools_scm[toml]==6.4.2 wheel==0.36.2
+VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
 
 NAME ?= awx
@@ -85,6 +98,7 @@ clean: clean-ui clean-api clean-awxkit clean-dist
 
 clean-api:
 	rm -rf build $(NAME)-$(VERSION) *.egg-info
+	rm -rf .tox
 	find . -type f -regex ".*\.py[co]$$" -delete
 	find . -type d -name "__pycache__" -delete
 	rm -f awx/awx_test.sqlite3*
@@ -117,7 +131,7 @@ virtualenv_awx:
 	fi; \
 	fi
 
 ## Install third-party requirements needed for AWX's environment.
+# this does not use system site packages intentionally
 requirements_awx: virtualenv_awx
 	if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -181,7 +195,7 @@ collectstatic:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
-	mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
+	$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
 
 DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
@@ -287,19 +301,13 @@ test:
 	cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
+	awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
 
-COLLECTION_TEST_DIRS ?= awx_collection/test/awx
-COLLECTION_TEST_TARGET ?=
-COLLECTION_PACKAGE ?= awx
-COLLECTION_NAMESPACE ?= awx
-COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
-COLLECTION_TEMPLATE_VERSION ?= false
-
 test_collection:
 	rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
 	if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi && \
-	pip install ansible-core && \
+	if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
 	ansible --version
 	py.test $(COLLECTION_TEST_DIRS) -v
 # The python path needs to be modified so that the tests can find Ansible within the container
 # First we will use anything explicitly set as PYTHONPATH
@@ -329,8 +337,13 @@ install_collection: build_collection
 	rm -rf $(COLLECTION_INSTALL)
 	ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(COLLECTION_VERSION).tar.gz
 
-test_collection_sanity: install_collection
-	cd $(COLLECTION_INSTALL) && ansible-test sanity
+test_collection_sanity:
+	rm -rf awx_collection_build/
+	rm -rf $(COLLECTION_INSTALL)
+	if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
+	ansible --version
+	COLLECTION_VERSION=1.0.0 make install_collection
+	cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
 
 test_collection_integration: install_collection
 	cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
@@ -377,6 +390,8 @@ clean-ui:
 	rm -rf awx/ui/build
 	rm -rf awx/ui/src/locales/_build
 	rm -rf $(UI_BUILD_FLAG_FILE)
+	# the collectstatic command doesn't like it if this dir doesn't exist.
+	mkdir -p awx/ui/build/static
 
 awx/ui/node_modules:
 	NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci
@@ -386,20 +401,18 @@ $(UI_BUILD_FLAG_FILE):
 	$(PYTHON) tools/scripts/compilemessages.py
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run build
-	mkdir -p awx/public/static/css
-	mkdir -p awx/public/static/js
-	mkdir -p awx/public/static/media
-	cp -r awx/ui/build/static/css/* awx/public/static/css
-	cp -r awx/ui/build/static/js/* awx/public/static/js
-	cp -r awx/ui/build/static/media/* awx/public/static/media
 	touch $@
 
 ui-release: $(UI_BUILD_FLAG_FILE)
 
 ui-devel: awx/ui/node_modules
 	@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
+	mkdir -p /var/lib/awx/public/static/css
+	mkdir -p /var/lib/awx/public/static/js
+	mkdir -p /var/lib/awx/public/static/media
+	cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
+	cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
+	cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
 
 ui-devel-instrumented: awx/ui/node_modules
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -451,8 +464,9 @@ awx/projects:
 COMPOSE_UP_OPTS ?=
 COMPOSE_OPTS ?=
 CONTROL_PLANE_NODE_COUNT ?= 1
-EXECUTION_NODE_COUNT ?= 2
+EXECUTION_NODE_COUNT ?= 0
 MINIKUBE_CONTAINER_GROUP ?= false
+MINIKUBE_SETUP ?= false # if false, run minikube separately
 EXTRA_SOURCES_ANSIBLE_OPTS ?=
 
 ifneq ($(ADMIN_PASSWORD),)
@@ -461,7 +475,7 @@ endif
 
 docker-compose-sources: .git/hooks/pre-commit
 	@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
-		ansible-playbook -i tools/docker-compose/inventory tools/docker-compose-minikube/deploy.yml; \
+		ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
 	fi;
 
 	ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
@@ -591,13 +605,12 @@ pot: $(UI_BUILD_FLAG_FILE)
 po: $(UI_BUILD_FLAG_FILE)
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
 
-LANG = "en_us"
 ## generate API django .pot .po
 messages:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
-	$(PYTHON) manage.py makemessages -l $(LANG) --keep-pot
+	$(PYTHON) manage.py makemessages -l en_us --keep-pot
 
 print-%:
 	@echo $($*)
@@ -635,4 +648,4 @@ help/generate:
 	} \
 	} \
 	{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
 	@printf "\n"
@@ -113,7 +113,7 @@ from awx.main.utils import (
 )
 from awx.main.utils.filters import SmartFilter
 from awx.main.utils.named_url_graph import reset_counters
-from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
+from awx.main.scheduler.task_manager_models import TaskManagerModels
 from awx.main.redact import UriCleaner, REPLACE_STR
 
 from awx.main.validators import vars_validate_or_raise
@@ -2221,6 +2221,15 @@ class InventorySourceUpdateSerializer(InventorySourceSerializer):
     class Meta:
         fields = ('can_update',)
 
+    def validate(self, attrs):
+        project = self.instance.source_project
+        if project:
+            failed_reason = project.get_reason_if_failed()
+            if failed_reason:
+                raise serializers.ValidationError(failed_reason)
+
+        return super(InventorySourceUpdateSerializer, self).validate(attrs)
+
 
 class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
@@ -4272,17 +4281,10 @@ class JobLaunchSerializer(BaseSerializer):
         # Basic validation - cannot run a playbook without a playbook
         if not template.project:
             errors['project'] = _("A project is required to run a job.")
-        elif template.project.status in ('error', 'failed'):
-            errors['playbook'] = _("Missing a revision to run due to failed project update.")
-
-        latest_update = template.project.project_updates.last()
-        if latest_update is not None and latest_update.failed:
-            failed_validation_tasks = latest_update.project_update_events.filter(
-                event='runner_on_failed',
-                play="Perform project signature/checksum verification",
-            )
-            if failed_validation_tasks:
-                errors['playbook'] = _("Last project update failed due to signature validation failure.")
+        else:
+            failure_reason = template.project.get_reason_if_failed()
+            if failure_reason:
+                errors['playbook'] = failure_reason
 
         # cannot run a playbook without an inventory
         if template.inventory and template.inventory.pending_deletion is True:
@@ -4952,7 +4954,7 @@ class InstanceSerializer(BaseSerializer):
             res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
         res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
         if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
-            if obj.node_type != 'hop':
+            if obj.node_type == 'execution':
                 res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
         return res
@@ -5038,12 +5040,10 @@ class InstanceGroupSerializer(BaseSerializer):
 
     show_capabilities = ['edit', 'delete']
 
     capacity = serializers.SerializerMethodField()
     consumed_capacity = serializers.SerializerMethodField()
     percent_capacity_remaining = serializers.SerializerMethodField()
-    jobs_running = serializers.IntegerField(
-        help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance group'), read_only=True
-    )
+    jobs_running = serializers.SerializerMethodField()
     jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance group'), read_only=True)
     instances = serializers.SerializerMethodField()
     is_container_group = serializers.BooleanField(
@@ -5069,6 +5069,22 @@ class InstanceGroupSerializer(BaseSerializer):
         label=_('Policy Instance Minimum'),
         help_text=_("Static minimum number of Instances that will be automatically assigned to " "this group when new instances come online."),
     )
+    max_concurrent_jobs = serializers.IntegerField(
+        default=0,
+        min_value=0,
+        required=False,
+        initial=0,
+        label=_('Max Concurrent Jobs'),
+        help_text=_("Maximum number of concurrent jobs to run on a group. When set to zero, no maximum is enforced."),
+    )
+    max_forks = serializers.IntegerField(
+        default=0,
+        min_value=0,
+        required=False,
+        initial=0,
+        label=_('Max Forks'),
+        help_text=_("Maximum number of forks to execute concurrently on a group. When set to zero, no maximum is enforced."),
+    )
     policy_instance_list = serializers.ListField(
         child=serializers.CharField(),
         required=False,
@@ -5090,6 +5106,8 @@ class InstanceGroupSerializer(BaseSerializer):
             "consumed_capacity",
             "percent_capacity_remaining",
             "jobs_running",
+            "max_concurrent_jobs",
+            "max_forks",
             "jobs_total",
             "instances",
             "is_container_group",
@@ -5171,28 +5189,39 @@ class InstanceGroupSerializer(BaseSerializer):
         # Store capacity values (globally computed) in the context
         if 'task_manager_igs' not in self.context:
             instance_groups_queryset = None
-            jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
             if self.parent:  # Is ListView:
                 instance_groups_queryset = self.parent.instance
 
-            instances = TaskManagerInstances(jobs_qs)
-            instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)
+            tm_models = TaskManagerModels.init_with_consumed_capacity(
+                instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'],
+                instance_groups_queryset=instance_groups_queryset,
+            )
 
-            self.context['task_manager_igs'] = instance_groups
+            self.context['task_manager_igs'] = tm_models.instance_groups
         return self.context['task_manager_igs']
 
     def get_consumed_capacity(self, obj):
         ig_mgr = self.get_ig_mgr()
         return ig_mgr.get_consumed_capacity(obj.name)
 
+    def get_capacity(self, obj):
+        ig_mgr = self.get_ig_mgr()
+        return ig_mgr.get_capacity(obj.name)
+
     def get_percent_capacity_remaining(self, obj):
-        if not obj.capacity:
+        capacity = self.get_capacity(obj)
+        if not capacity:
             return 0.0
-        ig_mgr = self.get_ig_mgr()
-        return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))
+        consumed_capacity = self.get_consumed_capacity(obj)
+        return float("{0:.2f}".format(((float(capacity) - float(consumed_capacity)) / (float(capacity))) * 100))
 
     def get_instances(self, obj):
-        return obj.instances.count()
+        ig_mgr = self.get_ig_mgr()
+        return len(ig_mgr.get_instances(obj.name))
+
+    def get_jobs_running(self, obj):
+        ig_mgr = self.get_ig_mgr()
+        return ig_mgr.get_jobs_running(obj.name)
 
 
 class ActivityStreamSerializer(BaseSerializer):
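For a concrete sense of the reworked capacity math above, here is a minimal standalone sketch of the formula the serializer now applies (plain Python, values made up for illustration):

# Sketch of the percent-capacity-remaining formula used above.
def percent_capacity_remaining(capacity: float, consumed: float) -> float:
    if not capacity:  # a group with zero capacity reports 0.0 rather than dividing by zero
        return 0.0
    return float("{0:.2f}".format(((capacity - consumed) / capacity) * 100))

# e.g. a group with capacity 120 and 30 units consumed has 75.0% remaining
assert percent_capacity_remaining(120, 30) == 75.0
assert percent_capacity_remaining(0, 0) == 0.0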
@@ -1,5 +1,5 @@
 Launch a Job Template:
 
+{% ifmeth GET %}
 Make a GET request to this resource to determine if the job_template can be
 launched and whether any passwords are required to launch the job_template.
 The response will include the following fields:
@@ -29,8 +29,8 @@ The response will include the following fields:
 * `inventory_needed_to_start`: Flag indicating the presence of an inventory
   associated with the job template. If not then one should be supplied when
   launching the job (boolean, read-only)
 
-Make a POST request to this resource to launch the job_template. If any
+{% endifmeth %}
+{% ifmeth POST %}Make a POST request to this resource to launch the job_template. If any
 passwords, inventory, or extra variables (extra_vars) are required, they must
 be passed via POST data, with extra_vars given as a YAML or JSON string and
 escaped parentheses. If the `inventory_needed_to_start` is `True` then the
@@ -41,3 +41,4 @@ are not provided, a 400 status code will be returned. If the job cannot be
 launched, a 405 status code will be returned. If the provided credential or
 inventory are not allowed to be used by the user, then a 403 status code will
 be returned.
+{% endifmeth %}
@@ -5,6 +5,7 @@
 import dateutil
 import functools
 import html
+import itertools
 import logging
 import re
 import requests
@@ -20,9 +21,10 @@ from urllib3.exceptions import ConnectTimeoutError
 # Django
 from django.conf import settings
 from django.core.exceptions import FieldError, ObjectDoesNotExist
-from django.db.models import Q, Sum
+from django.db.models import Q, Sum, Count
 from django.db import IntegrityError, ProgrammingError, transaction, connection
 from django.db.models.fields.related import ManyToManyField, ForeignKey
+from django.db.models.functions import Trunc
 from django.shortcuts import get_object_or_404
 from django.utils.safestring import mark_safe
 from django.utils.timezone import now
@@ -47,9 +49,6 @@ from rest_framework import status
 from rest_framework_yaml.parsers import YAMLParser
 from rest_framework_yaml.renderers import YAMLRenderer
 
-# QSStats
-import qsstats
-
 # ANSIConv
 import ansiconv
@@ -283,30 +282,50 @@ class DashboardJobsGraphView(APIView):
             success_query = success_query.filter(instance_of=models.ProjectUpdate)
             failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
 
-        success_qss = qsstats.QuerySetStats(success_query, 'finished')
-        failed_qss = qsstats.QuerySetStats(failed_query, 'finished')
-
-        start_date = now()
+        end = now()
+        interval = 'day'
         if period == 'month':
-            end_date = start_date - dateutil.relativedelta.relativedelta(months=1)
-            interval = 'days'
+            start = end - dateutil.relativedelta.relativedelta(months=1)
         elif period == 'two_weeks':
-            end_date = start_date - dateutil.relativedelta.relativedelta(weeks=2)
-            interval = 'days'
+            start = end - dateutil.relativedelta.relativedelta(weeks=2)
         elif period == 'week':
-            end_date = start_date - dateutil.relativedelta.relativedelta(weeks=1)
-            interval = 'days'
+            start = end - dateutil.relativedelta.relativedelta(weeks=1)
         elif period == 'day':
-            end_date = start_date - dateutil.relativedelta.relativedelta(days=1)
-            interval = 'hours'
+            start = end - dateutil.relativedelta.relativedelta(days=1)
+            interval = 'hour'
         else:
             return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
 
         dashboard_data = {"jobs": {"successful": [], "failed": []}}
-        for element in success_qss.time_series(end_date, start_date, interval=interval):
-            dashboard_data['jobs']['successful'].append([time.mktime(element[0].timetuple()), element[1]])
-        for element in failed_qss.time_series(end_date, start_date, interval=interval):
-            dashboard_data['jobs']['failed'].append([time.mktime(element[0].timetuple()), element[1]])
+        succ_list = dashboard_data['jobs']['successful']
+        fail_list = dashboard_data['jobs']['failed']
+
+        qs_s = (
+            success_query.filter(finished__range=(start, end))
+            .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
+            .order_by()
+            .values('d')
+            .annotate(agg=Count('id', distinct=True))
+        )
+        data_s = {item['d']: item['agg'] for item in qs_s}
+        qs_f = (
+            failed_query.filter(finished__range=(start, end))
+            .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
+            .order_by()
+            .values('d')
+            .annotate(agg=Count('id', distinct=True))
+        )
+        data_f = {item['d']: item['agg'] for item in qs_f}
+
+        start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
+        for d in itertools.count():
+            date = start_date + dateutil.relativedelta.relativedelta(days=d)
+            if date > end:
+                break
+            succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
+            fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
 
         return Response(dashboard_data)
@@ -325,6 +344,13 @@ class InstanceDetail(RetrieveUpdateAPIView):
     model = models.Instance
     serializer_class = serializers.InstanceSerializer
 
+    def update_raw_data(self, data):
+        # these fields are only valid on creation of an instance, so they are unwanted on detail view
+        data.pop('listener_port', None)
+        data.pop('node_type', None)
+        data.pop('hostname', None)
+        return super(InstanceDetail, self).update_raw_data(data)
+
     def update(self, request, *args, **kwargs):
         r = super(InstanceDetail, self).update(request, *args, **kwargs)
         if status.is_success(r.status_code):
@@ -392,8 +418,8 @@ class InstanceHealthCheck(GenericAPIView):
     permission_classes = (IsSystemAdminOrAuditor,)
 
     def get_queryset(self):
-        return super().get_queryset().filter(node_type='execution')
+        # FIXME: For now, we don't have a good way of checking the health of a hop node.
+        return super().get_queryset().exclude(node_type='hop')
 
     def get(self, request, *args, **kwargs):
         obj = self.get_object()
@@ -413,9 +439,10 @@ class InstanceHealthCheck(GenericAPIView):
 
             execution_node_health_check.apply_async([obj.hostname])
         else:
-            from awx.main.tasks.system import cluster_node_health_check
-
-            cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
+            return Response(
+                {"error": f"Cannot run a health check on instances of type {obj.node_type}. Health checks can only be run on execution nodes."},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
         return Response({'msg': f"Health check is running for {obj.hostname}."}, status=status.HTTP_200_OK)
@@ -2220,6 +2247,8 @@ class InventorySourceUpdateView(RetrieveAPIView):
 
     def post(self, request, *args, **kwargs):
         obj = self.get_object()
+        serializer = self.get_serializer(instance=obj, data=request.data)
+        serializer.is_valid(raise_exception=True)
         if obj.can_update:
            update = obj.update()
            if not update:
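The dashboard hunk above replaces qsstats with a grouped Count per truncated timestamp plus an explicit zero-fill loop, so buckets with no finished jobs still appear in the series. A minimal standalone sketch of that zero-fill pattern (plain Python; the dict stands in for the Trunc/Count aggregation, and all values are made up):

import datetime

# counts_by_day stands in for the {item['d']: item['agg']} dict built from the
# Trunc('finished', interval) / Count('id') aggregation in the view above.
counts_by_day = {datetime.date(2022, 12, 1): 3, datetime.date(2022, 12, 3): 1}

start = datetime.date(2022, 12, 1)
end = datetime.date(2022, 12, 4)

series = []
day = start
while day <= end:
    # days with no finished jobs get an explicit 0 instead of being skipped
    series.append((day, counts_by_day.get(day, 0)))
    day += datetime.timedelta(days=1)

print(series)  # Dec 2 and Dec 4 appear with a count of 0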
@@ -16,7 +16,7 @@ from rest_framework import status
 
 from awx.main.constants import ACTIVE_STATES
 from awx.main.utils import get_object_or_400
-from awx.main.models.ha import Instance, InstanceGroup
+from awx.main.models.ha import Instance, InstanceGroup, schedule_policy_task
 from awx.main.models.organization import Team
 from awx.main.models.projects import Project
 from awx.main.models.inventory import Inventory
@@ -107,6 +107,11 @@ class InstanceGroupMembershipMixin(object):
         if inst_name in ig_obj.policy_instance_list:
             ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
             ig_obj.save(update_fields=['policy_instance_list'])
+
+        # sometimes removing an instance has a non-obvious consequence
+        # this is almost always true if policy_instance_percentage or _minimum is non-zero
+        # after removing a single instance, the other memberships need to be re-balanced
+        schedule_policy_task()
         return response
@@ -6237,4 +6237,5 @@ msgstr "%s se está actualizando."
 
 #: awx/ui/urls.py:24
 msgid "This page will refresh when complete."
 msgstr "Esta página se actualizará cuando se complete."
@@ -721,7 +721,7 @@ msgstr "DTSTART valide obligatoire dans rrule. La valeur doit commencer par : DT
 #: awx/api/serializers.py:4657
 msgid ""
 "DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ."
-msgstr "DTSTART ne peut correspondre à une DateHeure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
+msgstr "DTSTART ne peut correspondre à une date-heure naïve. Spécifier ;TZINFO= ou YYYYMMDDTHHMMSSZZ."
 
 #: awx/api/serializers.py:4659
 msgid "Multiple DTSTART is not supported."
@@ -6239,4 +6239,5 @@ msgstr "%s est en cours de mise à niveau."
 
 #: awx/ui/urls.py:24
 msgid "This page will refresh when complete."
 msgstr "Cette page sera rafraîchie une fois terminée."
||||
@@ -6237,4 +6237,5 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."
|
||||
|
||||
#: awx/ui/urls.py:24
|
||||
msgid "This page will refresh when complete."
|
||||
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
|
||||
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
|
||||
|
||||
|
||||
@@ -2697,46 +2697,66 @@ class ActivityStreamAccess(BaseAccess):
         # 'job_template', 'job', 'project', 'project_update', 'workflow_job',
         # 'inventory_source', 'workflow_job_template'
 
-        inventory_set = Inventory.accessible_objects(self.user, 'read_role')
-        credential_set = Credential.accessible_objects(self.user, 'read_role')
+        q = Q(user=self.user)
+        inventory_set = Inventory.accessible_pk_qs(self.user, 'read_role')
+        if inventory_set:
+            q |= (
+                Q(ad_hoc_command__inventory__in=inventory_set)
+                | Q(inventory__in=inventory_set)
+                | Q(host__inventory__in=inventory_set)
+                | Q(group__inventory__in=inventory_set)
+                | Q(inventory_source__inventory__in=inventory_set)
+                | Q(inventory_update__inventory_source__inventory__in=inventory_set)
+            )
+
+        credential_set = Credential.accessible_pk_qs(self.user, 'read_role')
+        if credential_set:
+            q |= Q(credential__in=credential_set)
+
         auditing_orgs = (
             (Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role'))
             .distinct()
             .values_list('id', flat=True)
         )
-        project_set = Project.accessible_objects(self.user, 'read_role')
-        jt_set = JobTemplate.accessible_objects(self.user, 'read_role')
-        team_set = Team.accessible_objects(self.user, 'read_role')
-        wfjt_set = WorkflowJobTemplate.accessible_objects(self.user, 'read_role')
-        app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
-        token_set = OAuth2TokenAccess(self.user).filtered_queryset()
+        if auditing_orgs:
+            q |= (
+                Q(user__in=auditing_orgs.values('member_role__members'))
+                | Q(organization__in=auditing_orgs)
+                | Q(notification_template__organization__in=auditing_orgs)
+                | Q(notification__notification_template__organization__in=auditing_orgs)
+                | Q(label__organization__in=auditing_orgs)
+                | Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
+            )
 
-        return qs.filter(
-            Q(ad_hoc_command__inventory__in=inventory_set)
-            | Q(o_auth2_application__in=app_set)
-            | Q(o_auth2_access_token__in=token_set)
-            | Q(user__in=auditing_orgs.values('member_role__members'))
-            | Q(user=self.user)
-            | Q(organization__in=auditing_orgs)
-            | Q(inventory__in=inventory_set)
-            | Q(host__inventory__in=inventory_set)
-            | Q(group__inventory__in=inventory_set)
-            | Q(inventory_source__inventory__in=inventory_set)
-            | Q(inventory_update__inventory_source__inventory__in=inventory_set)
-            | Q(credential__in=credential_set)
-            | Q(team__in=team_set)
-            | Q(project__in=project_set)
-            | Q(project_update__project__in=project_set)
-            | Q(job_template__in=jt_set)
-            | Q(job__job_template__in=jt_set)
-            | Q(workflow_job_template__in=wfjt_set)
-            | Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
-            | Q(workflow_job__workflow_job_template__in=wfjt_set)
-            | Q(notification_template__organization__in=auditing_orgs)
-            | Q(notification__notification_template__organization__in=auditing_orgs)
-            | Q(label__organization__in=auditing_orgs)
-            | Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
-        ).distinct()
+        project_set = Project.accessible_pk_qs(self.user, 'read_role')
+        if project_set:
+            q |= Q(project__in=project_set) | Q(project_update__project__in=project_set)
+
+        jt_set = JobTemplate.accessible_pk_qs(self.user, 'read_role')
+        if jt_set:
+            q |= Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set)
+
+        wfjt_set = WorkflowJobTemplate.accessible_pk_qs(self.user, 'read_role')
+        if wfjt_set:
+            q |= (
+                Q(workflow_job_template__in=wfjt_set)
+                | Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)
+                | Q(workflow_job__workflow_job_template__in=wfjt_set)
+            )
+
+        team_set = Team.accessible_pk_qs(self.user, 'read_role')
+        if team_set:
+            q |= Q(team__in=team_set)
+
+        app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()
+        if app_set:
+            q |= Q(o_auth2_application__in=app_set)
+
+        token_set = OAuth2TokenAccess(self.user).filtered_queryset()
+        if token_set:
+            q |= Q(o_auth2_access_token__in=token_set)
+
+        return qs.filter(q).distinct()
 
     def can_add(self, data):
         return False
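The rewrite above builds one Q expression incrementally and only ORs in a clause when its role-based subset is non-empty, instead of unconditionally OR-ing every clause (and its subquery) into one giant filter. A minimal sketch of that pattern (assumes Django is installed; the model relations and ID lists here are hypothetical):

from django.db.models import Q

def visibility_filter(user_id, inventory_ids, credential_ids):
    # start from the clause that always applies
    q = Q(user=user_id)
    # only OR in a clause when its accessible-pk set is non-empty,
    # which keeps the generated SQL free of no-op subqueries
    if inventory_ids:
        q |= Q(inventory__in=inventory_ids) | Q(host__inventory__in=inventory_ids)
    if credential_ids:
        q |= Q(credential__in=credential_ids)
    return q

print(visibility_filter(1, [2, 3], []))  # the credential clause is skipped entirely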
@@ -1,8 +1,8 @@
 import datetime
 import asyncio
 import logging
-import aioredis
+import redis
+import redis.asyncio
 import re
 
 from prometheus_client import (
@@ -82,7 +82,7 @@ class BroadcastWebsocketStatsManager:
 
     async def run_loop(self):
         try:
-            redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
+            redis_conn = await redis.asyncio.Redis.from_url(settings.BROKER_URL)
             while True:
                 stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
                 await redis_conn.set(self._redis_key, stats_data_str)
@@ -122,8 +122,8 @@ class BroadcastWebsocketStats:
             'Number of messages received, to be forwarded, by the broadcast websocket system',
             registry=self._registry,
         )
-        self._messages_received = Gauge(
-            f'awx_{self.remote_name}_messages_received',
+        self._messages_received_current_conn = Gauge(
+            f'awx_{self.remote_name}_messages_received_currrent_conn',
             'Number forwarded messages received by the broadcast websocket system, for the duration of the current connection',
             registry=self._registry,
         )
@@ -144,13 +144,13 @@ class BroadcastWebsocketStats:
 
     def record_message_received(self):
         self._internal_messages_received_per_minute.record()
-        self._messages_received.inc()
+        self._messages_received_current_conn.inc()
         self._messages_received_total.inc()
 
     def record_connection_established(self):
         self._connection.state('connected')
         self._connection_start.set_to_current_time()
-        self._messages_received.set(0)
+        self._messages_received_current_conn.set(0)
 
     def record_connection_lost(self):
         self._connection.state('disconnected')
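For reference, the redis-py replacement for the removed aioredis call looks roughly like the sketch below (assumes redis-py 4.2+, which bundles the asyncio client; the URL and key are placeholders). Note that from_url() constructs the client synchronously and connects lazily on the first command:

import asyncio
import redis.asyncio  # redis-py 4.2+ ships the asyncio client that replaces aioredis

async def publish_stats(stats_payload: str) -> None:
    # from_url() returns a client directly; the connection is made on first use
    conn = redis.asyncio.Redis.from_url("redis://localhost:6379")
    try:
        await conn.set("awx_stats", stats_payload)
    finally:
        await conn.close()

asyncio.run(publish_stats("example"))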
@@ -16,7 +16,7 @@ from awx.conf.license import get_license
 from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
 from awx.main import models
 from awx.main.analytics import register
-from awx.main.scheduler.task_manager_models import TaskManagerInstances
+from awx.main.scheduler.task_manager_models import TaskManagerModels
 
 """
 This module is used to define metrics collected by awx.main.analytics.gather()
@@ -237,9 +237,8 @@ def projects_by_scm_type(since, **kwargs):
 def instance_info(since, include_hostnames=False, **kwargs):
     info = {}
     # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
-    active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
-    tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
-    for tm_instance in tm_instances.instances_by_hostname.values():
+    tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
+    for tm_instance in tm_models.instances.instances_by_hostname.values():
         instance = tm_instance.obj
         instance_info = {
             'uuid': instance.uuid,
@@ -251,6 +250,7 @@ def instance_info(since, include_hostnames=False, **kwargs):
             'enabled': instance.enabled,
             'consumed_capacity': tm_instance.consumed_capacity,
             'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
+            'node_type': instance.node_type,
         }
         if include_hostnames is True:
             instance_info['hostname'] = instance.hostname
@@ -57,6 +57,7 @@ def metrics():
         [
             'hostname',
             'instance_uuid',
+            'node_type',
         ],
         registry=REGISTRY,
     )
@@ -84,6 +85,7 @@ def metrics():
         [
             'hostname',
             'instance_uuid',
+            'node_type',
         ],
         registry=REGISTRY,
     )
@@ -111,6 +113,7 @@ def metrics():
         [
             'hostname',
             'instance_uuid',
+            'node_type',
         ],
         registry=REGISTRY,
     )
@@ -120,6 +123,7 @@ def metrics():
         [
             'hostname',
             'instance_uuid',
+            'node_type',
         ],
         registry=REGISTRY,
     )
@@ -180,12 +184,13 @@ def metrics():
     instance_data = instance_info(None, include_hostnames=True)
     for uuid, info in instance_data.items():
         hostname = info['hostname']
-        INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
+        node_type = info['node_type']
+        INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['capacity'])
         INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
         INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
-        INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
-        INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
-        INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
+        INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['consumed_capacity'])
+        INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).set(instance_data[uuid]['remaining_capacity'])
+        INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid, node_type=node_type).info(
             {
                 'enabled': str(instance_data[uuid]['enabled']),
                 'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
||||
@@ -5,7 +5,9 @@ import logging
|
||||
|
||||
from django.conf import settings
|
||||
from django.apps import apps
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.utils import is_testing
|
||||
|
||||
root_key = 'awx_metrics'
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
@@ -163,7 +165,7 @@ class Metrics:
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
if instance_name:
|
||||
self.instance_name = instance_name
|
||||
elif settings.IS_TESTING():
|
||||
elif is_testing():
|
||||
self.instance_name = "awx_testing"
|
||||
else:
|
||||
self.instance_name = Instance.objects.my_hostname()
|
||||
|
||||
@@ -569,7 +569,7 @@ register(
 register(
     'LOG_AGGREGATOR_LOGGERS',
     field_class=fields.StringListField,
-    default=['awx', 'activity_stream', 'job_events', 'system_tracking'],
+    default=['awx', 'activity_stream', 'job_events', 'system_tracking', 'broadcast_websocket'],
     label=_('Loggers Sending Data to Log Aggregator Form'),
     help_text=_(
         'List of loggers that will send HTTP logs to the collector, these can '
@@ -577,7 +577,8 @@ register(
         'awx - service logs\n'
         'activity_stream - activity stream records\n'
         'job_events - callback data from Ansible job events\n'
-        'system_tracking - facts gathered from scan jobs.'
+        'system_tracking - facts gathered from scan jobs\n'
+        'broadcast_websocket - errors pertaining to websockets broadcast metrics\n'
     ),
     category=_('Logging'),
     category_slug='logging',
@@ -9,10 +9,16 @@ aim_inputs = {
     'fields': [
         {
             'id': 'url',
-            'label': _('CyberArk AIM URL'),
+            'label': _('CyberArk CCP URL'),
             'type': 'string',
             'format': 'url',
         },
+        {
+            'id': 'webservice_id',
+            'label': _('Web Service ID'),
+            'type': 'string',
+            'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
+        },
         {
             'id': 'app_id',
             'label': _('Application ID'),
@@ -64,10 +70,13 @@ def aim_backend(**kwargs):
     client_cert = kwargs.get('client_cert', None)
     client_key = kwargs.get('client_key', None)
     verify = kwargs['verify']
+    webservice_id = kwargs['webservice_id']
     app_id = kwargs['app_id']
     object_query = kwargs['object_query']
     object_query_format = kwargs['object_query_format']
     reason = kwargs.get('reason', None)
+    if webservice_id == '':
+        webservice_id = 'AIMWebService'
 
     query_params = {
         'AppId': app_id,
@@ -78,7 +87,7 @@ def aim_backend(**kwargs):
         query_params['reason'] = reason
 
     request_qs = '?' + urlencode(query_params, quote_via=quote)
-    request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
+    request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))
 
     with CertFiles(client_cert, client_key) as cert:
         res = requests.get(
@@ -92,4 +101,4 @@ def aim_backend(**kwargs):
     return res.json()['Content']
 
 
-aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
+aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
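The effect of the webservice_id change above, sketched with just the standard library (the host and IDs here are made up):

from urllib.parse import urljoin, urlencode, quote

def ccp_request_url(url: str, webservice_id: str, app_id: str) -> str:
    if webservice_id == '':
        webservice_id = 'AIMWebService'  # preserve the old default when the field is left blank
    qs = '?' + urlencode({'AppId': app_id}, quote_via=quote)
    return urljoin(url, '/'.join([webservice_id, 'api', 'Accounts'])) + qs

# the default keeps the historical path; a custom ID swaps in cleanly
print(ccp_request_url('https://ccp.example.com', '', 'MyApp'))          # .../AIMWebService/api/Accounts?AppId=MyApp
print(ccp_request_url('https://ccp.example.com', 'MyService', 'MyApp'))  # .../MyService/api/Accounts?AppId=MyApp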
@@ -1,6 +1,5 @@
 from .plugin import CredentialPlugin, CertFiles, raise_for_status
 
-import base64
 from urllib.parse import urljoin, quote
 
 from django.utils.translation import gettext_lazy as _
@@ -61,7 +60,7 @@ def conjur_backend(**kwargs):
     cacert = kwargs.get('cacert', None)
 
     auth_kwargs = {
-        'headers': {'Content-Type': 'text/plain'},
+        'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
         'data': api_key,
         'allow_redirects': False,
     }
@@ -69,9 +68,9 @@ def conjur_backend(**kwargs):
     with CertFiles(cacert) as cert:
         # https://www.conjur.org/api.html#authentication-authenticate-post
         auth_kwargs['verify'] = cert
-        resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
+        resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
     raise_for_status(resp)
-    token = base64.b64encode(resp.content).decode('utf-8')
+    token = resp.content.decode('utf-8')
 
     lookup_kwargs = {
         'headers': {'Authorization': 'Token token="{}"'.format(token)},
@@ -79,9 +78,10 @@ def conjur_backend(**kwargs):
     }
 
     # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
-    path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
+    path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
     if version:
-        path = '?'.join([path, version])
+        ver = "version={}".format(version)
+        path = '?'.join([path, ver])
 
     with CertFiles(cacert) as cert:
         lookup_kwargs['verify'] = cert
@@ -90,4 +90,4 @@ def conjur_backend(**kwargs):
     return resp.text
 
 
-conjur_plugin = CredentialPlugin('CyberArk Conjur Secret Lookup', inputs=conjur_inputs, backend=conjur_backend)
+conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
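One subtle fix in the Conjur hunk: the secret version used to be appended bare (producing "?2"), and is now sent as a proper key=value query parameter. Sketched with the standard library (host and paths are made up):

from urllib.parse import urljoin

url = 'https://conjur.example.com'
account, secret_path, version = 'myorg', 'prod/db/password', 2

# mirrors the corrected path construction in the hunk above
path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
if version:
    path = '?'.join([path, "version={}".format(version)])

print(path)  # https://conjur.example.com/api/secrets/myorg/variable/prod/db/password?version=2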
@@ -466,7 +466,7 @@ class AutoscalePool(WorkerPool):
             task_name = 'unknown'
             if isinstance(body, dict):
                 task_name = body.get('task')
-            logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
+            logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
             return super(AutoscalePool, self).write(preferred_queue, body)
         except Exception:
             for conn in connections.all():
@@ -1,14 +1,13 @@
 import inspect
 import logging
-import sys
 import json
 import time
 from uuid import uuid4
 
 from django.conf import settings
 from django_guid import get_guid
 
 from . import pg_bus_conn
+from awx.main.utils import is_testing
 
 logger = logging.getLogger('awx.main.dispatch')
@@ -93,7 +92,7 @@ class task:
             obj.update(**kw)
         if callable(queue):
             queue = queue()
-        if not settings.IS_TESTING(sys.argv):
+        if not is_testing():
             with pg_bus_conn() as conn:
                 conn.notify(queue, json.dumps(obj))
         return (obj, queue)
@@ -38,7 +38,14 @@ class Command(BaseCommand):
             (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
             RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
             RegisterQueue(
-                settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
+                settings.DEFAULT_EXECUTION_QUEUE_NAME,
+                100,
+                0,
+                [],
+                is_container_group=True,
+                pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE,
+                max_forks=settings.DEFAULT_EXECUTION_QUEUE_MAX_FORKS,
+                max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
             ).register()
         else:
             (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
@@ -32,8 +32,14 @@ class Command(BaseCommand):
     def handle(self, **options):
         self.old_key = settings.SECRET_KEY
         custom_key = os.environ.get("TOWER_SECRET_KEY")
-        if options.get("use_custom_key") and custom_key:
-            self.new_key = custom_key
+        if options.get("use_custom_key"):
+            if custom_key:
+                self.new_key = custom_key
+            else:
+                print("Use custom key was specified but the env var TOWER_SECRET_KEY was not available")
+                import sys
+
+                sys.exit(1)
         else:
             self.new_key = base64.encodebytes(os.urandom(33)).decode().rstrip()
         self._notification_templates()
@@ -17,7 +17,9 @@ class InstanceNotFound(Exception):
 
 
 class RegisterQueue:
-    def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None):
+    def __init__(
+        self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None, max_forks=None, max_concurrent_jobs=None
+    ):
         self.instance_not_found_err = None
         self.queuename = queuename
         self.instance_percent = instance_percent
@@ -25,6 +27,8 @@ class RegisterQueue:
         self.hostname_list = hostname_list
         self.is_container_group = is_container_group
         self.pod_spec_override = pod_spec_override
+        self.max_forks = max_forks
+        self.max_concurrent_jobs = max_concurrent_jobs
 
     def get_create_update_instance_group(self):
         created = False
@@ -45,6 +49,14 @@ class RegisterQueue:
             ig.pod_spec_override = self.pod_spec_override
             changed = True
 
+        if self.max_forks and (ig.max_forks != self.max_forks):
+            ig.max_forks = self.max_forks
+            changed = True
+
+        if self.max_concurrent_jobs and (ig.max_concurrent_jobs != self.max_concurrent_jobs):
+            ig.max_concurrent_jobs = self.max_concurrent_jobs
+            changed = True
+
         if changed:
             ig.save()
@@ -158,7 +158,11 @@ class InstanceManager(models.Manager):
             return (False, instance)
 
         # Create new instance, and fill in default values
-        create_defaults = {'node_state': Instance.States.INSTALLED, 'capacity': 0}
+        create_defaults = {
+            'node_state': Instance.States.INSTALLED,
+            'capacity': 0,
+            'listener_port': 27199,
+        }
         if defaults is not None:
             create_defaults.update(defaults)
         uuid_option = {}
@@ -1,24 +1,14 @@
 # Generated by Django 3.2.13 on 2022-06-21 21:29
 
 from django.db import migrations
-import logging
-
-logger = logging.getLogger("awx")
 
 
 def forwards(apps, schema_editor):
     InventorySource = apps.get_model('main', 'InventorySource')
-    sources = InventorySource.objects.filter(update_on_project_update=True)
-    for src in sources:
-        if src.update_on_launch == False:
-            src.update_on_launch = True
-            src.save(update_fields=['update_on_launch'])
-            logger.info(f"Setting update_on_launch to True for {src}")
-        proj = src.source_project
-        if proj and proj.scm_update_on_launch is False:
-            proj.scm_update_on_launch = True
-            proj.save(update_fields=['scm_update_on_launch'])
-            logger.warning(f"Setting scm_update_on_launch to True for {proj}")
+    InventorySource.objects.filter(update_on_project_update=True).update(update_on_launch=True)
+
+    Project = apps.get_model('main', 'Project')
+    Project.objects.filter(scm_inventory_sources__update_on_project_update=True).update(scm_update_on_launch=True)
 
 
 class Migration(migrations.Migration):
awx/main/migrations/0173_instancegroup_max_limits.py (new file, 23 lines)
@@ -0,0 +1,23 @@
+# Generated by Django 3.2.13 on 2022-10-24 18:22
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0172_prevent_instance_fallback'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='instancegroup',
+            name='max_concurrent_jobs',
+            field=models.IntegerField(default=0, help_text='Maximum number of concurrent jobs to run on this group. Zero means no limit.'),
+        ),
+        migrations.AddField(
+            model_name='instancegroup',
+            name='max_forks',
+            field=models.IntegerField(default=0, help_text='Max forks to execute on this group. Zero means no limit.'),
+        ),
+    ]
awx/main/migrations/0174_ensure_org_ee_admin_roles.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.16 on 2022-12-07 21:11
+
+from django.db import migrations
+
+from awx.main.migrations import _rbac as rbac
+from awx.main.migrations import _migration_utils as migration_utils
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0173_instancegroup_max_limits'),
+    ]
+
+    operations = [
+        migrations.RunPython(migration_utils.set_current_apps_for_migrations),
+        migrations.RunPython(rbac.create_roles),
+    ]
@@ -15,6 +15,7 @@ def aws(cred, env, private_data_dir):

     if cred.has_input('security_token'):
         env['AWS_SECURITY_TOKEN'] = cred.get_input('security_token', default='')
+        env['AWS_SESSION_TOKEN'] = env['AWS_SECURITY_TOKEN']


 def gce(cred, env, private_data_dir):
@@ -233,11 +233,12 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         if not isinstance(vargs.get('grace_period'), int):
             vargs['grace_period'] = 60  # grace period of 60 minutes, need to set because CLI default will not take effect
         if 'exclude_strings' not in vargs and vargs.get('file_pattern'):
-            active_pks = list(
-                UnifiedJob.objects.filter(
-                    (models.Q(execution_node=self.hostname) | models.Q(controller_node=self.hostname)) & models.Q(status__in=('running', 'waiting'))
-                ).values_list('pk', flat=True)
-            )
+            active_job_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
+            if self.node_type == 'execution':
+                active_job_qs = active_job_qs.filter(execution_node=self.hostname)
+            else:
+                active_job_qs = active_job_qs.filter(controller_node=self.hostname)
+            active_pks = list(active_job_qs.values_list('pk', flat=True))
            if active_pks:
                 vargs['exclude_strings'] = [JOB_FOLDER_PREFIX % job_id for job_id in active_pks]
         if 'remove_images' in vargs or 'image_prune' in vargs:
@@ -378,6 +379,8 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
             default='',
         )
     )
+    max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
+    max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
     policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
     policy_instance_minimum = models.IntegerField(default=0, help_text=_("Static minimum number of Instances to automatically assign to this group"))
     policy_instance_list = JSONBlob(
@@ -391,6 +394,8 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):

     @property
     def capacity(self):
+        if self.is_container_group:
+            return self.max_forks
         return sum(inst.capacity for inst in self.instances.all())

     @property
@@ -247,6 +247,19 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         return (number, step)

     def get_sliced_hosts(self, host_queryset, slice_number, slice_count):
+        """
+        Returns a slice of Hosts given a slice number and total slice count, or
+        the original queryset if slicing is not requested.
+
+        NOTE: If slicing is performed, this will return a List[Host] with the
+        resulting slice. If slicing is not performed it will return the
+        original queryset (not evaluating it or forcing it to a list). This
+        puts the burden on the caller to check the resulting type. This is
+        non-ideal because it's easy to get wrong, but I think the only way
+        around it is to force the queryset which has memory implications for
+        large inventories.
+        """
+
         if slice_count > 1 and slice_number > 0:
             offset = slice_number - 1
             host_queryset = host_queryset[offset::slice_count]
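A minimal recap of the slicing arithmetic above: slice_number is 1-indexed, and the extended slice hosts[offset::slice_count] deals hosts out round-robin so each host lands in exactly one slice (plain lists stand in for the queryset here):

hosts = [f"host-{i}" for i in range(7)]
slice_count = 3
slices = [hosts[n - 1::slice_count] for n in range(1, slice_count + 1)]
# Every host appears in exactly one slice.
assert sorted(h for s in slices for h in s) == sorted(hosts)
print(slices)  # [['host-0', 'host-3', 'host-6'], ['host-1', 'host-4'], ['host-2', 'host-5']]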
@@ -554,17 +567,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
     # Use .job_host_summaries.all() to get jobs affecting this host.
     # Use .job_events.all() to get events affecting this host.

-    '''
-    We don't use timestamp, but we may in the future.
-    '''
-
-    def update_ansible_facts(self, module, facts, timestamp=None):
-        if module == "ansible":
-            self.ansible_facts.update(facts)
-        else:
-            self.ansible_facts[module] = facts
-        self.save()
-
     def get_effective_host_name(self):
         """
         Return the name of the host that will be used in actual ansible
@@ -15,6 +15,7 @@ from urllib.parse import urljoin
 from django.conf import settings
 from django.core.exceptions import ValidationError
 from django.db import models
+from django.db.models.query import QuerySet

 # from django.core.cache import cache
 from django.utils.encoding import smart_str
@@ -43,7 +44,7 @@ from awx.main.models.notifications import (
     NotificationTemplate,
     JobNotificationMixin,
 )
-from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
+from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
 from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
 from awx.main.models.mixins import (
     ResourceMixin,
@@ -844,22 +845,35 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def get_notification_friendly_name(self):
         return "Job"

-    def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
+    def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
+        """Return value is an iterable for the relevant hosts for this job"""
         if not self.inventory:
             return []
         host_queryset = self.inventory.hosts.only(*only)
-        return self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
+        if filters:
+            host_queryset = host_queryset.filter(**filters)
+        host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
+        if isinstance(host_queryset, QuerySet):
+            return host_queryset.iterator()
+        return host_queryset

-    def start_job_fact_cache(self, destination, modification_times, timeout=None):
+    @log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
+    def start_job_fact_cache(self, destination, log_data, timeout=None):
         self.log_lifecycle("start_job_fact_cache")
+        log_data['job_id'] = self.id
+        log_data['written_ct'] = 0
         os.makedirs(destination, mode=0o700)
-        hosts = self._get_inventory_hosts()

         if timeout is None:
             timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
         if timeout > 0:
             # exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT seconds`
             timeout = now() - datetime.timedelta(seconds=timeout)
-            hosts = hosts.filter(ansible_facts_modified__gte=timeout)
+            hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
+        else:
+            hosts = self._get_inventory_hosts()

+        last_filepath_written = None
         for host in hosts:
             filepath = os.sep.join(map(str, [destination, host.name]))
             if not os.path.realpath(filepath).startswith(destination):
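The timeout branch above turns the fact-cache TTL into a datetime cutoff and pushes the freshness filter into the database query rather than filtering afterwards. A recap of the cutoff arithmetic (values illustrative):

import datetime

def fact_cutoff(timeout_seconds, now):
    # Hosts whose facts were modified at or after this instant are still fresh.
    return now - datetime.timedelta(seconds=timeout_seconds)

now = datetime.datetime(2022, 12, 7, 12, 0, 0)
assert fact_cutoff(3600, now) == datetime.datetime(2022, 12, 7, 11, 0, 0)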
@@ -869,23 +883,38 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                 with codecs.open(filepath, 'w', encoding='utf-8') as f:
                     os.chmod(f.name, 0o600)
                     json.dump(host.ansible_facts, f)
+                    log_data['written_ct'] += 1
+                    last_filepath_written = filepath
             except IOError:
                 system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                 continue
-            # make note of the time we wrote the file so we can check if it changed later
-            modification_times[filepath] = os.path.getmtime(filepath)
+        # make note of the time we wrote the last file so we can check if any file changed later
+        if last_filepath_written:
+            return os.path.getmtime(last_filepath_written)
+        return None

-    def finish_job_fact_cache(self, destination, modification_times):
+    @log_excess_runtime(
+        logger,
+        debug_cutoff=0.01,
+        msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
+        add_log_data=True,
+    )
+    def finish_job_fact_cache(self, destination, facts_write_time, log_data):
         self.log_lifecycle("finish_job_fact_cache")
+        log_data['job_id'] = self.id
+        log_data['updated_ct'] = 0
+        log_data['unmodified_ct'] = 0
+        log_data['cleared_ct'] = 0
+        hosts_to_update = []
         for host in self._get_inventory_hosts():
             filepath = os.sep.join(map(str, [destination, host.name]))
             if not os.path.realpath(filepath).startswith(destination):
                 system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                 continue
             if os.path.exists(filepath):
-                # If the file changed since we wrote it pre-playbook run...
+                # If the file changed since we wrote the last facts file, pre-playbook run...
                 modified = os.path.getmtime(filepath)
-                if modified > modification_times.get(filepath, 0):
+                if (not facts_write_time) or modified > facts_write_time:
                     with codecs.open(filepath, 'r', encoding='utf-8') as f:
                         try:
                             ansible_facts = json.load(f)
@@ -893,7 +922,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                             continue
                     host.ansible_facts = ansible_facts
                     host.ansible_facts_modified = now()
-                    host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
+                    hosts_to_update.append(host)
                     system_tracking_logger.info(
                         'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                         extra=dict(
@@ -904,12 +933,21 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                             job_id=self.id,
                         ),
                     )
+                    log_data['updated_ct'] += 1
+                else:
+                    log_data['unmodified_ct'] += 1
             else:
                 # if the file goes missing, ansible removed it (likely via clear_facts)
                 host.ansible_facts = {}
                 host.ansible_facts_modified = now()
+                hosts_to_update.append(host)
                 system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
-                host.save()
+                log_data['cleared_ct'] += 1
+            if len(hosts_to_update) > 100:
+                self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
+                hosts_to_update = []
+        if hosts_to_update:
+            self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])


 class LaunchTimeConfigBase(BaseModel):
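The finish path above replaces per-host save() calls with batched bulk_update() flushes of at most ~100 rows, so one UPDATE never carries thousands of hosts. A generic sketch of that batching idiom, assuming any Django manager that supports bulk_update (names illustrative):

def flush_in_batches(manager, dirty_objs, fields, batch_size=100):
    # Accumulate changed rows and flush them in bounded chunks.
    batch = []
    for obj in dirty_objs:
        batch.append(obj)
        if len(batch) >= batch_size:
            manager.bulk_update(batch, fields)
            batch = []
    if batch:
        manager.bulk_update(batch, fields)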
@@ -471,6 +471,29 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
     def get_absolute_url(self, request=None):
         return reverse('api:project_detail', kwargs={'pk': self.pk}, request=request)

+    def get_reason_if_failed(self):
+        """
+        If the project is in a failed or errored state, return a human-readable
+        error message explaining why. Otherwise return None.
+
+        This is used during validation in the serializer and also by
+        RunProjectUpdate/RunInventoryUpdate.
+        """
+
+        if self.status not in ('error', 'failed'):
+            return None
+
+        latest_update = self.project_updates.last()
+        if latest_update is not None and latest_update.failed:
+            failed_validation_tasks = latest_update.project_update_events.filter(
+                event='runner_on_failed',
+                play="Perform project signature/checksum verification",
+            )
+            if failed_validation_tasks:
+                return _("Last project update failed due to signature validation failure.")
+
+        return _("Missing a revision to run due to failed project update.")
+
     '''
     RelatedJobsMixin
     '''
@@ -1351,12 +1351,12 @@ class UnifiedJob(
                 if required in defined_fields and not credential.has_input(required):
                     missing_credential_inputs.append(required)

-            if missing_credential_inputs:
-                self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
-                    self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
-                )
-                self.save(update_fields=['job_explanation'])
-                return (False, None)
+        if missing_credential_inputs:
+            self.job_explanation = '{} cannot start because Credential {} does not provide one or more required fields ({}).'.format(
+                self._meta.verbose_name.title(), credential.name, ', '.join(sorted(missing_credential_inputs))
+            )
+            self.save(update_fields=['job_explanation'])
+            return (False, None)

         needed = self.get_passwords_needed_to_start()
         try:
@@ -5,9 +5,6 @@ import json
 import logging
 import requests

-from django.utils.encoding import smart_str
-from django.utils.translation import gettext_lazy as _
-
 from awx.main.notifications.base import AWXBaseEmailBackend
 from awx.main.utils import get_awx_http_client_headers
 from awx.main.notifications.custom_notification_base import CustomNotificationBase
@@ -17,6 +14,8 @@ logger = logging.getLogger('awx.main.notifications.webhook_backend')

 class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):

+    MAX_RETRIES = 5
+
     init_parameters = {
         "url": {"label": "Target URL", "type": "string"},
         "http_method": {"label": "HTTP Method", "type": "string", "default": "POST"},
@@ -64,20 +63,67 @@
         if self.http_method.lower() not in ['put', 'post']:
             raise ValueError("HTTP method must be either 'POST' or 'PUT'.")
         chosen_method = getattr(requests, self.http_method.lower(), None)

         for m in messages:

             auth = None
             if self.username or self.password:
                 auth = (self.username, self.password)
-            r = chosen_method(
-                "{}".format(m.recipients()[0]),
-                auth=auth,
-                data=json.dumps(m.body, ensure_ascii=False).encode('utf-8'),
-                headers=dict(list(get_awx_http_client_headers().items()) + list((self.headers or {}).items())),
-                verify=(not self.disable_ssl_verification),
-            )
-            if r.status_code >= 400:
-                logger.error(smart_str(_("Error sending notification webhook: {}").format(r.status_code)))
+
+            # the constructor for EmailMessage - https://docs.djangoproject.com/en/4.1/_modules/django/core/mail/message will turn an empty dictionary to an empty string
+            # sometimes an empty dict is intentional and we added this conditional to enforce that
+            if not m.body:
+                m.body = {}
+
+            url = str(m.recipients()[0])
+            data = json.dumps(m.body, ensure_ascii=False).encode('utf-8')
+            headers = {**(get_awx_http_client_headers()), **(self.headers or {})}
+
+            err = None
+
+            for retries in range(self.MAX_RETRIES):
+
+                # Sometimes we hit redirect URLs. We must account for this. We extract the redirect URL from the response headers and try again. Max retries == 5
+                resp = chosen_method(
+                    url=url,
+                    auth=auth,
+                    data=data,
+                    headers=headers,
+                    verify=(not self.disable_ssl_verification),
+                    allow_redirects=False,  # override default behaviour for redirects
+                )
+
+                # either success or error reached if this conditional fires
+                if resp.status_code not in [301, 307]:
+                    break
+
+                # we've hit a redirect. extract the redirect URL out of the first response header and try again
+                logger.warning(
+                    f"Received a {resp.status_code} from {url}, trying to reach redirect url {resp.headers.get('Location', None)}; attempt #{retries+1}"
+                )
+
+                # take the first redirect URL in the response header and try that
+                url = resp.headers.get("Location", None)
+
+                if url is None:
+                    err = f"Webhook notification received redirect to a blank URL from {url}. Response headers={resp.headers}"
+                    break
+            else:
+                # no break condition in the loop encountered; therefore we have hit the maximum number of retries
+                err = f"Webhook notification max number of retries [{self.MAX_RETRIES}] exceeded. Failed to send webhook notification to {url}"
+
+            if resp.status_code >= 400:
+                err = f"Error sending webhook notification: {resp.status_code}"
+
+            # log error message
+            if err:
+                logger.error(err)
                 if not self.fail_silently:
-                    raise Exception(smart_str(_("Error sending notification webhook: {}").format(r.status_code)))
-            sent_messages += 1
+                    raise Exception(err)
+
+            # no errors were encountered, therefore we successfully sent off the notification webhook
+            if resp.status_code in range(200, 299):
+                logger.debug(f"Notification webhook successfully sent to {url}. Received {resp.status_code}")
+                sent_messages += 1
+
         return sent_messages
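A self-contained sketch of the manual-redirect loop introduced above, reduced to the bare requests calls (the function name and retry budget mirror the diff; everything else is illustrative):

import requests

def post_following_redirects(url, data, max_retries=5):
    for _ in range(max_retries):
        resp = requests.post(url, data=data, allow_redirects=False)
        if resp.status_code not in (301, 307):
            return resp  # either success or a non-redirect error
        # Follow the Location header manually on the next attempt.
        url = resp.headers.get("Location")
        if url is None:
            raise RuntimeError("redirected to a blank URL")
    raise RuntimeError(f"exceeded {max_retries} redirects")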
@@ -3,6 +3,8 @@

 from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed

+from taggit.managers import TaggableManager
+

 class ActivityStreamRegistrar(object):
     def __init__(self):
@@ -19,6 +21,8 @@ class ActivityStreamRegistrar(object):
             pre_delete.connect(activity_stream_delete, sender=model, dispatch_uid=str(self.__class__) + str(model) + "_delete")

             for m2mfield in model._meta.many_to_many:
+                if isinstance(m2mfield, TaggableManager):
+                    continue  # Special case for taggit app
                 try:
                     m2m_attr = getattr(model, m2mfield.name)
                     m2m_changed.connect(
@@ -27,8 +27,8 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):


 websocket_urlpatterns = [
-    re_path(r'websocket/$', consumers.EventConsumer),
-    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),
+    re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
+    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
 ]

 application = AWXProtocolTypeRouter(
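The .as_asgi() calls above follow the Channels 3 convention of turning a consumer class into a per-route ASGI application. A minimal sketch under that assumption (the echo consumer is illustrative, not part of AWX):

from channels.generic.websocket import AsyncWebsocketConsumer
from django.urls import re_path

class EchoConsumer(AsyncWebsocketConsumer):
    async def receive(self, text_data=None, bytes_data=None):
        await self.send(text_data=text_data)  # echo the frame back

websocket_urlpatterns = [
    re_path(r'ws/echo/$', EchoConsumer.as_asgi()),
]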
@@ -39,12 +39,11 @@ from awx.main.utils import (
     ScheduleTaskManager,
     ScheduleWorkflowManager,
 )
-from awx.main.utils.common import task_manager_bulk_reschedule
+from awx.main.utils.common import task_manager_bulk_reschedule, is_testing
 from awx.main.signals import disable_activity_stream
 from awx.main.constants import ACTIVE_STATES
 from awx.main.scheduler.dependency_graph import DependencyGraph
-from awx.main.scheduler.task_manager_models import TaskManagerInstances
-from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
+from awx.main.scheduler.task_manager_models import TaskManagerModels
 import awx.main.analytics.subsystem_metrics as s_metrics
 from awx.main.utils import decrypt_field
@@ -71,7 +70,12 @@ class TaskBase:
         # is called later.
         self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
         self.start_time = time.time()
+
+        # We want to avoid calling settings in loops, so cache these settings at init time
+        self.start_task_limit = settings.START_TASK_LIMIT
+        self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT
+        self.control_task_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT

         for m in self.subsystem_metrics.METRICS:
             if m.startswith(self.prefix):
                 self.subsystem_metrics.set(m, 0)
@@ -79,7 +83,7 @@ class TaskBase:
     def timed_out(self):
         """Return True/False if we have met or exceeded the timeout for the task manager."""
         elapsed = time.time() - self.start_time
-        if elapsed >= settings.TASK_MANAGER_TIMEOUT:
+        if elapsed >= self.task_manager_timeout:
             logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.")
             return True
         return False
@@ -97,7 +101,7 @@ class TaskBase:
         self.all_tasks = [t for t in qs]

     def record_aggregate_metrics(self, *args):
-        if not settings.IS_TESTING():
+        if not is_testing():
             # increment task_manager_schedule_calls regardless if the other
             # metrics are recorded
             s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
@@ -471,9 +475,8 @@ class TaskManager(TaskBase):
         Init AFTER we know this instance of the task manager will run because the lock is acquired.
         """
         self.dependency_graph = DependencyGraph()
-        self.instances = TaskManagerInstances(self.all_tasks)
-        self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
-        self.controlplane_ig = self.instance_groups.controlplane_ig
+        self.tm_models = TaskManagerModels()
+        self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig

     def job_blocked_by(self, task):
         # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
@@ -504,8 +507,16 @@
         return None

     @timeit
-    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
+    def start_task(self, task, instance_group, instance=None):
+        # Just like for process_running_tasks, add the job to the dependency graph and
+        # ask the TaskManagerInstanceGroups object to update consumed capacity on all
+        # implicated instances and container groups.
+        self.dependency_graph.add_job(task)
+        if instance_group is not None:
+            task.instance_group = instance_group
+        # We need the instance group assigned to correctly account for container group max_concurrent_jobs and max_forks
+        self.tm_models.consume_capacity(task)
+
         self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
         self.start_task_limit -= 1
         if self.start_task_limit == 0:
@@ -513,20 +524,6 @@
             ScheduleTaskManager().schedule()
         from awx.main.tasks.system import handle_work_error, handle_work_success

-        # update capacity for control node and execution node
-        if task.controller_node:
-            self.instances[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
-        if task.execution_node:
-            self.instances[task.execution_node].consume_capacity(task.task_impact)
-
-        dependent_tasks = dependent_tasks or []
-
-        task_actual = {
-            'type': get_type_for_model(type(task)),
-            'id': task.id,
-        }
-        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
-
         task.status = 'waiting'

         (start_status, opts) = task.pre_start()
@@ -546,7 +543,6 @@
                 ScheduleWorkflowManager().schedule()
             # at this point we already have control/execution nodes selected for the following cases
             else:
-                task.instance_group = instance_group
                 execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
                 logger.debug(
                     f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
@@ -559,6 +555,7 @@
         # apply_async does a NOTIFY to the channel dispatcher is listening to
         # postgres will treat this as part of the transaction, which is what we want
         if task.status != 'failed' and type(task) is not WorkflowJob:
+            task_actual = {'type': get_type_for_model(type(task)), 'id': task.id}
             task_cls = task._get_task_class()
             task_cls.apply_async(
                 [task.pk],
@@ -566,7 +563,7 @@
                 queue=task.get_queue_name(),
                 uuid=task.celery_task_id,
                 callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
-                errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
+                errbacks=[{'task': handle_work_error.name, 'kwargs': {'task_actual': task_actual}}],
             )

         # In exception cases, like a job failing pre-start checks, we send the websocket status message
@@ -580,6 +577,7 @@
         if type(task) is WorkflowJob:
             ScheduleWorkflowManager().schedule()
         self.dependency_graph.add_job(task)
+        self.tm_models.consume_capacity(task)

     @timeit
     def process_pending_tasks(self, pending_tasks):
@@ -604,18 +602,18 @@
             if isinstance(task, WorkflowJob):
                 # Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph.
                 # Double check that using just the DependencyGraph works for Workflows and Sliced Jobs.
-                self.start_task(task, None, task.get_jobs_fail_chain(), None)
+                self.start_task(task, None, None)
                 continue

             found_acceptable_queue = False

             # Determine if there is control capacity for the task
             if task.capacity_type == 'control':
-                control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
+                control_impact = task.task_impact + self.control_task_impact
             else:
-                control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
-            control_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
-                task, instance_group_name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, impact=control_impact, capacity_type='control'
+                control_impact = self.control_task_impact
+            control_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(
+                task, instance_group_name=self.controlplane_ig.name, impact=control_impact, capacity_type='control'
             )
             if not control_instance:
                 self.task_needs_capacity(task, tasks_to_update_job_explanation)
@@ -626,25 +624,29 @@

             # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
             if task.capacity_type == 'control':
+                if not self.tm_models.instance_groups[self.controlplane_ig.name].has_remaining_capacity(control_impact=True):
+                    continue
                 task.execution_node = control_instance.hostname
-                execution_instance = self.instances[control_instance.hostname].obj
+                execution_instance = self.tm_models.instances[control_instance.hostname].obj
                 task.log_lifecycle("controller_node_chosen")
                 task.log_lifecycle("execution_node_chosen")
-                self.start_task(task, self.controlplane_ig, task.get_jobs_fail_chain(), execution_instance)
+                self.start_task(task, self.controlplane_ig, execution_instance)
                 found_acceptable_queue = True
                 continue

-            for instance_group in self.instance_groups.get_instance_groups_from_task_cache(task):
+            for instance_group in self.tm_models.instance_groups.get_instance_groups_from_task_cache(task):
+                if not self.tm_models.instance_groups[instance_group.name].has_remaining_capacity(task):
+                    continue
                 if instance_group.is_container_group:
-                    self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
+                    self.start_task(task, instance_group, None)
                     found_acceptable_queue = True
                     break

                 # at this point we know the instance group is NOT a container group
                 # because if it was, it would have started the task and broke out of the loop.
-                execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
+                execution_instance = self.tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(
                     task, instance_group_name=instance_group.name, add_hybrid_control_cost=True
-                ) or self.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)
+                ) or self.tm_models.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)

                 if execution_instance:
                     task.execution_node = execution_instance.hostname
@@ -660,8 +662,8 @@
                             task.log_format, instance_group.name, execution_instance.hostname, execution_instance.remaining_capacity
                         )
                     )
-                    execution_instance = self.instances[execution_instance.hostname].obj
-                    self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
+                    execution_instance = self.tm_models.instances[execution_instance.hostname].obj
+                    self.start_task(task, instance_group, execution_instance)
                     found_acceptable_queue = True
                     break
                 else:
@@ -15,15 +15,18 @@ logger = logging.getLogger('awx.main.scheduler')
 class TaskManagerInstance:
     """A class representing minimal data the task manager needs to represent an Instance."""

-    def __init__(self, obj):
+    def __init__(self, obj, **kwargs):
         self.obj = obj
         self.node_type = obj.node_type
         self.consumed_capacity = 0
         self.capacity = obj.capacity
         self.hostname = obj.hostname
+        self.jobs_running = 0

-    def consume_capacity(self, impact):
+    def consume_capacity(self, impact, job_impact=False):
         self.consumed_capacity += impact
+        if job_impact:
+            self.jobs_running += 1

     @property
     def remaining_capacity(self):
@@ -33,9 +36,106 @@ class TaskManagerInstance:
         return remaining


+class TaskManagerInstanceGroup:
+    """A class representing minimal data the task manager needs to represent an InstanceGroup."""
+
+    def __init__(self, obj, task_manager_instances=None, **kwargs):
+        self.name = obj.name
+        self.is_container_group = obj.is_container_group
+        self.container_group_jobs = 0
+        self.container_group_consumed_forks = 0
+        _instances = obj.instances.all()
+        # We want the list of TaskManagerInstance objects because these are shared across the TaskManagerInstanceGroup objects.
+        # This way when we consume capacity on an instance that is in multiple groups, we tabulate across all the groups correctly.
+        self.instances = [task_manager_instances[instance.hostname] for instance in _instances if instance.hostname in task_manager_instances]
+        self.instance_hostnames = tuple([instance.hostname for instance in _instances if instance.hostname in task_manager_instances])
+        self.max_concurrent_jobs = obj.max_concurrent_jobs
+        self.max_forks = obj.max_forks
+        self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
+
+    def consume_capacity(self, task):
+        """We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level."""
+        if self.is_container_group:
+            self.container_group_jobs += 1
+            self.container_group_consumed_forks += task.task_impact
+        else:
+            raise RuntimeError("We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances.")
+
+    def get_remaining_instance_capacity(self):
+        return sum(inst.remaining_capacity for inst in self.instances)
+
+    def get_instance_capacity(self):
+        return sum(inst.capacity for inst in self.instances)
+
+    def get_consumed_instance_capacity(self):
+        return sum(inst.consumed_capacity for inst in self.instances)
+
+    def get_instance_jobs_running(self):
+        return sum(inst.jobs_running for inst in self.instances)
+
+    def get_jobs_running(self):
+        if self.is_container_group:
+            return self.container_group_jobs
+        return sum(inst.jobs_running for inst in self.instances)
+
+    def get_capacity(self):
+        """This reports any type of capacity, including that of container group jobs.
+
+        Container groups don't really have capacity, but if they have max_forks set,
+        we can interpret that as how much capacity the user has defined them to have.
+        """
+        if self.is_container_group:
+            return self.max_forks
+        return self.get_instance_capacity()
+
+    def get_consumed_capacity(self):
+        if self.is_container_group:
+            return self.container_group_consumed_forks
+        return self.get_consumed_instance_capacity()
+
+    def get_remaining_capacity(self):
+        return self.get_capacity() - self.get_consumed_capacity()
+
+    def has_remaining_capacity(self, task=None, control_impact=False):
+        """Pass either a task or control_impact=True to determine if the IG has capacity to run the control task or job task."""
+        task_impact = self.control_task_impact if control_impact else task.task_impact
+        job_impact = 0 if control_impact else 1
+        task_string = f"task {task.log_format} with impact of {task_impact}" if task else f"control task with impact of {task_impact}"
+
+        # We only enforce a job count limit if self.max_concurrent_jobs is set
+        if self.max_concurrent_jobs == 0:
+            # Override the calculated remaining capacity, because when max_concurrent_jobs == 0 we don't enforce any max
+            remaining_jobs = 0
+        else:
+            remaining_jobs = self.max_concurrent_jobs - self.get_jobs_running() - job_impact
+
+        # We only enforce a fork limit if self.max_forks is set
+        if self.max_forks == 0:
+            # Override the calculated remaining capacity, because when max_forks == 0 we don't enforce any max
+            remaining_forks = 0
+        else:
+            remaining_forks = self.max_forks - self.get_consumed_capacity() - task_impact
+
+        if remaining_jobs < 0 or remaining_forks < 0:
+            # A value less than zero means the task will not fit on the group
+            if remaining_jobs < 0:
+                logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_jobs} remaining jobs")
+            if remaining_forks < 0:
+                logger.debug(f"{task_string} cannot fit on instance group {self.name} with {remaining_forks} remaining forks")
+            return False
+
+        # Returning true means there is enough remaining capacity on the group to run the task (or no instance group level limits are being set)
+        logger.debug(f"{task_string} can fit on instance group {self.name} with {remaining_forks} remaining forks and {remaining_jobs} remaining jobs")
+        return True
+
+
 class TaskManagerInstances:
-    def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
+    def __init__(self, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled'), **kwargs):
         self.instances_by_hostname = dict()
+        self.instance_groups_container_group_jobs = dict()
+        self.instance_groups_container_group_consumed_forks = dict()
+        self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)

         if instances is None:
             instances = (
                 Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True)
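A condensed recap of the limit checks in has_remaining_capacity above: a limit of zero disables enforcement, otherwise starting the task must leave a non-negative remainder of both jobs and forks (the helper is illustrative, not AWX's API):

def fits(max_jobs, jobs_running, max_forks, consumed_forks, task_impact):
    remaining_jobs = 0 if max_jobs == 0 else max_jobs - jobs_running - 1
    remaining_forks = 0 if max_forks == 0 else max_forks - consumed_forks - task_impact
    return remaining_jobs >= 0 and remaining_forks >= 0

assert fits(0, 50, 0, 900, 25)      # both limits unset: always fits
assert not fits(2, 2, 0, 0, 10)     # job limit already reached
assert fits(0, 0, 100, 60, 40)      # exactly exhausts the fork limit
assert not fits(0, 0, 100, 61, 40)  # one fork over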
@@ -43,18 +143,15 @@ class TaskManagerInstances:
                 .only('node_type', 'node_state', 'capacity', 'hostname', 'enabled')
             )
         for instance in instances:
-            self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
+            self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance, **kwargs)

-        # initialize remaining capacity based on currently waiting and running tasks
-        for task in active_tasks:
-            if task.status not in ['waiting', 'running']:
-                continue
-            control_instance = self.instances_by_hostname.get(task.controller_node, '')
-            execution_instance = self.instances_by_hostname.get(task.execution_node, '')
-            if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
-                self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact)
-            if control_instance and control_instance.node_type in ('hybrid', 'control'):
-                self.instances_by_hostname[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
+    def consume_capacity(self, task):
+        control_instance = self.instances_by_hostname.get(task.controller_node, '')
+        execution_instance = self.instances_by_hostname.get(task.execution_node, '')
+        if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
+            self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact, job_impact=True)
+        if control_instance and control_instance.node_type in ('hybrid', 'control'):
+            self.instances_by_hostname[task.controller_node].consume_capacity(self.control_task_impact)

     def __getitem__(self, hostname):
         return self.instances_by_hostname.get(hostname)
@@ -64,42 +161,57 @@ class TaskManagerInstances:


 class TaskManagerInstanceGroups:
-    """A class representing minimal data the task manager needs to represent an InstanceGroup."""
+    """A class representing minimal data the task manager needs to represent all the InstanceGroups."""

-    def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
+    def __init__(self, task_manager_instances=None, instance_groups=None, instance_groups_queryset=None, **kwargs):
         self.instance_groups = dict()
+        self.task_manager_instances = task_manager_instances if task_manager_instances is not None else TaskManagerInstances()
         self.controlplane_ig = None
+        self.pk_ig_map = dict()
+        self.control_task_impact = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
+        self.controlplane_ig_name = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)

         if instance_groups is not None:  # for testing
-            self.instance_groups = instance_groups
+            self.instance_groups = {ig.name: TaskManagerInstanceGroup(ig, self.task_manager_instances, **kwargs) for ig in instance_groups}
+            self.pk_ig_map = {ig.pk: ig for ig in instance_groups}
         else:
             if instance_groups_queryset is None:
-                instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only('name', 'instances')
-            for instance_group in instance_groups_queryset:
-                if instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
-                    self.controlplane_ig = instance_group
-                self.instance_groups[instance_group.name] = dict(
-                    instances=[
-                        instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
-                    ],
-                )
+                instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only(
+                    'name', 'instances', 'max_concurrent_jobs', 'max_forks', 'is_container_group'
+                )
+            for instance_group in instance_groups_queryset:
+                if instance_group.name == self.controlplane_ig_name:
+                    self.controlplane_ig = instance_group
+                self.instance_groups[instance_group.name] = TaskManagerInstanceGroup(instance_group, self.task_manager_instances, **kwargs)
+                self.pk_ig_map[instance_group.pk] = instance_group
+
+    def __getitem__(self, ig_name):
+        return self.instance_groups.get(ig_name)
+
+    def __contains__(self, ig_name):
+        return ig_name in self.instance_groups

     def get_remaining_capacity(self, group_name):
-        instances = self.instance_groups[group_name]['instances']
-        return sum(inst.remaining_capacity for inst in instances)
+        return self.instance_groups[group_name].get_remaining_instance_capacity()

     def get_consumed_capacity(self, group_name):
-        instances = self.instance_groups[group_name]['instances']
-        return sum(inst.consumed_capacity for inst in instances)
+        return self.instance_groups[group_name].get_consumed_capacity()
+
+    def get_jobs_running(self, group_name):
+        return self.instance_groups[group_name].get_jobs_running()
+
+    def get_capacity(self, group_name):
+        return self.instance_groups[group_name].get_capacity()
+
+    def get_instances(self, group_name):
+        return self.instance_groups[group_name].instances

     def fit_task_to_most_remaining_capacity_instance(self, task, instance_group_name, impact=None, capacity_type=None, add_hybrid_control_cost=False):
         impact = impact if impact else task.task_impact
         capacity_type = capacity_type if capacity_type else task.capacity_type
         instance_most_capacity = None
         most_remaining_capacity = -1
-        instances = self.instance_groups[instance_group_name]['instances']
+        instances = self.instance_groups[instance_group_name].instances

         for i in instances:
             if i.node_type not in (capacity_type, 'hybrid'):
@@ -107,7 +219,7 @@ class TaskManagerInstanceGroups:
             would_be_remaining = i.remaining_capacity - impact
             # hybrid nodes _always_ control their own tasks
             if add_hybrid_control_cost and i.node_type == 'hybrid':
-                would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
+                would_be_remaining -= self.control_task_impact
             if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
                 instance_most_capacity = i
                 most_remaining_capacity = would_be_remaining
@@ -115,10 +227,13 @@ class TaskManagerInstanceGroups:

     def find_largest_idle_instance(self, instance_group_name, capacity_type='execution'):
         largest_instance = None
-        instances = self.instance_groups[instance_group_name]['instances']
+        instances = self.instance_groups[instance_group_name].instances
         for i in instances:
             if i.node_type not in (capacity_type, 'hybrid'):
                 continue
+            if i.capacity <= 0:
+                # We don't want to select an idle instance with 0 capacity
+                continue
             if (hasattr(i, 'jobs_running') and i.jobs_running == 0) or i.remaining_capacity == i.capacity:
                 if largest_instance is None:
                     largest_instance = i
@@ -139,3 +254,56 @@ class TaskManagerInstanceGroups:
             logger.warn(f"No instance groups in cache exist, defaulting to global instance groups for task {task}")
             return task.global_instance_groups
         return igs
+
+
+class TaskManagerModels:
+    def __init__(self, **kwargs):
+        # We want to avoid calls to settings over and over in loops, so cache this information here
+        kwargs['control_task_impact'] = kwargs.get('control_task_impact', settings.AWX_CONTROL_NODE_TASK_IMPACT)
+        kwargs['controlplane_ig_name'] = kwargs.get('controlplane_ig_name', settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
+        self.instances = TaskManagerInstances(**kwargs)
+        self.instance_groups = TaskManagerInstanceGroups(task_manager_instances=self.instances, **kwargs)
+
+    @classmethod
+    def init_with_consumed_capacity(cls, **kwargs):
+        tmm = cls(**kwargs)
+        tasks = kwargs.get('tasks', None)
+
+        if tasks is None:
+            instance_group_queryset = kwargs.get('instance_groups_queryset', None)
+            # No tasks were provided, so we will fetch them from the database
+            task_status_filter_list = kwargs.get('task_status_filter_list', ['running', 'waiting'])
+            task_fields = kwargs.get('task_fields', ('task_impact', 'controller_node', 'execution_node', 'instance_group'))
+            from awx.main.models import UnifiedJob
+
+            if instance_group_queryset is not None:
+                logger.debug("Instance group queryset passed -- filtering tasks")
+                # Sometimes things like the serializer pass a queryset that does not look at all instance groups. In this case,
+                # we also need to filter the tasks we look at
+                tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list, instance_group__in=[ig.id for ig in instance_group_queryset]).only(
+                    *task_fields
+                )
+            else:
+                # No instance group queryset, look at all tasks in the whole system
+                tasks = UnifiedJob.objects.filter(status__in=task_status_filter_list).only(*task_fields)
+
+        for task in tasks:
+            tmm.consume_capacity(task)
+
+        return tmm
+
+    def consume_capacity(self, task):
+        # Consume capacity on instances, which bubbles up to instance groups they are a member of
+        self.instances.consume_capacity(task)
+
+        # For container group jobs, we must additionally account for consumed capacity here, since
+        # container groups have no instances on which to track how many jobs/forks are consumed
+        if task.instance_group_id:
+            if task.instance_group_id not in self.instance_groups.pk_ig_map.keys():
+                logger.warn(
+                    f"Task {task.log_format} assigned {task.instance_group_id} but this instance group is not present in the map of instance groups {self.instance_groups.pk_ig_map.keys()}"
+                )
+            else:
+                ig = self.instance_groups.pk_ig_map[task.instance_group_id]
+                if ig.is_container_group:
+                    self.instance_groups[ig.name].consume_capacity(task)
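init_with_consumed_capacity above builds the in-memory model first and then replays the currently running/waiting tasks so their cost is pre-charged before any scheduling decision. A stripped-down sketch of that replay pattern (the Ledger class is illustrative):

class Ledger:
    def __init__(self):
        self.consumed = 0
        self.jobs = 0

    def consume(self, impact):
        self.consumed += impact
        self.jobs += 1

def init_with_consumed(active_task_impacts):
    ledger = Ledger()
    for impact in active_task_impacts:
        ledger.consume(impact)  # replay already-active work
    return ledger

ledger = init_with_consumed([10, 20, 5])
assert (ledger.consumed, ledger.jobs) == (35, 3)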
@@ -2,8 +2,6 @@ import json
 import time
 import logging
 from collections import deque
-import os
-import stat

 # Django
 from django.conf import settings
@@ -206,21 +204,6 @@ class RunnerCallback:
             self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
             # We opened a connection just for that save, close it here now
             connections.close_all()
-        elif status_data['status'] == 'failed':
-            # For encrypted ssh_key_data, ansible-runner worker will open and write the
-            # ssh_key_data to a named pipe. Then, once the podman container starts, ssh-agent will
-            # read from this named pipe so that the key can be used in ansible-playbook.
-            # Once the podman container exits, the named pipe is deleted.
-            # However, if the podman container fails to start in the first place, e.g. the image
-            # name is incorrect, then this pipe is not cleaned up. Eventually ansible-runner
-            # processor will attempt to write artifacts to the private data dir via unstream_dir, requiring
-            # that it open this named pipe. This leads to a hang. Thus, before any artifacts
-            # are written by the processor, it's important to remove this ssh_key_data pipe.
-            private_data_dir = self.instance.job_env.get('AWX_PRIVATE_DATA_DIR', None)
-            if private_data_dir:
-                key_data_file = os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'ssh_key_data')
-                if os.path.exists(key_data_file) and stat.S_ISFIFO(os.stat(key_data_file).st_mode):
-                    os.remove(key_data_file)
         elif status_data['status'] == 'error':
             result_traceback = status_data.get('result_traceback', None)
             if result_traceback:
@@ -426,7 +426,7 @@ class BaseTask(object):
         """
         instance.log_lifecycle("post_run")

-    def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
+    def final_run_hook(self, instance, status, private_data_dir):
         """
         Hook for any steps to run after job/task is marked as complete.
         """
@@ -469,7 +469,6 @@
         self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
         self.instance.websocket_emit_status("running")
         status, rc = 'error', None
-        fact_modification_times = {}
         self.runner_callback.event_ct = 0

         '''
@@ -498,14 +497,6 @@
         if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
             raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)

-        # Fetch "cached" fact data from prior runs and put on the disk
-        # where ansible expects to find it
-        if getattr(self.instance, 'use_fact_cache', False):
-            self.instance.start_job_fact_cache(
-                os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
-                fact_modification_times,
-            )
-
         # May have to serialize the value
         private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
         passwords = self.build_passwords(self.instance, kwargs)
@@ -646,7 +637,7 @@
             self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')

         try:
-            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
+            self.final_run_hook(self.instance, status, private_data_dir)
         except Exception:
             logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
@@ -767,6 +758,10 @@ class SourceControlMixin(BaseTask):

         try:
             original_branch = None
+            failed_reason = project.get_reason_if_failed()
+            if failed_reason:
+                self.update_model(self.instance.pk, status='failed', job_explanation=failed_reason)
+                raise RuntimeError(failed_reason)
             project_path = project.get_project_path(check_if_exists=False)
             if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):
                 if os.path.exists(project_path):
@@ -1056,22 +1051,25 @@ class RunJob(SourceControlMixin, BaseTask):
             error = _('Job could not start because no Execution Environment could be found.')
             self.update_model(job.pk, status='error', job_explanation=error)
             raise RuntimeError(error)
-        elif job.project.status in ('error', 'failed'):
-            msg = _('The project revision for this job template is unknown due to a failed update.')
-            job = self.update_model(job.pk, status='failed', job_explanation=msg)
-            raise RuntimeError(msg)

         if job.inventory.kind == 'smart':
             # cache smart inventory memberships so that the host_filter query is not
             # ran inside of the event saving code
             update_smart_memberships_for_inventory(job.inventory)

+        # Fetch "cached" fact data from prior runs and put on the disk
+        # where ansible expects to find it
+        if job.use_fact_cache:
+            self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
+
     def build_project_dir(self, job, private_data_dir):
         self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)

-    def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
-        super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
-        if not private_data_dir:
+    def post_run_hook(self, job, status):
+        super(RunJob, self).post_run_hook(job, status)
+        job.refresh_from_db(fields=['job_env'])
+        private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
+        if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
             # If there's no private data dir, that means we didn't get into the
             # actual `run()` call; this _usually_ means something failed in
             # the pre_run_hook method
@@ -1079,9 +1077,11 @@ class RunJob(SourceControlMixin, BaseTask):
         if job.use_fact_cache:
             job.finish_job_fact_cache(
                 os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
-                fact_modification_times,
+                self.facts_write_time,
             )

+    def final_run_hook(self, job, status, private_data_dir):
+        super(RunJob, self).final_run_hook(job, status, private_data_dir)
         try:
             inventory = job.inventory
         except Inventory.DoesNotExist:
@@ -61,10 +61,15 @@ def read_receptor_config():
         return yaml.safe_load(f)


-def get_receptor_sockfile():
-    data = read_receptor_config()
+def work_signing_enabled(config_data):
+    for section in config_data:
+        if 'work-signing' in section:
+            return True
+    return False

-    for section in data:
+
+def get_receptor_sockfile(config_data):
+    for section in config_data:
         for entry_name, entry_data in section.items():
             if entry_name == 'control-service':
                 if 'filename' in entry_data:
@@ -75,12 +80,11 @@ def get_receptor_sockfile():
     raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')


-def get_tls_client(use_stream_tls=None):
+def get_tls_client(config_data, use_stream_tls=None):
     if not use_stream_tls:
         return None

-    data = read_receptor_config()
-    for section in data:
+    for section in config_data:
         for entry_name, entry_data in section.items():
             if entry_name == 'tls-client':
                 if 'name' in entry_data:
@@ -88,10 +92,12 @@ def get_tls_client(use_stream_tls=None):
     return None


-def get_receptor_ctl():
-    receptor_sockfile = get_receptor_sockfile()
+def get_receptor_ctl(config_data=None):
+    if config_data is None:
+        config_data = read_receptor_config()
+    receptor_sockfile = get_receptor_sockfile(config_data)
     try:
-        return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(True))
+        return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))
     except RuntimeError:
         return ReceptorControl(receptor_sockfile)
@@ -159,15 +165,18 @@ def run_until_complete(node, timing_data=None, **kwargs):
|
||||
"""
|
||||
Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout.
|
||||
"""
|
||||
receptor_ctl = get_receptor_ctl()
|
||||
config_data = read_receptor_config()
|
||||
receptor_ctl = get_receptor_ctl(config_data)
|
||||
|
||||
use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS"
|
||||
kwargs.setdefault('tlsclient', get_tls_client(use_stream_tls))
|
||||
kwargs.setdefault('tlsclient', get_tls_client(config_data, use_stream_tls))
|
||||
kwargs.setdefault('ttl', '20s')
|
||||
kwargs.setdefault('payload', '')
|
||||
if work_signing_enabled(config_data):
|
||||
kwargs['signwork'] = True
|
||||
|
||||
transmit_start = time.time()
|
||||
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=True, **kwargs)
|
||||
result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, **kwargs)
|
||||
|
||||
unit_id = result['unitid']
|
||||
run_start = time.time()
|
||||
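Taken together, these hunks parse receptor.conf once and thread the parsed data through every helper, and they only request work signing when the config actually contains a `work-signing` section. A sketch of the resulting call pattern, using the helpers shown above (the node name and payload are placeholders):

```python
config_data = read_receptor_config()          # parse receptor.conf exactly once
receptor_ctl = get_receptor_ctl(config_data)  # sockfile and tls client derive from the same parse

work_kwargs = {'payload': '', 'ttl': '20s'}
if work_signing_enabled(config_data):
    # previously submit_work was always called with signwork=True;
    # now signing is requested only when the config supports it
    work_kwargs['signwork'] = True

result = receptor_ctl.submit_work(worktype='ansible-runner', node='execution-node-1', **work_kwargs)
```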
@@ -208,7 +217,10 @@ def run_until_complete(node, timing_data=None, **kwargs):
    if state_name.lower() == 'failed':
        work_detail = status.get('Detail', '')
        if work_detail:
            raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}')
            if stdout:
                raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}\nstdout:\n{stdout}')
            else:
                raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}')
        else:
            raise RemoteJobError(f'Unknown ansible-runner error on node {node}, stdout:\n{stdout}')

@@ -299,7 +311,8 @@ class AWXReceptorJob:

    def run(self):
        # We establish a connection to the Receptor socket
        receptor_ctl = get_receptor_ctl()
        self.config_data = read_receptor_config()
        receptor_ctl = get_receptor_ctl(self.config_data)

        res = None
        try:

@@ -324,7 +337,7 @@ class AWXReceptorJob:
        if self.work_type == 'ansible-runner':
            work_submit_kw['node'] = self.task.instance.execution_node
            use_stream_tls = get_conn_type(work_submit_kw['node'], receptor_ctl).name == "STREAMTLS"
            work_submit_kw['tlsclient'] = get_tls_client(use_stream_tls)
            work_submit_kw['tlsclient'] = get_tls_client(self.config_data, use_stream_tls)

        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            transmitter_future = executor.submit(self.transmit, sockin)

@@ -398,9 +411,11 @@ class AWXReceptorJob:
            unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
            detail = unit_status.get('Detail', None)
            state_name = unit_status.get('StateName', None)
            stdout_size = unit_status.get('StdoutSize', 0)
        except Exception:
            detail = ''
            state_name = ''
            stdout_size = 0
            logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')

        if 'exceeded quota' in detail:

@@ -411,9 +426,16 @@ class AWXReceptorJob:
            return

        try:
            resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
            lines = resultsock.readlines()
            receptor_output = b"".join(lines).decode()
            receptor_output = ''
            if state_name == 'Failed' and self.task.runner_callback.event_ct == 0:
                # if receptor work unit failed and no events were emitted, work results may
                # contain useful information about why the job failed. In case stdout is
                # massive, only ask for last 1000 bytes
                startpos = max(stdout_size - 1000, 0)
                resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
                resultsock.setblocking(False)  # this makes resultfile reads non blocking
                lines = resultfile.readlines()
                receptor_output = b"".join(lines).decode()
            if receptor_output:
                self.task.runner_callback.delay_update(result_traceback=receptor_output)
            elif detail:

@@ -474,7 +496,9 @@ class AWXReceptorJob:

    @property
    def sign_work(self):
        return True if self.work_type in ('ansible-runner', 'local') else False
        if self.work_type in ('ansible-runner', 'local'):
            return work_signing_enabled(self.config_data)
        return False

    @property
    def work_type(self):

@@ -52,6 +52,7 @@ from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
    get_type_for_model,
    ignore_inventory_computed_fields,
    ignore_inventory_group_removal,
    ScheduleWorkflowManager,

@@ -720,45 +721,43 @@ def handle_work_success(task_actual):


@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    logger.warning("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue
def handle_work_error(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in error callback.'.format(task_actual['type'], task_actual['id']))
        return
    if not instance:
        return

            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']
    subtasks = instance.get_jobs_fail_chain()  # reverse of dependent_jobs mostly
    logger.debug(f'Executing error task id {task_actual["id"]}, subtasks: {[subtask.id for subtask in subtasks]}')

            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status in ('successful', 'failed'):
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")
    deps_of_deps = {}

    for subtask in subtasks:
        if subtask.celery_task_id != instance.celery_task_id and not subtask.cancel_flag and not subtask.status in ('successful', 'failed'):
            # If there are multiple in the dependency chain, A->B->C, and this was called for A, blame B for clarity
            blame_job = deps_of_deps.get(subtask.id, instance)
            subtask.status = 'failed'
            subtask.failed = True
            if not subtask.job_explanation:
                subtask.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                    get_type_for_model(type(blame_job)),
                    blame_job.name,
                    blame_job.id,
                )
            subtask.save()
            subtask.websocket_emit_status("failed")

            for sub_subtask in subtask.get_jobs_fail_chain():
                deps_of_deps[sub_subtask.id] = subtask

    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_manager_success_or_error(first_instance)
    schedule_manager_success_or_error(instance)


@task(queue=get_local_queuename)
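The rewritten `handle_work_error` derives the affected jobs from `get_jobs_fail_chain()` and uses `deps_of_deps` so each downstream job blames its nearest failed dependency rather than the original failure. A toy model of that bookkeeping with plain objects in place of UnifiedJob rows:

```python
# For a chain A -> B -> C, an error on A should mark B as failed by A,
# and C as failed by B (not A). deps_of_deps maps "job further down the
# chain" -> "the dependency to blame".
class FakeJob:
    def __init__(self, name, downstream=()):
        self.name = name
        self.downstream = list(downstream)
        self.blamed_on = None

    def get_jobs_fail_chain(self):
        chain = []
        for job in self.downstream:
            chain.append(job)
            chain.extend(job.get_jobs_fail_chain())
        return chain


c = FakeJob('C')
b = FakeJob('B', downstream=[c])
a = FakeJob('A', downstream=[b])

deps_of_deps = {}
for subtask in a.get_jobs_fail_chain():  # [B, C]
    subtask.blamed_on = deps_of_deps.get(id(subtask), a)
    for sub_subtask in subtask.get_jobs_fail_chain():
        deps_of_deps[id(sub_subtask)] = subtask

assert b.blamed_on is a and c.blamed_on is b
```

The `test_handle_work_error_nested` test added later in this diff asserts exactly this nesting on real ProjectUpdate/InventoryUpdate/Job rows.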
@@ -3,5 +3,6 @@
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo"
    "AWS_SECURITY_TOKEN": "fooo",
    "AWS_SESSION_TOKEN": "fooo"
}

@@ -7,7 +7,7 @@ from awx.main.models.ha import Instance
from django.test.utils import override_settings


INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, memory=36000000000, cpu_capacity=6, mem_capacity=42)
INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, node_type='execution', memory=36000000000, cpu_capacity=6, mem_capacity=42)


@pytest.mark.django_db

@@ -171,13 +171,17 @@ class TestKeyRegeneration:

    def test_use_custom_key_with_empty_tower_secret_key_env_var(self):
        os.environ['TOWER_SECRET_KEY'] = ''
        new_key = call_command('regenerate_secret_key', '--use-custom-key')
        assert settings.SECRET_KEY != new_key
        with pytest.raises(SystemExit) as e:
            call_command('regenerate_secret_key', '--use-custom-key')
        assert e.type == SystemExit
        assert e.value.code == 1

    def test_use_custom_key_with_no_tower_secret_key_env_var(self):
        os.environ.pop('TOWER_SECRET_KEY', None)
        new_key = call_command('regenerate_secret_key', '--use-custom-key')
        assert settings.SECRET_KEY != new_key
        with pytest.raises(SystemExit) as e:
            call_command('regenerate_secret_key', '--use-custom-key')
        assert e.type == SystemExit
        assert e.value.code == 1

    def test_with_tower_secret_key_env_var(self):
        custom_key = 'MXSq9uqcwezBOChl/UfmbW1k4op+bC+FQtwPqgJ1u9XV'
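These two tests now expect `regenerate_secret_key --use-custom-key` to abort instead of quietly generating a fresh key when `TOWER_SECRET_KEY` is empty or unset. A hedged sketch of the guard they exercise; the real management command is not shown in this diff:

```python
import os
import sys


def require_custom_key():
    custom_key = os.environ.get('TOWER_SECRET_KEY')
    if not custom_key:  # covers both the unset and the empty-string cases above
        sys.stderr.write('TOWER_SECRET_KEY is not set; cannot use a custom key\n')
        sys.exit(1)  # surfaces in the tests as SystemExit with e.value.code == 1
    return custom_key
```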
@@ -4,7 +4,7 @@ from awx.main.models import (
    Instance,
    InstanceGroup,
)
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups


class TestInstanceGroupInstanceMapping(TransactionTestCase):

@@ -23,11 +23,10 @@ class TestInstanceGroupInstanceMapping(TransactionTestCase):
    def test_mapping(self):
        self.sample_cluster()
        with self.assertNumQueries(3):
            instances = TaskManagerInstances([])  # empty task list
            instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances)
            instance_groups = TaskManagerInstanceGroups()

        ig_instance_map = instance_groups.instance_groups

        assert set(i.hostname for i in ig_instance_map['ig_small']['instances']) == set(['i1'])
        assert set(i.hostname for i in ig_instance_map['ig_large']['instances']) == set(['i2', 'i3'])
        assert set(i.hostname for i in ig_instance_map['default']['instances']) == set(['i2'])
        assert set(i.hostname for i in ig_instance_map['ig_small'].instances) == set(['i1'])
        assert set(i.hostname for i in ig_instance_map['ig_large'].instances) == set(['i2', 'i3'])
        assert set(i.hostname for i in ig_instance_map['default'].instances) == set(['i2'])

@@ -10,6 +10,10 @@ from awx.main.utils import (
    create_temporary_fifo,
)

from awx.main.scheduler import TaskManager

from . import create_job


@pytest.fixture
def containerized_job(default_instance_group, kube_credential, job_template_factory):

@@ -34,6 +38,50 @@ def test_containerized_job(containerized_job):
    assert containerized_job.instance_group.credential.kubernetes


@pytest.mark.django_db
def test_max_concurrent_jobs_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
    """Construct a scenario where only 1 job will fit within the max_concurrent_jobs of the container group.

    Since max_concurrent_jobs is set to 1, even though 2 jobs are in pending
    and would be launched into the container group, only one will be started.
    """
    containerized_job.unified_job_template.allow_simultaneous = True
    containerized_job.unified_job_template.save()
    default_instance_group = containerized_job.instance_group
    default_instance_group.max_concurrent_jobs = 1
    default_instance_group.save()
    task_impact = 1
    # Create a second job that should not be scheduled at first, blocked by the other
    create_job(containerized_job.unified_job_template)
    tm = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = task_impact
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_called_once()


@pytest.mark.django_db
def test_max_forks_blocks_start_of_new_jobs(controlplane_instance_group, containerized_job, mocker):
    """Construct a scenario where only 1 job will fit within the max_forks of the container group.

    In this case, we set the container_group max_forks to 10, and make the task_impact of a job 6.
    Therefore, only 1 job will fit within the max of 10.
    """
    containerized_job.unified_job_template.allow_simultaneous = True
    containerized_job.unified_job_template.save()
    default_instance_group = containerized_job.instance_group
    default_instance_group.max_forks = 10
    # Create a second job that should not be scheduled
    create_job(containerized_job.unified_job_template)
    tm = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = 6
        with mock.patch("awx.main.scheduler.TaskManager.start_task"):
            tm.schedule()
            tm.start_task.assert_called_once()


@pytest.mark.django_db
def test_kubectl_ssl_verification(containerized_job, default_job_execution_environment):
    containerized_job.execution_environment = default_job_execution_environment
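Both new tests pin `task_impact` and count `start_task` calls rather than inspecting scheduler internals. The gate they describe can be pictured like this — a sketch with assumed attribute names, not the TaskManager's actual code path:

```python
def instance_group_can_fit(ig, running_jobs, task_impact):
    """Return True if one more job of the given impact may start in this group."""
    if ig.max_concurrent_jobs and len(running_jobs) + 1 > ig.max_concurrent_jobs:
        return False  # job-count cap wins even when raw capacity remains
    consumed_forks = sum(job.task_impact for job in running_jobs)
    if ig.max_forks and consumed_forks + task_impact > ig.max_forks:
        return False  # fork cap: two jobs of impact 6 exceed max_forks=10
    return True
```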
@@ -23,7 +23,7 @@ def test_multi_group_basic_job_launch(instance_factory, controlplane_instance_gr
        mock_task_impact.return_value = 500
        with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
            TaskManager().schedule()
            TaskManager.start_task.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j2, ig2, [], i2)])
            TaskManager.start_task.assert_has_calls([mock.call(j1, ig1, i1), mock.call(j2, ig2, i2)])


@pytest.mark.django_db

@@ -54,7 +54,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
        DependencyManager().schedule()
        TaskManager().schedule()
        pu = p.project_updates.first()
        TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, [j1, j2], controlplane_instance_group.instances.all()[0])
        TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, controlplane_instance_group.instances.all()[0])
        pu.finished = pu.created + timedelta(seconds=1)
        pu.status = "successful"
        pu.save()

@@ -62,8 +62,8 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
        DependencyManager().schedule()
        TaskManager().schedule()

        TaskManager.start_task.assert_any_call(j1, ig1, [], i1)
        TaskManager.start_task.assert_any_call(j2, ig2, [], i2)
        TaskManager.start_task.assert_any_call(j1, ig1, i1)
        TaskManager.start_task.assert_any_call(j2, ig2, i2)
        assert TaskManager.start_task.call_count == 2

@@ -75,7 +75,7 @@ def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlpla
    wfj.save()
    with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(wfj, None, [], None)
        TaskManager.start_task.assert_called_once_with(wfj, None, None)
        assert wfj.instance_group is None

@@ -150,7 +150,7 @@ def test_failover_group_run(instance_factory, controlplane_instance_group, mocke
        mock_task_impact.return_value = 500
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j1_1, ig2, [], i2)])
            mock_job.assert_has_calls([mock.call(j1, ig1, i1), mock.call(j1_1, ig2, i2)])
            assert mock_job.call_count == 2

@@ -18,7 +18,7 @@ def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_grou
    j = create_job(objects.job_template)
    with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)


@pytest.mark.django_db

@@ -240,12 +240,82 @@ def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocke
        mock_task_impact.return_value = 505
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_called_once_with(j1, controlplane_instance_group, [], instance)
            mock_job.assert_called_once_with(j1, controlplane_instance_group, instance)
    j1.status = "successful"
    j1.save()
    with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
        tm.schedule()
        mock_job.assert_called_once_with(j2, controlplane_instance_group, [], instance)
        mock_job.assert_called_once_with(j2, controlplane_instance_group, instance)


@pytest.mark.django_db
def test_max_concurrent_jobs_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
    """When max_concurrent_jobs of an instance group is more restrictive than capacity of instances, enforce max_concurrent_jobs."""
    instance = hybrid_instance
    controlplane_instance_group = instance.rampart_groups.first()
    # We will expect only 1 job to be started
    controlplane_instance_group.max_concurrent_jobs = 1
    controlplane_instance_group.save()
    num_jobs = 3
    jobs = []
    for i in range(num_jobs):
        jobs.append(
            create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
        )
    tm = TaskManager()
    task_impact = 1

    # Sanity check that multiple jobs would run if not for the max_concurrent_jobs setting.
    assert task_impact * num_jobs < controlplane_instance_group.capacity
    tm = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = task_impact
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_called_once()
    jobs[0].status = 'running'
    jobs[0].controller_node = instance.hostname
    jobs[0].execution_node = instance.hostname
    jobs[0].instance_group = controlplane_instance_group
    jobs[0].save()

    # while that job is running, we should not start another job
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = task_impact
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_not_called()
    # now job is done, we should start one of the two other jobs
    jobs[0].status = 'successful'
    jobs[0].save()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = task_impact
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_called_once()


@pytest.mark.django_db
def test_max_forks_ig_capacity_blocking(hybrid_instance, job_template_factory, mocker):
    """When max_forks of an instance group is less than the capacity of instances, enforce max_forks."""
    instance = hybrid_instance
    controlplane_instance_group = instance.rampart_groups.first()
    controlplane_instance_group.max_forks = 15
    controlplane_instance_group.save()
    task_impact = 10
    num_jobs = 2
    # Sanity check that 2 jobs would run if not for the max_forks setting.
    assert controlplane_instance_group.max_forks < controlplane_instance_group.capacity
    assert task_impact * num_jobs > controlplane_instance_group.max_forks
    assert task_impact * num_jobs < controlplane_instance_group.capacity
    for i in range(num_jobs):
        create_job(job_template_factory(f'jt{i}', organization=f'org{i}', project=f'proj{i}', inventory=f'inv{i}', credential=f'cred{i}').job_template)
    tm = TaskManager()
    with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
        mock_task_impact.return_value = task_impact
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
            mock_job.assert_called_once()


@pytest.mark.django_db

@@ -267,12 +337,12 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
        pu = [x for x in p.project_updates.all()]
        assert len(pu) == 1
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, [j], instance)
        TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, instance)
    pu[0].status = "successful"
    pu[0].save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)


@pytest.mark.django_db

@@ -295,12 +365,12 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
        iu = [x for x in ii.inventory_updates.all()]
        assert len(iu) == 1
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, [j], instance)
        TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, instance)
    iu[0].status = "successful"
    iu[0].save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)


@pytest.mark.django_db

@@ -342,7 +412,7 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
        mock_iu.assert_not_called()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)


@pytest.mark.django_db

@@ -372,9 +442,7 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
        TaskManager().schedule()
        pu = p.project_updates.first()
        iu = ii.inventory_updates.first()
        TaskManager.start_task.assert_has_calls(
            [mock.call(iu, controlplane_instance_group, [j1, j2], instance), mock.call(pu, controlplane_instance_group, [j1, j2], instance)]
        )
        TaskManager.start_task.assert_has_calls([mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)])
    pu.status = "successful"
    pu.finished = pu.created + timedelta(seconds=1)
    pu.save()

@@ -383,9 +451,7 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
    iu.save()
    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
        TaskManager().schedule()
        TaskManager.start_task.assert_has_calls(
            [mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
        )
        TaskManager.start_task.assert_has_calls([mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)])
    pu = [x for x in p.project_updates.all()]
    iu = [x for x in ii.inventory_updates.all()]
    assert len(pu) == 1

@@ -409,7 +475,7 @@ def test_job_not_blocking_project_update(controlplane_instance_group, job_templa
        project_update.status = "pending"
        project_update.save()
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, [], instance)
        TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, instance)


@pytest.mark.django_db

@@ -433,7 +499,7 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp

        DependencyManager().schedule()
        TaskManager().schedule()
        TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, [], instance)
        TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, instance)


@pytest.mark.django_db

@@ -1,7 +1,7 @@
import pytest
from unittest import mock

from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate
from awx.main.models import AdHocCommand, InventoryUpdate, JobTemplate, Job
from awx.main.models.activity_stream import ActivityStream
from awx.main.models.ha import Instance, InstanceGroup
from awx.main.tasks.system import apply_cluster_membership_policies

@@ -15,6 +15,24 @@ def test_default_tower_instance_group(default_instance_group, job_factory):
    assert default_instance_group in job_factory().preferred_instance_groups


@pytest.mark.django_db
@pytest.mark.parametrize('node_type', ('execution', 'control'))
@pytest.mark.parametrize('active', (True, False))
def test_get_cleanup_task_kwargs_active_jobs(node_type, active):
    instance = Instance.objects.create(hostname='foobar', node_type=node_type)
    job_kwargs = dict()
    job_kwargs['controller_node' if node_type == 'control' else 'execution_node'] = instance.hostname
    job_kwargs['status'] = 'running' if active else 'successful'

    job = Job.objects.create(**job_kwargs)
    kwargs = instance.get_cleanup_task_kwargs()

    if active:
        assert kwargs['exclude_strings'] == [f'awx_{job.pk}_']
    else:
        assert 'exclude_strings' not in kwargs
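The test above pins down the contract that an instance with an active job reports an `exclude_strings` entry of `awx_<job id>_`, so the periodic file cleanup skips that job's working files. A speculative sketch of how such kwargs could drive a sweep — the actual cleanup task is not part of this diff, and `test_cleanup_params_defaults` further down shows the companion defaults `file_pattern='/tmp/awx_*_*'` and `grace_period=60`:

```python
import fnmatch


def sweep(candidates, mtimes, now, file_pattern='/tmp/awx_*_*', exclude_strings=(), grace_period=60):
    """Return the paths that are safe to delete."""
    doomed = []
    for path in candidates:
        if not fnmatch.fnmatch(path, file_pattern):
            continue
        if any(marker in path for marker in exclude_strings):
            continue  # e.g. 'awx_42_' protects the private dir of running job 42
        if now - mtimes.get(path, 0) < grace_period:
            continue  # too new; may belong to a job that is still starting up
        doomed.append(path)
    return doomed


assert sweep(['/tmp/awx_42_x', '/tmp/awx_7_y'], {}, now=10**9, exclude_strings=['awx_42_']) == ['/tmp/awx_7_y']
```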
@pytest.mark.django_db
class TestPolicyTaskScheduling:
    """Tests make assertions about when the policy task gets scheduled"""

@@ -121,8 +121,8 @@ def test_python_and_js_licenses():
        return errors

    base_dir = settings.BASE_DIR
    api_licenses = index_licenses('%s/../docs/licenses' % base_dir)
    ui_licenses = index_licenses('%s/../docs/licenses/ui' % base_dir)
    api_licenses = index_licenses('%s/../licenses' % base_dir)
    ui_licenses = index_licenses('%s/../licenses/ui' % base_dir)
    api_requirements = read_api_requirements('%s/../requirements' % base_dir)
    ui_requirements = read_ui_requirements('%s/ui' % base_dir)

@@ -75,6 +75,7 @@ def test_encrypted_subfields(get, post, user, organization):
    url = reverse('api:notification_template_detail', kwargs={'pk': response.data['id']})
    response = get(url, u)
    assert response.data['notification_configuration']['account_token'] == "$encrypted$"

    with mock.patch.object(notification_template_actual.notification_class, "send_messages", assert_send):
        notification_template_actual.send("Test", {'body': "Test"})

@@ -175,3 +176,46 @@ def test_custom_environment_injection(post, user, organization):

    fake_send.side_effect = _send_side_effect
    template.send('subject', 'message')


def mock_post(*args, **kwargs):
    class MockGoodResponse:
        def __init__(self):
            self.status_code = 200

    class MockRedirectResponse:
        def __init__(self):
            self.status_code = 301
            self.headers = {"Location": "http://goodendpoint"}

    if kwargs['url'] == "http://goodendpoint":
        return MockGoodResponse()
    else:
        return MockRedirectResponse()


@pytest.mark.django_db
@mock.patch('requests.post', side_effect=mock_post)
def test_webhook_notification_pointed_to_a_redirect_launch_endpoint(post, admin, organization):

    n1 = NotificationTemplate.objects.create(
        name="test-webhook",
        description="test webhook",
        organization=organization,
        notification_type="webhook",
        notification_configuration=dict(
            url="http://some.fake.url",
            disable_ssl_verification=True,
            http_method="POST",
            headers={
                "Content-Type": "application/json",
            },
            username=admin.username,
            password=admin.password,
        ),
        messages={
            "success": {"message": "", "body": "{}"},
        },
    )

    assert n1.send("", n1.messages.get("success").get("body")) == 1
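This test, together with the `allow_redirects=False` assertions in the webhook backend tests below, describes webhook sends that no longer follow redirects automatically but instead retry the POST against the `Location` header themselves. A hedged sketch of that loop; the real backend is AWX's webhook notification class, which is not shown in this diff:

```python
import requests


def send_with_manual_redirects(url, body, headers, max_hops=3):
    for _ in range(max_hops):
        resp = requests.post(url=url, data=body, headers=headers, allow_redirects=False)
        if resp.status_code in (301, 302, 307, 308):
            url = resp.headers['Location']  # re-POST against the new endpoint
            continue
        return resp
    raise RuntimeError('too many redirects')
```

Disabling automatic redirects matters for POST: requests would otherwise downgrade a redirected POST to GET for 301/302, silently dropping the notification body.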
@@ -5,8 +5,8 @@ import tempfile
import shutil

from awx.main.tasks.jobs import RunJob
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
from awx.main.models import Instance, Job
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files, handle_work_error
from awx.main.models import Instance, Job, InventoryUpdate, ProjectUpdate


@pytest.fixture

@@ -74,3 +74,17 @@ def test_does_not_run_reaped_job(mocker, mock_me):
    job.refresh_from_db()
    assert job.status == 'failed'
    mock_run.assert_not_called()


@pytest.mark.django_db
def test_handle_work_error_nested(project, inventory_source):
    pu = ProjectUpdate.objects.create(status='failed', project=project, celery_task_id='1234')
    iu = InventoryUpdate.objects.create(status='pending', inventory_source=inventory_source, source='scm')
    job = Job.objects.create(status='pending')
    iu.dependent_jobs.add(pu)
    job.dependent_jobs.add(pu, iu)
    handle_work_error({'type': 'project_update', 'id': pu.id})
    iu.refresh_from_db()
    job.refresh_from_db()
    assert iu.job_explanation == f'Previous Task Failed: {{"job_type": "project_update", "job_name": "", "job_id": "{pu.id}"}}'
    assert job.job_explanation == f'Previous Task Failed: {{"job_type": "inventory_update", "job_name": "", "job_id": "{iu.id}"}}'

@@ -1,10 +1,7 @@
import pytest
from unittest import mock
from unittest.mock import Mock
from decimal import Decimal

from awx.main.models import InstanceGroup, Instance
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
from awx.main.models import Instance


@pytest.mark.parametrize('capacity_adjustment', [0.0, 0.25, 0.5, 0.75, 1, 1.5, 3])

@@ -17,83 +14,6 @@ def test_capacity_adjustment_no_save(capacity_adjustment):
    assert inst.capacity == (float(inst.capacity_adjustment) * abs(inst.mem_capacity - inst.cpu_capacity) + min(inst.mem_capacity, inst.cpu_capacity))


def T(impact):
    j = mock.Mock(spec_set=['task_impact', 'capacity_type'])
    j.task_impact = impact
    j.capacity_type = 'execution'
    return j


def Is(param):
    """
    param:
        [remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
        [(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
    """

    instances = []
    if isinstance(param[0], tuple):
        for (jobs_running, capacity) in param:
            inst = Mock()
            inst.capacity = capacity
            inst.jobs_running = jobs_running
            inst.node_type = 'execution'
            instances.append(inst)
    else:
        for i in param:
            inst = Mock()
            inst.remaining_capacity = i
            inst.node_type = 'execution'
            instances.append(inst)
    return instances


class TestInstanceGroup(object):
    @pytest.mark.parametrize(
        'task,instances,instance_fit_index,reason',
        [
            (T(100), Is([100]), 0, "Only one, pick it"),
            (T(100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
            (T(100), Is([50, 100]), 1, "First instance not as good as second instance"),
            (T(100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
            (T(100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
        ],
    )
    def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
        InstanceGroup(id=10)
        tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances}})

        instance_picked = tm_igs.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')

        if instance_fit_index is None:
            assert instance_picked is None, reason
        else:
            assert instance_picked == instances[instance_fit_index], reason

    @pytest.mark.parametrize(
        'instances,instance_fit_index,reason',
        [
            (Is([(0, 100)]), 0, "One idle instance, pick it"),
            (Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
            (Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
            (Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
            (Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
        ],
    )
    def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
        def filter_offline_instances(*args):
            return filter(lambda i: i.capacity > 0, instances)

        InstanceGroup(id=10)
        instances_online_only = filter_offline_instances(instances)
        tm_igs = TaskManagerInstanceGroups(instance_groups={'controlplane': {'instances': instances_online_only}})

        if instance_fit_index is None:
            assert tm_igs.find_largest_idle_instance('controlplane') is None, reason
        else:
            assert tm_igs.find_largest_idle_instance('controlplane') == instances[instance_fit_index], reason


def test_cleanup_params_defaults():
    inst = Instance(hostname='foobar')
    assert inst.get_cleanup_task_kwargs(exclude_strings=['awx_423_']) == {'exclude_strings': ['awx_423_'], 'file_pattern': '/tmp/awx_*_*', 'grace_period': 60}

@@ -36,15 +36,14 @@ def job(mocker, hosts, inventory):

def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    modified_times = {}
    job.start_job_fact_cache(fact_cache, modified_times, 0)
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)

    for host in hosts:
        filepath = os.path.join(fact_cache, host.name)
        assert os.path.exists(filepath)
        with open(filepath, 'r') as f:
            assert f.read() == json.dumps(host.ansible_facts)
        assert filepath in modified_times
        assert os.path.getmtime(filepath) <= last_modified


def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):

@@ -58,18 +57,16 @@ def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
    )

    fact_cache = os.path.join(tmpdir, 'facts')
    job.start_job_fact_cache(fact_cache, {}, 0)
    job.start_job_fact_cache(fact_cache, timeout=0)
    # a file called "foo" should _not_ be written outside the facts dir
    assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']


def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    modified_times = {}
    job.start_job_fact_cache(fact_cache, modified_times, 0)
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)

    for h in hosts:
        h.save = mocker.Mock()
    bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')

    ansible_facts_new = {"foo": "bar"}
    filepath = os.path.join(fact_cache, hosts[1].name)

@@ -83,23 +80,20 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
    new_modification_time = time.time() + 3600
    os.utime(filepath, (new_modification_time, new_modification_time))

    job.finish_job_fact_cache(fact_cache, modified_times)
    job.finish_job_fact_cache(fact_cache, last_modified)

    for host in (hosts[0], hosts[2], hosts[3]):
        host.save.assert_not_called()
        assert host.ansible_facts == {"a": 1, "b": 2}
        assert host.ansible_facts_modified is None
    assert hosts[1].ansible_facts == ansible_facts_new
    hosts[1].save.assert_called_once_with(update_fields=['ansible_facts', 'ansible_facts_modified'])
    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])


def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    modified_times = {}
    job.start_job_fact_cache(fact_cache, modified_times, 0)
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)

    for h in hosts:
        h.save = mocker.Mock()
    bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')

    for h in hosts:
        filepath = os.path.join(fact_cache, h.name)

@@ -109,26 +103,22 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
        new_modification_time = time.time() + 3600
        os.utime(filepath, (new_modification_time, new_modification_time))

    job.finish_job_fact_cache(fact_cache, modified_times)
    job.finish_job_fact_cache(fact_cache, last_modified)

    for h in hosts:
        h.save.assert_not_called()
    bulk_update.assert_not_called()


def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
    fact_cache = os.path.join(tmpdir, 'facts')
    modified_times = {}
    job.start_job_fact_cache(fact_cache, modified_times, 0)
    last_modified = job.start_job_fact_cache(fact_cache, timeout=0)

    for h in hosts:
        h.save = mocker.Mock()
    bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')

    os.remove(os.path.join(fact_cache, hosts[1].name))
    job.finish_job_fact_cache(fact_cache, modified_times)
    job.finish_job_fact_cache(fact_cache, last_modified)

    for host in (hosts[0], hosts[2], hosts[3]):
        host.save.assert_not_called()
        assert host.ansible_facts == {"a": 1, "b": 2}
        assert host.ansible_facts_modified is None
    assert hosts[1].ansible_facts == {}
    hosts[1].save.assert_called_once_with()
    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])

@@ -27,11 +27,12 @@ def test_send_messages_as_POST():
        ]
    )
    requests_mock.post.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=None,
        data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
        headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
        verify=True,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -57,11 +58,12 @@ def test_send_messages_as_PUT():
        ]
    )
    requests_mock.put.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=None,
        data=json.dumps({'text': 'test body 2'}, ensure_ascii=False).encode('utf-8'),
        headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
        verify=True,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -87,11 +89,12 @@ def test_send_messages_with_username():
        ]
    )
    requests_mock.post.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=('userstring', None),
        data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
        headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
        verify=True,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -117,11 +120,12 @@ def test_send_messages_with_password():
        ]
    )
    requests_mock.post.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=(None, 'passwordstring'),
        data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
        headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
        verify=True,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -147,11 +151,12 @@ def test_send_messages_with_username_and_password():
        ]
    )
    requests_mock.post.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=('userstring', 'passwordstring'),
        data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
        headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
        verify=True,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -177,11 +182,12 @@ def test_send_messages_with_no_verify_ssl():
        ]
    )
    requests_mock.post.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=None,
        data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
        headers={'Content-Type': 'application/json', 'User-Agent': 'AWX 0.0.1.dev (open)'},
        verify=False,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -207,7 +213,7 @@ def test_send_messages_with_additional_headers():
        ]
    )
    requests_mock.post.assert_called_once_with(
        'http://example.com',
        url='http://example.com',
        auth=None,
        data=json.dumps({'text': 'test body'}, ensure_ascii=False).encode('utf-8'),
        headers={

@@ -217,5 +223,6 @@ def test_send_messages_with_additional_headers():
            'X-Test-Header2': 'test-content-2',
        },
        verify=True,
        allow_redirects=False,
    )
    assert sent_messages == 1

@@ -1,6 +1,6 @@
import pytest

from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
from awx.main.scheduler.task_manager_models import TaskManagerModels


class FakeMeta(object):

@@ -16,38 +16,64 @@ class FakeObject(object):


class Job(FakeObject):
    task_impact = 43
    is_container_group_task = False
    controller_node = ''
    execution_node = ''
    def __init__(self, **kwargs):
        self.task_impact = kwargs.get('task_impact', 43)
        self.is_container_group_task = kwargs.get('is_container_group_task', False)
        self.controller_node = kwargs.get('controller_node', '')
        self.execution_node = kwargs.get('execution_node', '')
        self.instance_group = kwargs.get('instance_group', None)
        self.instance_group_id = self.instance_group.id if self.instance_group else None
        self.capacity_type = kwargs.get('capacity_type', 'execution')

    def log_format(self):
        return 'job 382 (fake)'


class Instances(FakeObject):
    def add(self, *args):
        for instance in args:
            self.obj.instance_list.append(instance)

    def all(self):
        return self.obj.instance_list


class InstanceGroup(FakeObject):
    def __init__(self, **kwargs):
        super(InstanceGroup, self).__init__(**kwargs)
        self.instance_list = []
        self.pk = self.id = kwargs.get('id', 1)

    @property
    def instances(self):
        mgr = Instances(obj=self)
        return mgr

    @property
    def is_container_group(self):
        return False

    @property
    def max_concurrent_jobs(self):
        return 0

    @property
    def max_forks(self):
        return 0


class Instance(FakeObject):
    def __init__(self, **kwargs):
        self.node_type = kwargs.get('node_type', 'hybrid')
        self.capacity = kwargs.get('capacity', 0)
        self.hostname = kwargs.get('hostname', 'fakehostname')
        self.consumed_capacity = 0
        self.jobs_running = 0


@pytest.fixture
def sample_cluster():
    def stand_up_cluster():
        class Instances(FakeObject):
            def add(self, *args):
                for instance in args:
                    self.obj.instance_list.append(instance)

            def all(self):
                return self.obj.instance_list

        class InstanceGroup(FakeObject):
            def __init__(self, **kwargs):
                super(InstanceGroup, self).__init__(**kwargs)
                self.instance_list = []

            @property
            def instances(self):
                mgr = Instances(obj=self)
                return mgr

        class Instance(FakeObject):
            pass

        ig_small = InstanceGroup(name='ig_small')
        ig_large = InstanceGroup(name='ig_large')

@@ -66,14 +92,12 @@ def sample_cluster():
@pytest.fixture
def create_ig_manager():
    def _rf(ig_list, tasks):
        instances = TaskManagerInstances(tasks, instances=set(inst for ig in ig_list for inst in ig.instance_list))

        seed_igs = {}
        for ig in ig_list:
            seed_igs[ig.name] = {'instances': [instances[inst.hostname] for inst in ig.instance_list]}

        instance_groups = TaskManagerInstanceGroups(instance_groups=seed_igs)
        return instance_groups
        tm_models = TaskManagerModels.init_with_consumed_capacity(
            tasks=tasks,
            instances=set(inst for ig in ig_list for inst in ig.instance_list),
            instance_groups=ig_list,
        )
        return tm_models.instance_groups

    return _rf

@@ -126,3 +150,75 @@ def test_RBAC_reduced_filter(sample_cluster, create_ig_manager):
    # Cross-links between groups not visible to current user,
    # so a naieve accounting of capacities is returned instead
    assert instance_groups_mgr.get_consumed_capacity('default') == 43


def Is(param):
    """
    param:
        [remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
        [(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
    """

    instances = []
    if isinstance(param[0], tuple):
        for index, (jobs_running, capacity) in enumerate(param):
            inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
            inst.jobs_running = jobs_running
            instances.append(inst)
    else:
        for index, capacity in enumerate(param):
            inst = Instance(capacity=capacity, node_type='execution', hostname=f'fakehost-{index}')
            inst.node_type = 'execution'
            instances.append(inst)
    return instances


class TestSelectBestInstanceForTask(object):
    @pytest.mark.parametrize(
        'task,instances,instance_fit_index,reason',
        [
            (Job(task_impact=100), Is([100]), 0, "Only one, pick it"),
            (Job(task_impact=100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
            (Job(task_impact=100), Is([50, 100]), 1, "First instance not as good as second instance"),
            (Job(task_impact=100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
            (Job(task_impact=100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
        ],
    )
    def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
        ig = InstanceGroup(id=10, name='controlplane')
        tasks = []
        for instance in instances:
            ig.instances.add(instance)
            for _ in range(instance.jobs_running):
                tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
        tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])
        instance_picked = tm_models.instance_groups.fit_task_to_most_remaining_capacity_instance(task, 'controlplane')

        if instance_fit_index is None:
            assert instance_picked is None, reason
        else:
            assert instance_picked.hostname == instances[instance_fit_index].hostname, reason

    @pytest.mark.parametrize(
        'instances,instance_fit_index,reason',
        [
            (Is([(0, 100)]), 0, "One idle instance, pick it"),
            (Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
            (Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
            (Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
            (Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
        ],
    )
    def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
        ig = InstanceGroup(id=10, name='controlplane')
        tasks = []
        for instance in instances:
            ig.instances.add(instance)
            for _ in range(instance.jobs_running):
                tasks.append(Job(execution_node=instance.hostname, controller_node=instance.hostname, instance_group=ig))
        tm_models = TaskManagerModels.init_with_consumed_capacity(tasks=tasks, instances=instances, instance_groups=[ig])

        if instance_fit_index is None:
            assert tm_models.instance_groups.find_largest_idle_instance('controlplane') is None, reason
        else:
            assert tm_models.instance_groups.find_largest_idle_instance('controlplane').hostname == instances[instance_fit_index].hostname, reason
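The parametrized cases above fully describe the selection rule: among execution instances where the task fits, take the one with the most remaining capacity, keeping the first on ties, and return nothing when no instance fits. Restated as a hedged standalone sketch (the real logic lives in `task_manager_models`, not shown here):

```python
def fit_task_to_most_remaining_capacity_instance(task, instances):
    """Pick the instance with the most remaining capacity that still fits the task."""
    best = None
    for inst in instances:
        remaining = inst.capacity - inst.consumed_capacity
        if inst.capacity == 0 or remaining < task.task_impact:
            continue  # the task does not fit here (or the node is down)
        if best is None or remaining > best.capacity - best.consumed_capacity:
            best = inst  # strict '>' keeps the earlier instance on ties
    return best
```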
@@ -11,11 +11,12 @@ import os
import subprocess
import re
import stat
import sys
import urllib.parse
import threading
import contextlib
import tempfile
from functools import reduce, wraps
import functools

# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist

@@ -73,6 +74,7 @@ __all__ = [
    'NullablePromptPseudoField',
    'model_instance_diff',
    'parse_yaml_or_json',
    'is_testing',
    'RequireDebugTrueOrTest',
    'has_model_field_prefetched',
    'set_environ',

@@ -88,6 +90,7 @@ __all__ = [
    'deepmerge',
    'get_event_partition_epoch',
    'cleanup_new_process',
    'log_excess_runtime',
]


@@ -144,6 +147,19 @@ def underscore_to_camelcase(s):
    return ''.join(x.capitalize() or '_' for x in s.split('_'))


@functools.cache
def is_testing(argv=None):
    '''Return True if running django or py.test unit tests.'''
    if 'PYTEST_CURRENT_TEST' in os.environ.keys():
        return True
    argv = sys.argv if argv is None else argv
    if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
        return True
    elif len(argv) >= 2 and argv[1] == 'test':
        return True
    return False

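Note the behavioral subtlety of `@functools.cache` here: the cache keys on the call arguments, so the common no-argument call is computed once per process and then memoized, and later environment or argv changes are no longer observed. A small self-contained sketch of those semantics:

```python
import functools
import os
import sys


@functools.cache
def is_testing_sketch(argv=None):
    # mirrors the checks above in simplified form
    if 'PYTEST_CURRENT_TEST' in os.environ:
        return True
    argv = sys.argv if argv is None else argv
    return len(argv) >= 2 and argv[1] == 'test'


first = is_testing_sketch()   # inspects os.environ / sys.argv once
second = is_testing_sketch()  # served from the cache; env changes are invisible now
assert first == second
# Also: functools.cache hashes the arguments, so passing a list for argv
# would raise TypeError; callers who pass argv need a hashable value.
```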
class RequireDebugTrueOrTest(logging.Filter):
    """
    Logging filter to output when in DEBUG mode or running tests.

@@ -152,7 +168,7 @@ class RequireDebugTrueOrTest(logging.Filter):
    def filter(self, record):
        from django.conf import settings

        return settings.DEBUG or settings.IS_TESTING()
        return settings.DEBUG or is_testing()


class IllegalArgumentError(ValueError):

@@ -174,7 +190,7 @@ def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
    cache = cache or get_memoize_cache()

    def memoize_decorator(f):
        @wraps(f)
        @functools.wraps(f)
        def _memoizer(*args, **kwargs):
            if track_function:
                cache_dict_key = slugify('%r %r' % (args, kwargs))

@@ -992,7 +1008,7 @@ def getattrd(obj, name, default=NoDefaultProvided):
    """

    try:
        return reduce(getattr, name.split("."), obj)
        return functools.reduce(getattr, name.split("."), obj)
    except AttributeError:
        if default != NoDefaultProvided:
            return default

@@ -1188,7 +1204,7 @@ def cleanup_new_process(func):
    Cleanup django connection, cache connection, before executing new thread or processes entry point, func.
    """

    @wraps(func)
    @functools.wraps(func)
    def wrapper_cleanup_new_process(*args, **kwargs):
        from awx.conf.settings import SettingsWrapper  # noqa

@@ -1200,15 +1216,30 @@ def cleanup_new_process(func):
    return wrapper_cleanup_new_process


def log_excess_runtime(func_logger, cutoff=5.0):
def log_excess_runtime(func_logger, cutoff=5.0, debug_cutoff=5.0, msg=None, add_log_data=False):
    def log_excess_runtime_decorator(func):
        @wraps(func)
        @functools.wraps(func)
        def _new_func(*args, **kwargs):
            start_time = time.time()
            return_value = func(*args, **kwargs)
            delta = time.time() - start_time
            if delta > cutoff:
                logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
            log_data = {'name': repr(func.__name__)}

            if add_log_data:
                return_value = func(*args, log_data=log_data, **kwargs)
            else:
                return_value = func(*args, **kwargs)

            log_data['delta'] = time.time() - start_time
            if isinstance(return_value, dict):
                log_data.update(return_value)

            if msg is None:
                record_msg = 'Running {name} took {delta:.2f}s'
            else:
                record_msg = msg
            if log_data['delta'] > cutoff:
                func_logger.info(record_msg.format(**log_data))
            elif log_data['delta'] > debug_cutoff:
                func_logger.debug(record_msg.format(**log_data))
            return return_value

        return _new_func
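For reference, one way the extended decorator could be used; the function name and message template below are made up, but the keyword arguments are exactly the ones introduced above:

```python
import logging

my_logger = logging.getLogger('awx.main.example')


@log_excess_runtime(
    my_logger,
    cutoff=5.0,          # INFO above this many seconds
    debug_cutoff=0.5,    # DEBUG between debug_cutoff and cutoff
    msg='Sync of {name} took {delta:.2f}s, updated {updated}',
    add_log_data=True,   # wrapper passes a log_data dict into the function
)
def sync_things(log_data):
    # extra keys become available to the msg template; returning a dict
    # merges it into log_data as well
    log_data['updated'] = 3
    return {'updated': 3}
```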
@@ -110,7 +110,7 @@ if settings.COLOR_LOGS is True:
             # logs rendered with cyan text
             previous_level_map = self.level_map.copy()
             if record.name == "awx.analytics.job_lifecycle":
-                self.level_map[logging.DEBUG] = (None, 'cyan', True)
+                self.level_map[logging.INFO] = (None, 'cyan', True)
             msg = super(ColorHandler, self).colorize(line, record)
             self.level_map = previous_level_map
             return msg

@@ -118,7 +118,7 @@ class WebsocketTask:
             logger.warning(f"Connection from {self.name} to {self.remote_host} timed out.")
         except Exception as e:
             # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
-            logger.warning(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
+            logger.exception(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
         else:
             logger.warning(f"Connection from {self.name} to {self.remote_host} lost.")
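
The warning-to-exception change matters because `logger.exception()` also records the active traceback, which is exactly what you want when the cause of a failure is unknown. A minimal demonstration (logger name chosen for illustration):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('awx.main.wsrelay')

try:
    raise ConnectionError('peer went away')
except Exception as e:
    # logger.warning() would record only the message; logger.exception()
    # appends the current traceback to the log record.
    logger.exception(f"Connection failed for unknown reason: '{e}'.")
```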
@@ -10,28 +10,6 @@ import socket
 from datetime import timedelta


-# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
-BASE_DIR = os.path.dirname(os.path.dirname(__file__))
-
-
-def is_testing(argv=None):
-    import sys
-
-    '''Return True if running django or py.test unit tests.'''
-    if 'PYTEST_CURRENT_TEST' in os.environ.keys():
-        return True
-    argv = sys.argv if argv is None else argv
-    if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
-        return True
-    elif len(argv) >= 2 and argv[1] == 'test':
-        return True
-    return False
-
-
-def IS_TESTING(argv=None):
-    return is_testing(argv)
-
-
 if "pytest" in sys.modules:
     from unittest import mock

@@ -40,9 +18,13 @@ if "pytest" in sys.modules:
 else:
     import ldap


 DEBUG = True
 SQL_DEBUG = DEBUG

+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+BASE_DIR = os.path.dirname(os.path.dirname(__file__))
+
 # FIXME: it would be nice to cycle back around and allow this to be
 # BigAutoField going forward, but we'd have to be explicit about our
 # existing models.
@@ -101,7 +83,7 @@ USE_L10N = True

 USE_TZ = True

-STATICFILES_DIRS = (os.path.join(BASE_DIR, 'ui', 'build', 'static'), os.path.join(BASE_DIR, 'static'))
+STATICFILES_DIRS = [os.path.join(BASE_DIR, 'ui', 'build', 'static'), os.path.join(BASE_DIR, 'static')]

 # Absolute filesystem path to the directory where static files are collected via
 # the collectstatic command.

@@ -254,6 +236,14 @@ START_TASK_LIMIT = 100
 TASK_MANAGER_TIMEOUT = 300
 TASK_MANAGER_TIMEOUT_GRACE_PERIOD = 60

+# Number of seconds _in addition to_ the task manager timeout a job can stay
+# in waiting without being reaped
+JOB_WAITING_GRACE_PERIOD = 60
+
+# Number of seconds after a container group job's finished time to wait
+# before the awx_k8s_reaper task will tear down the pods
+K8S_POD_REAPER_GRACE_PERIOD = 60
+
 # Disallow sending session cookies over insecure connections
 SESSION_COOKIE_SECURE = True
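
To make the "in addition to" wording concrete, a small sketch of the implied reap threshold (assuming the reaper simply sums the two settings, as the comment suggests):

```python
TASK_MANAGER_TIMEOUT = 300     # seconds
JOB_WAITING_GRACE_PERIOD = 60  # seconds on top of the timeout

# A job may sit in "waiting" for up to the task manager timeout plus the
# grace period before it becomes eligible for reaping.
waiting_reap_threshold = TASK_MANAGER_TIMEOUT + JOB_WAITING_GRACE_PERIOD
print(waiting_reap_threshold)  # 360
```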
@@ -314,11 +304,13 @@ INSTALLED_APPS = [
     'django.contrib.messages',
     'django.contrib.sessions',
     'django.contrib.sites',
+    # daphne has to be installed before django.contrib.staticfiles for the app to start up
+    # According to the channels 4.0 docs, you install daphne instead of channels now
+    'daphne',
     'django.contrib.staticfiles',
     'oauth2_provider',
     'rest_framework',
     'django_extensions',
-    'channels',
     'polymorphic',
     'taggit',
     'social_django',
@@ -861,6 +853,7 @@ LOGGING = {
         'awx.main.signals': {'level': 'INFO'},  # very verbose debug-level logs
         'awx.api.permissions': {'level': 'INFO'},  # very verbose debug-level logs
         'awx.analytics': {'handlers': ['external_logger'], 'level': 'INFO', 'propagate': False},
         'awx.analytics.broadcast_websocket': {'handlers': ['console', 'file', 'wsbroadcast', 'external_logger'], 'level': 'INFO', 'propagate': False},
         'awx.analytics.performance': {'handlers': ['console', 'file', 'tower_warnings', 'external_logger'], 'level': 'DEBUG', 'propagate': False},
+        'awx.analytics.job_lifecycle': {'handlers': ['console', 'job_lifecycle'], 'level': 'DEBUG', 'propagate': False},
         'django_auth_ldap': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG'},

@@ -993,6 +986,13 @@ DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}
 DEFAULT_EXECUTION_QUEUE_NAME = 'default'
 # pod spec used when the default execution queue is a container group, e.g. when deploying on k8s/ocp with the operator
 DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE = ''
+# Max number of concurrently consumed forks for the default execution queue
+# Zero means no limit
+DEFAULT_EXECUTION_QUEUE_MAX_FORKS = 0
+# Max number of concurrently running jobs for the default execution queue
+# Zero means no limit
+DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS = 0

 # Name of the default controlplane queue
 DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane'
@@ -1004,16 +1004,5 @@ DEFAULT_CONTAINER_RUN_OPTIONS = ['--network', 'slirp4netns:enable_ipv6=true']
 # Mount exposed paths as hostPath resource in k8s/ocp
 AWX_MOUNT_ISOLATED_PATHS_ON_K8S = False

-# Time out task managers if they take longer than this many seconds
-TASK_MANAGER_TIMEOUT = 300
-
-# Number of seconds _in addition to_ the task manager timeout a job can stay
-# in waiting without being reaped
-JOB_WAITING_GRACE_PERIOD = 60
-
-# Number of seconds after a container group job finished time to wait
-# before the awx_k8s_reaper task will tear down the pods
-K8S_POD_REAPER_GRACE_PERIOD = 60
-
 # This is overridden downstream via /etc/tower/conf.d/cluster_host_id.py
 CLUSTER_HOST_ID = socket.gethostname()

@@ -114,7 +114,7 @@ if 'sqlite3' not in DATABASES['default']['ENGINE']:  # noqa
 # this needs to stay at the bottom of this file
 try:
     if os.getenv('AWX_KUBE_DEVEL', False):
-        include(optional('minikube.py'), scope=locals())
+        include(optional('development_kube.py'), scope=locals())
     else:
         include(optional('local_*.py'), scope=locals())
 except ImportError:

@@ -1,4 +1,4 @@
 BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖'
-BROADCAST_WEBSOCKET_PORT = 8013
+BROADCAST_WEBSOCKET_PORT = 8052
 BROADCAST_WEBSOCKET_VERIFY_CERT = False
 BROADCAST_WEBSOCKET_PROTOCOL = 'http'
115
awx/ui/package-lock.json
generated
@@ -7,9 +7,9 @@
       "name": "ui",
       "dependencies": {
         "@lingui/react": "3.14.0",
-        "@patternfly/patternfly": "4.210.2",
-        "@patternfly/react-core": "^4.239.0",
-        "@patternfly/react-icons": "4.90.0",
+        "@patternfly/patternfly": "4.217.1",
+        "@patternfly/react-core": "^4.264.0",
+        "@patternfly/react-icons": "4.92.10",
         "@patternfly/react-table": "4.108.0",
         "ace-builds": "^1.10.1",
         "ansi-to-html": "0.7.2",
@@ -22,7 +22,7 @@
         "has-ansi": "5.0.1",
         "html-entities": "2.3.2",
         "js-yaml": "4.1.0",
-        "luxon": "^3.0.3",
+        "luxon": "^3.1.1",
         "prop-types": "^15.8.1",
         "react": "17.0.2",
         "react-ace": "^10.1.0",
@@ -3747,26 +3747,35 @@
       "dev": true
     },
     "node_modules/@patternfly/patternfly": {
-      "version": "4.210.2",
-      "resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.210.2.tgz",
-      "integrity": "sha512-aZiW24Bxi6uVmk5RyNTp+6q6ThtlJZotNRJfWVeGuwu1UlbBuV4DFa1bpjA6jfTZpfEpX2YL5+R+4ZVSCFAVdw=="
+      "version": "4.217.1",
+      "resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.217.1.tgz",
+      "integrity": "sha512-uN7JgfQsyR16YHkuGRCTIcBcnyKIqKjGkB2SGk9x1XXH3yYGenL83kpAavX9Xtozqp17KppOlybJuzcKvZMrgw=="
     },
     "node_modules/@patternfly/react-core": {
-      "version": "4.239.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.239.0.tgz",
-      "integrity": "sha512-6CmYABCJLUXTlzCk6C3WouMNZpS0BCT+aHU8CvYpFQ/NrpYp3MJaDsYbqgCRWV42rmIO5iXun/4WhXBJzJEoQg==",
+      "version": "4.264.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.264.0.tgz",
+      "integrity": "sha512-tK0BMWxw8nhukev40HZ6q6d02pDnjX7oyA91vHa18aakJUKBWMaerqpG4NZVMoh0tPKX3aLNj+zyCwDALFAZZw==",
       "dependencies": {
-        "@patternfly/react-icons": "^4.90.0",
-        "@patternfly/react-styles": "^4.89.0",
-        "@patternfly/react-tokens": "^4.91.0",
+        "@patternfly/react-icons": "^4.93.0",
+        "@patternfly/react-styles": "^4.92.0",
+        "@patternfly/react-tokens": "^4.94.0",
         "focus-trap": "6.9.2",
         "react-dropzone": "9.0.0",
         "tippy.js": "5.1.2",
         "tslib": "^2.0.0"
       },
       "peerDependencies": {
-        "react": "^16.8.0 || ^17.0.0",
-        "react-dom": "^16.8.0 || ^17.0.0"
+        "react": "^16.8 || ^17 || ^18",
+        "react-dom": "^16.8 || ^17 || ^18"
       }
     },
+    "node_modules/@patternfly/react-core/node_modules/@patternfly/react-icons": {
+      "version": "4.93.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.93.0.tgz",
+      "integrity": "sha512-OH0vORVioL+HLWMEog8/3u8jsiMCeJ0pFpvRKRhy5Uk4CdAe40k1SOBvXJP6opr+O8TLbz0q3bm8Jsh/bPaCuQ==",
+      "peerDependencies": {
+        "react": "^16.8 || ^17 || ^18",
+        "react-dom": "^16.8 || ^17 || ^18"
+      }
+    },
     "node_modules/@patternfly/react-core/node_modules/tslib": {
@@ -3775,18 +3784,18 @@
       "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw=="
     },
     "node_modules/@patternfly/react-icons": {
-      "version": "4.90.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.90.0.tgz",
-      "integrity": "sha512-qEnQKbxbUgyiosiKSkeKEBwmhgJwWEqniIAFyoxj+kpzAdeu7ueWe5iBbqo06mvDOedecFiM5mIE1N0MXwk8Yw==",
+      "version": "4.92.10",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.92.10.tgz",
+      "integrity": "sha512-vwCy7b+OyyuvLDSLqLUG2DkJZgMDogjld8tJTdAaG8HiEhC1sJPZac+5wD7AuS3ym/sQolS4vYtNiVDnMEORxA==",
       "peerDependencies": {
-        "react": "^16.8.0 || ^17.0.0",
-        "react-dom": "^16.8.0 || ^17.0.0"
+        "react": "^16.8 || ^17 || ^18",
+        "react-dom": "^16.8 || ^17 || ^18"
       }
     },
     "node_modules/@patternfly/react-styles": {
-      "version": "4.89.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.89.0.tgz",
-      "integrity": "sha512-SkT+qx3Xqu70T5s+i/AUT2hI2sKAPDX4ffeiJIUDu/oyWiFdk+/9DEivnLSyJMruroXXN33zKibvzb5rH7DKTQ=="
+      "version": "4.92.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.92.0.tgz",
+      "integrity": "sha512-B/f6iyu8UEN1+wRxdC4sLIhvJeyL8SqInDXZmwOIqK8uPJ8Lze7qrbVhkkVzbMF37/oDPVa6dZH8qZFq062LEA=="
     },
     "node_modules/@patternfly/react-table": {
       "version": "4.108.0",
@@ -3811,9 +3820,9 @@
       "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
     },
     "node_modules/@patternfly/react-tokens": {
-      "version": "4.91.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.91.0.tgz",
-      "integrity": "sha512-QeQCy8o8E/16fAr8mxqXIYRmpTsjCHJXi5p5jmgEDFmYMesN6Pqfv6N5D0FHb+CIaNOZWRps7GkWvlIMIE81sw=="
+      "version": "4.94.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.94.0.tgz",
+      "integrity": "sha512-fYXxUJZnzpn89K2zzHF0cSncZZVGKrohdb5f5T1wzxwU2NZPVGpvr88xhm+V2Y/fSrrTPwXcP3IIdtNOOtJdZw=="
     },
     "node_modules/@pmmmwh/react-refresh-webpack-plugin": {
       "version": "0.5.4",
@@ -15468,9 +15477,9 @@
       }
     },
     "node_modules/luxon": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz",
-      "integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w==",
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.1.1.tgz",
+      "integrity": "sha512-Ah6DloGmvseB/pX1cAmjbFvyU/pKuwQMQqz7d0yvuDlVYLTs2WeDHQMpC8tGjm1da+BriHROW/OEIT/KfYg6xw==",
       "engines": {
         "node": ">=12"
       }
@@ -25089,24 +25098,30 @@
       "dev": true
     },
     "@patternfly/patternfly": {
-      "version": "4.210.2",
-      "resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.210.2.tgz",
-      "integrity": "sha512-aZiW24Bxi6uVmk5RyNTp+6q6ThtlJZotNRJfWVeGuwu1UlbBuV4DFa1bpjA6jfTZpfEpX2YL5+R+4ZVSCFAVdw=="
+      "version": "4.217.1",
+      "resolved": "https://registry.npmjs.org/@patternfly/patternfly/-/patternfly-4.217.1.tgz",
+      "integrity": "sha512-uN7JgfQsyR16YHkuGRCTIcBcnyKIqKjGkB2SGk9x1XXH3yYGenL83kpAavX9Xtozqp17KppOlybJuzcKvZMrgw=="
     },
     "@patternfly/react-core": {
-      "version": "4.239.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.239.0.tgz",
-      "integrity": "sha512-6CmYABCJLUXTlzCk6C3WouMNZpS0BCT+aHU8CvYpFQ/NrpYp3MJaDsYbqgCRWV42rmIO5iXun/4WhXBJzJEoQg==",
+      "version": "4.264.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-core/-/react-core-4.264.0.tgz",
+      "integrity": "sha512-tK0BMWxw8nhukev40HZ6q6d02pDnjX7oyA91vHa18aakJUKBWMaerqpG4NZVMoh0tPKX3aLNj+zyCwDALFAZZw==",
       "requires": {
-        "@patternfly/react-icons": "^4.90.0",
-        "@patternfly/react-styles": "^4.89.0",
-        "@patternfly/react-tokens": "^4.91.0",
+        "@patternfly/react-icons": "^4.93.0",
+        "@patternfly/react-styles": "^4.92.0",
+        "@patternfly/react-tokens": "^4.94.0",
         "focus-trap": "6.9.2",
         "react-dropzone": "9.0.0",
         "tippy.js": "5.1.2",
         "tslib": "^2.0.0"
       },
       "dependencies": {
+        "@patternfly/react-icons": {
+          "version": "4.93.0",
+          "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.93.0.tgz",
+          "integrity": "sha512-OH0vORVioL+HLWMEog8/3u8jsiMCeJ0pFpvRKRhy5Uk4CdAe40k1SOBvXJP6opr+O8TLbz0q3bm8Jsh/bPaCuQ==",
+          "requires": {}
+        },
         "tslib": {
           "version": "2.3.1",
           "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz",
@@ -25115,15 +25130,15 @@
       }
     },
     "@patternfly/react-icons": {
-      "version": "4.90.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.90.0.tgz",
-      "integrity": "sha512-qEnQKbxbUgyiosiKSkeKEBwmhgJwWEqniIAFyoxj+kpzAdeu7ueWe5iBbqo06mvDOedecFiM5mIE1N0MXwk8Yw==",
+      "version": "4.92.10",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-icons/-/react-icons-4.92.10.tgz",
+      "integrity": "sha512-vwCy7b+OyyuvLDSLqLUG2DkJZgMDogjld8tJTdAaG8HiEhC1sJPZac+5wD7AuS3ym/sQolS4vYtNiVDnMEORxA==",
       "requires": {}
     },
     "@patternfly/react-styles": {
-      "version": "4.89.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.89.0.tgz",
-      "integrity": "sha512-SkT+qx3Xqu70T5s+i/AUT2hI2sKAPDX4ffeiJIUDu/oyWiFdk+/9DEivnLSyJMruroXXN33zKibvzb5rH7DKTQ=="
+      "version": "4.92.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-styles/-/react-styles-4.92.0.tgz",
+      "integrity": "sha512-B/f6iyu8UEN1+wRxdC4sLIhvJeyL8SqInDXZmwOIqK8uPJ8Lze7qrbVhkkVzbMF37/oDPVa6dZH8qZFq062LEA=="
     },
     "@patternfly/react-table": {
       "version": "4.108.0",
@@ -25146,9 +25161,9 @@
       }
     },
     "@patternfly/react-tokens": {
-      "version": "4.91.0",
-      "resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.91.0.tgz",
-      "integrity": "sha512-QeQCy8o8E/16fAr8mxqXIYRmpTsjCHJXi5p5jmgEDFmYMesN6Pqfv6N5D0FHb+CIaNOZWRps7GkWvlIMIE81sw=="
+      "version": "4.94.0",
+      "resolved": "https://registry.npmjs.org/@patternfly/react-tokens/-/react-tokens-4.94.0.tgz",
+      "integrity": "sha512-fYXxUJZnzpn89K2zzHF0cSncZZVGKrohdb5f5T1wzxwU2NZPVGpvr88xhm+V2Y/fSrrTPwXcP3IIdtNOOtJdZw=="
     },
     "@pmmmwh/react-refresh-webpack-plugin": {
       "version": "0.5.4",
@@ -34210,9 +34225,9 @@
       }
     },
     "luxon": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz",
-      "integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w=="
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.1.1.tgz",
+      "integrity": "sha512-Ah6DloGmvseB/pX1cAmjbFvyU/pKuwQMQqz7d0yvuDlVYLTs2WeDHQMpC8tGjm1da+BriHROW/OEIT/KfYg6xw=="
     },
     "lz-string": {
       "version": "1.4.4",
@@ -7,9 +7,9 @@
   },
   "dependencies": {
     "@lingui/react": "3.14.0",
-    "@patternfly/patternfly": "4.210.2",
-    "@patternfly/react-core": "^4.239.0",
-    "@patternfly/react-icons": "4.90.0",
+    "@patternfly/patternfly": "4.217.1",
+    "@patternfly/react-core": "^4.264.0",
+    "@patternfly/react-icons": "4.92.10",
     "@patternfly/react-table": "4.108.0",
     "ace-builds": "^1.10.1",
     "ansi-to-html": "0.7.2",
@@ -22,7 +22,7 @@
     "has-ansi": "5.0.1",
     "html-entities": "2.3.2",
     "js-yaml": "4.1.0",
-    "luxon": "^3.0.3",
+    "luxon": "^3.1.1",
     "prop-types": "^15.8.1",
     "react": "17.0.2",
     "react-ace": "^10.1.0",
@@ -20,6 +20,10 @@ class Hosts extends Base {
     return this.http.get(`${this.baseUrl}${id}/all_groups/`, { params });
   }

+  readGroups(id, params) {
+    return this.http.get(`${this.baseUrl}${id}/groups/`, { params });
+  }
+
   readGroupsOptions(id) {
     return this.http.options(`${this.baseUrl}${id}/groups/`);
   }
@@ -0,0 +1,49 @@
+import React from 'react';
+import { arrayOf, bool, number, shape, string } from 'prop-types';
+
+import { Label, LabelGroup } from '@patternfly/react-core';
+import { Link } from 'react-router-dom';
+
+function InstanceGroupLabels({ labels, isLinkable }) {
+  const buildLinkURL = (isContainerGroup) =>
+    isContainerGroup
+      ? '/instance_groups/container_group/'
+      : '/instance_groups/';
+  return (
+    <LabelGroup numLabels={5}>
+      {labels.map(({ id, name, is_container_group }) =>
+        isLinkable ? (
+          <Label
+            color="blue"
+            key={id}
+            render={({ className, content, componentRef }) => (
+              <Link
+                className={className}
+                innerRef={componentRef}
+                to={`${buildLinkURL(is_container_group)}${id}/details`}
+              >
+                {content}
+              </Link>
+            )}
+          >
+            {name}
+          </Label>
+        ) : (
+          <Label color="blue" key={id}>
+            {name}
+          </Label>
+        )
+      )}
+    </LabelGroup>
+  );
+}
+
+InstanceGroupLabels.propTypes = {
+  labels: arrayOf(shape({ id: number.isRequired, name: string.isRequired }))
+    .isRequired,
+  isLinkable: bool,
+};
+
+InstanceGroupLabels.defaultProps = { isLinkable: false };
+
+export default InstanceGroupLabels;
1
awx/ui/src/components/InstanceGroupLabels/index.js
Normal file
@@ -0,0 +1 @@
+export { default } from './InstanceGroupLabels';
@@ -153,6 +153,10 @@ function CredentialsStep({
         }))}
         value={selectedType && selectedType.id}
         onChange={(e, id) => {
+          // Reset query params when the category of credentials is changed
+          history.replace({
+            search: '',
+          });
           setSelectedType(types.find((o) => o.id === parseInt(id, 10)));
         }}
       />
@@ -3,6 +3,7 @@ import { act } from 'react-dom/test-utils';
 import { Formik } from 'formik';
 import { CredentialsAPI, CredentialTypesAPI } from 'api';
 import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
+import { createMemoryHistory } from 'history';
 import CredentialsStep from './CredentialsStep';

 jest.mock('../../../api/models/CredentialTypes');
@@ -164,6 +165,41 @@ describe('CredentialsStep', () => {
     });
   });

+  test('should reset query params (credential.page) when selected credential type is changed', async () => {
+    let wrapper;
+    const history = createMemoryHistory({
+      initialEntries: ['?credential.page=2'],
+    });
+    await act(async () => {
+      wrapper = mountWithContexts(
+        <Formik>
+          <CredentialsStep allowCredentialsWithPasswords />
+        </Formik>,
+        {
+          context: { router: { history } },
+        }
+      );
+    });
+    wrapper.update();
+
+    expect(CredentialsAPI.read).toHaveBeenCalledWith({
+      credential_type: 1,
+      order_by: 'name',
+      page: 2,
+      page_size: 5,
+    });
+
+    await act(async () => {
+      wrapper.find('AnsibleSelect').invoke('onChange')({}, 3);
+    });
+    expect(CredentialsAPI.read).toHaveBeenCalledWith({
+      credential_type: 3,
+      order_by: 'name',
+      page: 1,
+      page_size: 5,
+    });
+  });
+
   test("error should be shown when a credential that prompts for passwords is selected on a step that doesn't allow it", async () => {
     let wrapper;
     await act(async () => {
@@ -173,6 +173,10 @@ function MultiCredentialsLookup({
         }))}
         value={selectedType && selectedType.id}
         onChange={(e, id) => {
+          // Reset query params when the category of credentials is changed
+          history.replace({
+            search: '',
+          });
           setSelectedType(
             credentialTypes.find((o) => o.id === parseInt(id, 10))
           );
@@ -6,6 +6,7 @@ import {
   mountWithContexts,
   waitForElement,
 } from '../../../testUtils/enzymeHelpers';
+import { createMemoryHistory } from 'history';
 import MultiCredentialsLookup from './MultiCredentialsLookup';

 jest.mock('../../api');
@@ -228,6 +229,53 @@ describe('<Formik><MultiCredentialsLookup /></Formik>', () => {
     ]);
   });

+  test('should reset query params (credentials.page) when selected credential type is changed', async () => {
+    const history = createMemoryHistory({
+      initialEntries: ['?credentials.page=2'],
+    });
+    await act(async () => {
+      wrapper = mountWithContexts(
+        <Formik>
+          <MultiCredentialsLookup
+            value={credentials}
+            tooltip="This is credentials look up"
+            onChange={() => {}}
+            onError={() => {}}
+          />
+        </Formik>,
+        {
+          context: { router: { history } },
+        }
+      );
+    });
+    const searchButton = await waitForElement(
+      wrapper,
+      'Button[aria-label="Search"]'
+    );
+    await act(async () => {
+      searchButton.invoke('onClick')();
+    });
+    expect(CredentialsAPI.read).toHaveBeenCalledWith({
+      credential_type: 400,
+      order_by: 'name',
+      page: 2,
+      page_size: 5,
+    });
+
+    const select = await waitForElement(wrapper, 'AnsibleSelect');
+    await act(async () => {
+      select.invoke('onChange')({}, 500);
+    });
+    wrapper.update();
+
+    expect(CredentialsAPI.read).toHaveBeenCalledWith({
+      credential_type: 500,
+      order_by: 'name',
+      page: 1,
+      page_size: 5,
+    });
+  });
+
   test('should only add 1 credential per credential type except vault(see below)', async () => {
     const onChange = jest.fn();
     await act(async () => {
@@ -6,6 +6,7 @@ import { Link } from 'react-router-dom';
 import styled from 'styled-components';
 import { Chip, Divider, Title } from '@patternfly/react-core';
 import { toTitleCase } from 'util/strings';
+import InstanceGroupLabels from 'components/InstanceGroupLabels';
 import CredentialChip from '../CredentialChip';
 import ChipGroup from '../ChipGroup';
 import { DetailList, Detail, UserDateDetail } from '../DetailList';
@@ -227,21 +228,7 @@ function PromptDetail({
           label={t`Instance Groups`}
           rows={4}
           value={
-            <ChipGroup
-              numChips={5}
-              totalChips={overrides.instance_groups.length}
-              ouiaId="prompt-instance-groups-chips"
-            >
-              {overrides.instance_groups.map((instance_group) => (
-                <Chip
-                  key={instance_group.id}
-                  ouiaId={`instance-group-${instance_group.id}-chip`}
-                  isReadOnly
-                >
-                  {instance_group.name}
-                </Chip>
-              ))}
-            </ChipGroup>
+            <InstanceGroupLabels labels={overrides.instance_groups} />
           }
         />
       )}
@@ -10,6 +10,7 @@ import useRequest, { useDismissableError } from 'hooks/useRequest';
 import { JobTemplatesAPI, SchedulesAPI, WorkflowJobTemplatesAPI } from 'api';
 import { parseVariableField, jsonToYaml } from 'util/yaml';
 import { useConfig } from 'contexts/Config';
+import InstanceGroupLabels from 'components/InstanceGroupLabels';
 import parseRuleObj from '../shared/parseRuleObj';
 import FrequencyDetails from './FrequencyDetails';
 import AlertModal from '../../AlertModal';
@@ -27,11 +28,6 @@ import { VariablesDetail } from '../../CodeEditor';
 import { VERBOSITY } from '../../VerbositySelectField';
 import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext';

-const buildLinkURL = (instance) =>
-  instance.is_container_group
-    ? '/instance_groups/container_group/'
-    : '/instance_groups/';
-
 const PromptDivider = styled(Divider)`
   margin-top: var(--pf-global--spacer--lg);
   margin-bottom: var(--pf-global--spacer--lg);
@@ -498,26 +494,7 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
           fullWidth
           label={t`Instance Groups`}
           value={
-            <ChipGroup
-              numChips={5}
-              totalChips={instanceGroups.length}
-              ouiaId="instance-group-chips"
-            >
-              {instanceGroups.map((ig) => (
-                <Link
-                  to={`${buildLinkURL(ig)}${ig.id}/details`}
-                  key={ig.id}
-                >
-                  <Chip
-                    key={ig.id}
-                    ouiaId={`instance-group-${ig.id}-chip`}
-                    isReadOnly
-                  >
-                    {ig.name}
-                  </Chip>
-                </Link>
-              ))}
-            </ChipGroup>
+            <InstanceGroupLabels labels={instanceGroups} isLinkable />
           }
           isEmpty={instanceGroups.length === 0}
         />
@@ -55,7 +55,6 @@ function DateTimePicker({ dateFieldName, timeFieldName, label }) {
         onChange={onDateChange}
       />
       <TimePicker
         placeholder="hh:mm AM/PM"
-        stepMinutes={15}
         aria-label={
           timeFieldName.startsWith('start') ? t`Start time` : t`End time`
         }
93
awx/ui/src/components/Schedule/shared/FrequenciesList.js
Normal file
@@ -0,0 +1,93 @@
+import React, { useState } from 'react';
+import { t } from '@lingui/macro';
+import {
+  Button,
+  Switch,
+  Toolbar,
+  ToolbarContent,
+  ToolbarItem,
+} from '@patternfly/react-core';
+import { PencilAltIcon } from '@patternfly/react-icons';
+import {
+  TableComposable,
+  Tbody,
+  Thead,
+  Th,
+  Tr,
+  Td,
+} from '@patternfly/react-table';
+
+import { useField } from 'formik';
+import ContentEmpty from 'components/ContentEmpty';
+
+function FrequenciesList({ openWizard }) {
+  const [isShowingRules, setIsShowingRules] = useState(true);
+  const [frequencies] = useField('frequencies');
+  const list = (freq) => (
+    <Tr key={freq.rrule}>
+      <Td>{freq.frequency}</Td>
+      <Td>{freq.rrule}</Td>
+      <Td>{t`End`}</Td>
+      <Td>
+        <Button
+          variant="plain"
+          aria-label={t`Click to toggle default value`}
+          ouiaId={freq ? `${freq}-button` : 'new-freq-button'}
+          onClick={() => {
+            openWizard(true);
+          }}
+        >
+          <PencilAltIcon />
+        </Button>
+      </Td>
+    </Tr>
+  );
+  return (
+    <>
+      <Toolbar>
+        <ToolbarContent>
+          <ToolbarItem>
+            <Button
+              onClick={() => {
+                openWizard(true);
+              }}
+              variant="secondary"
+            >
+              {isShowingRules ? t`Add RRules` : t`Add Exception`}
+            </Button>
+          </ToolbarItem>
+          <ToolbarItem>
+            <Switch
+              label={t`Occurrences`}
+              labelOff={t`Exceptions`}
+              isChecked={isShowingRules}
+              onChange={(isChecked) => {
+                setIsShowingRules(isChecked);
+              }}
+            />
+          </ToolbarItem>
+        </ToolbarContent>
+      </Toolbar>
+      <div css="overflow: auto">
+        {frequencies.value[0].frequency === '' &&
+        frequencies.value.length < 2 ? (
+          <ContentEmpty title={t`RRules`} message={t`Add RRules`} />
+        ) : (
+          <TableComposable aria-label={t`RRules`} ouiaId="rrules-list">
+            <Thead>
+              <Tr>
+                <Th>{t`Frequency`}</Th>
+                <Th>{t`RRule`}</Th>
+                <Th>{t`Ending`}</Th>
+                <Th>{t`Actions`}</Th>
+              </Tr>
+            </Thead>
+            <Tbody>{frequencies.value.map((freq, i) => list(freq, i))}</Tbody>
+          </TableComposable>
+        )}
+      </div>
+    </>
+  );
+}
+
+export default FrequenciesList;
@@ -1,568 +0,0 @@
|
||||
import 'styled-components/macro';
|
||||
import React from 'react';
|
||||
import styled from 'styled-components';
|
||||
import { useField } from 'formik';
|
||||
|
||||
import { t, Trans, Plural } from '@lingui/macro';
|
||||
import { RRule } from 'rrule';
|
||||
import {
|
||||
Checkbox as _Checkbox,
|
||||
FormGroup,
|
||||
Radio,
|
||||
TextInput,
|
||||
} from '@patternfly/react-core';
|
||||
import { required, requiredPositiveInteger } from 'util/validators';
|
||||
import AnsibleSelect from '../../AnsibleSelect';
|
||||
import FormField from '../../FormField';
|
||||
import DateTimePicker from './DateTimePicker';
|
||||
|
||||
const RunOnRadio = styled(Radio)`
|
||||
display: flex;
|
||||
align-items: center;
|
||||
|
||||
label {
|
||||
display: block;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
:not(:last-of-type) {
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
select:not(:first-of-type) {
|
||||
margin-left: 10px;
|
||||
}
|
||||
`;
|
||||
|
||||
const RunEveryLabel = styled.p`
|
||||
display: flex;
|
||||
align-items: center;
|
||||
`;
|
||||
|
||||
const Checkbox = styled(_Checkbox)`
|
||||
:not(:last-of-type) {
|
||||
margin-right: 10px;
|
||||
}
|
||||
`;
|
||||
|
||||
const FrequencyDetailSubform = ({ frequency, prefix, isException }) => {
|
||||
const id = prefix.replace('.', '-');
|
||||
const [runOnDayMonth] = useField({
|
||||
name: `${prefix}.runOnDayMonth`,
|
||||
});
|
||||
const [runOnDayNumber] = useField({
|
||||
name: `${prefix}.runOnDayNumber`,
|
||||
});
|
||||
const [runOnTheOccurrence] = useField({
|
||||
name: `${prefix}.runOnTheOccurrence`,
|
||||
});
|
||||
const [runOnTheDay] = useField({
|
||||
name: `${prefix}.runOnTheDay`,
|
||||
});
|
||||
const [runOnTheMonth] = useField({
|
||||
name: `${prefix}.runOnTheMonth`,
|
||||
});
|
||||
const [startDate] = useField(`${prefix}.startDate`);
|
||||
|
||||
const [daysOfWeek, daysOfWeekMeta, daysOfWeekHelpers] = useField({
|
||||
name: `${prefix}.daysOfWeek`,
|
||||
validate: (val) => {
|
||||
if (frequency === 'week') {
|
||||
return required(t`Select a value for this field`)(val?.length > 0);
|
||||
}
|
||||
return undefined;
|
||||
},
|
||||
});
|
||||
const [end, endMeta] = useField({
|
||||
name: `${prefix}.end`,
|
||||
validate: required(t`Select a value for this field`),
|
||||
});
|
||||
const [interval, intervalMeta] = useField({
|
||||
name: `${prefix}.interval`,
|
||||
validate: requiredPositiveInteger(),
|
||||
});
|
||||
const [runOn, runOnMeta] = useField({
|
||||
name: `${prefix}.runOn`,
|
||||
validate: (val) => {
|
||||
if (frequency === 'month' || frequency === 'year') {
|
||||
return required(t`Select a value for this field`)(val);
|
||||
}
|
||||
return undefined;
|
||||
},
|
||||
});
|
||||
|
||||
const monthOptions = [
|
||||
{
|
||||
key: 'january',
|
||||
value: 1,
|
||||
label: t`January`,
|
||||
},
|
||||
{
|
||||
key: 'february',
|
||||
value: 2,
|
||||
label: t`February`,
|
||||
},
|
||||
{
|
||||
key: 'march',
|
||||
value: 3,
|
||||
label: t`March`,
|
||||
},
|
||||
{
|
||||
key: 'april',
|
||||
value: 4,
|
||||
label: t`April`,
|
||||
},
|
||||
{
|
||||
key: 'may',
|
||||
value: 5,
|
||||
label: t`May`,
|
||||
},
|
||||
{
|
||||
key: 'june',
|
||||
value: 6,
|
||||
label: t`June`,
|
||||
},
|
||||
{
|
||||
key: 'july',
|
||||
value: 7,
|
||||
label: t`July`,
|
||||
},
|
||||
{
|
||||
key: 'august',
|
||||
value: 8,
|
||||
label: t`August`,
|
||||
},
|
||||
{
|
||||
key: 'september',
|
||||
value: 9,
|
||||
label: t`September`,
|
||||
},
|
||||
{
|
||||
key: 'october',
|
||||
value: 10,
|
||||
label: t`October`,
|
||||
},
|
||||
{
|
||||
key: 'november',
|
||||
value: 11,
|
||||
label: t`November`,
|
||||
},
|
||||
{
|
||||
key: 'december',
|
||||
value: 12,
|
||||
label: t`December`,
|
||||
},
|
||||
];
|
||||
|
||||
const updateDaysOfWeek = (day, checked) => {
|
||||
const newDaysOfWeek = daysOfWeek.value ? [...daysOfWeek.value] : [];
|
||||
daysOfWeekHelpers.setTouched(true);
|
||||
if (checked) {
|
||||
newDaysOfWeek.push(day);
|
||||
daysOfWeekHelpers.setValue(newDaysOfWeek);
|
||||
} else {
|
||||
daysOfWeekHelpers.setValue(
|
||||
newDaysOfWeek.filter((selectedDay) => selectedDay !== day)
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
const getPeriodLabel = () => {
|
||||
switch (frequency) {
|
||||
case 'minute':
|
||||
return t`Minute`;
|
||||
case 'hour':
|
||||
return t`Hour`;
|
||||
case 'day':
|
||||
return t`Day`;
|
||||
case 'week':
|
||||
return t`Week`;
|
||||
case 'month':
|
||||
return t`Month`;
|
||||
case 'year':
|
||||
return t`Year`;
|
||||
default:
|
||||
throw new Error(t`Frequency did not match an expected value`);
|
||||
}
|
||||
};
|
||||
|
||||
const getRunEveryLabel = () => {
|
||||
const intervalValue = interval.value;
|
||||
|
||||
switch (frequency) {
|
||||
case 'minute':
|
||||
return <Plural value={intervalValue} one="minute" other="minutes" />;
|
||||
case 'hour':
|
||||
return <Plural value={intervalValue} one="hour" other="hours" />;
|
||||
case 'day':
|
||||
return <Plural value={intervalValue} one="day" other="days" />;
|
||||
case 'week':
|
||||
return <Plural value={intervalValue} one="week" other="weeks" />;
|
||||
case 'month':
|
||||
return <Plural value={intervalValue} one="month" other="months" />;
|
||||
case 'year':
|
||||
return <Plural value={intervalValue} one="year" other="years" />;
|
||||
default:
|
||||
throw new Error(t`Frequency did not match an expected value`);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<p css="grid-column: 1/-1">
|
||||
<b>{getPeriodLabel()}</b>
|
||||
</p>
|
||||
<FormGroup
|
||||
name={`${prefix}.interval`}
|
||||
fieldId={`schedule-run-every-${id}`}
|
||||
helperTextInvalid={intervalMeta.error}
|
||||
isRequired
|
||||
validated={
|
||||
!intervalMeta.touched || !intervalMeta.error ? 'default' : 'error'
|
||||
}
|
||||
label={isException ? t`Skip every` : t`Run every`}
|
||||
>
|
||||
<div css="display: flex">
|
||||
<TextInput
|
||||
css="margin-right: 10px;"
|
||||
id={`schedule-run-every-${id}`}
|
||||
type="number"
|
||||
min="1"
|
||||
step="1"
|
||||
{...interval}
|
||||
onChange={(value, event) => {
|
||||
interval.onChange(event);
|
||||
}}
|
||||
/>
|
||||
<RunEveryLabel>{getRunEveryLabel()}</RunEveryLabel>
|
||||
</div>
|
||||
</FormGroup>
|
||||
{frequency === 'week' && (
|
||||
<FormGroup
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
fieldId={`schedule-days-of-week-${id}`}
|
||||
helperTextInvalid={daysOfWeekMeta.error}
|
||||
isRequired
|
||||
validated={
|
||||
!daysOfWeekMeta.touched || !daysOfWeekMeta.error
|
||||
? 'default'
|
||||
: 'error'
|
||||
}
|
||||
label={t`On days`}
|
||||
>
|
||||
<div css="display: flex">
|
||||
<Checkbox
|
||||
label={t`Sun`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.SU)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.SU, checked);
|
||||
}}
|
||||
aria-label={t`Sunday`}
|
||||
id={`schedule-days-of-week-sun-${id}`}
|
||||
ouiaId={`schedule-days-of-week-sun-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
<Checkbox
|
||||
label={t`Mon`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.MO)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.MO, checked);
|
||||
}}
|
||||
aria-label={t`Monday`}
|
||||
id={`schedule-days-of-week-mon-${id}`}
|
||||
ouiaId={`schedule-days-of-week-mon-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
<Checkbox
|
||||
label={t`Tue`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.TU)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.TU, checked);
|
||||
}}
|
||||
aria-label={t`Tuesday`}
|
||||
id={`schedule-days-of-week-tue-${id}`}
|
||||
ouiaId={`schedule-days-of-week-tue-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
<Checkbox
|
||||
label={t`Wed`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.WE)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.WE, checked);
|
||||
}}
|
||||
aria-label={t`Wednesday`}
|
||||
id={`schedule-days-of-week-wed-${id}`}
|
||||
ouiaId={`schedule-days-of-week-wed-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
<Checkbox
|
||||
label={t`Thu`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.TH)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.TH, checked);
|
||||
}}
|
||||
aria-label={t`Thursday`}
|
||||
id={`schedule-days-of-week-thu-${id}`}
|
||||
ouiaId={`schedule-days-of-week-thu-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
<Checkbox
|
||||
label={t`Fri`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.FR)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.FR, checked);
|
||||
}}
|
||||
aria-label={t`Friday`}
|
||||
id={`schedule-days-of-week-fri-${id}`}
|
||||
ouiaId={`schedule-days-of-week-fri-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
<Checkbox
|
||||
label={t`Sat`}
|
||||
isChecked={daysOfWeek.value?.includes(RRule.SA)}
|
||||
onChange={(checked) => {
|
||||
updateDaysOfWeek(RRule.SA, checked);
|
||||
}}
|
||||
aria-label={t`Saturday`}
|
||||
id={`schedule-days-of-week-sat-${id}`}
|
||||
ouiaId={`schedule-days-of-week-sat-${id}`}
|
||||
name={`${prefix}.daysOfWeek`}
|
||||
/>
|
||||
</div>
|
||||
</FormGroup>
|
||||
)}
|
||||
{(frequency === 'month' || frequency === 'year') &&
|
||||
!Number.isNaN(new Date(startDate.value)) && (
|
||||
<FormGroup
|
||||
name={`${prefix}.runOn`}
|
||||
fieldId={`schedule-run-on-${id}`}
|
||||
helperTextInvalid={runOnMeta.error}
|
||||
isRequired
|
||||
validated={
|
||||
!runOnMeta.touched || !runOnMeta.error ? 'default' : 'error'
|
||||
}
|
||||
label={t`Run on`}
|
||||
>
|
||||
<RunOnRadio
|
||||
id={`schedule-run-on-day-${id}`}
|
||||
name={`${prefix}.runOn`}
|
||||
label={
|
||||
<div css="display: flex;align-items: center;">
|
||||
{frequency === 'month' && (
|
||||
<span
|
||||
id="radio-schedule-run-on-day"
|
||||
css="margin-right: 10px;"
|
||||
>
|
||||
<Trans>Day</Trans>
|
||||
</span>
|
||||
)}
|
||||
{frequency === 'year' && (
|
||||
<AnsibleSelect
|
||||
id={`schedule-run-on-day-month-${id}`}
|
||||
css="margin-right: 10px"
|
||||
isDisabled={runOn.value !== 'day'}
|
||||
data={monthOptions}
|
||||
{...runOnDayMonth}
|
||||
/>
|
||||
)}
|
||||
<TextInput
|
||||
id={`schedule-run-on-day-number-${id}`}
|
||||
type="number"
|
||||
min="1"
|
||||
max="31"
|
||||
step="1"
|
||||
isDisabled={runOn.value !== 'day'}
|
||||
{...runOnDayNumber}
|
||||
onChange={(value, event) => {
|
||||
runOnDayNumber.onChange(event);
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
}
|
||||
value="day"
|
||||
isChecked={runOn.value === 'day'}
|
||||
onChange={(value, event) => {
|
||||
event.target.value = 'day';
|
||||
runOn.onChange(event);
|
||||
}}
|
||||
/>
|
||||
<RunOnRadio
|
||||
id={`schedule-run-on-the-${id}`}
|
||||
name={`${prefix}.runOn`}
|
||||
label={
|
||||
<div css="display: flex;align-items: center;">
|
||||
<span
|
||||
id={`radio-schedule-run-on-the-${id}`}
|
||||
css="margin-right: 10px;"
|
||||
>
|
||||
<Trans>The</Trans>
|
||||
</span>
|
||||
<AnsibleSelect
|
||||
id={`schedule-run-on-the-occurrence-${id}`}
|
||||
isDisabled={runOn.value !== 'the'}
|
||||
data={[
|
||||
{ value: 1, key: 'first', label: t`First` },
|
||||
{
|
||||
value: 2,
|
||||
key: 'second',
|
||||
label: t`Second`,
|
||||
},
|
||||
{ value: 3, key: 'third', label: t`Third` },
|
||||
{
|
||||
value: 4,
|
||||
key: 'fourth',
|
||||
label: t`Fourth`,
|
||||
},
|
||||
{ value: 5, key: 'fifth', label: t`Fifth` },
|
||||
{ value: -1, key: 'last', label: t`Last` },
|
||||
]}
|
||||
{...runOnTheOccurrence}
|
||||
/>
|
||||
<AnsibleSelect
|
||||
id={`schedule-run-on-the-day-${id}`}
|
||||
isDisabled={runOn.value !== 'the'}
|
||||
data={[
|
||||
{
|
||||
value: 'sunday',
|
||||
key: 'sunday',
|
||||
label: t`Sunday`,
|
||||
},
|
||||
{
|
||||
value: 'monday',
|
||||
key: 'monday',
|
||||
label: t`Monday`,
|
||||
},
|
||||
{
|
||||
value: 'tuesday',
|
||||
key: 'tuesday',
|
||||
label: t`Tuesday`,
|
||||
},
|
||||
{
|
||||
value: 'wednesday',
|
||||
key: 'wednesday',
|
||||
label: t`Wednesday`,
|
||||
},
|
||||
{
|
||||
value: 'thursday',
|
||||
key: 'thursday',
|
||||
label: t`Thursday`,
|
||||
},
|
||||
{
|
||||
value: 'friday',
|
||||
key: 'friday',
|
||||
label: t`Friday`,
|
||||
},
|
||||
{
|
||||
value: 'saturday',
|
||||
key: 'saturday',
|
||||
label: t`Saturday`,
|
||||
},
|
||||
{ value: 'day', key: 'day', label: t`Day` },
|
||||
{
|
||||
value: 'weekday',
|
||||
key: 'weekday',
|
||||
label: t`Weekday`,
|
||||
},
|
||||
{
|
||||
value: 'weekendDay',
|
||||
key: 'weekendDay',
|
||||
label: t`Weekend day`,
|
||||
},
|
||||
]}
|
||||
{...runOnTheDay}
|
||||
/>
|
||||
{frequency === 'year' && (
|
||||
<>
|
||||
<span
|
||||
id={`of-schedule-run-on-the-month-${id}`}
|
||||
css="margin-left: 10px;"
|
||||
>
|
||||
<Trans>of</Trans>
|
||||
</span>
|
||||
<AnsibleSelect
|
||||
id={`schedule-run-on-the-month-${id}`}
|
||||
isDisabled={runOn.value !== 'the'}
|
||||
data={monthOptions}
|
||||
{...runOnTheMonth}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
}
|
||||
value="the"
|
||||
isChecked={runOn.value === 'the'}
|
||||
onChange={(value, event) => {
|
||||
event.target.value = 'the';
|
||||
runOn.onChange(event);
|
||||
}}
|
||||
/>
|
||||
</FormGroup>
|
||||
)}
|
||||
<FormGroup
|
||||
name={`${prefix}.end`}
|
||||
fieldId={`schedule-end-${id}`}
|
||||
helperTextInvalid={endMeta.error}
|
||||
isRequired
|
||||
validated={!endMeta.touched || !endMeta.error ? 'default' : 'error'}
|
||||
label={t`End`}
|
||||
>
|
||||
<Radio
|
||||
id={`end-never-${id}`}
|
||||
name={`${prefix}.end`}
|
||||
label={t`Never`}
|
||||
value="never"
|
||||
isChecked={end.value === 'never'}
|
||||
onChange={(value, event) => {
|
||||
event.target.value = 'never';
|
||||
end.onChange(event);
|
||||
}}
|
||||
ouiaId={`end-never-radio-button-${id}`}
|
||||
/>
|
||||
<Radio
|
||||
id={`end-after-${id}`}
|
||||
name={`${prefix}.end`}
|
||||
label={t`After number of occurrences`}
|
||||
value="after"
|
||||
isChecked={end.value === 'after'}
|
||||
onChange={(value, event) => {
|
||||
event.target.value = 'after';
|
||||
end.onChange(event);
|
||||
}}
|
||||
ouiaId={`end-after-radio-button-${id}`}
|
||||
/>
|
||||
<Radio
|
||||
id={`end-on-date-${id}`}
|
||||
name={`${prefix}.end`}
|
||||
label={t`On date`}
|
||||
value="onDate"
|
||||
isChecked={end.value === 'onDate'}
|
||||
onChange={(value, event) => {
|
||||
event.target.value = 'onDate';
|
||||
end.onChange(event);
|
||||
}}
|
||||
ouiaId={`end-on-radio-button-${id}`}
|
||||
/>
|
||||
</FormGroup>
|
||||
{end?.value === 'after' && (
|
||||
<FormField
|
||||
id={`schedule-occurrences-${id}`}
|
||||
label={t`Occurrences`}
|
||||
name={`${prefix}.occurrences`}
|
||||
type="number"
|
||||
min="1"
|
||||
step="1"
|
||||
isRequired
|
||||
/>
|
||||
)}
|
||||
{end?.value === 'onDate' && (
|
||||
<DateTimePicker
|
||||
dateFieldName={`${prefix}.endDate`}
|
||||
timeFieldName={`${prefix}.endTime`}
|
||||
label={t`End date/time`}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default FrequencyDetailSubform;
|
||||
@@ -1,30 +1,12 @@
 import React, { useState } from 'react';
-import { arrayOf, string } from 'prop-types';
+import { t } from '@lingui/macro';
+import { useField } from 'formik';
+import { RRule } from 'rrule';
 import { Select, SelectOption, SelectVariant } from '@patternfly/react-core';

-export default function FrequencySelect({
-  id,
-  value,
-  onChange,
-  onBlur,
-  placeholderText,
-  children,
-}) {
+export default function FrequencySelect({ id, onBlur, placeholderText }) {
   const [isOpen, setIsOpen] = useState(false);

-  const onSelect = (event, selectedValue) => {
-    if (selectedValue === 'none') {
-      onChange([]);
-      setIsOpen(false);
-      return;
-    }
-    const index = value.indexOf(selectedValue);
-    if (index === -1) {
-      onChange(value.concat(selectedValue));
-    } else {
-      onChange(value.slice(0, index).concat(value.slice(index + 1)));
-    }
-  };
+  const [frequency, , frequencyHelpers] = useField('freq');

   const onToggle = (val) => {
     if (!val) {
@@ -35,21 +17,26 @@ export default function FrequencySelect({

   return (
     <Select
       variant={SelectVariant.checkbox}
-      onSelect={onSelect}
-      selections={value}
+      onSelect={(e, v) => {
+        frequencyHelpers.setValue(v);
+        setIsOpen(false);
+      }}
+      selections={frequency.value}
       placeholderText={placeholderText}
       onToggle={onToggle}
+      value={frequency.value}
       isOpen={isOpen}
       ouiaId={`frequency-select-${id}`}
+      onBlur={() => frequencyHelpers.setTouched(true)}
     >
-      {children}
+      <SelectOption value={RRule.MINUTELY}>{t`Minute`}</SelectOption>
+      <SelectOption value={RRule.HOURLY}>{t`Hour`}</SelectOption>
+      <SelectOption value={RRule.DAILY}>{t`Day`}</SelectOption>
+      <SelectOption value={RRule.WEEKLY}>{t`Week`}</SelectOption>
+      <SelectOption value={RRule.MONTHLY}>{t`Month`}</SelectOption>
+      <SelectOption value={RRule.YEARLY}>{t`Year`}</SelectOption>
     </Select>
   );
 }

-FrequencySelect.propTypes = {
-  value: arrayOf(string).isRequired,
-};
-
 export { SelectOption, SelectVariant };
77
awx/ui/src/components/Schedule/shared/MonthandYearForm.js
Normal file
@@ -0,0 +1,77 @@
|
||||
import React from 'react';
|
||||
import { t } from '@lingui/macro';
|
||||
import AnsibleSelect from 'components/AnsibleSelect';
|
||||
import styled from 'styled-components';
|
||||
import {
|
||||
FormGroup,
|
||||
Checkbox as _Checkbox,
|
||||
Grid,
|
||||
GridItem,
|
||||
} from '@patternfly/react-core';
|
||||
import { useField } from 'formik';
|
||||
import { bysetposOptions, monthOptions } from './scheduleFormHelpers';
|
||||
|
||||
const GroupWrapper = styled(FormGroup)`
|
||||
&& .pf-c-form__group-control {
|
||||
display: flex;
|
||||
padding-top: 10px;
|
||||
}
|
||||
&& .pf-c-form__group-label {
|
||||
padding-top: 20px;
|
||||
}
|
||||
`;
|
||||
const Checkbox = styled(_Checkbox)`
|
||||
:not(:last-of-type) {
|
||||
margin-right: 10px;
|
||||
}
|
||||
`;
|
||||
function MonthandYearForm({ id }) {
|
||||
const [bySetPos, , bySetPosHelpers] = useField('bysetpos');
|
||||
const [byMonth, , byMonthHelpers] = useField('bymonth');
|
||||
|
||||
return (
|
||||
<>
|
||||
<GroupWrapper
|
||||
fieldId={`schedule-run-on-${id}`}
|
||||
label={<b>{t`Run on a specific month`}</b>}
|
||||
>
|
||||
<Grid hasGutter>
|
||||
{monthOptions.map((month) => (
|
||||
<GridItem key={month.label} span={2} rowSpan={2}>
|
||||
<Checkbox
|
||||
label={month.label}
|
||||
isChecked={byMonth.value?.includes(month.value)}
|
||||
onChange={(checked) => {
|
||||
if (checked) {
|
||||
byMonthHelpers.setValue([...byMonth.value, month.value]);
|
||||
} else {
|
||||
const removed = byMonth.value.filter(
|
||||
(i) => i !== month.value
|
||||
);
|
||||
byMonthHelpers.setValue(removed);
|
||||
}
|
||||
}}
|
||||
id={`bymonth-${month.label}`}
|
||||
ouiaId={`bymonth-${month.label}`}
|
||||
name="bymonth"
|
||||
/>
|
||||
</GridItem>
|
||||
))}
|
||||
</Grid>
|
||||
</GroupWrapper>
|
||||
<GroupWrapper
|
||||
label={<b>{t`Run on a specific week day at monthly intervals`}</b>}
|
||||
>
|
||||
<AnsibleSelect
|
||||
id={`schedule-run-on-the-occurrence-${id}`}
|
||||
data={bysetposOptions}
|
||||
{...bySetPos}
|
||||
onChange={(e, v) => {
|
||||
bySetPosHelpers.setValue(v);
|
||||
}}
|
||||
/>
|
||||
</GroupWrapper>
|
||||
</>
|
||||
);
|
||||
}
|
||||
export default MonthandYearForm;
|
||||
45
awx/ui/src/components/Schedule/shared/OrdinalDayForm.js
Normal file
@@ -0,0 +1,45 @@
|
||||
import React from 'react';
|
||||
import { t } from '@lingui/macro';
|
||||
import styled from 'styled-components';
|
||||
import { useField } from 'formik';
|
||||
import { FormGroup, TextInput } from '@patternfly/react-core';
|
||||
|
||||
const GroupWrapper = styled(FormGroup)`
|
||||
&& .pf-c-form__group-control {
|
||||
display: flex;
|
||||
padding-top: 10px;
|
||||
}
|
||||
&& .pf-c-form__group-label {
|
||||
padding-top: 20px;
|
||||
}
|
||||
`;
|
||||
|
||||
function OrdinalDayForm() {
|
||||
const [byMonthDay] = useField('bymonthday');
|
||||
const [byYearDay] = useField('byyearday');
|
||||
return (
|
||||
<GroupWrapper
|
||||
label={<b>{t`On a specific number day`}</b>}
|
||||
name="ordinalDay"
|
||||
>
|
||||
<TextInput
|
||||
placeholder={t`Run on a day of month`}
|
||||
aria-label={t`Type a numbered day`}
|
||||
type="number"
|
||||
onChange={(value, event) => {
|
||||
byMonthDay.onChange(event);
|
||||
}}
|
||||
/>
|
||||
<TextInput
|
||||
placeholder={t`Run on a day of year`}
|
||||
aria-label={t`Type a numbered day`}
|
||||
type="number"
|
||||
onChange={(value, event) => {
|
||||
byYearDay.onChange(event);
|
||||
}}
|
||||
/>
|
||||
</GroupWrapper>
|
||||
);
|
||||
}
|
||||
|
||||
export default OrdinalDayForm;
|
||||
67
awx/ui/src/components/Schedule/shared/ScheduleEndForm.js
Normal file
@@ -0,0 +1,67 @@
|
||||
import React from 'react';
|
||||
import { useField } from 'formik';
|
||||
import { t } from '@lingui/macro';
|
||||
import { FormGroup, Radio } from '@patternfly/react-core';
|
||||
import FormField from 'components/FormField';
|
||||
import DateTimePicker from './DateTimePicker';
|
||||
|
||||
function ScheduleEndForm() {
|
||||
const [endType, , { setValue }] = useField('endingType');
|
||||
const [count] = useField('count');
|
||||
return (
|
||||
<>
|
||||
<FormGroup name="end" label={t`End`}>
|
||||
<Radio
|
||||
id="endNever"
|
||||
name={t`Never End`}
|
||||
label={t`Never`}
|
||||
value="never"
|
||||
isChecked={endType.value === 'never'}
|
||||
onChange={() => {
|
||||
setValue('never');
|
||||
}}
|
||||
/>
|
||||
<Radio
|
||||
name="Count"
|
||||
id="after"
|
||||
label={t`After number of occurrences`}
|
||||
value="after"
|
||||
isChecked={endType.value === 'after'}
|
||||
onChange={() => {
|
||||
setValue('after');
|
||||
}}
|
||||
/>
|
||||
<Radio
|
||||
name="End Date"
|
||||
label={t`On date`}
|
||||
value="onDate"
|
||||
id="endDate"
|
||||
isChecked={endType.value === 'onDate'}
|
||||
onChange={() => {
|
||||
setValue('onDate');
|
||||
}}
|
||||
/>
|
||||
</FormGroup>
|
||||
{endType.value === 'after' && (
|
||||
<FormField
|
||||
label={t`Occurrences`}
|
||||
name="count"
|
||||
type="number"
|
||||
min="1"
|
||||
step="1"
|
||||
isRequired
|
||||
{...count}
|
||||
/>
|
||||
)}
|
||||
{endType.value === 'onDate' && (
|
||||
<DateTimePicker
|
||||
dateFieldName="endDate"
|
||||
timeFieldName="endTime"
|
||||
label={t`End date/time`}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
export default ScheduleEndForm;
|
||||
@@ -18,14 +18,9 @@ import SchedulePromptableFields from './SchedulePromptableFields';
|
||||
import ScheduleFormFields from './ScheduleFormFields';
|
||||
import UnsupportedScheduleForm from './UnsupportedScheduleForm';
|
||||
import parseRuleObj, { UnsupportedRRuleError } from './parseRuleObj';
|
||||
import buildRuleObj from './buildRuleObj';
|
||||
import buildRuleSet from './buildRuleSet';
|
||||
|
||||
const NUM_DAYS_PER_FREQUENCY = {
|
||||
week: 7,
|
||||
month: 31,
|
||||
year: 365,
|
||||
};
|
||||
import ScheduleFormWizard from './ScheduleFormWizard';
|
||||
import FrequenciesList from './FrequenciesList';
|
||||
// import { validateSchedule } from './scheduleFormHelpers';
|
||||
|
||||
function ScheduleForm({
|
||||
hasDaysToKeepField,
|
||||
@@ -40,15 +35,16 @@ function ScheduleForm({
|
||||
}) {
|
||||
const [isWizardOpen, setIsWizardOpen] = useState(false);
|
||||
const [isSaveDisabled, setIsSaveDisabled] = useState(false);
|
||||
const [isScheduleWizardOpen, setIsScheduleWizardOpen] = useState(false);
|
||||
const originalLabels = useRef([]);
|
||||
const originalInstanceGroups = useRef([]);
|
||||
|
||||
let rruleError;
|
||||
const now = DateTime.now();
|
||||
|
||||
const closestQuarterHour = DateTime.fromMillis(
|
||||
Math.ceil(now.ts / 900000) * 900000
|
||||
);
|
||||
const tomorrow = closestQuarterHour.plus({ days: 1 });
|
||||
const isTemplate =
|
||||
resource.type === 'workflow_job_template' ||
|
||||
resource.type === 'job_template';
|
||||
@@ -283,69 +279,10 @@ function ScheduleForm({
|
||||
}
|
||||
const [currentDate, time] = dateToInputDateTime(closestQuarterHour.toISO());
|
||||
|
||||
const [tomorrowDate] = dateToInputDateTime(tomorrow.toISO());
|
||||
const initialFrequencyOptions = {
|
||||
minute: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: tomorrowDate,
|
||||
endTime: time,
|
||||
},
|
||||
hour: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: tomorrowDate,
|
||||
endTime: time,
|
||||
},
|
||||
day: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: tomorrowDate,
|
||||
endTime: time,
|
||||
},
|
||||
week: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: tomorrowDate,
|
||||
endTime: time,
|
||||
daysOfWeek: [],
|
||||
},
|
||||
month: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: tomorrowDate,
|
||||
endTime: time,
|
||||
runOn: 'day',
|
||||
runOnTheOccurrence: 1,
|
||||
runOnTheDay: 'sunday',
|
||||
runOnDayNumber: 1,
|
||||
},
|
||||
year: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: tomorrowDate,
|
||||
endTime: time,
|
||||
runOn: 'day',
|
||||
runOnTheOccurrence: 1,
|
||||
runOnTheDay: 'sunday',
|
||||
runOnTheMonth: 1,
|
||||
runOnDayMonth: 1,
|
||||
runOnDayNumber: 1,
|
||||
},
|
||||
};
|
||||
|
||||
const initialValues = {
|
||||
description: schedule.description || '',
|
||||
frequency: [],
|
||||
frequencies: [],
|
||||
exceptionFrequency: [],
|
||||
frequencyOptions: initialFrequencyOptions,
|
||||
exceptionOptions: initialFrequencyOptions,
|
||||
name: schedule.name || '',
|
||||
startDate: currentDate,
|
||||
startTime: time,
|
||||
@@ -367,11 +304,9 @@ function ScheduleForm({
|
||||
}
|
||||
initialValues.daysToKeep = initialDaysToKeep;
|
||||
}
|
||||
|
||||
let overriddenValues = {};
|
||||
if (schedule.rrule) {
|
||||
try {
|
||||
overriddenValues = parseRuleObj(schedule);
|
||||
parseRuleObj(schedule);
|
||||
} catch (error) {
|
||||
if (error instanceof UnsupportedRRuleError) {
|
||||
return (
|
||||
@@ -394,83 +329,33 @@ function ScheduleForm({
  if (contentLoading) {
    return <ContentLoading />;
  }

  const validate = (values) => {
    const errors = {};

    values.frequency.forEach((freq) => {
      const options = values.frequencyOptions[freq];
      const freqErrors = {};

      if (
        (freq === 'month' || freq === 'year') &&
        options.runOn === 'day' &&
        (options.runOnDayNumber < 1 || options.runOnDayNumber > 31)
      ) {
        freqErrors.runOn = t`Please select a day number between 1 and 31.`;
      }

      if (options.end === 'after' && !options.occurrences) {
        freqErrors.occurrences = t`Please enter a number of occurrences.`;
      }

      if (options.end === 'onDate') {
        if (
          DateTime.fromISO(values.startDate) >=
          DateTime.fromISO(options.endDate)
        ) {
          freqErrors.endDate = t`Please select an end date/time that comes after the start date/time.`;
        }

        if (
          DateTime.fromISO(options.endDate)
            .diff(DateTime.fromISO(values.startDate), 'days')
            .toObject().days < NUM_DAYS_PER_FREQUENCY[freq]
        ) {
          const rule = new RRule(
            buildRuleObj({
              startDate: values.startDate,
              startTime: values.startTime,
              frequency: freq,
              ...options,
            })
          );
          if (rule.all().length === 0) {
            errors.startDate = t`Selected date range must have at least 1 schedule occurrence.`;
            freqErrors.endDate = t`Selected date range must have at least 1 schedule occurrence.`;
          }
        }
      }
      if (Object.keys(freqErrors).length > 0) {
        if (!errors.frequencyOptions) {
          errors.frequencyOptions = {};
        }
        errors.frequencyOptions[freq] = freqErrors;
      }
    });

    if (values.exceptionFrequency.length > 0 && !scheduleHasInstances(values)) {
      errors.exceptionFrequency = t`This schedule has no occurrences due to the selected exceptions.`;
    }

    return errors;
  };

  const frequencies = [];
  frequencies.push(parseRuleObj(schedule));
  return (
    <Config>
      {() => (
        <Formik
          initialValues={{
            ...initialValues,
            ...overriddenValues,
            frequencyOptions: {
              ...initialValues.frequencyOptions,
              ...overriddenValues.frequencyOptions,
            },
            exceptionOptions: {
              ...initialValues.exceptionOptions,
              ...overriddenValues.exceptionOptions,
            },
            name: schedule.name || '',
            description: schedule.description || '',
            frequencies: frequencies || [],
            freq: RRule.DAILY,
            interval: 1,
            wkst: RRule.SU,
            byweekday: [],
            byweekno: [],
            bymonth: [],
            bymonthday: '',
            byyearday: '',
            bysetpos: '',
            until: schedule.until || null,
            endDate: currentDate,
            endTime: time,
            count: 1,
            endingType: 'never',
            timezone: schedule.timezone || now.zoneName,
            startDate: currentDate,
            startTime: time,
          }}
          onSubmit={(values) => {
            submitSchedule(
@@ -482,73 +367,90 @@ function ScheduleForm({
              credentials
            );
          }}
          validate={validate}
          validate={() => {}}
        >
          {(formik) => (
            <Form autoComplete="off" onSubmit={formik.handleSubmit}>
              <FormColumnLayout>
                <ScheduleFormFields
                  hasDaysToKeepField={hasDaysToKeepField}
                  zoneOptions={zoneOptions}
                  zoneLinks={zoneLinks}
                />
                {isWizardOpen && (
                  <SchedulePromptableFields
                    schedule={schedule}
                    credentials={credentials}
                    surveyConfig={surveyConfig}
                    launchConfig={launchConfig}
                    resource={resource}
                    onCloseWizard={() => {
                      setIsWizardOpen(false);
                    }}
                    onSave={() => {
                      setIsWizardOpen(false);
                      setIsSaveDisabled(false);
                    }}
                    resourceDefaultCredentials={resourceDefaultCredentials}
                    labels={originalLabels.current}
                    instanceGroups={originalInstanceGroups.current}
            <>
              <Form autoComplete="off" onSubmit={formik.handleSubmit}>
                <FormColumnLayout>
                  <ScheduleFormFields
                    hasDaysToKeepField={hasDaysToKeepField}
                    zoneOptions={zoneOptions}
                    zoneLinks={zoneLinks}
                  />
                )}
                <FormSubmitError error={submitError} />
                <FormFullWidthLayout>
                  <ActionGroup>
                    <Button
                      ouiaId="schedule-form-save-button"
                      aria-label={t`Save`}
                      variant="primary"
                      type="button"
                      onClick={formik.handleSubmit}
                      isDisabled={isSaveDisabled}
                    >
                      {t`Save`}
                    </Button>

                    {isTemplate && showPromptButton && (
                  {isWizardOpen && (
                    <SchedulePromptableFields
                      schedule={schedule}
                      credentials={credentials}
                      surveyConfig={surveyConfig}
                      launchConfig={launchConfig}
                      resource={resource}
                      onCloseWizard={() => {
                        setIsWizardOpen(false);
                      }}
                      onSave={() => {
                        setIsWizardOpen(false);
                        setIsSaveDisabled(false);
                      }}
                      resourceDefaultCredentials={resourceDefaultCredentials}
                      labels={originalLabels.current}
                      instanceGroups={originalInstanceGroups.current}
                    />
                  )}
                  <FormFullWidthLayout>
                    <FrequenciesList openWizard={setIsScheduleWizardOpen} />
                  </FormFullWidthLayout>
                  <FormSubmitError error={submitError} />
                  <FormFullWidthLayout>
                    <ActionGroup>
                      <Button
                        ouiaId="schedule-form-prompt-button"
                        ouiaId="schedule-form-save-button"
                        aria-label={t`Save`}
                        variant="primary"
                        type="button"
                        onClick={formik.handleSubmit}
                        isDisabled={isSaveDisabled}
                      >
                        {t`Save`}
                      </Button>

                      <Button
                        onClick={() => {}}
                      >{t`Preview occurrences`}</Button>

                      {isTemplate && showPromptButton && (
                        <Button
                          ouiaId="schedule-form-prompt-button"
                          variant="secondary"
                          type="button"
                          aria-label={t`Prompt`}
                          onClick={() => setIsWizardOpen(true)}
                        >
                          {t`Prompt`}
                        </Button>
                      )}
                      <Button
                        ouiaId="schedule-form-cancel-button"
                        aria-label={t`Cancel`}
                        variant="secondary"
                        type="button"
                        aria-label={t`Prompt`}
                        onClick={() => setIsWizardOpen(true)}
                        onClick={handleCancel}
                      >
                        {t`Prompt`}
                        {t`Cancel`}
                      </Button>
                    )}
                    <Button
                      ouiaId="schedule-form-cancel-button"
                      aria-label={t`Cancel`}
                      variant="secondary"
                      type="button"
                      onClick={handleCancel}
                    >
                      {t`Cancel`}
                    </Button>
                  </ActionGroup>
                </FormFullWidthLayout>
              </FormColumnLayout>
            </Form>
                    </ActionGroup>
                  </FormFullWidthLayout>
                </FormColumnLayout>
              </Form>
            {isScheduleWizardOpen && (
              <ScheduleFormWizard
                staticFormFormik={formik}
                isOpen={isScheduleWizardOpen}
                handleSave={() => {}}
                setIsOpen={setIsScheduleWizardOpen}
              />
            )}
          </>
        )}
      </Formik>
    )}
@@ -569,24 +471,3 @@ ScheduleForm.defaultProps = {
};

export default ScheduleForm;

function scheduleHasInstances(values) {
  let rangeToCheck = 1;
  values.frequency.forEach((freq) => {
    if (NUM_DAYS_PER_FREQUENCY[freq] > rangeToCheck) {
      rangeToCheck = NUM_DAYS_PER_FREQUENCY[freq];
    }
  });

  const ruleSet = buildRuleSet(values, true);
  const startDate = DateTime.fromISO(values.startDate);
  const endDate = startDate.plus({ days: rangeToCheck });
  const instances = ruleSet.between(
    startDate.toJSDate(),
    endDate.toJSDate(),
    true,
    (date, i) => i === 0
  );
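  // rrule's between() accepts an iterator as its fourth argument; returning
  // false after the first hit stops the expansion, so at most one occurrence
  // is ever materialized here.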

  return instances.length > 0;
}

@@ -900,6 +900,36 @@ describe('<ScheduleForm />', () => {
    );
  });

  test('should create schedule with the same start and end date provided that the end date is at a later time', async () => {
    const today = DateTime.now().toFormat('yyyy-LL-dd');
    const laterTime = DateTime.now().plus({ hours: 1 }).toFormat('h:mm a');
    await act(async () => {
      wrapper.find('DatePicker[aria-label="End date"]').prop('onChange')(
        today,
        new Date(today)
      );
    });
    wrapper.update();
    expect(
      wrapper
        .find('FormGroup[data-cy="schedule-End date/time"]')
        .prop('helperTextInvalid')
    ).toBe(
      'Please select an end date/time that comes after the start date/time.'
    );
    await act(async () => {
      wrapper.find('TimePicker[aria-label="End time"]').prop('onChange')(
        laterTime
      );
    });
    wrapper.update();
    expect(
      wrapper
        .find('FormGroup[data-cy="schedule-End date/time"]')
        .prop('helperTextInvalid')
    ).toBe(undefined);
  });

  test('error shown when on day number is not between 1 and 31', async () => {
    await act(async () => {
      wrapper.find('FrequencySelect#schedule-frequency').invoke('onChange')([

@@ -1,41 +1,27 @@
import React, { useState } from 'react';
import { useField } from 'formik';
import { FormGroup, Title } from '@patternfly/react-core';
import { FormGroup } from '@patternfly/react-core';
import { t } from '@lingui/macro';
import styled from 'styled-components';
import 'styled-components/macro';
import FormField from 'components/FormField';
import { required } from 'util/validators';
import { useConfig } from 'contexts/Config';
import Popover from '../../Popover';
import AnsibleSelect from '../../AnsibleSelect';
import FrequencySelect, { SelectOption } from './FrequencySelect';
import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext';
import { SubFormLayout, FormColumnLayout } from '../../FormLayout';
import FrequencyDetailSubform from './FrequencyDetailSubform';
import DateTimePicker from './DateTimePicker';
import sortFrequencies from './sortFrequencies';

const SelectClearOption = styled(SelectOption)`
  & > input[type='checkbox'] {
    display: none;
  }
`;

export default function ScheduleFormFields({
  hasDaysToKeepField,
  zoneOptions,
  zoneLinks,
  setTimeZone,
}) {
  const helpText = getHelpText();
  const [timezone, timezoneMeta] = useField({
    name: 'timezone',
    validate: required(t`Select a value for this field`),
  });
  const [frequency, frequencyMeta, frequencyHelper] = useField({
    name: 'frequency',
    validate: required(t`Select a value for this field`),
  });

  const [timezoneMessage, setTimezoneMessage] = useState('');
  const warnLinkedTZ = (event, selectedValue) => {
    if (zoneLinks[selectedValue]) {
@@ -46,6 +32,7 @@ export default function ScheduleFormFields({
      setTimezoneMessage('');
    }
    timezone.onChange(event, selectedValue);
    // zoneLinks is a lookup object, not a function; fall back to the selected
    // zone itself when it is not a linked zone.
    setTimeZone(zoneLinks[selectedValue] || selectedValue);
  };
  let timezoneValidatedStatus = 'default';
  if (timezoneMeta.touched && timezoneMeta.error) {
@@ -55,16 +42,6 @@ export default function ScheduleFormFields({
  }
  const config = useConfig();

  const [exceptionFrequency, exceptionFrequencyMeta, exceptionFrequencyHelper] =
    useField({
      name: 'exceptionFrequency',
      validate: required(t`Select a value for this field`),
    });

  const updateFrequency = (setFrequency) => (values) => {
    setFrequency(values.sort(sortFrequencies));
  };

  return (
    <>
      <FormField
@@ -103,33 +80,7 @@ export default function ScheduleFormFields({
          onChange={warnLinkedTZ}
        />
      </FormGroup>
      <FormGroup
        name="frequency"
        fieldId="schedule-frequency"
        helperTextInvalid={frequencyMeta.error}
        validated={
          !frequencyMeta.touched || !frequencyMeta.error ? 'default' : 'error'
        }
        label={t`Repeat frequency`}
      >
        <FrequencySelect
          id="schedule-frequency"
          onChange={updateFrequency(frequencyHelper.setValue)}
          value={frequency.value}
          placeholderText={
            frequency.value.length ? t`Select frequency` : t`None (run once)`
          }
          onBlur={frequencyHelper.setTouched}
        >
          <SelectClearOption value="none">{t`None (run once)`}</SelectClearOption>
          <SelectOption value="minute">{t`Minute`}</SelectOption>
          <SelectOption value="hour">{t`Hour`}</SelectOption>
          <SelectOption value="day">{t`Day`}</SelectOption>
          <SelectOption value="week">{t`Week`}</SelectOption>
          <SelectOption value="month">{t`Month`}</SelectOption>
          <SelectOption value="year">{t`Year`}</SelectOption>
        </FrequencySelect>
      </FormGroup>

      {hasDaysToKeepField ? (
        <FormField
          id="schedule-days-to-keep"
@@ -140,68 +91,6 @@ export default function ScheduleFormFields({
          isRequired
        />
      ) : null}
      {frequency.value.length ? (
        <SubFormLayout>
          <Title size="md" headingLevel="h4">
            {t`Frequency Details`}
          </Title>
          {frequency.value.map((val) => (
            <FormColumnLayout key={val} stacked>
              <FrequencyDetailSubform
                frequency={val}
                prefix={`frequencyOptions.${val}`}
              />
            </FormColumnLayout>
          ))}
          <Title
            size="md"
            headingLevel="h4"
            css="margin-top: var(--pf-c-card--child--PaddingRight)"
          >{t`Exceptions`}</Title>
          <FormColumnLayout stacked>
            <FormGroup
              name="exceptions"
              fieldId="exception-frequency"
              helperTextInvalid={exceptionFrequencyMeta.error}
              validated={
                !exceptionFrequencyMeta.touched || !exceptionFrequencyMeta.error
                  ? 'default'
                  : 'error'
              }
              label={t`Add exceptions`}
            >
              <FrequencySelect
                id="exception-frequency"
                onChange={updateFrequency(exceptionFrequencyHelper.setValue)}
                value={exceptionFrequency.value}
                placeholderText={
                  exceptionFrequency.value.length
                    ? t`Select frequency`
                    : t`None`
                }
                onBlur={exceptionFrequencyHelper.setTouched}
              >
                <SelectClearOption value="none">{t`None`}</SelectClearOption>
                <SelectOption value="minute">{t`Minute`}</SelectOption>
                <SelectOption value="hour">{t`Hour`}</SelectOption>
                <SelectOption value="day">{t`Day`}</SelectOption>
                <SelectOption value="week">{t`Week`}</SelectOption>
                <SelectOption value="month">{t`Month`}</SelectOption>
                <SelectOption value="year">{t`Year`}</SelectOption>
              </FrequencySelect>
            </FormGroup>
          </FormColumnLayout>
          {exceptionFrequency.value.map((val) => (
            <FormColumnLayout key={val} stacked>
              <FrequencyDetailSubform
                frequency={val}
                prefix={`exceptionOptions.${val}`}
                isException
              />
            </FormColumnLayout>
          ))}
        </SubFormLayout>
      ) : null}
    </>
  );
}

199 awx/ui/src/components/Schedule/shared/ScheduleFormWizard.js Normal file
@@ -0,0 +1,199 @@
import React from 'react';
import {
  Button,
  FormGroup,
  TextInput,
  Title,
  Wizard,
  WizardContextConsumer,
  WizardFooter,
} from '@patternfly/react-core';
import { t } from '@lingui/macro';
import styled from 'styled-components';
import { RRule } from 'rrule';
import { useField, useFormikContext } from 'formik';
import { DateTime } from 'luxon';
import { formatDateString } from 'util/dates';
import FrequencySelect from './FrequencySelect';
import MonthandYearForm from './MonthandYearForm';
import OrdinalDayForm from './OrdinalDayForm';
import WeekdayForm from './WeekdayForm';
import ScheduleEndForm from './ScheduleEndForm';
import parseRuleObj from './parseRuleObj';
import { buildDtStartObj } from './buildRuleObj';

const GroupWrapper = styled(FormGroup)`
  && .pf-c-form__group-control {
    display: flex;
    padding-top: 10px;
  }
  && .pf-c-form__group-label {
    padding-top: 20px;
  }
`;

function ScheduleFormWizard({ isOpen, setIsOpen }) {
  const { values, resetForm, initialValues } = useFormikContext();
  const [freq, freqMeta] = useField('freq');
  const [{ value: frequenciesValue }] = useField('frequencies');
  const [interval, , intervalHelpers] = useField('interval');

  const handleSubmit = (goToStepById) => {
    const {
      name,
      description,
      endingType,
      endTime,
      endDate,
      timezone,
      startDate,
      startTime,
      frequencies,
      ...rest
    } = values;
    if (endingType === 'onDate') {
      const dt = DateTime.fromFormat(
        `${endDate} ${endTime}`,
        'yyyy-MM-dd h:mm a',
        {
          zone: timezone,
        }
      );
      rest.until = formatDateString(dt, timezone);

      delete rest.count;
    }
    if (endingType === 'never') delete rest.count;

    const rule = new RRule(rest);

    const start = buildDtStartObj({
      startDate: values.startDate,
      startTime: values.startTime,
      timezone: values.timezone,
      frequency: values.freq,
    });
    const newFrequency = parseRuleObj({
      timezone,
      frequency: freq.value,
      rrule: rule.toString(),
      dtstart: start,
    });
    if (goToStepById) {
      goToStepById(1);
    }

    resetForm({
      values: {
        ...initialValues,
        description: values.description,
        name: values.name,
        startDate: values.startDate,
        startTime: values.startTime,
        timezone: values.timezone,
        frequencies: frequenciesValue[0].frequency.length
          ? [...frequenciesValue, newFrequency]
          : [newFrequency],
      },
    });
  };
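  // handleSubmit turns the wizard's flat rrule fields into an RRule, parses
  // it back into a { frequency, rrule } entry via parseRuleObj, appends that
  // entry to the form-level `frequencies` array, and resets the wizard fields
  // so a further rule can be composed.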
  const CustomFooter = (
    <WizardFooter>
      <WizardContextConsumer>
        {({ activeStep, onNext, onBack, goToStepById }) => (
          <>
            {activeStep.id === 2 ? (
              <>
                <Button
                  variant="primary"
                  onClick={() => {
                    handleSubmit(goToStepById);
                  }}
                >{t`Finish and create new`}</Button>
                <Button
                  variant="secondary"
                  onClick={() => {
                    handleSubmit(false);
                    setIsOpen(false);
                  }}
                >{t`Finish and close`}</Button>
              </>
            ) : (
              <Button variant="primary" onClick={onNext}>{t`Next`}</Button>
            )}

            <Button variant="secondary" onClick={onBack}>{t`Back`}</Button>
            <Button
              variant="plain"
              onClick={() => {
                setIsOpen(false);
                resetForm({
                  values: {
                    ...initialValues,
                    description: values.description,
                    name: values.name,
                    startDate: values.startDate,
                    startTime: values.startTime,
                    timezone: values.timezone,
                    frequencies: values.frequencies,
                  },
                });
              }}
            >{t`Cancel`}</Button>
          </>
        )}
      </WizardContextConsumer>
    </WizardFooter>
  );

  return (
    <Wizard
      onClose={() => setIsOpen(false)}
      isOpen={isOpen}
      footer={CustomFooter}
      steps={[
        {
          key: 'frequency',
          name: 'Frequency',
          id: 1,
          component: (
            <>
              <Title size="md" headingLevel="h4">{t`Repeat frequency`}</Title>
              <GroupWrapper
                name="freq"
                fieldId="schedule-frequency"
                isRequired
                helperTextInvalid={freqMeta.error}
                validated={
                  !freqMeta.touched || !freqMeta.error ? 'default' : 'error'
                }
                label={<b>{t`Frequency`}</b>}
              >
                <FrequencySelect />
              </GroupWrapper>
              <GroupWrapper isRequired label={<b>{t`Interval`}</b>}>
                <TextInput
                  type="number"
                  value={interval.value}
                  placeholder={t`Choose an interval for the schedule`}
                  aria-label={t`Choose an interval for the schedule`}
                  onChange={(v) => intervalHelpers.setValue(v)}
                />
              </GroupWrapper>
              <WeekdayForm />
              <MonthandYearForm />
              <OrdinalDayForm />
            </>
          ),
        },
        {
          name: 'End',
          key: 'end',
          id: 2,
          component: <ScheduleEndForm />,
        },
      ]}
    />
  );
}
export default ScheduleFormWizard;
164 awx/ui/src/components/Schedule/shared/WeekdayForm.js Normal file
@@ -0,0 +1,164 @@
import React, { useState } from 'react';
import { t } from '@lingui/macro';
import {
  Checkbox as _Checkbox,
  FormGroup,
  Select,
  SelectOption,
} from '@patternfly/react-core';
import { useField } from 'formik';
import { RRule } from 'rrule';
import styled from 'styled-components';
import { weekdayOptions } from './scheduleFormHelpers';

const Checkbox = styled(_Checkbox)`
  :not(:last-of-type) {
    margin-right: 10px;
  }
`;
const GroupWrapper = styled(FormGroup)`
  && .pf-c-form__group-control {
    display: flex;
    padding-top: 10px;
  }
  && .pf-c-form__group-label {
    padding-top: 20px;
  }
`;

function WeekdayForm({ id }) {
  const [isOpen, setIsOpen] = useState(false);
  const [daysOfWeek, daysOfWeekMeta, daysOfWeekHelpers] = useField('byweekday');
  const [weekStartDay, , weekStartDayHelpers] = useField('wkst');
  const updateDaysOfWeek = (day, checked) => {
    const newDaysOfWeek = daysOfWeek.value ? [...daysOfWeek.value] : [];
    daysOfWeekHelpers.setTouched(true);

    if (checked) {
      newDaysOfWeek.push(day);
      daysOfWeekHelpers.setValue(newDaysOfWeek);
    } else {
      daysOfWeekHelpers.setValue(
        newDaysOfWeek.filter((selectedDay) => selectedDay !== day)
      );
    }
  };
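  // `byweekday` holds rrule weekday constants (RRule.SU ... RRule.SA), which
  // new RRule({ byweekday }) accepts directly, so no translation step is
  // needed when the rule is built.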
  return (
    <>
      <GroupWrapper
        name="wkst"
        label={<b>{t`Select the first day of the week`}</b>}
      >
        <Select
          onSelect={(e, value) => {
            weekStartDayHelpers.setValue(value);
            setIsOpen(false);
          }}
          onBlur={() => setIsOpen(false)}
          selections={weekStartDay.value}
          onToggle={(isopen) => setIsOpen(isopen)}
          isOpen={isOpen}
          id={`schedule-run-on-the-day-${id}`}
          onChange={(e, v) => {
            weekStartDayHelpers.setValue(v);
          }}
          {...weekStartDay}
        >
          {weekdayOptions.map(({ key, value, label }) => (
            <SelectOption key={key} value={value}>
              {label}
            </SelectOption>
          ))}
        </Select>
      </GroupWrapper>
      <GroupWrapper
        name="byweekday"
        fieldId={`schedule-days-of-week-${id}`}
        helperTextInvalid={daysOfWeekMeta.error}
        validated={
          !daysOfWeekMeta.touched || !daysOfWeekMeta.error ? 'default' : 'error'
        }
        label={<b>{t`On selected day(s) of the week`}</b>}
      >
        <Checkbox
          label={t`Sun`}
          isChecked={daysOfWeek.value?.includes(RRule.SU)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.SU, checked);
          }}
          aria-label={t`Sunday`}
          id={`schedule-days-of-week-sun-${id}`}
          ouiaId={`schedule-days-of-week-sun-${id}`}
          name="daysOfWeek"
        />
        <Checkbox
          label={t`Mon`}
          isChecked={daysOfWeek.value?.includes(RRule.MO)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.MO, checked);
          }}
          aria-label={t`Monday`}
          id={`schedule-days-of-week-mon-${id}`}
          ouiaId={`schedule-days-of-week-mon-${id}`}
          name="daysOfWeek"
        />
        <Checkbox
          label={t`Tue`}
          isChecked={daysOfWeek.value?.includes(RRule.TU)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.TU, checked);
          }}
          aria-label={t`Tuesday`}
          id={`schedule-days-of-week-tue-${id}`}
          ouiaId={`schedule-days-of-week-tue-${id}`}
          name="daysOfWeek"
        />
        <Checkbox
          label={t`Wed`}
          isChecked={daysOfWeek.value?.includes(RRule.WE)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.WE, checked);
          }}
          aria-label={t`Wednesday`}
          id={`schedule-days-of-week-wed-${id}`}
          ouiaId={`schedule-days-of-week-wed-${id}`}
          name="daysOfWeek"
        />
        <Checkbox
          label={t`Thu`}
          isChecked={daysOfWeek.value?.includes(RRule.TH)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.TH, checked);
          }}
          aria-label={t`Thursday`}
          id={`schedule-days-of-week-thu-${id}`}
          ouiaId={`schedule-days-of-week-thu-${id}`}
          name="daysOfWeek"
        />
        <Checkbox
          label={t`Fri`}
          isChecked={daysOfWeek.value?.includes(RRule.FR)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.FR, checked);
          }}
          aria-label={t`Friday`}
          id={`schedule-days-of-week-fri-${id}`}
          ouiaId={`schedule-days-of-week-fri-${id}`}
          name="daysOfWeek"
        />
        <Checkbox
          label={t`Sat`}
          isChecked={daysOfWeek.value?.includes(RRule.SA)}
          onChange={(checked) => {
            updateDaysOfWeek(RRule.SA, checked);
          }}
          aria-label={t`Saturday`}
          id={`schedule-days-of-week-sat-${id}`}
          ouiaId={`schedule-days-of-week-sat-${id}`}
          name="daysOfWeek"
        />
      </GroupWrapper>
    </>
  );
}
export default WeekdayForm;
@@ -1,7 +1,5 @@
import { t } from '@lingui/macro';
import { RRule } from 'rrule';
import { DateTime } from 'luxon';
import { getRRuleDayConstants } from 'util/dates';

window.RRule = RRule;
window.DateTime = DateTime;
@@ -22,7 +20,7 @@ export function buildDtStartObj(values) {
    startHour
  )}${pad(startMinute)}00`;
  const rruleString = values.timezone
    ? `DTSTART;TZID=${values.timezone}:${dateString}`
    ? `DTSTART;TZID=${values.timezone}${dateString}`
    : `DTSTART:${dateString}Z`;
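  // Per RFC 5545, a zone-qualified DTSTART is written with a colon between
  // the TZID parameter and the date-time, e.g.
  // DTSTART;TZID=America/New_York:20220601T090000, and RRule.fromString
  // expects that separator; the changed line above drops it.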
  const rule = RRule.fromString(rruleString);

@@ -38,7 +36,8 @@ function pad(num) {

export default function buildRuleObj(values, includeStart) {
  const ruleObj = {
    interval: values.interval,
    interval: values.interval || 1,
    freq: values.freq,
  };

  if (includeStart) {
@@ -49,68 +48,6 @@ export default function buildRuleObj(values, includeStart) {
    );
  }

  switch (values.frequency) {
    case 'none':
      ruleObj.count = 1;
      ruleObj.freq = RRule.MINUTELY;
      break;
    case 'minute':
      ruleObj.freq = RRule.MINUTELY;
      break;
    case 'hour':
      ruleObj.freq = RRule.HOURLY;
      break;
    case 'day':
      ruleObj.freq = RRule.DAILY;
      break;
    case 'week':
      ruleObj.freq = RRule.WEEKLY;
      ruleObj.byweekday = values.daysOfWeek;
      break;
    case 'month':
      ruleObj.freq = RRule.MONTHLY;
      if (values.runOn === 'day') {
        ruleObj.bymonthday = values.runOnDayNumber;
      } else if (values.runOn === 'the') {
        ruleObj.bysetpos = parseInt(values.runOnTheOccurrence, 10);
        ruleObj.byweekday = getRRuleDayConstants(values.runOnTheDay);
      }
      break;
    case 'year':
      ruleObj.freq = RRule.YEARLY;
      if (values.runOn === 'day') {
        ruleObj.bymonth = parseInt(values.runOnDayMonth, 10);
        ruleObj.bymonthday = values.runOnDayNumber;
      } else if (values.runOn === 'the') {
        ruleObj.bysetpos = parseInt(values.runOnTheOccurrence, 10);
        ruleObj.byweekday = getRRuleDayConstants(values.runOnTheDay);
        ruleObj.bymonth = parseInt(values.runOnTheMonth, 10);
      }
      break;
    default:
      throw new Error(t`Frequency did not match an expected value`);
  }

  if (values.frequency !== 'none') {
    switch (values.end) {
      case 'never':
        break;
      case 'after':
        ruleObj.count = values.occurrences;
        break;
      case 'onDate': {
        ruleObj.until = buildDateTime(
          values.endDate,
          values.endTime,
          values.timezone
        );
        break;
      }
      default:
        throw new Error(t`End did not match an expected value (${values.end})`);
    }
  }

  return ruleObj;
}

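// A minimal sketch (hypothetical input) of the mapping this switch performs:
//
//   buildRuleObj({
//     frequency: 'week',
//     interval: 2,
//     daysOfWeek: [RRule.MO],
//     end: 'after',
//     occurrences: 5,
//   })
//   // -> { interval: 2, freq: RRule.WEEKLY, byweekday: [RRule.MO], count: 5 }
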
@@ -1,5 +1,6 @@
import { RRule, RRuleSet } from 'rrule';
import buildRuleObj, { buildDtStartObj } from './buildRuleObj';
import { FREQUENCIESCONSTANTS } from './scheduleFormHelpers';

window.RRuleSet = RRuleSet;

@@ -12,42 +13,31 @@ export default function buildRuleSet(values, useUTCStart) {
      startDate: values.startDate,
      startTime: values.startTime,
      timezone: values.timezone,
      frequency: values.freq,
    });
    set.rrule(startRule);
  }

  if (values.frequency.length === 0) {
    const rule = buildRuleObj(
      {
        startDate: values.startDate,
        startTime: values.startTime,
        timezone: values.timezone,
        frequency: 'none',
        interval: 1,
      },
      useUTCStart
    );
    set.rrule(new RRule(rule));
  }

  frequencies.forEach((frequency) => {
    if (!values.frequency.includes(frequency)) {
  values.frequencies.forEach(({ frequency, rrule }) => {
    if (!frequencies.includes(frequency)) {
      return;
    }

    const rule = buildRuleObj(
      {
        startDate: values.startDate,
        startTime: values.startTime,
        timezone: values.timezone,
        frequency,
        ...values.frequencyOptions[frequency],
        freq: FREQUENCIESCONSTANTS[frequency],
        rrule,
      },
      useUTCStart
      true
    );

    set.rrule(new RRule(rule));
  });

  frequencies.forEach((frequency) => {
  values.exceptions?.forEach(({ frequency, rrule }) => {
    if (!values.exceptionFrequency?.includes(frequency)) {
      return;
    }
@@ -56,8 +46,8 @@ export default function buildRuleSet(values, useUTCStart) {
      startDate: values.startDate,
      startTime: values.startTime,
      timezone: values.timezone,
      frequency,
      ...values.exceptionOptions[frequency],
      freq: FREQUENCIESCONSTANTS[frequency],
      rrule,
    },
    useUTCStart
  );

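// Exception entries are meant to be attached with set.exrule(new RRule(rule))
// rather than set.rrule(), so that any instant they generate is subtracted
// from the set; the hunk above is truncated before that call.
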
@@ -12,12 +12,14 @@ export class UnsupportedRRuleError extends Error {

export default function parseRuleObj(schedule) {
  let values = {
    frequency: [],
    frequencyOptions: {},
    exceptionFrequency: [],
    exceptionOptions: {},
    frequency: '',
    rrules: '',
    timezone: schedule.timezone,
  };
  if (Object.values(schedule).length === 0) {
    return values;
  }

  const ruleset = rrulestr(schedule.rrule.replace(' ', '\n'), {
    forceset: true,
  });
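  // The stored rrule joins DTSTART and RRULE with a space, while rrulestr
  // wants newline-separated components; note that String.replace with a
  // string pattern only swaps the first space. forceset: true makes rrulestr
  // return an RRuleSet even for a single rule, so the parts can be walked
  // uniformly below.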
@@ -40,25 +42,9 @@ export default function parseRuleObj(schedule) {
    }
  });

  if (isSingleOccurrence(values)) {
    values.frequency = [];
    values.frequencyOptions = {};
  }

  return values;
}

function isSingleOccurrence(values) {
  if (values.frequency.length > 1) {
    return false;
  }
  if (values.frequency[0] !== 'minute') {
    return false;
  }
  const options = values.frequencyOptions.minute;
  return options.end === 'after' && options.occurrences === 1;
}

function parseDtstart(schedule, values) {
  // TODO: should this rely on DTSTART in rruleset rather than schedule.dtstart?
  const [startDate, startTime] = dateToInputDateTime(
@@ -81,27 +67,12 @@ const frequencyTypes = {
  [RRule.YEARLY]: 'year',
};

function parseRrule(rruleString, schedule, values) {
  const { frequency, options } = parseRule(
    rruleString,
    schedule,
    values.exceptionFrequency
  );
function parseRrule(rruleString, schedule) {
  const { frequency } = parseRule(rruleString, schedule);

  if (values.frequencyOptions[frequency]) {
    throw new UnsupportedRRuleError(
      'Duplicate exception frequency types not supported'
    );
  }
  const freq = { frequency, rrule: rruleString };

  return {
    ...values,
    frequency: [...values.frequency, frequency].sort(sortFrequencies),
    frequencyOptions: {
      ...values.frequencyOptions,
      [frequency]: options,
    },
  };
  return freq;
}

function parseExRule(exruleString, schedule, values) {
@@ -129,20 +100,10 @@ function parseExRule(exruleString, schedule, values) {
  };
}

function parseRule(ruleString, schedule, frequencies) {
function parseRule(ruleString, schedule) {
  const {
    origOptions: {
      bymonth,
      bymonthday,
      bysetpos,
      byweekday,
      count,
      freq,
      interval,
      until,
    },
    origOptions: { count, freq, interval, until, ...rest },
  } = RRule.fromString(ruleString);

  const now = DateTime.now();
  const closestQuarterHour = DateTime.fromMillis(
    Math.ceil(now.ts / 900000) * 900000
@@ -156,17 +117,17 @@ function parseRule(ruleString, schedule, frequencies) {
    endTime: time,
    occurrences: 1,
    interval: 1,
    end: 'never',
    endingType: 'never',
  };

  if (until) {
    options.end = 'onDate';
  if (until) {
    options.endingType = 'onDate';
    const end = DateTime.fromISO(until.toISOString());
    const [endDate, endTime] = dateToInputDateTime(end, schedule.timezone);
    options.endDate = endDate;
    options.endTime = endTime;
  } else if (count) {
    options.end = 'after';
    options.endingType = 'after';
    options.occurrences = count;
  }

@@ -178,101 +139,10 @@ function parseRule(ruleString, schedule, frequencies) {
    throw new Error(`Unexpected rrule frequency: ${freq}`);
  }
  const frequency = frequencyTypes[freq];
  if (frequencies.includes(frequency)) {
    throw new Error(`Duplicate frequency types not supported (${frequency})`);
  }

  if (freq === RRule.WEEKLY && byweekday) {
    options.daysOfWeek = byweekday;
  }

  if (freq === RRule.MONTHLY) {
    options.runOn = 'day';
    options.runOnTheOccurrence = 1;
    options.runOnTheDay = 'sunday';
    options.runOnDayNumber = 1;

    if (bymonthday) {
      options.runOnDayNumber = bymonthday;
    }
    if (bysetpos) {
      options.runOn = 'the';
      options.runOnTheOccurrence = bysetpos;
      options.runOnTheDay = generateRunOnTheDay(byweekday);
    }
  }

  if (freq === RRule.YEARLY) {
    options.runOn = 'day';
    options.runOnTheOccurrence = 1;
    options.runOnTheDay = 'sunday';
    options.runOnTheMonth = 1;
    options.runOnDayMonth = 1;
    options.runOnDayNumber = 1;

    if (bymonthday) {
      options.runOnDayNumber = bymonthday;
      options.runOnDayMonth = bymonth;
    }
    if (bysetpos) {
      options.runOn = 'the';
      options.runOnTheOccurrence = bysetpos;
      options.runOnTheDay = generateRunOnTheDay(byweekday);
      options.runOnTheMonth = bymonth;
    }
  }

  return {
    frequency,
    options,
    ...options,
    ...rest,
  };
}

function generateRunOnTheDay(days = []) {
  if (
    [
      RRule.MO,
      RRule.TU,
      RRule.WE,
      RRule.TH,
      RRule.FR,
      RRule.SA,
      RRule.SU,
    ].every((element) => days.indexOf(element) > -1)
  ) {
    return 'day';
  }
  if (
    [RRule.MO, RRule.TU, RRule.WE, RRule.TH, RRule.FR].every(
      (element) => days.indexOf(element) > -1
    )
  ) {
    return 'weekday';
  }
  if ([RRule.SA, RRule.SU].every((element) => days.indexOf(element) > -1)) {
    return 'weekendDay';
  }
  if (days.indexOf(RRule.MO) > -1) {
    return 'monday';
  }
  if (days.indexOf(RRule.TU) > -1) {
    return 'tuesday';
  }
  if (days.indexOf(RRule.WE) > -1) {
    return 'wednesday';
  }
  if (days.indexOf(RRule.TH) > -1) {
    return 'thursday';
  }
  if (days.indexOf(RRule.FR) > -1) {
    return 'friday';
  }
  if (days.indexOf(RRule.SA) > -1) {
    return 'saturday';
  }
  if (days.indexOf(RRule.SU) > -1) {
    return 'sunday';
  }

  return null;
}
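// Examples of the collapse generateRunOnTheDay performs:
//   all seven weekday constants        -> 'day'
//   [RRule.MO ... RRule.FR]            -> 'weekday'
//   [RRule.SA, RRule.SU]               -> 'weekendDay'
//   a single constant, e.g. [RRule.TU] -> 'tuesday'
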
232 awx/ui/src/components/Schedule/shared/scheduleFormHelpers.js Normal file
@@ -0,0 +1,232 @@
import { t } from '@lingui/macro';
import { DateTime } from 'luxon';
import { RRule } from 'rrule';
import buildRuleObj from './buildRuleObj';
import buildRuleSet from './buildRuleSet';

// const NUM_DAYS_PER_FREQUENCY = {
//   week: 7,
//   month: 31,
//   year: 365,
// };
// const validateSchedule = () =>
//   const errors = {};

//   values.frequencies.forEach((freq) => {
//     const options = values.frequencyOptions[freq];
//     const freqErrors = {};

//     if (
//       (freq === 'month' || freq === 'year') &&
//       options.runOn === 'day' &&
//       (options.runOnDayNumber < 1 || options.runOnDayNumber > 31)
//     ) {
//       freqErrors.runOn = t`Please select a day number between 1 and 31.`;
//     }

//     if (options.end === 'after' && !options.occurrences) {
//       freqErrors.occurrences = t`Please enter a number of occurrences.`;
//     }

//     if (options.end === 'onDate') {
//       if (
//         DateTime.fromFormat(
//           `${values.startDate} ${values.startTime}`,
//           'yyyy-LL-dd h:mm a'
//         ).toMillis() >=
//         DateTime.fromFormat(
//           `${options.endDate} ${options.endTime}`,
//           'yyyy-LL-dd h:mm a'
//         ).toMillis()
//       ) {
//         freqErrors.endDate = t`Please select an end date/time that comes after the start date/time.`;
//       }

//       if (
//         DateTime.fromISO(options.endDate)
//           .diff(DateTime.fromISO(values.startDate), 'days')
//           .toObject().days < NUM_DAYS_PER_FREQUENCY[freq]
//       ) {
//         const rule = new RRule(
//           buildRuleObj({
//             startDate: values.startDate,
//             startTime: values.startTime,
//             frequencies: freq,
//             ...options,
//           })
//         );
//         if (rule.all().length === 0) {
//           errors.startDate = t`Selected date range must have at least 1 schedule occurrence.`;
//           freqErrors.endDate = t`Selected date range must have at least 1 schedule occurrence.`;
//         }
//       }
//     }
//     if (Object.keys(freqErrors).length > 0) {
//       if (!errors.frequencyOptions) {
//         errors.frequencyOptions = {};
//       }
//       errors.frequencyOptions[freq] = freqErrors;
//     }
//   });

//   if (values.exceptionFrequency.length > 0 && !scheduleHasInstances(values)) {
//     errors.exceptionFrequency = t`This schedule has no occurrences due to the
//     selected exceptions.`;
//   }

//   ({});
//   function scheduleHasInstances(values) {
//     let rangeToCheck = 1;
//     values.frequencies.forEach((freq) => {
//       if (NUM_DAYS_PER_FREQUENCY[freq] > rangeToCheck) {
//         rangeToCheck = NUM_DAYS_PER_FREQUENCY[freq];
//       }
//     });

//     const ruleSet = buildRuleSet(values, true);
//     const startDate = DateTime.fromISO(values.startDate);
//     const endDate = startDate.plus({ days: rangeToCheck });
//     const instances = ruleSet.between(
//       startDate.toJSDate(),
//       endDate.toJSDate(),
//       true,
//       (date, i) => i === 0
//     );

//     return instances.length > 0;
//   }

const bysetposOptions = [
  { value: '', key: 'none', label: t`None` },
  { value: 1, key: 'first', label: t`First` },
  {
    value: 2,
    key: 'second',
    label: t`Second`,
  },
  { value: 3, key: 'third', label: t`Third` },
  {
    value: 4,
    key: 'fourth',
    label: t`Fourth`,
  },
  { value: 5, key: 'fifth', label: t`Fifth` },
  { value: -1, key: 'last', label: t`Last` },
];

const monthOptions = [
  {
    key: 'january',
    value: 1,
    label: t`January`,
  },
  {
    key: 'february',
    value: 2,
    label: t`February`,
  },
  {
    key: 'march',
    value: 3,
    label: t`March`,
  },
  {
    key: 'april',
    value: 4,
    label: t`April`,
  },
  {
    key: 'may',
    value: 5,
    label: t`May`,
  },
  {
    key: 'june',
    value: 6,
    label: t`June`,
  },
  {
    key: 'july',
    value: 7,
    label: t`July`,
  },
  {
    key: 'august',
    value: 8,
    label: t`August`,
  },
  {
    key: 'september',
    value: 9,
    label: t`September`,
  },
  {
    key: 'october',
    value: 10,
    label: t`October`,
  },
  {
    key: 'november',
    value: 11,
    label: t`November`,
  },
  {
    key: 'december',
    value: 12,
    label: t`December`,
  },
];

const weekdayOptions = [
  {
    value: RRule.SU,
    key: 'sunday',
    label: t`Sunday`,
  },
  {
    value: RRule.MO,
    key: 'monday',
    label: t`Monday`,
  },
  {
    value: RRule.TU,
    key: 'tuesday',
    label: t`Tuesday`,
  },
  {
    value: RRule.WE,
    key: 'wednesday',
    label: t`Wednesday`,
  },
  {
    value: RRule.TH,
    key: 'thursday',
    label: t`Thursday`,
  },
  {
    value: RRule.FR,
    key: 'friday',
    label: t`Friday`,
  },
  {
    value: RRule.SA,
    key: 'saturday',
    label: t`Saturday`,
  },
];

const FREQUENCIESCONSTANTS = {
  minute: RRule.MINUTELY,
  hour: RRule.HOURLY,
  day: RRule.DAILY,
  week: RRule.WEEKLY,
  month: RRule.MONTHLY,
  year: RRule.YEARLY,
};
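// FREQUENCIESCONSTANTS maps the UI frequency keys to rrule's Frequency
// constants; buildRuleSet uses it to set `freq` when rebuilding a rule from
// a stored { frequency, rrule } entry.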
export {
  monthOptions,
  weekdayOptions,
  bysetposOptions,
  // validateSchedule,
  FREQUENCIESCONSTANTS,
};
@@ -24,12 +24,10 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
  const { id } = useParams();

  const relevantResults = relatedJobs.filter(
    ({
      job: jobId,
      summary_fields: {
        unified_job_template: { unified_job_type },
      },
    }) => jobId && `${jobId}` !== id && unified_job_type !== 'workflow_approval'
    ({ job: jobId, summary_fields }) =>
      jobId &&
      `${jobId}` !== id &&
      summary_fields.job.type !== 'workflow_approval'
  );

  const [isOpen, setIsOpen] = useState(false);
@@ -101,16 +99,14 @@ function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
        {sortedJobs?.map((node) => (
          <SelectOption
            key={node.id}
            to={`/jobs/${
              JOB_URL_SEGMENT_MAP[
                node.summary_fields.unified_job_template.unified_job_type
              ]
            }/${node.summary_fields.job?.id}/output`}
            to={`/jobs/${JOB_URL_SEGMENT_MAP[node.summary_fields.job.type]}/${
              node.summary_fields.job?.id
            }/output`}
            component={Link}
            value={node.summary_fields.unified_job_template.name}
            value={node.summary_fields.job.name}
          >
            {stringIsUUID(node.identifier)
              ? node.summary_fields.unified_job_template.name
              ? node.summary_fields.job.name
              : node.identifier}
          </SelectOption>
        ))}

@@ -0,0 +1,85 @@
import React from 'react';
import { within, render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import WorkflowOutputNavigation from './WorkflowOutputNavigation';
import { createMemoryHistory } from 'history';
import { I18nProvider } from '@lingui/react';
import { i18n } from '@lingui/core';
import { en } from 'make-plural/plurals';
import english from '../../../src/locales/en/messages';
import { Router } from 'react-router-dom';

jest.mock('react-router-dom', () => ({
  ...jest.requireActual('react-router-dom'),
  useParams: () => ({
    id: 1,
  }),
}));
const jobs = [
  {
    id: 1,
    summary_fields: {
      job: {
        name: 'Ansible',
        type: 'project_update',
        id: 1,
        status: 'successful',
      },
    },
    job: 4,
  },
  {
    id: 2,
    summary_fields: {
      job: {
        name: 'Durham',
        type: 'job',
        id: 2,
        status: 'successful',
      },
    },
    job: 3,
  },
  {
    id: 3,
    summary_fields: {
      job: {
        name: 'Red hat',
        type: 'job',
        id: 3,
        status: 'successful',
      },
    },
    job: 2,
  },
];

describe('<WorkflowOutputNavigation/>', () => {
  test('Should open the navigation select and list related jobs', async () => {
    i18n.loadLocaleData({ en: { plurals: en } });
    i18n.load({ en: english });
    i18n.activate('en');
    const user = userEvent.setup();
    const ref = jest
      .spyOn(React, 'useRef')
      .mockReturnValueOnce({ current: 'div' });
    const history = createMemoryHistory({
      initialEntries: ['jobs/playbook/2/output'],
    });
    render(
      <I18nProvider i18n={i18n}>
        <Router history={history}>
          <WorkflowOutputNavigation relatedJobs={jobs} parentRef={ref} />
        </Router>
      </I18nProvider>
    );

    const button = screen.getByRole('button');
    await user.click(button);

    await waitFor(() => screen.getByText('Workflow Nodes'));
    await waitFor(() => screen.getByText('Red hat'));
    await waitFor(() => screen.getByText('Durham'));
    await waitFor(() => screen.getByText('Ansible'));
  });
});
Some files were not shown because too many files have changed in this diff.