Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 11:34:43 -03:30)

Compare commits: 128 commits
```
ff49cc5636 9946e644c8 1ed7a50755 9f3396d867 bcd018707a a462978433 6d11003975 017e474325
5d717af778 8d08ac559d 4e24867a0b 2b4b8839d1 dba33f9ef5 db2649d7ba edc3da85cc 2357e24d1d
e4d1056450 37d9c9eb1b d42a85714a 4b8a56be39 2aa99234f4 bf9f1b1d56 704e4781d9 4a8613ce4c
e87fabe6bb 532aa83555 d87bb973d5 a72da3bd1a 56df3f0c2a e0c59d12c1 7645cc2707 6719010050
ccd46a1c0f cc1e349ea8 e509d5f1de 4fca27c664 51be22aebd 54b21e5872 85beb9eb70 56739ac246
1ea3c564df 621833ef0e 16be38bb54 c5976e2584 3c51cb130f c649809eb2 43a53f41dd a3fef27002
cfc1255812 278db2cdde 64157f7207 9e8ba6ca09 268ab128d7 fad5934c1e c9e3873a28 6a19aabd44
11e63e2e89 7c885dcadb b84a192bad 35afb10add f1bd1f1dfc 67c9e1a0cb f6da9a5073 38a0950f46
55d295c2a6 be45919ee4 0a4a9f96c2 1ae1da3f9c cae2c06190 993dd61024 ea07aef73e 268a4ad32d
3712af4df8 8cf75fce8c 46be2d9e5b 998000bfbe 43a50cc62c 30f556f845 c5985c4c81 a9170236e1
85a5b58d18 6fb3c8daa8 a0103acbef f7e6a32444 7bbc256ff1 64f62d6755 b4cfe868fb 8d8681580d
8892cf2622 585d3f4e2a 2c9a0444e6 279cebcef3 e6f8852b05 d06a3f060d 957b2b7188 b94b3a1e91
7776a81e22 bf89093fac 76d76d13b0 e603c23b40 8af4dd5988 0a47d05d26 b3eb9e0193 b26d2ab0e9
7eb0c7dd28 236c1df676 ff118f2177 29d91da1d2 ad08eafb9a 431b9370df 3e93eefe62 782667a34e
90524611ea 583086ae62 19c24cba10 5290c692c1 90a19057d5 a05c328081 6d9e353a4e 82c062eab9
c0d59801d5 93ea8a0919 6d0d8e57a4 1fca505b61 a0e9c30b4a bc94dc0257 3aa8320fc7 29702400f1
```
.github/ISSUE_TEMPLATE/feature_request.yml (vendored): 46 changed lines
```diff
@@ -20,6 +20,19 @@ body:
       - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
         required: true
+
+  - type: dropdown
+    id: feature-type
+    attributes:
+      label: Feature type
+      description: >-
+        What kind of feature is this?
+      multiple: false
+      options:
+        - "New Feature"
+        - "Enhancement to Existing Feature"
+    validations:
+      required: true

   - type: textarea
     id: summary
     attributes:
@@ -40,3 +53,36 @@ body:
       - label: CLI
       - label: Other
+
+  - type: textarea
+    id: steps-to-reproduce
+    attributes:
+      label: Steps to reproduce
+      description: >-
+        Describe the necessary steps to understand the scenario of the requested enhancement.
+        Include all the steps that will help the developer and QE team understand what you are requesting.
+    validations:
+      required: true
+
+  - type: textarea
+    id: current-results
+    attributes:
+      label: Current results
+      description: What is currently happening on the scenario?
+    validations:
+      required: true
+
+  - type: textarea
+    id: sugested-results
+    attributes:
+      label: Sugested feature result
+      description: What is the result this new feature will bring?
+    validations:
+      required: true
+
+  - type: textarea
+    id: additional-information
+    attributes:
+      label: Additional information
+      description: Please provide any other information you think is relevant that could help us understand your feature request.
+    validations:
+      required: false
```
.github/workflows/label_issue.yml (vendored): 31 changed lines
```diff
@@ -19,3 +19,34 @@ jobs:
       not-before: 2021-12-07T07:00:00Z
       configuration-path: .github/issue_labeler.yml
       enable-versioned-regex: 0
+
+  community:
+    runs-on: ubuntu-latest
+    name: Label Issue - Community
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v4
+      - name: Install python requests
+        run: pip install requests
+      - name: Check if user is a member of Ansible org
+        uses: jannekem/run-python-script-action@v1
+        id: check_user
+        with:
+          script: |
+            import requests
+            headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
+            response = requests.get('${{ fromJson(toJson(github.event.issue.user.url)) }}/orgs?per_page=100', headers=headers)
+            is_member = False
+            for org in response.json():
+                if org['login'] == 'ansible':
+                    is_member = True
+            if is_member:
+                print("User is member")
+            else:
+                print("User is community")
+      - name: Add community label if not a member
+        if: contains(steps.check_user.outputs.stdout, 'community')
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "community"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
```
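For reference, the membership check this workflow embeds can be run as a standalone script; below is a sketch, where the token and user URL are illustrative stand-ins for the workflow's `secrets.GITHUB_TOKEN` and `github.event.issue.user.url` values:

```python
import requests

# Hypothetical stand-ins: in the workflow these come from secrets.GITHUB_TOKEN
# and the issue author's API URL (github.event.issue.user.url).
USER_URL = 'https://api.github.com/users/some-user'
TOKEN = 'ghp_example_token'

headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'token {TOKEN}'}
# /orgs returns only public org memberships, and per_page=100 caps the
# result at a single page, mirroring the workflow's single request.
response = requests.get(f'{USER_URL}/orgs?per_page=100', headers=headers)
orgs = response.json() if response.ok else []

is_member = any(org['login'] == 'ansible' for org in orgs)
# The follow-up "Add community label" step keys off this exact stdout text.
print("User is member" if is_member else "User is community")
```

The `label_pr.yml` change below applies the same check to pull request authors.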
.github/workflows/label_pr.yml (vendored): 31 changed lines
```diff
@@ -18,3 +18,34 @@ jobs:
     with:
       repo-token: "${{ secrets.GITHUB_TOKEN }}"
       configuration-path: .github/pr_labeler.yml
+
+  community:
+    runs-on: ubuntu-latest
+    name: Label PR - Community
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v4
+      - name: Install python requests
+        run: pip install requests
+      - name: Check if user is a member of Ansible org
+        uses: jannekem/run-python-script-action@v1
+        id: check_user
+        with:
+          script: |
+            import requests
+            headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
+            response = requests.get('${{ fromJson(toJson(github.event.pull_request.user.url)) }}/orgs?per_page=100', headers=headers)
+            is_member = False
+            for org in response.json():
+                if org['login'] == 'ansible':
+                    is_member = True
+            if is_member:
+                print("User is member")
+            else:
+                print("User is community")
+      - name: Add community label if not a member
+        if: contains(steps.check_user.outputs.stdout, 'community')
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "community"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/update_dependabot_prs.yml (vendored, new file): 26 lines
```diff
@@ -0,0 +1,26 @@
+---
+name: Dependency Pr Update
+on:
+  pull_request:
+    types: [labeled, opened, reopened]
+
+jobs:
+  pr-check:
+    name: Update Dependabot Prs
+    if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout branch
+        uses: actions/checkout@v3
+
+      - name: Update PR Body
+        env:
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+          OWNER: ${{ github.repository_owner }}
+          REPO: ${{ github.event.repository.name }}
+          BRANCH: ${{github.event.pull_request.head.ref}}
+          PR: ${{github.event.pull_request}}
+        run: |
+          gh pr checkout ${{ env.BRANCH }}
+          gh pr edit --body "${{ env.PR }}\nBug, Docs Fix or other nominal change"
```
Makefile: 76 changed lines
```diff
@@ -72,7 +72,7 @@ clean-languages:
	rm -f $(I18N_FLAG_FILE)
	find ./awx/locale/ -type f -regex ".*\.mo$" -delete

-# Remove temporary build files, compiled Python files.
+## Remove temporary build files, compiled Python files.
 clean: clean-ui clean-api clean-awxkit clean-dist
	rm -rf awx/public
	rm -rf awx/lib/site-packages
@@ -94,7 +94,7 @@ clean-api:
 clean-awxkit:
	rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*

-# convenience target to assert environment variables are defined
+## convenience target to assert environment variables are defined
 guard-%:
	@if [ "$${$*}" = "" ]; then \
	    echo "The required environment variable '$*' is not set"; \
@@ -117,7 +117,7 @@ virtualenv_awx:
	    fi; \
	fi

-# Install third-party requirements needed for AWX's environment.
+## Install third-party requirements needed for AWX's environment.
 # this does not use system site packages intentionally
 requirements_awx: virtualenv_awx
	if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -136,7 +136,7 @@ requirements_dev: requirements_awx requirements_awx_dev

 requirements_test: requirements

-# "Install" awx package in development mode.
+## "Install" awx package in development mode.
 develop:
	@if [ "$(VIRTUAL_ENV)" ]; then \
	    pip uninstall -y awx; \
@@ -153,21 +153,21 @@ version_file:
	fi; \
	$(PYTHON) -c "import awx; print(awx.__version__)" > /var/lib/awx/.awx_version; \

-# Refresh development environment after pulling new code.
+## Refresh development environment after pulling new code.
 refresh: clean requirements_dev version_file develop migrate

-# Create Django superuser.
+## Create Django superuser.
 adduser:
	$(MANAGEMENT_COMMAND) createsuperuser

-# Create database tables and apply any new migrations.
+## Create database tables and apply any new migrations.
 migrate:
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
	fi; \
	$(MANAGEMENT_COMMAND) migrate --noinput

-# Run after making changes to the models to create a new migration.
+## Run after making changes to the models to create a new migration.
 dbchange:
	$(MANAGEMENT_COMMAND) makemigrations
@@ -218,7 +218,7 @@ wsbroadcast:
	fi; \
	$(PYTHON) manage.py run_wsbroadcast

-# Run to start the background task dispatcher for development.
+## Run to start the background task dispatcher for development.
 dispatcher:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
@@ -226,7 +226,7 @@ dispatcher:
	$(PYTHON) manage.py run_dispatcher


-# Run to start the zeromq callback receiver
+## Run to start the zeromq callback receiver
 receiver:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
@@ -278,7 +278,7 @@ awx-link:

 TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
 PYTEST_ARGS ?= -n auto
-# Run all API unit tests.
+## Run all API unit tests.
 test:
	if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
@@ -341,23 +341,24 @@ test_unit:
	fi; \
	py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit

-# Run all API unit tests with coverage enabled.
+## Run all API unit tests with coverage enabled.
 test_coverage:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
	fi; \
	py.test --create-db --cov=awx --cov-report=xml --junitxml=./reports/junit.xml $(TEST_DIRS)

-# Output test coverage as HTML (into htmlcov directory).
+## Output test coverage as HTML (into htmlcov directory).
 coverage_html:
	coverage html

-# Run API unit tests across multiple Python/Django versions with Tox.
+## Run API unit tests across multiple Python/Django versions with Tox.
 test_tox:
	tox -v

-# Make fake data

 DATA_GEN_PRESET = ""
+## Make fake data
 bulk_data:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
@@ -502,7 +503,7 @@ docker-compose-container-group-clean:
	fi
	rm -rf tools/docker-compose-minikube/_sources/

-# Base development image build
+## Base development image build
 docker-compose-build:
	ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
	DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
@@ -520,7 +521,7 @@ docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean

 docker-refresh: docker-clean docker-compose

-# Docker Development Environment with Elastic Stack Connected
+## Docker Development Environment with Elastic Stack Connected
 docker-compose-elk: awx/projects docker-compose-sources
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
@@ -567,16 +568,16 @@ awx-kube-dev-build: Dockerfile.kube-dev
 # Translation TASKS
 # --------------------------------------

-# generate UI .pot file, an empty template of strings yet to be translated
+## generate UI .pot file, an empty template of strings yet to be translated
 pot: $(UI_BUILD_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean

-# generate UI .po files for each locale (will update translated strings for `en`)
+## generate UI .po files for each locale (will update translated strings for `en`)
 po: $(UI_BUILD_FLAG_FILE)
	$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean

-# generate API django .pot .po
 LANG = "en-us"
+## generate API django .pot .po
 messages:
	@if [ "$(VENV_BASE)" ]; then \
	    . $(VENV_BASE)/awx/bin/activate; \
@@ -585,3 +586,38 @@ messages:

 print-%:
	@echo $($*)
+
+# HELP related targets
+# --------------------------------------
+
+HELP_FILTER=.PHONY
+
+## Display help targets
+help:
+	@printf "Available targets:\n"
+	@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
+
+## Display help for all targets
+help/all:
+	@printf "Available targets:\n"
+	@make -s help/generate
+
+## Generate help output from MAKEFILE_LIST
+help/generate:
+	@awk '/^[-a-zA-Z_0-9%:\\\.\/]+:/ { \
+		helpMessage = match(lastLine, /^## (.*)/); \
+		if (helpMessage) { \
+			helpCommand = $$1; \
+			helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
+			gsub("\\\\", "", helpCommand); \
+			gsub(":+$$", "", helpCommand); \
+			printf "  \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, helpMessage; \
+		} else { \
+			helpCommand = $$1; \
+			gsub("\\\\", "", helpCommand); \
+			gsub(":+$$", "", helpCommand); \
+			printf "  \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, "No help available"; \
+		} \
+	} \
+	{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
+	@printf "\n"
```
```diff
@@ -190,7 +190,7 @@ def manage():
         sys.stdout.write('%s\n' % __version__)
     # If running as a user without permission to read settings, display an
     # error message. Allow --help to still work.
-    elif settings.SECRET_KEY == 'permission-denied':
+    elif not os.getenv('SKIP_SECRET_KEY_CHECK', False) and settings.SECRET_KEY == 'permission-denied':
         if len(sys.argv) == 1 or len(sys.argv) >= 2 and sys.argv[1] in ('-h', '--help', 'help'):
             execute_from_command_line(sys.argv)
             sys.stdout.write('\n')
```
```diff
@@ -157,7 +157,7 @@ class FieldLookupBackend(BaseFilterBackend):

     # A list of fields that we know can be filtered on without the possiblity
     # of introducing duplicates
-    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField)
+    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)

     def get_fields_from_lookup(self, model, lookup):
```
awx/api/urls/debug.py (new file): 17 lines
```diff
@@ -0,0 +1,17 @@
+from django.urls import re_path
+
+from awx.api.views.debug import (
+    DebugRootView,
+    TaskManagerDebugView,
+    DependencyManagerDebugView,
+    WorkflowManagerDebugView,
+)
+
+urls = [
+    re_path(r'^$', DebugRootView.as_view(), name='debug'),
+    re_path(r'^task_manager/$', TaskManagerDebugView.as_view(), name='task_manager'),
+    re_path(r'^dependency_manager/$', DependencyManagerDebugView.as_view(), name='dependency_manager'),
+    re_path(r'^workflow_manager/$', WorkflowManagerDebugView.as_view(), name='workflow_manager'),
+]
+
+__all__ = ['urls']
```
```diff
@@ -2,9 +2,9 @@
 # All Rights Reserved.

 from __future__ import absolute_import, unicode_literals

-from django.conf import settings
 from django.urls import include, re_path

+from awx import MODE
 from awx.api.generics import LoggedLoginView, LoggedLogoutView
 from awx.api.views import (
     ApiRootView,
@@ -145,7 +145,12 @@ urlpatterns = [
     re_path(r'^logout/$', LoggedLogoutView.as_view(next_page='/api/', redirect_field_name='next'), name='logout'),
     re_path(r'^o/', include(oauth2_root_urls)),
 ]
-if settings.SETTINGS_MODULE == 'awx.settings.development':
+if MODE == 'development':
     # Only include these if we are in the development environment
     from awx.api.swagger import SwaggerSchemaView

     urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
+
+    from awx.api.urls.debug import urls as debug_urls
+
+    urlpatterns += [re_path(r'^debug/', include(debug_urls))]
```
```diff
@@ -93,7 +93,7 @@ from awx.main.utils import (
     get_object_or_400,
     getattrd,
     get_pk_from_dict,
-    schedule_task_manager,
+    ScheduleWorkflowManager,
     ignore_inventory_computed_fields,
 )
 from awx.main.utils.encryption import encrypt_value
@@ -3391,7 +3391,7 @@ class WorkflowJobCancel(RetrieveAPIView):
         obj = self.get_object()
         if obj.can_cancel:
             obj.cancel()
-            schedule_task_manager()
+            ScheduleWorkflowManager().schedule()
             return Response(status=status.HTTP_202_ACCEPTED)
         else:
             return self.http_method_not_allowed(request, *args, **kwargs)
@@ -3839,7 +3839,7 @@ class JobJobEventsList(BaseJobEventsList):
     def get_queryset(self):
         job = self.get_parent_object()
         self.check_parent_access(job)
-        return job.get_event_queryset().select_related('host').order_by('start_line')
+        return job.get_event_queryset().prefetch_related('job__job_template', 'host').order_by('start_line')


 class JobJobEventsChildrenSummary(APIView):
```
awx/api/views/debug.py (new file): 68 lines
```diff
@@ -0,0 +1,68 @@
+from collections import OrderedDict
+
+from django.conf import settings
+
+from rest_framework.permissions import AllowAny
+from rest_framework.response import Response
+from awx.api.generics import APIView
+
+from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
+
+
+class TaskManagerDebugView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+    prefix = 'Task'
+
+    def get(self, request):
+        TaskManager().schedule()
+        if not settings.AWX_DISABLE_TASK_MANAGERS:
+            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
+        else:
+            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
+        return Response(msg)
+
+
+class DependencyManagerDebugView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+    prefix = 'Dependency'
+
+    def get(self, request):
+        DependencyManager().schedule()
+        if not settings.AWX_DISABLE_TASK_MANAGERS:
+            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
+        else:
+            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
+        return Response(msg)
+
+
+class WorkflowManagerDebugView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+    prefix = 'Workflow'
+
+    def get(self, request):
+        WorkflowManager().schedule()
+        if not settings.AWX_DISABLE_TASK_MANAGERS:
+            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
+        else:
+            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
+        return Response(msg)
+
+
+class DebugRootView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+
+    def get(self, request, format=None):
+        '''List of available debug urls'''
+        data = OrderedDict()
+        data['task_manager'] = '/api/debug/task_manager/'
+        data['dependency_manager'] = '/api/debug/dependency_manager/'
+        data['workflow_manager'] = '/api/debug/workflow_manager/'
+        return Response(data)
```
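These views are only mounted when MODE == 'development' (see the URL diff above). A quick sketch of exercising them; the base URL and TLS settings are assumptions for a local docker-compose development deployment:

```python
import requests

# Assumed URL for a local docker-compose development deployment; adjust
# host, port, and certificate verification for your environment.
BASE = 'https://localhost:8043/api/debug'

# The root view lists the three manager triggers.
print(requests.get(f'{BASE}/', verify=False).json())

# A GET runs the corresponding manager synchronously; the response text
# differs depending on whether AWX_DISABLE_TASK_MANAGERS is set.
print(requests.get(f'{BASE}/task_manager/', verify=False).json())
```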
```diff
@@ -80,7 +80,7 @@ def _ctit_db_wrapper(trans_safe=False):
         yield
     except DBError as exc:
         if trans_safe:
-            level = logger.exception
+            level = logger.warning
             if isinstance(exc, ProgrammingError):
                 if 'relation' in str(exc) and 'does not exist' in str(exc):
                     # this generally means we can't fetch Tower configuration
@@ -89,7 +89,7 @@ def _ctit_db_wrapper(trans_safe=False):
                     # has come up *before* the database has finished migrating, and
                     # especially that the conf.settings table doesn't exist yet
                     level = logger.debug
-            level('Database settings are not available, using defaults.')
+            level(f'Database settings are not available, using defaults. error: {str(exc)}')
         else:
             logger.exception('Error modifying something related to database settings.')
     finally:
```
```diff
@@ -166,7 +166,11 @@ class Metrics:
         elif settings.IS_TESTING():
             self.instance_name = "awx_testing"
         else:
-            self.instance_name = Instance.objects.me().hostname
+            try:
+                self.instance_name = Instance.objects.me().hostname
+            except Exception as e:
+                self.instance_name = settings.CLUSTER_HOST_ID
+                logger.info(f'Instance {self.instance_name} seems to be unregistered, error: {e}')

         # metric name, help_text
         METRICSLIST = [
@@ -184,19 +188,29 @@ class Metrics:
             FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
             IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
             FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
-            SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading all tasks from db'),
+            SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
             SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
             SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
             SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
             SetFloatM('task_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
             SetFloatM('task_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow jobs'),
             SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
-            IntM('task_manager_schedule_calls', 'Number of calls to task manager schedule'),
+            IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
             SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
             SetIntM('task_manager_tasks_started', 'Number of tasks started'),
             SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
             SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
             SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
+            SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
+            SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
+            SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
+            SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
+            SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
+            SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
         ]
         # turn metric list into dictionary with the metric name as a key
         self.METRICS = {}
@@ -303,7 +317,12 @@ class Metrics:
             self.previous_send_metrics.set(current_time)
             self.previous_send_metrics.store_value(self.conn)
         finally:
-            lock.release()
+            try:
+                lock.release()
+            except Exception as exc:
+                # After system failures, we might throw redis.exceptions.LockNotOwnedError
+                # this is to avoid print a Traceback, and importantly, avoid raising an exception into parent context
+                logger.warning(f'Error releasing subsystem metrics redis lock, error: {str(exc)}')

     def load_other_metrics(self, request):
         # data received from other nodes are stored in their own keys
```
```diff
@@ -446,7 +446,7 @@ register(
     label=_('Default Job Idle Timeout'),
     help_text=_(
         'If no output is detected from ansible in this number of seconds the execution will be terminated. '
-        'Use value of 0 to used default idle_timeout is 600s.'
+        'Use value of 0 to indicate that no idle timeout should be imposed.'
     ),
     category=_('Jobs'),
     category_slug='jobs',
```
```diff
@@ -4,6 +4,7 @@ import select
 from contextlib import contextmanager

 from django.conf import settings
+from django.db import connection as pg_connection


 NOT_READY = ([], [], [])
@@ -15,7 +16,6 @@ def get_local_queuename():

 class PubSub(object):
     def __init__(self, conn):
-        assert conn.autocommit, "Connection must be in autocommit mode."
         self.conn = conn

     def listen(self, channel):
@@ -31,6 +31,9 @@ class PubSub(object):
         cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

     def events(self, select_timeout=5, yield_timeouts=False):
+        if not pg_connection.get_autocommit():
+            raise RuntimeError('Listening for events can only be done in autocommit mode')
+
         while True:
             if select.select([self.conn], [], [], select_timeout) == NOT_READY:
                 if yield_timeouts:
@@ -45,11 +48,32 @@ class PubSub(object):


 @contextmanager
-def pg_bus_conn():
-    conf = settings.DATABASES['default']
-    conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {}))
-    # Django connection.cursor().connection doesn't have autocommit=True on
-    conn.set_session(autocommit=True)
+def pg_bus_conn(new_connection=False):
+    '''
+    Any listeners probably want to establish a new database connection,
+    separate from the Django connection used for queries, because that will prevent
+    losing connection to the channel whenever a .close() happens.
+
+    Any publishers probably want to use the existing connection
+    so that messages follow postgres transaction rules
+    https://www.postgresql.org/docs/current/sql-notify.html
+    '''
+
+    if new_connection:
+        conf = settings.DATABASES['default']
+        conn = psycopg2.connect(
+            dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
+        )
+        # Django connection.cursor().connection doesn't have autocommit=True on by default
+        conn.set_session(autocommit=True)
+    else:
+        if pg_connection.connection is None:
+            pg_connection.connect()
+        if pg_connection.connection is None:
+            raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
+        conn = pg_connection.connection

     pubsub = PubSub(conn)
     yield pubsub
-    conn.close()
+    if new_connection:
+        conn.close()
```
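A sketch of the usage split the docstring describes; the channel name is illustrative, and it assumes the pg_notify call above lives in a notify(channel, payload) method on PubSub:

```python
from awx.main.dispatch import pg_bus_conn

CHANNEL = 'my_channel'  # illustrative channel name

def listen_forever():
    # Listener: a dedicated connection, so closing the shared Django
    # connection elsewhere cannot drop the LISTEN subscription.
    with pg_bus_conn(new_connection=True) as conn:
        conn.listen(CHANNEL)
        for event in conn.events(yield_timeouts=True):
            if event is not None:  # None marks a select() timeout
                print(event)

def publish(payload):
    # Publisher: reuse the Django connection so the NOTIFY obeys the
    # surrounding transaction (it is only delivered on commit).
    with pg_bus_conn() as conn:
        conn.notify(CHANNEL, payload)
```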
```diff
@@ -16,13 +16,14 @@ from queue import Full as QueueFull, Empty as QueueEmpty

 from django.conf import settings
 from django.db import connection as django_connection, connections
 from django.core.cache import cache as django_cache
+from django.utils.timezone import now as tz_now
 from django_guid import set_guid
 from jinja2 import Template
 import psutil

 from awx.main.models import UnifiedJob
 from awx.main.dispatch import reaper
-from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
+from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity, log_excess_runtime

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -71,9 +72,11 @@ class PoolWorker(object):
         self.messages_finished = 0
         self.managed_tasks = collections.OrderedDict()
         self.finished = MPQueue(queue_size) if self.track_managed_tasks else NoOpResultQueue()
+        self.last_finished = None
         self.queue = MPQueue(queue_size)
         self.process = Process(target=target, args=(self.queue, self.finished) + args)
         self.process.daemon = True
+        self.scale_down_in = settings.DISPATCHER_SCALE_DOWN_WAIT_TIME

     def start(self):
         self.process.start()
@@ -144,6 +147,9 @@ class PoolWorker(object):
             # state of which events are *currently* being processed.
             logger.warning('Event UUID {} appears to be have been duplicated.'.format(uuid))

+        if finished:
+            self.last_finished = time.time()
+
     @property
     def current_task(self):
         if not self.track_managed_tasks:
@@ -189,6 +195,14 @@ class PoolWorker(object):
     def idle(self):
         return not self.busy

+    @property
+    def ready_to_scale_down(self):
+        if self.busy:
+            return False
+        if self.last_finished is None:
+            return True
+        return time.time() - self.last_finished > self.scale_down_in
+

 class StatefulPoolWorker(PoolWorker):
@@ -249,7 +263,7 @@ class WorkerPool(object):
         except Exception:
             logger.exception('could not fork')
         else:
-            logger.debug('scaling up worker pid:{}'.format(worker.pid))
+            logger.info(f'scaling up worker pid:{worker.pid} total:{len(self.workers)}')
             return idx, worker

     def debug(self, *args, **kwargs):
@@ -328,12 +342,16 @@ class AutoscalePool(WorkerPool):
         # Get same number as max forks based on memory, this function takes memory as bytes
         self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)

         # add magic prime number of extra workers to ensure
         # we have a few extra workers to run the heartbeat
         self.max_workers += 7

         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)

+        # the task manager enforces settings.TASK_MANAGER_TIMEOUT on its own
+        # but if the task takes longer than the time defined here, we will force it to stop here
+        self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD
+
     def debug(self, *args, **kwargs):
         self.cleanup()
         return super(AutoscalePool, self).debug(*args, **kwargs)
@@ -351,6 +369,7 @@ class AutoscalePool(WorkerPool):
     def debug_meta(self):
         return 'min={} max={}'.format(self.min_workers, self.max_workers)

+    @log_excess_runtime(logger)
     def cleanup(self):
         """
         Perform some internal account and cleanup. This is run on
@@ -359,8 +378,6 @@ class AutoscalePool(WorkerPool):
         1. Discover worker processes that exited, and recover messages they
            were handling.
         2. Clean up unnecessary, idle workers.
-        3. Check to see if the database says this node is running any tasks
-           that aren't actually running. If so, reap them.

         IMPORTANT: this function is one of the few places in the dispatcher
         (aside from setting lookups) where we talk to the database. As such,
@@ -385,12 +402,12 @@ class AutoscalePool(WorkerPool):
                     logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
                 orphaned.extend(w.orphaned_tasks)
                 self.workers.remove(w)
-            elif w.idle and len(self.workers) > self.min_workers:
+            elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down:
                 # the process has an empty queue (it's idle) and we have
                 # more processes in the pool than we need (> min)
                 # send this process a message so it will exit gracefully
                 # at the next opportunity
-                logger.debug('scaling down worker pid:{}'.format(w.pid))
+                logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}')
                 w.quit()
                 self.workers.remove(w)
             if w.alive:
@@ -401,13 +418,15 @@ class AutoscalePool(WorkerPool):
                 # the task manager to never do more work
                 current_task = w.current_task
                 if current_task and isinstance(current_task, dict):
-                    if current_task.get('task', '').endswith('tasks.run_task_manager'):
+                    endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
+                    current_task_name = current_task.get('task', '')
+                    if any(current_task_name.endswith(e) for e in endings):
                         if 'started' not in current_task:
                             w.managed_tasks[current_task['uuid']]['started'] = time.time()
                         age = time.time() - current_task['started']
                         w.managed_tasks[current_task['uuid']]['age'] = age
-                        if age > (60 * 5):
-                            logger.error(f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}')  # noqa
+                        if age > self.task_manager_timeout:
+                            logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
                             os.kill(w.pid, signal.SIGTERM)

         for m in orphaned:
@@ -417,13 +436,17 @@ class AutoscalePool(WorkerPool):
             idx = random.choice(range(len(self.workers)))
             self.write(idx, m)

-        # if the database says a job is running on this node, but it's *not*,
-        # then reap it
-        running_uuids = []
-        for worker in self.workers:
-            worker.calculate_managed_tasks()
-            running_uuids.extend(list(worker.managed_tasks.keys()))
-        reaper.reap(excluded_uuids=running_uuids)
+    def add_bind_kwargs(self, body):
+        bind_kwargs = body.pop('bind_kwargs', [])
+        body.setdefault('kwargs', {})
+        if 'dispatch_time' in bind_kwargs:
+            body['kwargs']['dispatch_time'] = tz_now().isoformat()
+        if 'worker_tasks' in bind_kwargs:
+            worker_tasks = {}
+            for worker in self.workers:
+                worker.calculate_managed_tasks()
+                worker_tasks[worker.pid] = list(worker.managed_tasks.keys())
+            body['kwargs']['worker_tasks'] = worker_tasks

     def up(self):
         if self.full:
@@ -438,6 +461,8 @@ class AutoscalePool(WorkerPool):
         if 'guid' in body:
             set_guid(body['guid'])
         try:
+            if isinstance(body, dict) and body.get('bind_kwargs'):
+                self.add_bind_kwargs(body)
             # when the cluster heartbeat occurs, clean up internally
             if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
                 self.cleanup()
@@ -452,6 +477,10 @@ class AutoscalePool(WorkerPool):
                     w.put(body)
                     break
             else:
+                task_name = 'unknown'
+                if isinstance(body, dict):
+                    task_name = body.get('task')
+                logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
                 return super(AutoscalePool, self).write(preferred_queue, body)
         except Exception:
             for conn in connections.all():
```
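The new scale-down rule combines idleness with a cool-down since the worker last finished a task. A stand-alone sketch of the predicate; the wait-time constant is illustrative, the real value comes from settings.DISPATCHER_SCALE_DOWN_WAIT_TIME:

```python
import time

DISPATCHER_SCALE_DOWN_WAIT_TIME = 60  # illustrative; the real value comes from settings

class WorkerStub:
    """Minimal stand-in for PoolWorker's scale-down bookkeeping."""

    def __init__(self):
        self.busy = False
        self.last_finished = None  # set to time.time() when a task completes

    @property
    def ready_to_scale_down(self):
        if self.busy:
            return False  # never retire a worker mid-task
        if self.last_finished is None:
            return True  # never ran anything; safe to retire
        # idle, but only retire after the cool-down has elapsed
        return time.time() - self.last_finished > DISPATCHER_SCALE_DOWN_WAIT_TIME
```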
```diff
@@ -2,6 +2,7 @@ import inspect
 import logging
 import sys
 import json
+import time
 from uuid import uuid4

 from django.conf import settings
@@ -49,13 +50,21 @@ class task:
         @task(queue='tower_broadcast')
         def announce():
             print("Run this everywhere!")
+
+    # The special parameter bind_kwargs tells the main dispatcher process to add certain kwargs
+
+    @task(bind_kwargs=['dispatch_time'])
+    def print_time(dispatch_time=None):
+        print(f"Time I was dispatched: {dispatch_time}")
     """

-    def __init__(self, queue=None):
+    def __init__(self, queue=None, bind_kwargs=None):
         self.queue = queue
+        self.bind_kwargs = bind_kwargs

     def __call__(self, fn=None):
         queue = self.queue
+        bind_kwargs = self.bind_kwargs

 class PublisherMixin(object):
@@ -75,10 +84,12 @@ class task:
             msg = f'{cls.name}: Queue value required and may not be None'
             logger.error(msg)
             raise ValueError(msg)
-        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name}
+        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
         guid = get_guid()
         if guid:
             obj['guid'] = guid
+        if bind_kwargs:
+            obj['bind_kwargs'] = bind_kwargs
         obj.update(**kw)
         if callable(queue):
             queue = queue()
```
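End to end, the flag rides in the message body: apply_async stamps bind_kwargs (and now time_pub), and the dispatcher's add_bind_kwargs (see the pool diff above) fills the values in before handing the task to a worker. A sketch of that lifecycle; the uuid and dotted task name are hypothetical:

```python
import time
from datetime import datetime, timezone

# What apply_async builds for @task(bind_kwargs=['dispatch_time']) (sketch):
body = {
    'uuid': 'abc-123',                            # illustrative task id
    'args': [],
    'kwargs': {},
    'task': 'awx.main.tasks.system.print_time',   # hypothetical dotted name
    'time_pub': time.time(),
    'bind_kwargs': ['dispatch_time'],
}

# What the main dispatcher process does before dispatch (mirrors add_bind_kwargs):
bind_kwargs = body.pop('bind_kwargs', [])
body.setdefault('kwargs', {})
if 'dispatch_time' in bind_kwargs:
    body['kwargs']['dispatch_time'] = datetime.now(timezone.utc).isoformat()

# The worker then calls print_time(dispatch_time=...) with the filled-in value.
print(body['kwargs'])
```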
```diff
@@ -2,6 +2,7 @@ from datetime import timedelta
 import logging

 from django.db.models import Q
+from django.conf import settings
 from django.utils.timezone import now as tz_now
 from django.contrib.contenttypes.models import ContentType
@@ -15,44 +16,71 @@ def startup_reaping():
     If this particular instance is starting, then we know that any running jobs are invalid
     so we will reap those jobs as a special action here
     """
-    me = Instance.objects.me()
+    try:
+        me = Instance.objects.me()
+    except RuntimeError as e:
+        logger.warning(f'Local instance is not registered, not running startup reaper: {e}')
+        return
     jobs = UnifiedJob.objects.filter(status='running', controller_node=me.hostname)
     job_ids = []
     for j in jobs:
         job_ids.append(j.id)
-        j.status = 'failed'
-        j.start_args = ''
-        j.job_explanation += 'Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.'
-        j.save(update_fields=['status', 'start_args', 'job_explanation'])
-        if hasattr(j, 'send_notification_templates'):
-            j.send_notification_templates('failed')
-        j.websocket_emit_status('failed')
+        reap_job(
+            j,
+            'failed',
+            job_explanation='Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.',
+        )
     if job_ids:
         logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup')


-def reap_job(j, status):
-    if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
+def reap_job(j, status, job_explanation=None):
+    j.refresh_from_db(fields=['status', 'job_explanation'])
+    status_before = j.status
+    if status_before not in ('running', 'waiting'):
         # just in case, don't reap jobs that aren't running
         return
     j.status = status
     j.start_args = ''  # blank field to remove encrypted passwords
-    j.job_explanation += ' '.join(
-        (
-            'Task was marked as running but was not present in',
-            'the job queue, so it has been marked as failed.',
-        )
-    )
+    if j.job_explanation:
+        j.job_explanation += ' '  # Separate messages for readability
+    if job_explanation is None:
+        j.job_explanation += 'Task was marked as running but was not present in the job queue, so it has been marked as failed.'
+    else:
+        j.job_explanation += job_explanation
     j.save(update_fields=['status', 'start_args', 'job_explanation'])
     if hasattr(j, 'send_notification_templates'):
         j.send_notification_templates('failed')
     j.websocket_emit_status(status)
-    logger.error('{} is no longer running; reaping'.format(j.log_format))
+    logger.error(f'{j.log_format} is no longer {status_before}; reaping')


-def reap(instance=None, status='failed', excluded_uuids=[]):
+def reap_waiting(instance=None, status='failed', job_explanation=None, grace_period=None, excluded_uuids=None, ref_time=None):
     """
-    Reap all jobs in waiting|running for this instance.
+    Reap all jobs in waiting for this instance.
     """
+    if grace_period is None:
+        grace_period = settings.JOB_WAITING_GRACE_PERIOD + settings.TASK_MANAGER_TIMEOUT
+
     me = instance
     if me is None:
+        try:
+            me = Instance.objects.me()
+        except RuntimeError as e:
+            logger.warning(f'Local instance is not registered, not running reaper: {e}')
+            return
+    if ref_time is None:
+        ref_time = tz_now()
+    jobs = UnifiedJob.objects.filter(status='waiting', modified__lte=ref_time - timedelta(seconds=grace_period), controller_node=me.hostname)
+    if excluded_uuids:
+        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
+    for j in jobs:
+        reap_job(j, status, job_explanation=job_explanation)
+
+
+def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
+    """
+    Reap all jobs in running for this instance.
+    """
+    me = instance
+    if me is None:
@@ -61,12 +89,11 @@ def reap(instance=None, status='failed', excluded_uuids=[]):
         except RuntimeError as e:
             logger.warning(f'Local instance is not registered, not running reaper: {e}')
             return
-    now = tz_now()
     workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
     jobs = UnifiedJob.objects.filter(
-        (Q(status='running') | Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
-        & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
-        & ~Q(polymorphic_ctype_id=workflow_ctype_id)
-    ).exclude(celery_task_id__in=excluded_uuids)
+        Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
+    )
+    if excluded_uuids:
+        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
     for j in jobs:
-        reap_job(j, status)
+        reap_job(j, status, job_explanation=job_explanation)
```
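The waiting-job cutoff reduces to timestamp arithmetic. A minimal sketch of the default grace-period computation used by reap_waiting above; the two setting values are illustrative stand-ins, not AWX's confirmed defaults:

```python
from datetime import datetime, timedelta, timezone

# Illustrative values; the real ones come from Django settings.
JOB_WAITING_GRACE_PERIOD = 60
TASK_MANAGER_TIMEOUT = 300

def waiting_cutoff(ref_time=None, grace_period=None):
    """Jobs still 'waiting' whose modified time is at or before this
    cutoff are considered orphaned and get reaped."""
    if grace_period is None:
        grace_period = JOB_WAITING_GRACE_PERIOD + TASK_MANAGER_TIMEOUT
    if ref_time is None:
        ref_time = datetime.now(timezone.utc)
    return ref_time - timedelta(seconds=grace_period)
```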
```diff
@@ -17,6 +17,7 @@ from django.conf import settings

 from awx.main.dispatch.pool import WorkerPool
 from awx.main.dispatch import pg_bus_conn
+from awx.main.utils.common import log_excess_runtime

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -81,6 +82,9 @@ class AWXConsumerBase(object):
             logger.error('unrecognized control message: {}'.format(control))

     def process_task(self, body):
+        if isinstance(body, dict):
+            body['time_ack'] = time.time()
+
         if 'control' in body:
             try:
                 return self.control(body)
@@ -101,6 +105,7 @@ class AWXConsumerBase(object):
         self.total_messages += 1
         self.record_statistics()

+    @log_excess_runtime(logger)
     def record_statistics(self):
         if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
             try:
@@ -149,7 +154,7 @@ class AWXConsumerPG(AWXConsumerBase):

         while True:
             try:
-                with pg_bus_conn() as conn:
+                with pg_bus_conn(new_connection=True) as conn:
                     for queue in self.queues:
                         conn.listen(queue)
                     if init is False:
```
```diff
@@ -167,17 +167,27 @@ class CallbackBrokerWorker(BaseWorker):
                 try:
                     cls.objects.bulk_create(events)
                     metrics_bulk_events_saved += len(events)
-                except Exception:
+                except Exception as exc:
+                    logger.warning(f'Error in events bulk_create, will try indiviually up to 5 errors, error {str(exc)}')
                     # if an exception occurs, we should re-attempt to save the
                     # events one-by-one, because something in the list is
                     # broken/stale
+                    consecutive_errors = 0
+                    events_saved = 0
                     metrics_events_batch_save_errors += 1
                     for e in events:
                         try:
                             e.save()
-                            metrics_singular_events_saved += 1
-                        except Exception:
-                            logger.exception('Database Error Saving Job Event')
+                            events_saved += 1
+                            consecutive_errors = 0
+                        except Exception as exc_indv:
+                            consecutive_errors += 1
+                            logger.info(f'Database Error Saving individual Job Event, error {str(exc_indv)}')
+                        if consecutive_errors >= 5:
+                            raise
+                    metrics_singular_events_saved += events_saved
+                    if events_saved == 0:
+                        raise
             metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
             for e in events:
                 if not getattr(e, '_skip_websocket_message', False):
@@ -257,17 +267,18 @@ class CallbackBrokerWorker(BaseWorker):
             try:
                 self.flush(force=flush)
                 break
-            except (OperationalError, InterfaceError, InternalError):
+            except (OperationalError, InterfaceError, InternalError) as exc:
                 if retries >= self.MAX_RETRIES:
                     logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                     return
                 delay = 60 * retries
-                logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(i=retries + 1, delay=delay))
+                logger.warning(f'Database Error Flushing Job Events, retry #{retries + 1} in {delay} seconds: {str(exc)}')
                 django_connection.close()
                 time.sleep(delay)
                 retries += 1
             except DatabaseError:
-                logger.exception('Database Error Saving Job Event')
+                logger.exception('Database Error Flushing Job Events')
                 django_connection.close()
                 break
             except Exception as exc:
                 tb = traceback.format_exc()
```
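The same save-fallback shape works outside Django. A minimal sketch of bulk-save with per-item fallback and a consecutive-error cap; save_many and save_one are stand-ins for bulk_create and save:

```python
def save_events(events, save_many, save_one, max_consecutive_errors=5):
    """Try one bulk save; on failure, fall back to saving events
    one-by-one, giving up after too many failures in a row or if
    nothing in the batch could be saved at all."""
    try:
        save_many(events)
        return len(events)
    except Exception:
        consecutive_errors = 0
        saved = 0
        for event in events:
            try:
                save_one(event)
                saved += 1
                consecutive_errors = 0  # a success resets the streak
            except Exception:
                consecutive_errors += 1
                if consecutive_errors >= max_consecutive_errors:
                    raise  # persistent failures; let the caller retry the flush
        if saved == 0:
            raise  # the whole batch is bad; re-raise the bulk error
        return saved
```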
```diff
@@ -3,6 +3,7 @@ import logging
 import importlib
 import sys
 import traceback
+import time

 from kubernetes.config import kube_config
@@ -60,8 +61,19 @@ class TaskWorker(BaseWorker):
             # the callable is a class, e.g., RunJob; instantiate and
             # return its `run()` method
             _call = _call().run

+        log_extra = ''
+        logger_method = logger.debug
+        if ('time_ack' in body) and ('time_pub' in body):
+            time_publish = body['time_ack'] - body['time_pub']
+            time_waiting = time.time() - body['time_ack']
+            if time_waiting > 5.0 or time_publish > 5.0:
+                # If task too a very long time to process, add this information to the log
+                log_extra = f' took {time_publish:.4f} to ack, {time_waiting:.4f} in local dispatcher'
+                logger_method = logger.info
         # don't print kwargs, they often contain launch-time secrets
-        logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
+        logger_method(f'task {uuid} starting {task}(*{args}){log_extra}')

         return _call(*args, **kwargs)

     def perform_work(self, body):
```
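time_pub is stamped at publish time (publishers diff above) and time_ack at acknowledgement (worker base diff), and the worker derives two latencies from them. A minimal sketch of that computation:

```python
import time

def dispatch_latencies(body, threshold=5.0):
    """Return (time_publish, time_waiting, is_slow) for a task message,
    or None if the message lacks either timestamp."""
    if 'time_ack' not in body or 'time_pub' not in body:
        return None
    time_publish = body['time_ack'] - body['time_pub']  # publish -> ack delay
    time_waiting = time.time() - body['time_ack']       # time queued in the local pool
    is_slow = time_publish > threshold or time_waiting > threshold
    return time_publish, time_waiting, is_slow
```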
```diff
@@ -862,7 +862,7 @@ class Command(BaseCommand):
                 overwrite_vars=bool(options.get('overwrite_vars', False)),
             )
             inventory_update = inventory_source.create_inventory_update(
-                _eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
+                _eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
             )

             data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
```
```diff
@@ -7,7 +7,7 @@ from django.core.cache import cache as django_cache
 from django.core.management.base import BaseCommand
 from django.db import connection as django_connection

-from awx.main.dispatch import get_local_queuename, reaper
+from awx.main.dispatch import get_local_queuename
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.pool import AutoscalePool
 from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -53,7 +53,6 @@ class Command(BaseCommand):
         # (like the node heartbeat)
         periodic.run_continuously()

-        reaper.startup_reaping()
         consumer = None

         try:
```
```diff
@@ -95,8 +95,13 @@ class Command(BaseCommand):
         # database migrations are still running
         from awx.main.models.ha import Instance

-        executor = MigrationExecutor(connection)
-        migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
+        try:
+            executor = MigrationExecutor(connection)
+            migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
+        except Exception as exc:
+            logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
+            time.sleep(10)
+            return

         # In containerized deployments, migrations happen in the task container,
         # and the services running there don't start until migrations are
```
awx/main/migrations/0165_task_manager_refactor.py (new file): 35 lines
```diff
@@ -0,0 +1,35 @@
+# Generated by Django 3.2.13 on 2022-08-10 14:03
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0164_remove_inventorysource_update_on_project_update'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='unifiedjob',
+            name='preferred_instance_groups_cache',
+            field=models.JSONField(
+                blank=True, default=None, editable=False, help_text='A cached list with pk values from preferred instance groups.', null=True
+            ),
+        ),
+        migrations.AddField(
+            model_name='unifiedjob',
+            name='task_impact',
+            field=models.PositiveIntegerField(default=0, editable=False, help_text='Number of forks an instance consumes when running this job.'),
+        ),
+        migrations.AddField(
+            model_name='workflowapproval',
+            name='expires',
+            field=models.DateTimeField(
+                default=None,
+                editable=False,
+                help_text='The time this approval will expire. This is the created time plus timeout, used for filtering.',
+                null=True,
+            ),
+        ),
+    ]
```
awx/main/migrations/0166_alter_jobevent_host.py (new file): 40 lines
```diff
@@ -0,0 +1,40 @@
+# Generated by Django 3.2.13 on 2022-07-06 13:19
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0165_task_manager_refactor'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='adhoccommandevent',
+            name='host',
+            field=models.ForeignKey(
+                db_constraint=False,
+                default=None,
+                editable=False,
+                null=True,
+                on_delete=django.db.models.deletion.SET_NULL,
+                related_name='ad_hoc_command_events',
+                to='main.host',
+            ),
+        ),
+        migrations.AlterField(
+            model_name='jobevent',
+            name='host',
+            field=models.ForeignKey(
+                db_constraint=False,
+                default=None,
+                editable=False,
+                null=True,
+                on_delete=django.db.models.deletion.DO_NOTHING,
+                related_name='job_events_as_primary_host',
+                to='main.host',
+            ),
+        ),
+    ]
```
@@ -90,6 +90,9 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):

     extra_vars_dict = VarsDictProperty('extra_vars', True)

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     def clean_inventory(self):
         inv = self.inventory
         if not inv:
@@ -178,12 +181,12 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
     def get_passwords_needed_to_start(self):
         return self.passwords_needed_to_start

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         # NOTE: We sorta have to assume the host count matches and that forks default to 5
-        from awx.main.models.inventory import Host
-
-        count_hosts = Host.objects.filter(enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
+        if self.inventory:
+            count_hosts = self.inventory.total_hosts
+        else:
+            count_hosts = 5
         return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1

     def copy(self):
@@ -207,10 +210,20 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):

     def save(self, *args, **kwargs):
         update_fields = kwargs.get('update_fields', [])
+
+        def add_to_update_fields(name):
+            if name not in update_fields:
+                update_fields.append(name)
+
+        if not self.preferred_instance_groups_cache:
+            self.preferred_instance_groups_cache = self._get_preferred_instance_group_cache()
+            add_to_update_fields("preferred_instance_groups_cache")
         if not self.name:
             self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
-            if 'name' not in update_fields:
-                update_fields.append('name')
+            add_to_update_fields("name")
+        if self.task_impact == 0:
+            self.task_impact = self._get_task_impact()
+            add_to_update_fields("task_impact")
         super(AdHocCommand, self).save(*args, **kwargs)

     @property
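The save() change above is the recurring pattern of this diff: compute a derived value once, store it on the row, and record the column in update_fields so partial saves still persist it. A standalone sketch of that bookkeeping (hypothetical class, plain Python rather than a Django model):

# Hypothetical sketch of the save-time caching pattern: a derived value is
# computed once, stored on the object, and added to update_fields so a
# partial save still writes it.
class CachedImpact:
    def __init__(self, forks=0, host_count=0):
        self.forks = forks
        self.host_count = host_count
        self.task_impact = 0

    def _get_task_impact(self):
        # same shape as the AdHocCommand logic: forks default to 5, plus one control process
        return min(self.host_count, 5 if self.forks == 0 else self.forks) + 1

    def save(self, update_fields=None):
        update_fields = update_fields if update_fields is not None else []

        def add_to_update_fields(name):
            if name not in update_fields:
                update_fields.append(name)

        if self.task_impact == 0:
            self.task_impact = self._get_task_impact()
            add_to_update_fields("task_impact")
        return update_fields  # a real model would pass these to super().save()


fields = CachedImpact(forks=0, host_count=20).save()
assert fields == ["task_impact"]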
@@ -316,16 +316,17 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
         user = get_current_user()
         if user and not user.id:
             user = None
-        if not self.pk and not self.created_by:
+        if (not self.pk) and (user is not None) and (not self.created_by):
             self.created_by = user
             if 'created_by' not in update_fields:
                 update_fields.append('created_by')
         # Update modified_by if any editable fields have changed
         new_values = self._get_fields_snapshot()
         if (not self.pk and not self.modified_by) or self._values_have_edits(new_values):
-            self.modified_by = user
-            if 'modified_by' not in update_fields:
-                update_fields.append('modified_by')
+            if self.modified_by != user:
+                self.modified_by = user
+                if 'modified_by' not in update_fields:
+                    update_fields.append('modified_by')
         super(PrimordialModel, self).save(*args, **kwargs)
         self._prior_values_store = new_values

@@ -25,7 +25,6 @@ analytics_logger = logging.getLogger('awx.analytics.job_events')

 logger = logging.getLogger('awx.main.models.events')

-
 __all__ = ['JobEvent', 'ProjectUpdateEvent', 'AdHocCommandEvent', 'InventoryUpdateEvent', 'SystemJobEvent']

@@ -486,13 +485,18 @@ class JobEvent(BasePlaybookEvent):
         editable=False,
         db_index=False,
     )
+    # When we partitioned the table we accidentally "lost" the foreign key constraint.
+    # However this is good because the cascade on delete at the django layer was causing DB issues
+    # We are going to leave this as a foreign key but mark it as not having a DB relation and
+    # prevent cascading on delete.
     host = models.ForeignKey(
         'Host',
         related_name='job_events_as_primary_host',
         null=True,
         default=None,
-        on_delete=models.SET_NULL,
+        on_delete=models.DO_NOTHING,
         editable=False,
+        db_constraint=False,
     )
     host_name = models.CharField(
         max_length=1024,
@@ -794,6 +798,10 @@ class AdHocCommandEvent(BaseCommandEvent):
         editable=False,
         db_index=False,
     )
+    # We need to keep this as a FK in the model because AdHocCommand uses a ManyToMany field
+    # to hosts through adhoc_events. But in https://github.com/ansible/awx/pull/8236/ we
+    # removed the nulling of the field in case of a host going away before an event is saved
+    # so this needs to stay SET_NULL on the ORM level
     host = models.ForeignKey(
         'Host',
         related_name='ad_hoc_command_events',
@@ -801,6 +809,7 @@ class AdHocCommandEvent(BaseCommandEvent):
         default=None,
         on_delete=models.SET_NULL,
         editable=False,
+        db_constraint=False,
     )
     host_name = models.CharField(
         max_length=1024,
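The two host foreign keys now diverge on purpose, per the comments above. A condensed reading, hedged as interpretation rather than AWX source:

# Reading of the two on_delete choices above (illustrative notes, not code from the repo):
#   JobEvent.host:          on_delete=DO_NOTHING, db_constraint=False
#       -> deleting a Host touches neither the DB rows nor the ORM side;
#          event rows may keep a dangling host_id, host_name stays usable.
#   AdHocCommandEvent.host: on_delete=SET_NULL, db_constraint=False
#       -> deleting a Host makes Django null out host_id on related events,
#          preserving the ManyToMany traversal that AdHocCommand relies on.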
@@ -12,6 +12,7 @@ from django.dispatch import receiver
 from django.utils.translation import gettext_lazy as _
 from django.conf import settings
 from django.utils.timezone import now, timedelta
+from django.db.models import Sum

 import redis
 from solo.models import SingletonModel
@@ -149,10 +150,13 @@ class Instance(HasPolicyEditsMixin, BaseModel):
     def consumed_capacity(self):
         capacity_consumed = 0
         if self.node_type in ('hybrid', 'execution'):
-            capacity_consumed += sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')))
+            capacity_consumed += (
+                UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')).aggregate(Sum("task_impact"))["task_impact__sum"]
+                or 0
+            )
         if self.node_type in ('hybrid', 'control'):
-            capacity_consumed += sum(
-                settings.AWX_CONTROL_NODE_TASK_IMPACT for x in UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting'))
+            capacity_consumed += (
+                settings.AWX_CONTROL_NODE_TASK_IMPACT * UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting')).count()
             )
         return capacity_consumed

@@ -203,7 +207,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             return True
         if ref_time is None:
             ref_time = now()
-        grace_period = settings.CLUSTER_NODE_HEARTBEAT_PERIOD * 2
+        grace_period = settings.CLUSTER_NODE_HEARTBEAT_PERIOD * settings.CLUSTER_NODE_MISSED_HEARTBEAT_TOLERANCE
         if self.node_type in ('execution', 'hop'):
             grace_period += settings.RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD
         return self.last_seen < ref_time - timedelta(seconds=grace_period)
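The consumed_capacity() rewrite replaces Python-side iteration with database aggregation: one aggregate query instead of materializing every job row. A sketch of the same shape, assuming any Django queryset whose rows carry a task_impact column:

# Sketch of the aggregation pattern above.
# Python-side:   sum(x.task_impact for x in qs)   -- fetches every row
# Database-side: qs.aggregate(Sum("task_impact")) -- one query, returns a dict
from django.db.models import Sum


def consumed_capacity(qs, control_impact_per_job=0):
    # aggregate() returns {'task_impact__sum': None} for an empty queryset,
    # hence the `or 0` guard mirrored from the code above.
    consumed = qs.aggregate(Sum("task_impact"))["task_impact__sum"] or 0
    # a constant per-job control overhead multiplies by count() instead of looping
    return consumed + control_impact_per_job * qs.count()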
@@ -337,9 +337,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         else:
             active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
         failed_inventory_sources = active_inventory_sources.filter(last_job_failed=True)
+        total_hosts = active_hosts.count()
+        # if total_hosts has changed, set update_task_impact to True
+        update_task_impact = total_hosts != self.total_hosts
         computed_fields = {
             'has_active_failures': bool(failed_hosts.count()),
-            'total_hosts': active_hosts.count(),
+            'total_hosts': total_hosts,
             'hosts_with_active_failures': failed_hosts.count(),
             'total_groups': active_groups.count(),
             'has_inventory_sources': bool(active_inventory_sources.count()),
@@ -357,6 +360,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
                 computed_fields.pop(field)
         if computed_fields:
             iobj.save(update_fields=computed_fields.keys())
+        if update_task_impact:
+            # if total hosts count has changed, re-calculate task_impact for any
+            # job that is still in pending for this inventory, since task_impact
+            # is cached on task creation and used in task management system
+            tasks = self.jobs.filter(status="pending")
+            for t in tasks:
+                t.task_impact = t._get_task_impact()
+            UnifiedJob.objects.bulk_update(tasks, ['task_impact'])
         logger.debug("Finished updating inventory computed fields, pk={0}, in " "{1:.3f} seconds".format(self.pk, time.time() - start_time))

     def websocket_emit_status(self, status):
@@ -1220,8 +1231,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
             return UnpartitionedInventoryUpdateEvent
         return InventoryUpdateEvent

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 1

     # InventoryUpdate credential required
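Because task_impact is now cached at creation, a host-count change must refresh it for still-pending jobs, and bulk_update does that in one statement per batch instead of N individual saves. A sketch using the names from this diff:

# Sketch: recompute a cached column for pending jobs, then write every changed
# row with one bulk_update (model and method names as introduced in this diff).
def refresh_pending_task_impact(inventory, UnifiedJob):
    tasks = inventory.jobs.filter(status="pending")
    for t in tasks:
        t.task_impact = t._get_task_impact()  # recompute from the new host totals
    UnifiedJob.objects.bulk_update(tasks, ['task_impact'])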
@@ -600,6 +600,19 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def get_ui_url(self):
         return urljoin(settings.TOWER_URL_BASE, "/#/jobs/playbook/{}".format(self.pk))

+    def _set_default_dependencies_processed(self):
+        """
+        This sets the initial value of dependencies_processed
+        and here we use this as a shortcut to avoid the DependencyManager for jobs that do not need it
+        """
+        if (not self.project) or self.project.scm_update_on_launch:
+            self.dependencies_processed = False
+        elif (not self.inventory) or self.inventory.inventory_sources.filter(update_on_launch=True).exists():
+            self.dependencies_processed = False
+        else:
+            # No dependencies to process
+            self.dependencies_processed = True
+
     @property
     def event_class(self):
         if self.has_unpartitioned_events:
@@ -644,8 +657,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
             raise ParseError(_('{status_value} is not a valid status option.').format(status_value=status))
         return self._get_hosts(**kwargs)

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         if self.launch_type == 'callback':
             count_hosts = 2
         else:
@@ -847,7 +859,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                     continue
                 host.ansible_facts = ansible_facts
                 host.ansible_facts_modified = now()
-                host.save()
+                host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
                 system_tracking_logger.info(
                     'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                     extra=dict(
@@ -1213,6 +1225,9 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

     extra_vars_dict = VarsDictProperty('extra_vars', True)

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     @classmethod
     def _get_parent_field_name(cls):
         return 'system_job_template'
@@ -1238,8 +1253,7 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):
             return UnpartitionedSystemJobEvent
         return SystemJobEvent

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 5

     @property
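Taken together, the `_set_default_dependencies_processed` hooks answer one question per job type: can the DependencyManager be skipped entirely? For Job it reduces to roughly the following predicate (a simplification that treats the missing-project/missing-inventory edge cases, which the real method also routes to the DependencyManager, as "needs processing"):

# Sketch of the Job decision above: a job needs dependency processing only if
# its project updates on launch or any of its inventory's sources do.
def needs_dependency_manager(project_updates_on_launch, inventory_updates_on_launch):
    if project_updates_on_launch or inventory_updates_on_launch:
        return True   # dependencies_processed = False -> DependencyManager runs first
    return False      # dependencies_processed = True  -> straight to the TaskManager


assert needs_dependency_manager(False, False) is False
assert needs_dependency_manager(True, False) is True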
@@ -513,6 +513,9 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
         help_text=_('The SCM Revision discovered by this update for the given project and branch.'),
     )

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     def _get_parent_field_name(self):
         return 'project'

@@ -560,8 +563,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
             return UnpartitionedProjectUpdateEvent
         return ProjectUpdateEvent

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 0 if self.job_type == 'run' else 1

     @property
@@ -45,7 +45,8 @@ from awx.main.utils.common import (
     get_type_for_model,
     parse_yaml_or_json,
     getattr_dne,
-    schedule_task_manager,
+    ScheduleDependencyManager,
+    ScheduleTaskManager,
     get_event_partition_epoch,
     get_capacity_type,
 )
@@ -381,6 +382,11 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
             unified_job.survey_passwords = new_job_passwords
             kwargs['survey_passwords'] = new_job_passwords  # saved in config object for relaunch

+        unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache()
+
+        unified_job._set_default_dependencies_processed()
+        unified_job.task_impact = unified_job._get_task_impact()
+
         from awx.main.signals import disable_activity_stream, activity_stream_create

         with disable_activity_stream():
@@ -693,6 +699,14 @@ class UnifiedJob(
         on_delete=polymorphic.SET_NULL,
         help_text=_('The Instance group the job was run under'),
     )
+    preferred_instance_groups_cache = models.JSONField(
+        blank=True,
+        null=True,
+        default=None,
+        editable=False,
+        help_text=_("A cached list with pk values from preferred instance groups."),
+    )
+    task_impact = models.PositiveIntegerField(default=0, editable=False, help_text=_("Number of forks an instance consumes when running this job."))
     organization = models.ForeignKey(
         'Organization',
         blank=True,
@@ -754,6 +768,9 @@ class UnifiedJob(
     def _get_parent_field_name(self):
         return 'unified_job_template'  # Override in subclasses.

+    def _get_preferred_instance_group_cache(self):
+        return [ig.pk for ig in self.preferred_instance_groups]
+
     @classmethod
     def _get_unified_job_template_class(cls):
         """
@@ -808,6 +825,9 @@ class UnifiedJob(
         update_fields = self._update_parent_instance_no_save(parent_instance)
         parent_instance.save(update_fields=update_fields)

+    def _set_default_dependencies_processed(self):
+        pass
+
     def save(self, *args, **kwargs):
         """Save the job, with current status, to the database.
         Ensure that all data is consistent before doing so.
@@ -821,7 +841,8 @@ class UnifiedJob(

         # If this job already exists in the database, retrieve a copy of
         # the job in its prior state.
-        if self.pk:
+        # If update_fields are given without status, then that indicates no change
+        if self.pk and ((not update_fields) or ('status' in update_fields)):
             self_before = self.__class__.objects.get(pk=self.pk)
             if self_before.status != self.status:
                 status_before = self_before.status
@@ -1026,7 +1047,6 @@ class UnifiedJob(
             event_qs = self.get_event_queryset()
         except NotImplementedError:
             return True  # Model without events, such as WFJT
-        self.log_lifecycle("event_processing_finished")
         return self.emitted_events == event_qs.count()

     def result_stdout_raw_handle(self, enforce_max_bytes=True):
@@ -1241,9 +1261,8 @@ class UnifiedJob(
         except JobLaunchConfig.DoesNotExist:
             return False

-    @property
-    def task_impact(self):
-        raise NotImplementedError  # Implement in subclass.
+    def _get_task_impact(self):
+        return self.task_impact  # return default, should implement in subclass.

     def websocket_emit_data(self):
         '''Return extra data that should be included when submitting data to the browser over the websocket connection'''
@@ -1255,7 +1274,7 @@ class UnifiedJob(
     def _websocket_emit_status(self, status):
         try:
             status_data = dict(unified_job_id=self.id, status=status)
-            if status == 'waiting':
+            if status == 'running':
                 if self.instance_group:
                     status_data['instance_group_name'] = self.instance_group.name
                 else:
@@ -1358,7 +1377,10 @@ class UnifiedJob(
         self.update_fields(start_args=json.dumps(kwargs), status='pending')
         self.websocket_emit_status("pending")

-        schedule_task_manager()
+        if self.dependencies_processed:
+            ScheduleTaskManager().schedule()
+        else:
+            ScheduleDependencyManager().schedule()

         # Each type of unified job has a different Task class; get the
         # appropirate one.
@@ -1515,8 +1537,8 @@ class UnifiedJob(
             'state': state,
             'work_unit_id': self.work_unit_id,
         }
-        if self.unified_job_template:
-            extra["template_name"] = self.unified_job_template.name
+        if self.name:
+            extra["task_name"] = self.name
         if state == "blocked" and blocked_by:
             blocked_by_msg = f"{blocked_by._meta.model_name}-{blocked_by.id}"
             msg = f"{self._meta.model_name}-{self.id} blocked by {blocked_by_msg}"
@@ -1528,7 +1550,7 @@ class UnifiedJob(
             extra["controller_node"] = self.controller_node or "NOT_SET"
         elif state == "execution_node_chosen":
             extra["execution_node"] = self.execution_node or "NOT_SET"
-        logger_job_lifecycle.debug(msg, extra=extra)
+        logger_job_lifecycle.info(msg, extra=extra)

     @property
     def launched_by(self):
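The signal_start() change is the dispatch point for the whole refactor: a job either goes straight to the task manager or first to the dependency manager. A sketch of that fork, assuming the two scheduler classes introduced in this diff:

# Sketch of the new dispatch in UnifiedJob.signal_start: jobs whose
# dependencies are already settled skip the DependencyManager entirely.
def schedule_for(job, ScheduleTaskManager, ScheduleDependencyManager):
    if job.dependencies_processed:
        ScheduleTaskManager().schedule()        # ready to be placed on an instance
    else:
        ScheduleDependencyManager().schedule()  # must spawn project/inventory updates first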
@@ -13,6 +13,7 @@ from django.db import connection, models
 from django.conf import settings
 from django.utils.translation import gettext_lazy as _
 from django.core.exceptions import ObjectDoesNotExist
+from django.utils.timezone import now, timedelta

 # from django import settings as tower_settings

@@ -40,7 +41,7 @@ from awx.main.models.mixins import (
 from awx.main.models.jobs import LaunchTimeConfigBase, LaunchTimeConfig, JobTemplate
 from awx.main.models.credential import Credential
 from awx.main.redact import REPLACE_STR
-from awx.main.utils import schedule_task_manager
+from awx.main.utils import ScheduleWorkflowManager


 __all__ = [
@@ -622,6 +623,9 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
     )
     is_sliced_job = models.BooleanField(default=False)

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     @property
     def workflow_nodes(self):
         return self.workflow_job_nodes
@@ -668,8 +672,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
         )
         return result

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 0

     def get_ancestor_workflows(self):
@@ -783,6 +786,12 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         default=0,
         help_text=_("The amount of time (in seconds) before the approval node expires and fails."),
     )
+    expires = models.DateTimeField(
+        default=None,
+        null=True,
+        editable=False,
+        help_text=_("The time this approval will expire. This is the created time plus timeout, used for filtering."),
+    )
     timed_out = models.BooleanField(default=False, help_text=_("Shows when an approval node (with a timeout assigned to it) has timed out."))
     approved_or_denied_by = models.ForeignKey(
         'auth.User',
@@ -793,6 +802,9 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         on_delete=models.SET_NULL,
     )

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     @classmethod
     def _get_unified_job_template_class(cls):
         return WorkflowApprovalTemplate
@@ -810,13 +822,32 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
     def _get_parent_field_name(self):
         return 'workflow_approval_template'

+    def save(self, *args, **kwargs):
+        update_fields = list(kwargs.get('update_fields', []))
+        if self.timeout != 0 and ((not self.pk) or (not update_fields) or ('timeout' in update_fields)):
+            if not self.created:  # on creation, created will be set by parent class, so we fudge it here
+                created = now()
+            else:
+                created = self.created
+            new_expires = created + timedelta(seconds=self.timeout)
+            if new_expires != self.expires:
+                self.expires = new_expires
+                if update_fields and 'expires' not in update_fields:
+                    update_fields.append('expires')
+        elif self.timeout == 0 and ((not update_fields) or ('timeout' in update_fields)):
+            if self.expires:
+                self.expires = None
+                if update_fields and 'expires' not in update_fields:
+                    update_fields.append('expires')
+        super(WorkflowApproval, self).save(*args, **kwargs)
+
     def approve(self, request=None):
         self.status = 'successful'
         self.approved_or_denied_by = get_current_user()
         self.save()
         self.send_approval_notification('approved')
         self.websocket_emit_status(self.status)
-        schedule_task_manager()
+        ScheduleWorkflowManager().schedule()
         return reverse('api:workflow_approval_approve', kwargs={'pk': self.pk}, request=request)

     def deny(self, request=None):
@@ -825,7 +856,7 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         self.save()
         self.send_approval_notification('denied')
         self.websocket_emit_status(self.status)
-        schedule_task_manager()
+        ScheduleWorkflowManager().schedule()
         return reverse('api:workflow_approval_deny', kwargs={'pk': self.pk}, request=request)

     def signal_start(self, **kwargs):
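Persisting `expires = created + timeout` trades per-row arithmetic at query time for a plain, filterable column. A sketch of the computation and the filter it enables:

# Sketch: computing and filtering on the new WorkflowApproval.expires column.
from datetime import datetime, timedelta, timezone


def compute_expires(created, timeout_seconds):
    # a timeout of 0 means "never expires" in this diff, modeled as None
    if timeout_seconds == 0:
        return None
    return created + timedelta(seconds=timeout_seconds)


created = datetime(2022, 7, 6, 13, 19, tzinfo=timezone.utc)
assert compute_expires(created, 0) is None
assert compute_expires(created, 60) == created + timedelta(seconds=60)
# the task manager can then select expired approvals with roughly:
#   WorkflowApproval.objects.filter(status='pending', expires__lte=now)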
@@ -1,6 +1,6 @@
 # Copyright (c) 2017 Ansible, Inc.
 #

-from .task_manager import TaskManager
+from .task_manager import TaskManager, DependencyManager, WorkflowManager

-__all__ = ['TaskManager']
+__all__ = ['TaskManager', 'DependencyManager', 'WorkflowManager']
@@ -7,6 +7,11 @@ from awx.main.models import (
     WorkflowJob,
 )

+import logging
+
+
+logger = logging.getLogger('awx.main.scheduler.dependency_graph')
+

 class DependencyGraph(object):
     PROJECT_UPDATES = 'project_updates'
@@ -36,6 +41,9 @@ class DependencyGraph(object):
         self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {}

     def mark_if_no_key(self, job_type, id, job):
+        if id is None:
+            logger.warning(f'Null dependency graph key from {job}, could be integrity error or bug, ignoring')
+            return
         # only mark first occurrence of a task. If 10 of JobA are launched
         # (concurrent disabled), the dependency graph should return that jobs
         # 2 through 10 are blocked by job1
@@ -66,7 +74,10 @@ class DependencyGraph(object):
         self.mark_if_no_key(self.JOB_TEMPLATE_JOBS, job.job_template_id, job)

     def mark_workflow_job(self, job):
-        self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)
+        if job.workflow_job_template_id:
+            self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)
+        elif job.unified_job_template_id:  # for sliced jobs
+            self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.unified_job_template_id, job)

     def project_update_blocked_by(self, job):
         return self.get_item(self.PROJECT_UPDATES, job.project_id)
@@ -85,7 +96,13 @@ class DependencyGraph(object):

     def workflow_job_blocked_by(self, job):
         if job.allow_simultaneous is False:
-            return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
+            if job.workflow_job_template_id:
+                return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
+            elif job.unified_job_template_id:
+                # Sliced jobs can be either Job or WorkflowJob type, and either should block a sliced WorkflowJob
+                return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.unified_job_template_id) or self.get_item(
+                    self.JOB_TEMPLATE_JOBS, job.unified_job_template_id
+                )
         return None

     def system_job_blocked_by(self, job):
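The dependency graph's rule is first-occurrence-wins: the first pending task recorded under a key becomes the blocker for every later task with the same key. A stripped-down sketch of that behavior (hypothetical minimal class, not the AWX implementation):

# Sketch of the DependencyGraph blocking rule: the first task marked under a
# key wins; everything after it is "blocked by" that first task.
class MiniGraph:
    def __init__(self):
        self.data = {}

    def mark_if_no_key(self, key, job):
        if key is None:
            return              # mirrors the new null-key guard above
        self.data.setdefault(key, job)

    def get_item(self, key):
        return self.data.get(key)


g = MiniGraph()
g.mark_if_no_key(42, "job-1")
g.mark_if_no_key(42, "job-2")     # ignored: key already marked
assert g.get_item(42) == "job-1"  # job-2 would report job-1 as its blocker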
@@ -11,31 +11,35 @@ import sys
 import signal

 # Django
-from django.db import transaction, connection
+from django.db import transaction
 from django.utils.translation import gettext_lazy as _, gettext_noop
 from django.utils.timezone import now as tz_now
 from django.conf import settings
+from django.contrib.contenttypes.models import ContentType

 # AWX
 from awx.main.dispatch.reaper import reap_job
 from awx.main.models import (
     AdHocCommand,
     Instance,
     InventorySource,
     InventoryUpdate,
     Job,
     Project,
     ProjectUpdate,
     SystemJob,
     UnifiedJob,
     WorkflowApproval,
     WorkflowJob,
+    WorkflowJobNode,
     WorkflowJobTemplate,
 )
 from awx.main.scheduler.dag_workflow import WorkflowDAG
 from awx.main.utils.pglock import advisory_lock
-from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
-from awx.main.utils.common import create_partition
+from awx.main.utils import (
+    get_type_for_model,
+    ScheduleTaskManager,
+    ScheduleWorkflowManager,
+)
+from awx.main.utils.common import task_manager_bulk_reschedule
 from awx.main.signals import disable_activity_stream
 from awx.main.constants import ACTIVE_STATES
 from awx.main.scheduler.dependency_graph import DependencyGraph
@@ -53,167 +57,101 @@ def timeit(func):
         t_now = time.perf_counter()
         result = func(*args, **kwargs)
         dur = time.perf_counter() - t_now
-        args[0].subsystem_metrics.inc("task_manager_" + func.__name__ + "_seconds", dur)
+        args[0].subsystem_metrics.inc(f"{args[0].prefix}_{func.__name__}_seconds", dur)
         return result

     return inner


-class TaskManager:
-    def __init__(self):
-        """
-        Do NOT put database queries or other potentially expensive operations
-        in the task manager init. The task manager object is created every time a
-        job is created, transitions state, and every 30 seconds on each tower node.
-        More often then not, the object is destroyed quickly because the NOOP case is hit.
-
-        The NOOP case is short-circuit logic. If the task manager realizes that another instance
-        of the task manager is already running, then it short-circuits and decides not to run.
-        """
-        # start task limit indicates how many pending jobs can be started on this
-        # .schedule() run. Starting jobs is expensive, and there is code in place to reap
-        # the task manager after 5 minutes. At scale, the task manager can easily take more than
-        # 5 minutes to start pending jobs. If this limit is reached, pending jobs
-        # will no longer be started and will be started on the next task manager cycle.
-        self.start_task_limit = settings.START_TASK_LIMIT
-        self.time_delta_job_explanation = timedelta(seconds=30)
-        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
+class TaskBase:
+    def __init__(self, prefix=""):
+        self.prefix = prefix
+        # initialize each metric to 0 and force metric_has_changed to true. This
+        # ensures each task manager metric will be overridden when pipe_execute
+        # is called later.
+        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
+        self.start_time = time.time()
+        self.start_task_limit = settings.START_TASK_LIMIT
         for m in self.subsystem_metrics.METRICS:
-            if m.startswith("task_manager"):
+            if m.startswith(self.prefix):
                 self.subsystem_metrics.set(m, 0)

-    def after_lock_init(self, all_sorted_tasks):
-        """
-        Init AFTER we know this instance of the task manager will run because the lock is acquired.
-        """
-        self.dependency_graph = DependencyGraph()
-        self.instances = TaskManagerInstances(all_sorted_tasks)
-        self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
-        self.controlplane_ig = self.instance_groups.controlplane_ig
-
-    def job_blocked_by(self, task):
-        # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
-        # in the old task manager this was handled as a method on each task object outside of the graph and
-        # probably has the side effect of cutting down *a lot* of the logic from this task manager class
-        blocked_by = self.dependency_graph.task_blocked_by(task)
-        if blocked_by:
-            return blocked_by
-
-        for dep in task.dependent_jobs.all():
-            if dep.status in ACTIVE_STATES:
-                return dep
-            # if we detect a failed or error dependency, go ahead and fail this
-            # task. The errback on the dependency takes some time to trigger,
-            # and we don't want the task to enter running state if its
-            # dependency has failed or errored.
-            elif dep.status in ("error", "failed"):
-                task.status = 'failed'
-                task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
-                    get_type_for_model(type(dep)),
-                    dep.name,
-                    dep.id,
-                )
-                task.save(update_fields=['status', 'job_explanation'])
-                task.websocket_emit_status('failed')
-                return dep
-
-        return None
+    def timed_out(self):
+        """Return True/False if we have met or exceeded the timeout for the task manager."""
+        elapsed = time.time() - self.start_time
+        if elapsed >= settings.TASK_MANAGER_TIMEOUT:
+            logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.")
+            return True
+        return False

     @timeit
-    def get_tasks(self, status_list=('pending', 'waiting', 'running')):
-        jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
-        inventory_updates_qs = (
-            InventoryUpdate.objects.filter(status__in=status_list).exclude(source='file').prefetch_related('inventory_source', 'instance_group')
-        )
-        inventory_updates = [i for i in inventory_updates_qs]
-        # Notice the job_type='check': we want to prevent implicit project updates from blocking our jobs.
-        project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list, job_type='check').prefetch_related('instance_group')]
-        system_jobs = [s for s in SystemJob.objects.filter(status__in=status_list).prefetch_related('instance_group')]
-        ad_hoc_commands = [a for a in AdHocCommand.objects.filter(status__in=status_list).prefetch_related('instance_group')]
-        workflow_jobs = [w for w in WorkflowJob.objects.filter(status__in=status_list)]
-        all_tasks = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands + workflow_jobs, key=lambda task: task.created)
-        return all_tasks
+    def get_tasks(self, filter_args):
+        wf_approval_ctype_id = ContentType.objects.get_for_model(WorkflowApproval).id
+        qs = (
+            UnifiedJob.objects.filter(**filter_args)
+            .exclude(launch_type='sync')
+            .exclude(polymorphic_ctype_id=wf_approval_ctype_id)
+            .order_by('created')
+            .prefetch_related('dependent_jobs')
+        )
+        self.all_tasks = [t for t in qs]

-    def get_running_workflow_jobs(self):
-        graph_workflow_jobs = [wf for wf in WorkflowJob.objects.filter(status='running')]
-        return graph_workflow_jobs
+    def record_aggregate_metrics(self, *args):
+        if not settings.IS_TESTING():
+            # increment task_manager_schedule_calls regardless if the other
+            # metrics are recorded
+            s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
+            # Only record metrics if the last time recording was more
+            # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
+            # Prevents a short-duration task manager that runs directly after a
+            # long task manager to override useful metrics.
+            current_time = time.time()
+            time_last_recorded = current_time - self.subsystem_metrics.decode(f"{self.prefix}_recorded_timestamp")
+            if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
+                logger.debug(f"recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
+                self.subsystem_metrics.set(f"{self.prefix}_recorded_timestamp", current_time)
+                self.subsystem_metrics.pipe_execute()
+            else:
+                logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")

-    def get_inventory_source_tasks(self, all_sorted_tasks):
-        inventory_ids = set()
-        for task in all_sorted_tasks:
-            if isinstance(task, Job):
-                inventory_ids.add(task.inventory_id)
-        return [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
+    def record_aggregate_metrics_and_exit(self, *args):
+        self.record_aggregate_metrics()
+        sys.exit(1)
+
+    def schedule(self):
+        # Lock
+        with task_manager_bulk_reschedule():
+            with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
+                with transaction.atomic():
+                    if acquired is False:
+                        logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
+                        return
+                    logger.debug(f"Starting {self.prefix} Scheduler")
+                    # if sigterm due to timeout, still record metrics
+                    signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
+                    self._schedule()
+                    commit_start = time.time()
+
+            if self.prefix == "task_manager":
+                self.subsystem_metrics.set(f"{self.prefix}_commit_seconds", time.time() - commit_start)
+            self.record_aggregate_metrics()
+            logger.debug(f"Finishing {self.prefix} Scheduler")
+
+
+class WorkflowManager(TaskBase):
+    def __init__(self):
+        super().__init__(prefix="workflow_manager")

     @timeit
-    def spawn_workflow_graph_jobs(self, workflow_jobs):
-        for workflow_job in workflow_jobs:
-            if workflow_job.cancel_flag:
-                logger.debug('Not spawning jobs for %s because it is pending cancelation.', workflow_job.log_format)
-                continue
-            dag = WorkflowDAG(workflow_job)
-            spawn_nodes = dag.bfs_nodes_to_run()
-            if spawn_nodes:
-                logger.debug('Spawning jobs for %s', workflow_job.log_format)
-            else:
-                logger.debug('No nodes to spawn for %s', workflow_job.log_format)
-            for spawn_node in spawn_nodes:
-                if spawn_node.unified_job_template is None:
-                    continue
-                kv = spawn_node.get_job_kwargs()
-                job = spawn_node.unified_job_template.create_unified_job(**kv)
-                spawn_node.job = job
-                spawn_node.save()
-                logger.debug('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
-                can_start = True
-                if isinstance(spawn_node.unified_job_template, WorkflowJobTemplate):
-                    workflow_ancestors = job.get_ancestor_workflows()
-                    if spawn_node.unified_job_template in set(workflow_ancestors):
-                        can_start = False
-                        logger.info(
-                            'Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
-                                job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
-                            )
-                        )
-                        display_list = [spawn_node.unified_job_template] + workflow_ancestors
-                        job.job_explanation = gettext_noop(
-                            "Workflow Job spawned from workflow could not start because it " "would result in recursion (spawn order, most recent first: {})"
-                        ).format(', '.join(['<{}>'.format(tmp) for tmp in display_list]))
-                    else:
-                        logger.debug(
-                            'Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
-                                job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
-                            )
-                        )
-                if not job._resources_sufficient_for_launch():
-                    can_start = False
-                    job.job_explanation = gettext_noop(
-                        "Job spawned from workflow could not start because it " "was missing a related resource such as project or inventory"
-                    )
-                if can_start:
-                    if workflow_job.start_args:
-                        start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
-                    else:
-                        start_args = {}
-                    can_start = job.signal_start(**start_args)
-                    if not can_start:
-                        job.job_explanation = gettext_noop(
-                            "Job spawned from workflow could not start because it " "was not in the right state or required manual credentials"
-                        )
-                if not can_start:
-                    job.status = 'failed'
-                    job.save(update_fields=['status', 'job_explanation'])
-                    job.websocket_emit_status('failed')
-
-        # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
-        # emit_websocket_notification('/socket.io/jobs', '', dict(id=))
-
-    def process_finished_workflow_jobs(self, workflow_jobs):
+    def spawn_workflow_graph_jobs(self):
         result = []
-        for workflow_job in workflow_jobs:
+        for workflow_job in self.all_tasks:
+            if self.timed_out():
+                logger.warning("Workflow manager has reached time out while processing running workflows, exiting loop early")
+                # Do not process any more workflow jobs. Stop here.
+                # Maybe we should schedule another WorkflowManager run
+                ScheduleWorkflowManager().schedule()
+                break
             dag = WorkflowDAG(workflow_job)
             status_changed = False
             if workflow_job.cancel_flag:
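The hunk above establishes the shared skeleton: every manager gets prefixed metrics, a per-prefix advisory lock, and a wall-clock timeout. A compressed sketch of that skeleton, with stand-ins for the lock and settings:

# Sketch of the TaskBase pattern: subclasses differ only in their prefix and
# their _schedule() body; the lock name derives from the prefix, so the three
# managers never block one another.
import time


class MiniTaskBase:
    TIMEOUT = 300  # stand-in for settings.TASK_MANAGER_TIMEOUT

    def __init__(self, prefix=""):
        self.prefix = prefix
        self.start_time = time.time()

    def timed_out(self):
        return (time.time() - self.start_time) >= self.TIMEOUT

    def schedule(self, try_lock):
        # try_lock stands in for advisory_lock(f"{self.prefix}_lock", wait=False)
        if not try_lock(f"{self.prefix}_lock"):
            return  # another instance of this manager is already running
        self._schedule()


class MiniWorkflowManager(MiniTaskBase):
    def __init__(self):
        super().__init__(prefix="workflow_manager")

    def _schedule(self):
        pass  # real code: fetch running workflow jobs and spawn ready nodes


MiniWorkflowManager().schedule(try_lock=lambda name: True)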
@@ -228,99 +166,106 @@ class TaskManager:
                 status_changed = True
             else:
                 workflow_nodes = dag.mark_dnr_nodes()
-                for n in workflow_nodes:
-                    n.save(update_fields=['do_not_run'])
+                WorkflowJobNode.objects.bulk_update(workflow_nodes, ['do_not_run'])
                 # If workflow is now done, we do special things to mark it as done.
                 is_done = dag.is_workflow_done()
-                if not is_done:
-                    continue
-                has_failed, reason = dag.has_workflow_failed()
-                logger.debug('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
-                result.append(workflow_job.id)
-                new_status = 'failed' if has_failed else 'successful'
-                logger.debug("Transitioning {} to {} status.".format(workflow_job.log_format, new_status))
-                update_fields = ['status', 'start_args']
-                workflow_job.status = new_status
-                if reason:
-                    logger.info(f'Workflow job {workflow_job.id} failed due to reason: {reason}')
-                    workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
-                    update_fields.append('job_explanation')
-                workflow_job.start_args = ''  # blank field to remove encrypted passwords
-                workflow_job.save(update_fields=update_fields)
-                status_changed = True
+                if is_done:
+                    has_failed, reason = dag.has_workflow_failed()
+                    logger.debug('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
+                    result.append(workflow_job.id)
+                    new_status = 'failed' if has_failed else 'successful'
+                    logger.debug("Transitioning {} to {} status.".format(workflow_job.log_format, new_status))
+                    update_fields = ['status', 'start_args']
+                    workflow_job.status = new_status
+                    if reason:
+                        logger.info(f'Workflow job {workflow_job.id} failed due to reason: {reason}')
+                        workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
+                        update_fields.append('job_explanation')
+                    workflow_job.start_args = ''  # blank field to remove encrypted passwords
+                    workflow_job.save(update_fields=update_fields)
+                    status_changed = True

             if status_changed:
                 if workflow_job.spawned_by_workflow:
-                    schedule_task_manager()
+                    ScheduleWorkflowManager().schedule()
                 workflow_job.websocket_emit_status(workflow_job.status)
                 # Operations whose queries rely on modifications made during the atomic scheduling session
                 workflow_job.send_notification_templates('succeeded' if workflow_job.status == 'successful' else 'failed')

+            if workflow_job.status == 'running':
+                spawn_nodes = dag.bfs_nodes_to_run()
+                if spawn_nodes:
+                    logger.debug('Spawning jobs for %s', workflow_job.log_format)
+                else:
+                    logger.debug('No nodes to spawn for %s', workflow_job.log_format)
+                for spawn_node in spawn_nodes:
+                    if spawn_node.unified_job_template is None:
+                        continue
+                    kv = spawn_node.get_job_kwargs()
+                    job = spawn_node.unified_job_template.create_unified_job(**kv)
+                    spawn_node.job = job
+                    spawn_node.save()
+                    logger.debug('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
+                    can_start = True
+                    if isinstance(spawn_node.unified_job_template, WorkflowJobTemplate):
+                        workflow_ancestors = job.get_ancestor_workflows()
+                        if spawn_node.unified_job_template in set(workflow_ancestors):
+                            can_start = False
+                            logger.info(
+                                'Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
+                                    job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
+                                )
+                            )
+                            display_list = [spawn_node.unified_job_template] + workflow_ancestors
+                            job.job_explanation = gettext_noop(
+                                "Workflow Job spawned from workflow could not start because it "
+                                "would result in recursion (spawn order, most recent first: {})"
+                            ).format(', '.join('<{}>'.format(tmp) for tmp in display_list))
+                        else:
+                            logger.debug(
+                                'Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
+                                    job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
+                                )
+                            )
+                    if not job._resources_sufficient_for_launch():
+                        can_start = False
+                        job.job_explanation = gettext_noop(
+                            "Job spawned from workflow could not start because it was missing a related resource such as project or inventory"
+                        )
+                    if can_start:
+                        if workflow_job.start_args:
+                            start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
+                        else:
+                            start_args = {}
+                        can_start = job.signal_start(**start_args)
+                        if not can_start:
+                            job.job_explanation = gettext_noop(
+                                "Job spawned from workflow could not start because it was not in the right state or required manual credentials"
+                            )
+                    if not can_start:
+                        job.status = 'failed'
+                        job.save(update_fields=['status', 'job_explanation'])
+                        job.websocket_emit_status('failed')
+
+            # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
+            # emit_websocket_notification('/socket.io/jobs', '', dict(id=))
+
         return result

-    @timeit
-    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
-        self.subsystem_metrics.inc("task_manager_tasks_started", 1)
-        self.start_task_limit -= 1
-        if self.start_task_limit == 0:
-            # schedule another run immediately after this task manager
-            schedule_task_manager()
-        from awx.main.tasks.system import handle_work_error, handle_work_success
-
-        dependent_tasks = dependent_tasks or []
-
-        task_actual = {
-            'type': get_type_for_model(type(task)),
-            'id': task.id,
-        }
-        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
-
-        task.status = 'waiting'
-
-        (start_status, opts) = task.pre_start()
-        if not start_status:
-            task.status = 'failed'
-            if task.job_explanation:
-                task.job_explanation += ' '
-            task.job_explanation += 'Task failed pre-start check.'
-            task.save()
-            # TODO: run error handler to fail sub-tasks and send notifications
-        else:
-            if type(task) is WorkflowJob:
-                task.status = 'running'
-                task.send_notification_templates('running')
-                logger.debug('Transitioning %s to running status.', task.log_format)
-                schedule_task_manager()
-            # at this point we already have control/execution nodes selected for the following cases
-            else:
-                task.instance_group = instance_group
-                execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
-                logger.debug(
-                    f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
-                )
-        with disable_activity_stream():
-            task.celery_task_id = str(uuid.uuid4())
-            task.save()
-            task.log_lifecycle("waiting")
-
-        def post_commit():
-            if task.status != 'failed' and type(task) is not WorkflowJob:
-                # Before task is dispatched, ensure that job_event partitions exist
-                create_partition(task.event_class._meta.db_table, start=task.created)
-                task_cls = task._get_task_class()
-                task_cls.apply_async(
-                    [task.pk],
-                    opts,
-                    queue=task.get_queue_name(),
-                    uuid=task.celery_task_id,
-                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
-                    errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
-                )
-
-        task.websocket_emit_status(task.status)  # adds to on_commit
-        connection.on_commit(post_commit)
+    def get_tasks(self, filter_args):
+        self.all_tasks = [wf for wf in WorkflowJob.objects.filter(**filter_args)]

-    @timeit
-    def process_running_tasks(self, running_tasks):
-        for task in running_tasks:
-            self.dependency_graph.add_job(task)
+    def _schedule(self):
+        self.get_tasks(dict(status__in=["running"], dependencies_processed=True))
+        if len(self.all_tasks) > 0:
+            self.spawn_workflow_graph_jobs()
+
+
+class DependencyManager(TaskBase):
+    def __init__(self):
+        super().__init__(prefix="dependency_manager")

     def create_project_update(self, task, project_id=None):
         if project_id is None:
@@ -341,14 +286,20 @@ class TaskManager:
             inventory_task.status = 'pending'
             inventory_task.save()
             logger.debug('Spawned {} as dependency of {}'.format(inventory_task.log_format, task.log_format))
-        # inventory_sources = self.get_inventory_source_tasks([task])
-        # self.process_inventory_sources(inventory_sources)

         return inventory_task

+    def add_dependencies(self, task, dependencies):
+        with disable_activity_stream():
+            task.dependent_jobs.add(*dependencies)
+
+    def get_inventory_source_tasks(self):
+        inventory_ids = set()
+        for task in self.all_tasks:
+            if isinstance(task, Job):
+                inventory_ids.add(task.inventory_id)
+        self.all_inventory_sources = [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
+
     def get_latest_inventory_update(self, inventory_source):
         latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
         if not latest_inventory_update.exists():
@@ -481,16 +432,167 @@ class TaskManager:

         return created_dependencies

+    def process_tasks(self):
+        deps = self.generate_dependencies(self.all_tasks)
+        self.generate_dependencies(deps)
+        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(deps))
+
+    @timeit
+    def _schedule(self):
+        self.get_tasks(dict(status__in=["pending"], dependencies_processed=False))
+
+        if len(self.all_tasks) > 0:
+            self.get_inventory_source_tasks()
+            self.process_tasks()
+            ScheduleTaskManager().schedule()
+
+
+class TaskManager(TaskBase):
+    def __init__(self):
+        """
+        Do NOT put database queries or other potentially expensive operations
+        in the task manager init. The task manager object is created every time a
+        job is created, transitions state, and every 30 seconds on each tower node.
+        More often then not, the object is destroyed quickly because the NOOP case is hit.
+
+        The NOOP case is short-circuit logic. If the task manager realizes that another instance
+        of the task manager is already running, then it short-circuits and decides not to run.
+        """
+        # start task limit indicates how many pending jobs can be started on this
+        # .schedule() run. Starting jobs is expensive, and there is code in place to reap
+        # the task manager after 5 minutes. At scale, the task manager can easily take more than
+        # 5 minutes to start pending jobs. If this limit is reached, pending jobs
+        # will no longer be started and will be started on the next task manager cycle.
+        self.time_delta_job_explanation = timedelta(seconds=30)
+        super().__init__(prefix="task_manager")
+
+    def after_lock_init(self):
+        """
+        Init AFTER we know this instance of the task manager will run because the lock is acquired.
+        """
+        self.dependency_graph = DependencyGraph()
+        self.instances = TaskManagerInstances(self.all_tasks)
+        self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
+        self.controlplane_ig = self.instance_groups.controlplane_ig
+
+    def job_blocked_by(self, task):
+        # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
+        # in the old task manager this was handled as a method on each task object outside of the graph and
+        # probably has the side effect of cutting down *a lot* of the logic from this task manager class
+        blocked_by = self.dependency_graph.task_blocked_by(task)
+        if blocked_by:
+            return blocked_by
+
+        for dep in task.dependent_jobs.all():
+            if dep.status in ACTIVE_STATES:
+                return dep
+            # if we detect a failed or error dependency, go ahead and fail this
+            # task. The errback on the dependency takes some time to trigger,
+            # and we don't want the task to enter running state if its
+            # dependency has failed or errored.
+            elif dep.status in ("error", "failed"):
+                task.status = 'failed'
+                task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
+                    get_type_for_model(type(dep)),
+                    dep.name,
+                    dep.id,
+                )
+                task.save(update_fields=['status', 'job_explanation'])
+                task.websocket_emit_status('failed')
+                return dep
+
+        return None
+
+    @timeit
+    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
+        self.dependency_graph.add_job(task)
+        self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
+        self.start_task_limit -= 1
+        if self.start_task_limit == 0:
+            # schedule another run immediately after this task manager
+            ScheduleTaskManager().schedule()
+        from awx.main.tasks.system import handle_work_error, handle_work_success
+
+        # update capacity for control node and execution node
+        if task.controller_node:
+            self.instances[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
+        if task.execution_node:
+            self.instances[task.execution_node].consume_capacity(task.task_impact)
+
+        dependent_tasks = dependent_tasks or []
+
+        task_actual = {
+            'type': get_type_for_model(type(task)),
+            'id': task.id,
+        }
+        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
+
+        task.status = 'waiting'
+
+        (start_status, opts) = task.pre_start()
+        if not start_status:
+            task.status = 'failed'
+            if task.job_explanation:
+                task.job_explanation += ' '
+            task.job_explanation += 'Task failed pre-start check.'
+            task.save()
+            # TODO: run error handler to fail sub-tasks and send notifications
+        else:
+            if type(task) is WorkflowJob:
+                task.status = 'running'
+                task.send_notification_templates('running')
+                logger.debug('Transitioning %s to running status.', task.log_format)
+                # Call this to ensure Workflow nodes get spawned in timely manner
+                ScheduleWorkflowManager().schedule()
+            # at this point we already have control/execution nodes selected for the following cases
+            else:
+                task.instance_group = instance_group
+                execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
+                logger.debug(
+                    f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
+                )
+        with disable_activity_stream():
+            task.celery_task_id = str(uuid.uuid4())
+            task.save()
+            task.log_lifecycle("waiting")
+
+        # apply_async does a NOTIFY to the channel dispatcher is listening to
+        # postgres will treat this as part of the transaction, which is what we want
+        if task.status != 'failed' and type(task) is not WorkflowJob:
+            task_cls = task._get_task_class()
+            task_cls.apply_async(
+                [task.pk],
+                opts,
+                queue=task.get_queue_name(),
+                uuid=task.celery_task_id,
+                callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
+                errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
+            )
+
+        # In exception cases, like a job failing pre-start checks, we send the websocket status message
+        # for jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
+        if task.status != 'waiting':
+            task.websocket_emit_status(task.status)  # adds to on_commit
+
+    @timeit
+    def process_running_tasks(self, running_tasks):
+        for task in running_tasks:
+            if type(task) is WorkflowJob:
+                ScheduleWorkflowManager().schedule()
+            self.dependency_graph.add_job(task)

     @timeit
     def process_pending_tasks(self, pending_tasks):
-        running_workflow_templates = {wf.unified_job_template_id for wf in self.get_running_workflow_jobs()}
         tasks_to_update_job_explanation = []
         for task in pending_tasks:
             if self.start_task_limit <= 0:
                 break
+            if self.timed_out():
+                logger.warning("Task manager has reached time out while processing pending jobs, exiting loop early")
+                break
             blocked_by = self.job_blocked_by(task)
             if blocked_by:
-                self.subsystem_metrics.inc("task_manager_tasks_blocked", 1)
+                self.subsystem_metrics.inc(f"{self.prefix}_tasks_blocked", 1)
                 task.log_lifecycle("blocked", blocked_by=blocked_by)
                 job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
                 if task.job_explanation != job_explanation:
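In the rewritten start_task, capacity is charged in exactly one place: the controller node pays the flat AWX_CONTROL_NODE_TASK_IMPACT and the execution node pays the job's cached task_impact. A sketch of that accounting with hypothetical instance objects:

# Sketch of the capacity accounting above: control overhead is a flat per-job
# constant, execution cost is the job's cached task_impact.
CONTROL_NODE_TASK_IMPACT = 1  # stand-in for settings.AWX_CONTROL_NODE_TASK_IMPACT


class MiniInstance:
    def __init__(self, capacity):
        self.capacity = capacity
        self.consumed = 0

    def consume_capacity(self, impact):
        self.consumed += impact

    def remaining(self):
        return self.capacity - self.consumed


control, execution = MiniInstance(10), MiniInstance(50)
task_impact = 6
control.consume_capacity(CONTROL_NODE_TASK_IMPACT)
execution.consume_capacity(task_impact)
assert (control.remaining(), execution.remaining()) == (9, 44)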
@@ -499,19 +601,16 @@ class TaskManager:
                     tasks_to_update_job_explanation.append(task)
                 continue

-            found_acceptable_queue = False
-            preferred_instance_groups = task.preferred_instance_groups
-
             if isinstance(task, WorkflowJob):
-                if task.unified_job_template_id in running_workflow_templates:
-                    if not task.allow_simultaneous:
-                        logger.debug("{} is blocked from running, workflow already running".format(task.log_format))
-                        continue
-                else:
-                    running_workflow_templates.add(task.unified_job_template_id)
+                # Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph.
+                # Double check that using just the DependencyGraph works for Workflows and Sliced Jobs.
                 self.start_task(task, None, task.get_jobs_fail_chain(), None)
                 continue

+            found_acceptable_queue = False
+
+            preferred_instance_groups = self.instance_groups.get_instance_groups_from_task_cache(task)
+
             # Determine if there is control capacity for the task
             if task.capacity_type == 'control':
                 control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
@@ -530,8 +629,6 @@ class TaskManager:
             # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
             if task.capacity_type == 'control':
                 task.execution_node = control_instance.hostname
-                control_instance.consume_capacity(control_impact)
-                self.dependency_graph.add_job(task)
                 execution_instance = self.instances[control_instance.hostname].obj
                 task.log_lifecycle("controller_node_chosen")
                 task.log_lifecycle("execution_node_chosen")
@@ -541,7 +638,6 @@ class TaskManager:

             for instance_group in preferred_instance_groups:
                 if instance_group.is_container_group:
-                    self.dependency_graph.add_job(task)
                     self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
                     found_acceptable_queue = True
                     break
@@ -563,9 +659,7 @@ class TaskManager:
                         control_instance = execution_instance
                         task.controller_node = execution_instance.hostname

-                    control_instance.consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
                     task.log_lifecycle("controller_node_chosen")
-                    execution_instance.consume_capacity(task.task_impact)
                     task.log_lifecycle("execution_node_chosen")
                     logger.debug(
                         "Starting {} in group {} instance {} (remaining_capacity={})".format(
@@ -573,7 +667,6 @@ class TaskManager:
                         )
                     )
                     execution_instance = self.instances[execution_instance.hostname].obj
-                    self.dependency_graph.add_job(task)
                     self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
                     found_acceptable_queue = True
                     break
@@ -599,25 +692,6 @@ class TaskManager:
|
||||
tasks_to_update_job_explanation.append(task)
|
||||
logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
|
||||
|
||||
    def timeout_approval_node(self):
        workflow_approvals = WorkflowApproval.objects.filter(status='pending')
        now = tz_now()
        for task in workflow_approvals:
            approval_timeout_seconds = timedelta(seconds=task.timeout)
            if task.timeout == 0:
                continue
            if (now - task.created) >= approval_timeout_seconds:
                timeout_message = _("The approval node {name} ({pk}) has expired after {timeout} seconds.").format(
                    name=task.name, pk=task.pk, timeout=task.timeout
                )
                logger.warning(timeout_message)
                task.timed_out = True
                task.status = 'failed'
                task.send_approval_notification('timed_out')
                task.websocket_emit_status(task.status)
                task.job_explanation = timeout_message
                task.save(update_fields=['status', 'job_explanation', 'timed_out'])

    def reap_jobs_from_orphaned_instances(self):
        # discover jobs that are in running state but aren't on an execution node
        # that we know about; this is a fairly rare event, but it can occur if you,
@@ -630,92 +704,45 @@ class TaskManager:
                logger.error(f'{j.execution_node} is not a registered instance; reaping {j.log_format}')
                reap_job(j, 'failed')

    def process_tasks(self, all_sorted_tasks):
        running_tasks = [t for t in all_sorted_tasks if t.status in ['waiting', 'running']]
    def process_tasks(self):
        running_tasks = [t for t in self.all_tasks if t.status in ['waiting', 'running']]
        self.process_running_tasks(running_tasks)
        self.subsystem_metrics.inc("task_manager_running_processed", len(running_tasks))
        self.subsystem_metrics.inc(f"{self.prefix}_running_processed", len(running_tasks))

        pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']

        undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
        dependencies = self.generate_dependencies(undeped_tasks)
        deps_of_deps = self.generate_dependencies(dependencies)
        dependencies += deps_of_deps
        self.process_pending_tasks(dependencies)
        self.subsystem_metrics.inc("task_manager_pending_processed", len(dependencies))
        pending_tasks = [t for t in self.all_tasks if t.status == 'pending']

        self.process_pending_tasks(pending_tasks)
        self.subsystem_metrics.inc("task_manager_pending_processed", len(pending_tasks))
        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(pending_tasks))

    def timeout_approval_node(self, task):
        if self.timed_out():
            logger.warning("Task manager has reached time out while processing approval nodes, exiting loop early")
            # Do not process any more workflow approval nodes. Stop here.
            # Maybe we should schedule another TaskManager run
            return
        timeout_message = _("The approval node {name} ({pk}) has expired after {timeout} seconds.").format(name=task.name, pk=task.pk, timeout=task.timeout)
        logger.warning(timeout_message)
        task.timed_out = True
        task.status = 'failed'
        task.send_approval_notification('timed_out')
        task.websocket_emit_status(task.status)
        task.job_explanation = timeout_message
        task.save(update_fields=['status', 'job_explanation', 'timed_out'])

    def get_expired_workflow_approvals(self):
        # timeout of 0 indicates that it never expires
        qs = WorkflowApproval.objects.filter(status='pending').exclude(timeout=0).filter(expires__lt=tz_now())
        return qs

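The older process_tasks() above expanded dependencies in two passes before scheduling: dependencies of the pending jobs, then dependencies of those dependencies (for example, a project update spawned for an inventory update). A minimal sketch of that pass structure, not part of the diff, with generate_dependencies standing in for the real method:

def expand_dependencies(generate_dependencies, pending_tasks):
    # first pass: direct dependencies of the pending jobs
    dependencies = generate_dependencies(pending_tasks)
    # second pass: dependencies spawned by those dependencies
    dependencies += generate_dependencies(dependencies)
    return dependencies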
    @timeit
    def _schedule(self):
        finished_wfjs = []
        all_sorted_tasks = self.get_tasks()
        self.get_tasks(dict(status__in=["pending", "waiting", "running"], dependencies_processed=True))

        self.after_lock_init(all_sorted_tasks)
        self.after_lock_init()
        self.reap_jobs_from_orphaned_instances()

        if len(all_sorted_tasks) > 0:
            # TODO: Deal with
            # latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks)
            # self.process_latest_project_updates(latest_project_updates)
        if len(self.all_tasks) > 0:
            self.process_tasks()

            # latest_inventory_updates = self.get_latest_inventory_update_tasks(all_sorted_tasks)
            # self.process_latest_inventory_updates(latest_inventory_updates)

            self.all_inventory_sources = self.get_inventory_source_tasks(all_sorted_tasks)

            running_workflow_tasks = self.get_running_workflow_jobs()
            finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)

            previously_running_workflow_tasks = running_workflow_tasks
            running_workflow_tasks = []
            for workflow_job in previously_running_workflow_tasks:
                if workflow_job.status == 'running':
                    running_workflow_tasks.append(workflow_job)
                else:
                    logger.debug('Removed %s from job spawning consideration.', workflow_job.log_format)

            self.spawn_workflow_graph_jobs(running_workflow_tasks)

            self.timeout_approval_node()
            self.reap_jobs_from_orphaned_instances()

            self.process_tasks(all_sorted_tasks)
        return finished_wfjs

    def record_aggregate_metrics(self, *args):
        if not settings.IS_TESTING():
            # increment task_manager_schedule_calls regardless of whether the other
            # metrics are recorded
            s_metrics.Metrics(auto_pipe_execute=True).inc("task_manager_schedule_calls", 1)
            # Only record metrics if the last time recording was more
            # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
            # Prevents a short-duration task manager that runs directly after a
            # long task manager from overriding useful metrics.
            current_time = time.time()
            time_last_recorded = current_time - self.subsystem_metrics.decode("task_manager_recorded_timestamp")
            if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
                logger.debug(f"recording metrics, last recorded {time_last_recorded} seconds ago")
                self.subsystem_metrics.set("task_manager_recorded_timestamp", current_time)
                self.subsystem_metrics.pipe_execute()
            else:
                logger.debug(f"skipping recording metrics, last recorded {time_last_recorded} seconds ago")

    def record_aggregate_metrics_and_exit(self, *args):
        self.record_aggregate_metrics()
        sys.exit(1)

    def schedule(self):
        # Lock
        with advisory_lock('task_manager_lock', wait=False) as acquired:
            with transaction.atomic():
                if acquired is False:
                    logger.debug("Not running scheduler, another task holds lock")
                    return
                logger.debug("Starting Scheduler")
                with task_manager_bulk_reschedule():
                    # if sigterm due to timeout, still record metrics
                    signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
                    self._schedule()
                    self.record_aggregate_metrics()
                logger.debug("Finishing Scheduler")
        for workflow_approval in self.get_expired_workflow_approvals():
            self.timeout_approval_node(workflow_approval)

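schedule() gates each cycle behind a non-blocking PostgreSQL advisory lock so that at most one scheduler instance runs cluster-wide. A minimal standalone sketch of the same pattern, assuming the django-pglocks package (the advisory_lock helper used above behaves the same way); run_singleton is an illustrative name:

from django_pglocks import advisory_lock

def run_singleton(lock_name, body):
    # wait=False returns immediately; acquired is False when another
    # process already holds the lock, in which case we skip this cycle
    with advisory_lock(lock_name, wait=False) as acquired:
        if acquired is False:
            return False
        body()
        return True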
@@ -67,6 +67,7 @@ class TaskManagerInstanceGroups:
    def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
        self.instance_groups = dict()
        self.controlplane_ig = None
        self.pk_ig_map = dict()

        if instance_groups is not None:  # for testing
            self.instance_groups = instance_groups
@@ -81,6 +82,7 @@ class TaskManagerInstanceGroups:
                        instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
                    ],
                )
                self.pk_ig_map[instance_group.pk] = instance_group

    def get_remaining_capacity(self, group_name):
        instances = self.instance_groups[group_name]['instances']
@@ -121,3 +123,17 @@ class TaskManagerInstanceGroups:
            elif i.capacity > largest_instance.capacity:
                largest_instance = i
        return largest_instance

    def get_instance_groups_from_task_cache(self, task):
        igs = []
        if task.preferred_instance_groups_cache:
            for pk in task.preferred_instance_groups_cache:
                ig = self.pk_ig_map.get(pk, None)
                if ig:
                    igs.append(ig)
                else:
                    logger.warning(f"Unknown instance group with pk {pk} for task {task}")
        if len(igs) == 0:
            logger.warning(f"No instance groups in cache exist, defaulting to global instance groups for task {task}")
            return task.global_instance_groups
        return igs

@@ -1,15 +1,35 @@
# Python
import logging

# Django
from django.conf import settings

# AWX
from awx.main.scheduler import TaskManager
from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename

logger = logging.getLogger('awx.main.scheduler')


def run_manager(manager, prefix):
    if MODE == 'development' and settings.AWX_DISABLE_TASK_MANAGERS:
        logger.debug(f"Not running {prefix} manager, AWX_DISABLE_TASK_MANAGERS is True. Trigger with GET to /api/debug/{prefix}_manager/")
        return
    manager().schedule()


@task(queue=get_local_queuename)
def run_task_manager():
    logger.debug("Running task manager.")
    TaskManager().schedule()
def task_manager():
    run_manager(TaskManager, "task")


@task(queue=get_local_queuename)
def dependency_manager():
    run_manager(DependencyManager, "dependency")


@task(queue=get_local_queuename)
def workflow_manager():
    run_manager(WorkflowManager, "workflow")

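run_manager() above is the shared entry point for the three managers; in development the AWX_DISABLE_TASK_MANAGERS setting suppresses automatic runs so each manager can be triggered by hand via the debug endpoints named in the log message. A sketch of that guard in isolation, not part of the diff; the plain mode/managers_disabled arguments are illustrative stand-ins for MODE and the setting:

import logging

logger = logging.getLogger('awx.main.scheduler')

def run_manager_guarded(manager_cls, prefix, mode, managers_disabled):
    # In development the managers can be disabled so a human can trigger
    # them deterministically via GET /api/debug/<prefix>_manager/
    if mode == 'development' and managers_disabled:
        logger.debug(f"Not running {prefix} manager, task managers are disabled.")
        return
    manager_cls().schedule()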
@@ -1,6 +1,5 @@
# Python
from collections import OrderedDict
from distutils.dir_util import copy_tree
import errno
import functools
import fcntl
@@ -15,7 +14,6 @@ import tempfile
import traceback
import time
import urllib.parse as urlparse
from uuid import uuid4

# Django
from django.conf import settings
@@ -38,6 +36,7 @@ from awx.main.constants import (
    JOB_FOLDER_PREFIX,
    MAX_ISOLATED_PATH_COLON_DELIMITER,
    CONTAINER_VOLUMES_MOUNT_TYPES,
    ACTIVE_STATES,
)
from awx.main.models import (
    Instance,
@@ -211,14 +210,22 @@ class BaseTask(object):
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        if settings.AWX_CLEANUP_PATHS:
            self.cleanup_paths.append(path)
        # Ansible runner requires that project exists,
        # and we will write files in the other folders without pre-creating the folder
        for subfolder in ('project', 'inventory', 'env'):
        # We will write files in these folders later
        for subfolder in ('inventory', 'env'):
            runner_subfolder = os.path.join(path, subfolder)
            if not os.path.exists(runner_subfolder):
                os.mkdir(runner_subfolder)
        return path

    def build_project_dir(self, instance, private_data_dir):
        """
        Create the ansible-runner project subdirectory. In many cases this is the source checkout.
        In cases that do not even need the source checkout, we create an empty dir to be the workdir.
        """
        project_dir = os.path.join(private_data_dir, 'project')
        if not os.path.exists(project_dir):
            os.mkdir(project_dir)

    def build_private_data_files(self, instance, private_data_dir):
        """
        Creates temporary files containing the private data.
@@ -354,12 +361,61 @@ class BaseTask(object):
            expect_passwords[k] = passwords.get(v, '') or ''
        return expect_passwords

    def release_lock(self, project):
        try:
            fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
        except IOError as e:
            logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, project.get_lock_file(), e.strerror))
            os.close(self.lock_fd)
            raise

        os.close(self.lock_fd)
        self.lock_fd = None

    def acquire_lock(self, project, unified_job_id=None):
        if not os.path.exists(settings.PROJECTS_ROOT):
            os.mkdir(settings.PROJECTS_ROOT)

        lock_path = project.get_lock_file()
        if lock_path is None:
            # If from migration or someone blanked local_path for any other reason, recoverable by save
            project.save()
            lock_path = project.get_lock_file()
            if lock_path is None:
                raise RuntimeError(u'Invalid lock file path')

        try:
            self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
        except OSError as e:
            logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
            raise

        start_time = time.time()
        while True:
            try:
                fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except IOError as e:
                if e.errno not in (errno.EAGAIN, errno.EACCES):
                    os.close(self.lock_fd)
                    logger.error("I/O error({0}) while trying to acquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                    raise
                else:
                    time.sleep(1.0)
        waiting_time = time.time() - start_time

        if waiting_time > 1.0:
            logger.info(f'Job {unified_job_id} waited {waiting_time} seconds to acquire lock for local source tree for path {lock_path}.')

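acquire_lock()/release_lock() above implement a polling, non-blocking fcntl file lock around the project source tree. The same pattern in isolation, as a sketch rather than the AWX implementation:

import errno
import fcntl
import os
import time

def acquire_file_lock(lock_path, poll_interval=1.0):
    fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
    while True:
        try:
            # LOCK_NB makes lockf raise instead of blocking while another
            # process holds the lock, so we can poll and stay responsive
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return fd
        except IOError as e:
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                os.close(fd)
                raise
            time.sleep(poll_interval)

def release_file_lock(fd):
    fcntl.lockf(fd, fcntl.LOCK_UN)
    os.close(fd)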
    def pre_run_hook(self, instance, private_data_dir):
        """
        Hook for any steps to run before the job/task starts
        """
        instance.log_lifecycle("pre_run")

        # Before task is started, ensure that job_event partitions exist
        create_partition(instance.event_class._meta.db_table, start=instance.created)

    def post_run_hook(self, instance, status):
        """
        Hook for any steps to run before job/task is marked as complete.
@@ -372,15 +428,9 @@ class BaseTask(object):
        """
        instance.log_lifecycle("finalize_run")
        artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
        job_profiling_dir = os.path.join(artifact_dir, 'playbook_profiling')
        awx_profiling_dir = '/var/log/tower/playbook_profiling/'
        collections_info = os.path.join(artifact_dir, 'collections.json')
        ansible_version_file = os.path.join(artifact_dir, 'ansible_version.txt')

        if not os.path.exists(awx_profiling_dir):
            os.mkdir(awx_profiling_dir)
        if os.path.isdir(job_profiling_dir):
            shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
        if os.path.exists(collections_info):
            with open(collections_info) as ee_json_info:
                ee_collections_info = json.loads(ee_json_info.read())
@@ -399,6 +449,11 @@ class BaseTask(object):
        Run the job/task and capture its output.
        """
        self.instance = self.model.objects.get(pk=pk)
        if self.instance.status != 'canceled' and self.instance.cancel_flag:
            self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
        if self.instance.status not in ACTIVE_STATES:
            # Prevent starting the job if it has been reaped or handled by another process.
            raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')

        if self.instance.execution_environment_id is None:
            from awx.main.signals import disable_activity_stream
@@ -424,6 +479,7 @@ class BaseTask(object):
        self.instance.send_notification_templates("running")
        private_data_dir = self.build_private_data_dir(self.instance)
        self.pre_run_hook(self.instance, private_data_dir)
        self.build_project_dir(self.instance, private_data_dir)
        self.instance.log_lifecycle("preparing_playbook")
        if self.instance.cancel_flag or signal_callback():
            self.instance = self.update_model(self.instance.pk, status='canceled')
@@ -549,8 +605,12 @@ class BaseTask(object):
                status = 'failed'
            elif status == 'canceled':
                self.instance = self.update_model(pk)
                if (getattr(self.instance, 'cancel_flag', False) is False) and signal_callback():
                    self.runner_callback.delay_update(job_explanation="Task was canceled due to receiving a shutdown signal.")
                cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
                if (cancel_flag_value is False) and signal_callback():
                    self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
                    status = 'failed'
                elif cancel_flag_value is False:
                    self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
                    status = 'failed'
        except ReceptorNodeNotFound as exc:
            self.runner_callback.delay_update(job_explanation=str(exc))
@@ -593,8 +653,143 @@ class BaseTask(object):
            raise AwxTaskError.TaskError(self.instance, rc)


class SourceControlMixin(BaseTask):
    """Utility methods for tasks that use content from source control"""

    def get_sync_needs(self, project, scm_branch=None):
        project_path = project.get_project_path(check_if_exists=False)
        job_revision = project.scm_revision
        sync_needs = []
        source_update_tag = 'update_{}'.format(project.scm_type)
        branch_override = bool(scm_branch and scm_branch != project.scm_branch)
        # TODO: skip syncs for inventory updates. Now, UI needs a link added so clients can link to project
        # source_project is only a field on inventory sources.
        if isinstance(self.instance, InventoryUpdate):
            sync_needs.append(source_update_tag)
        elif not project.scm_type:
            pass  # manual projects are not synced, user has responsibility for that
        elif not os.path.exists(project_path):
            logger.debug(f'Performing fresh clone of {project.id} for unified job {self.instance.id} on this instance.')
            sync_needs.append(source_update_tag)
        elif project.scm_type == 'git' and project.scm_revision and (not branch_override):
            try:
                git_repo = git.Repo(project_path)

                if job_revision == git_repo.head.commit.hexsha:
                    logger.debug(f'Skipping project sync for {self.instance.id} because commit is locally available')
                else:
                    sync_needs.append(source_update_tag)
            except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
                logger.debug(f'Needed commit for {self.instance.id} not in local source tree, will sync with remote')
                sync_needs.append(source_update_tag)
        else:
            logger.debug(f'Project not available locally, {self.instance.id} will sync with remote')
            sync_needs.append(source_update_tag)

        has_cache = os.path.exists(os.path.join(project.get_cache_path(), project.cache_id))
        # Galaxy requirements are not supported for manual projects
        if project.scm_type and ((not has_cache) or branch_override):
            sync_needs.extend(['install_roles', 'install_collections'])

        return sync_needs

    def spawn_project_sync(self, project, sync_needs, scm_branch=None):
        pu_ig = self.instance.instance_group
        pu_en = Instance.objects.me().hostname

        sync_metafields = dict(
            launch_type="sync",
            job_type='run',
            job_tags=','.join(sync_needs),
            status='running',
            instance_group=pu_ig,
            execution_node=pu_en,
            controller_node=pu_en,
            celery_task_id=self.instance.celery_task_id,
        )
        if scm_branch and scm_branch != project.scm_branch:
            sync_metafields['scm_branch'] = scm_branch
            sync_metafields['scm_clean'] = True  # to accommodate force pushes
        if 'update_' not in sync_metafields['job_tags']:
            sync_metafields['scm_revision'] = project.scm_revision
        local_project_sync = project.create_project_update(_eager_fields=sync_metafields)
        local_project_sync.log_lifecycle("controller_node_chosen")
        local_project_sync.log_lifecycle("execution_node_chosen")
        return local_project_sync

    def sync_and_copy_without_lock(self, project, private_data_dir, scm_branch=None):
        sync_needs = self.get_sync_needs(project, scm_branch=scm_branch)

        if sync_needs:
            local_project_sync = self.spawn_project_sync(project, sync_needs, scm_branch=scm_branch)
            # save the associated job before calling run() so that a
            # cancel() call on the job can cancel the project update
            if isinstance(self.instance, Job):
                self.instance = self.update_model(self.instance.pk, project_update=local_project_sync)
            else:
                self.instance = self.update_model(self.instance.pk, source_project_update=local_project_sync)

            try:
                # the job private_data_dir is passed so sync can download roles and collections there
                sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                if isinstance(self.instance, Job):
                    self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
            except Exception:
                local_project_sync.refresh_from_db()
                if local_project_sync.status != 'canceled':
                    self.instance = self.update_model(
                        self.instance.pk,
                        status='failed',
                        job_explanation=(
                            'Previous Task Failed: {"job_type": "project_update", '
                            f'"job_name": "{local_project_sync.name}", "job_id": "{local_project_sync.id}"}}'
                        ),
                    )
                raise
            self.instance.refresh_from_db()
            if self.instance.cancel_flag:
                return
        else:
            # Case where a local sync is not needed, meaning that local tree is
            # up-to-date with project, job is running project current version
            if isinstance(self.instance, Job):
                self.instance = self.update_model(self.instance.pk, scm_revision=project.scm_revision)
            # Project update does not copy the folder, so copy here
            RunProjectUpdate.make_local_copy(project, private_data_dir)

    def sync_and_copy(self, project, private_data_dir, scm_branch=None):
        self.acquire_lock(project, self.instance.id)

        try:
            original_branch = None
            project_path = project.get_project_path(check_if_exists=False)
            if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):
                if os.path.exists(project_path):
                    git_repo = git.Repo(project_path)
                    if git_repo.head.is_detached:
                        original_branch = git_repo.head.commit
                    else:
                        original_branch = git_repo.active_branch

            return self.sync_and_copy_without_lock(project, private_data_dir, scm_branch=scm_branch)
        finally:
            # We have made the copy so we can set the tree back to its normal state
            if original_branch:
                # for git project syncs, non-default branches can be problems
                # restore to branch the repo was on before this run
                try:
                    original_branch.checkout()
                except Exception:
                    # this could have failed due to dirty tree, but difficult to predict all cases
                    logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')

            self.release_lock(project)


@task(queue=get_local_queuename)
class RunJob(BaseTask):
class RunJob(SourceControlMixin, BaseTask):
    """
    Run a job using ansible-playbook.
    """
@@ -863,98 +1058,14 @@ class RunJob(BaseTask):
            job = self.update_model(job.pk, status='failed', job_explanation=msg)
            raise RuntimeError(msg)

        project_path = job.project.get_project_path(check_if_exists=False)
        job_revision = job.project.scm_revision
        sync_needs = []
        source_update_tag = 'update_{}'.format(job.project.scm_type)
        branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
        if not job.project.scm_type:
            pass  # manual projects are not synced, user has responsibility for that
        elif not os.path.exists(project_path):
            logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
            sync_needs.append(source_update_tag)
        elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
            try:
                git_repo = git.Repo(project_path)

                if job_revision == git_repo.head.commit.hexsha:
                    logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
                else:
                    sync_needs.append(source_update_tag)
            except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
                logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
                sync_needs.append(source_update_tag)
        else:
            logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
            sync_needs.append(source_update_tag)

        has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
        # Galaxy requirements are not supported for manual projects
        if job.project.scm_type and ((not has_cache) or branch_override):
            sync_needs.extend(['install_roles', 'install_collections'])
        if sync_needs:
            pu_ig = job.instance_group
            pu_en = Instance.objects.me().hostname

            sync_metafields = dict(
                launch_type="sync",
                job_type='run',
                job_tags=','.join(sync_needs),
                status='running',
                instance_group=pu_ig,
                execution_node=pu_en,
                controller_node=pu_en,
                celery_task_id=job.celery_task_id,
            )
            if branch_override:
                sync_metafields['scm_branch'] = job.scm_branch
                sync_metafields['scm_clean'] = True  # to accommodate force pushes
            if 'update_' not in sync_metafields['job_tags']:
                sync_metafields['scm_revision'] = job_revision
            local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
            local_project_sync.log_lifecycle("controller_node_chosen")
            local_project_sync.log_lifecycle("execution_node_chosen")
            create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
            # save the associated job before calling run() so that a
            # cancel() call on the job can cancel the project update
            job = self.update_model(job.pk, project_update=local_project_sync)

            project_update_task = local_project_sync._get_task_class()
            try:
                # the job private_data_dir is passed so sync can download roles and collections there
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
                job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
            except Exception:
                local_project_sync.refresh_from_db()
                if local_project_sync.status != 'canceled':
                    job = self.update_model(
                        job.pk,
                        status='failed',
                        job_explanation=(
                            'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                            % ('project_update', local_project_sync.name, local_project_sync.id)
                        ),
                    )
                raise
            job.refresh_from_db()
            if job.cancel_flag:
                return
        else:
            # Case where a local sync is not needed, meaning that local tree is
            # up-to-date with project, job is running project current version
            if job_revision:
                job = self.update_model(job.pk, scm_revision=job_revision)
            # Project update does not copy the folder, so copy here
            RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)

        if job.inventory.kind == 'smart':
            # cache smart inventory memberships so that the host_filter query is not
            # run inside of the event saving code
            update_smart_memberships_for_inventory(job.inventory)

    def build_project_dir(self, job, private_data_dir):
        self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)

    def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
        super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
        if not private_data_dir:
@@ -986,7 +1097,6 @@ class RunProjectUpdate(BaseTask):

    def __init__(self, *args, job_private_data_dir=None, **kwargs):
        super(RunProjectUpdate, self).__init__(*args, **kwargs)
        self.original_branch = None
        self.job_private_data_dir = job_private_data_dir

    def build_private_data(self, project_update, private_data_dir):
@@ -1173,74 +1283,17 @@ class RunProjectUpdate(BaseTask):
        d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
        return d

    def release_lock(self, instance):
        try:
            fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
        except IOError as e:
            logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
            os.close(self.lock_fd)
            raise

        os.close(self.lock_fd)
        self.lock_fd = None

    '''
    Note: We don't support blocking=False
    '''

    def acquire_lock(self, instance, blocking=True):
        lock_path = instance.get_lock_file()
        if lock_path is None:
            # If from migration or someone blanked local_path for any other reason, recoverable by save
            instance.save()
            lock_path = instance.get_lock_file()
            if lock_path is None:
                raise RuntimeError(u'Invalid lock file path')

        try:
            self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
        except OSError as e:
            logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
            raise

        start_time = time.time()
        while True:
            try:
                instance.refresh_from_db(fields=['cancel_flag'])
                if instance.cancel_flag:
                    logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
                    return
                fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except IOError as e:
                if e.errno not in (errno.EAGAIN, errno.EACCES):
                    os.close(self.lock_fd)
                    logger.error("I/O error({0}) while trying to acquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                    raise
                else:
                    time.sleep(1.0)
        waiting_time = time.time() - start_time

        if waiting_time > 1.0:
            logger.info('{} spent {} waiting to acquire lock for local source tree for path {}.'.format(instance.log_format, waiting_time, lock_path))

    def pre_run_hook(self, instance, private_data_dir):
        super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
        # re-create root project folder if a natural disaster has destroyed it
        if not os.path.exists(settings.PROJECTS_ROOT):
            os.mkdir(settings.PROJECTS_ROOT)
        project_path = instance.project.get_project_path(check_if_exists=False)

        self.acquire_lock(instance)

        self.original_branch = None
        if instance.scm_type == 'git' and instance.branch_override:
            if os.path.exists(project_path):
                git_repo = git.Repo(project_path)
                if git_repo.head.is_detached:
                    self.original_branch = git_repo.head.commit
                else:
                    self.original_branch = git_repo.active_branch
        instance.refresh_from_db(fields=['cancel_flag'])
        if instance.cancel_flag:
            logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
            return
        if instance.launch_type != 'sync':
            self.acquire_lock(instance.project, instance.id)

        if not os.path.exists(project_path):
            os.makedirs(project_path)  # used as container mount
@@ -1251,11 +1304,12 @@ class RunProjectUpdate(BaseTask):
            shutil.rmtree(stage_path)
        os.makedirs(stage_path)  # presence of empty cache indicates lack of roles or collections

    def build_project_dir(self, instance, private_data_dir):
        # the project update playbook is not in a git repo, but uses a vendoring directory
        # to be consistent with the ansible-runner model,
        # that is moved into the runner project folder here
        awx_playbooks = self.get_path_to('../../', 'playbooks')
        copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
        shutil.copytree(awx_playbooks, os.path.join(private_data_dir, 'project'))

    @staticmethod
    def clear_project_cache(cache_dir, keep_value):
@@ -1272,50 +1326,18 @@ class RunProjectUpdate(BaseTask):
                logger.warning(f"Could not remove cache directory {old_path}")

    @staticmethod
    def make_local_copy(p, job_private_data_dir, scm_revision=None):
    def make_local_copy(project, job_private_data_dir):
        """Copy project content (roles and collections) to a job private_data_dir

        :param object p: Either a project or a project update
        :param object project: Either a project or a project update
        :param str job_private_data_dir: The root of the target ansible-runner folder
        :param str scm_revision: For branch_override cases, the git revision to copy
        """
        project_path = p.get_project_path(check_if_exists=False)
        project_path = project.get_project_path(check_if_exists=False)
        destination_folder = os.path.join(job_private_data_dir, 'project')
        if not scm_revision:
            scm_revision = p.scm_revision

        if p.scm_type == 'git':
            git_repo = git.Repo(project_path)
            if not os.path.exists(destination_folder):
                os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
            tmp_branch_name = 'awx_internal/{}'.format(uuid4())
            # always clone based on specific job revision
            if not p.scm_revision:
                raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
            source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
            # git clone must take file:// syntax for source repo or else options like depth will be ignored
            source_as_uri = Path(project_path).as_uri()
            git.Repo.clone_from(
                source_as_uri,
                destination_folder,
                branch=source_branch,
                depth=1,
                single_branch=True,  # shallow, do not copy full history
            )
            # submodules copied in loop because shallow copies from local HEADs are ideal
            # and no git clone submodule options are compatible with minimum requirements
            for submodule in git_repo.submodules:
                subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
                subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
                subrepo_uri = Path(subrepo_path).as_uri()
                git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
            # force option is necessary because remote refs are not counted, although no information is lost
            git_repo.delete_head(tmp_branch_name, force=True)
        else:
            copy_tree(project_path, destination_folder, preserve_symlinks=1)
            shutil.copytree(project_path, destination_folder, ignore=shutil.ignore_patterns('.git'), symlinks=True)

        # copy over the roles and collection cache to job folder
        cache_path = os.path.join(p.get_cache_path(), p.cache_id)
        cache_path = os.path.join(project.get_cache_path(), project.cache_id)
        subfolders = []
        if settings.AWX_COLLECTIONS_ENABLED:
            subfolders.append('requirements_collections')
@@ -1325,8 +1347,8 @@ class RunProjectUpdate(BaseTask):
            cache_subpath = os.path.join(cache_path, subfolder)
            if os.path.exists(cache_subpath):
                dest_subpath = os.path.join(job_private_data_dir, subfolder)
                copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
                logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
                shutil.copytree(cache_subpath, dest_subpath, symlinks=True)
                logger.debug('{0} {1} prepared {2} from cache'.format(type(project).__name__, project.pk, dest_subpath))

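make_local_copy() clones a local checkout at an exact revision by pointing a temporary branch at the commit and cloning over a file:// URI, since plain local paths make git ignore options such as depth. The core of that technique as a standalone sketch (shallow_copy_at_revision is an illustrative name, not part of the diff):

from pathlib import Path
from uuid import uuid4
import git

def shallow_copy_at_revision(source_path, dest_path, revision):
    repo = git.Repo(source_path)
    tmp_branch = 'tmp_copy/{}'.format(uuid4())
    # a branch must exist at the commit for a shallow, single-branch clone
    branch = repo.create_head(tmp_branch, revision)
    try:
        git.Repo.clone_from(Path(source_path).as_uri(), dest_path, branch=branch, depth=1, single_branch=True)
    finally:
        # force is needed because the temporary branch was never pushed
        repo.delete_head(tmp_branch, force=True)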
    def post_run_hook(self, instance, status):
        super(RunProjectUpdate, self).post_run_hook(instance, status)
@@ -1356,23 +1378,13 @@ class RunProjectUpdate(BaseTask):
            if self.job_private_data_dir:
                if status == 'successful':
                    # copy project folder before resetting to default branch
                    # because some git-tree-specific resources (like submodules) might matter
                    self.make_local_copy(instance, self.job_private_data_dir)
                if self.original_branch:
                    # for git project syncs, non-default branches can be problems
                    # restore to branch the repo was on before this run
                    try:
                        self.original_branch.checkout()
                    except Exception:
                        # this could have failed due to dirty tree, but difficult to predict all cases
                        logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
        finally:
            self.release_lock(instance)
        if instance.launch_type != 'sync':
            self.release_lock(instance.project)

        p = instance.project
        if instance.job_type == 'check' and status not in (
            'failed',
            'canceled',
        ):
        if instance.job_type == 'check' and status not in ('failed', 'canceled'):
            if self.runner_callback.playbook_new_revision:
                p.scm_revision = self.runner_callback.playbook_new_revision
            else:
@@ -1400,7 +1412,7 @@ class RunProjectUpdate(BaseTask):


@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
class RunInventoryUpdate(SourceControlMixin, BaseTask):

    model = InventoryUpdate
    event_model = InventoryUpdateEvent
@@ -1556,54 +1568,18 @@ class RunInventoryUpdate(BaseTask):
        # All credentials not used by inventory source injector
        return inventory_update.get_extra_credentials()

    def pre_run_hook(self, inventory_update, private_data_dir):
        super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
    def build_project_dir(self, inventory_update, private_data_dir):
        source_project = None
        if inventory_update.inventory_source:
            source_project = inventory_update.inventory_source.source_project
        if inventory_update.source == 'scm' and source_project and source_project.scm_type:  # never ever update manual projects

            # Check if the content cache exists, so that we do not unnecessarily re-download roles
            sync_needs = ['update_{}'.format(source_project.scm_type)]
            has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
            # Galaxy requirements are not supported for manual projects
            if not has_cache:
                sync_needs.extend(['install_roles', 'install_collections'])

            local_project_sync = source_project.create_project_update(
                _eager_fields=dict(
                    launch_type="sync",
                    job_type='run',
                    job_tags=','.join(sync_needs),
                    status='running',
                    execution_node=Instance.objects.me().hostname,
                    controller_node=Instance.objects.me().hostname,
                    instance_group=inventory_update.instance_group,
                    celery_task_id=inventory_update.celery_task_id,
                )
            )
            local_project_sync.log_lifecycle("controller_node_chosen")
            local_project_sync.log_lifecycle("execution_node_chosen")
            create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
            # associate the inventory update before calling run() so that a
            # cancel() call on the inventory update can cancel the project update
            local_project_sync.scm_inventory_updates.add(inventory_update)

            project_update_task = local_project_sync._get_task_class()
            try:
                sync_task = project_update_task(job_private_data_dir=private_data_dir)
                sync_task.run(local_project_sync.id)
                local_project_sync.refresh_from_db()
            except Exception:
                inventory_update = self.update_model(
                    inventory_update.pk,
                    status='failed',
                    job_explanation=(
                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                        % ('project_update', local_project_sync.name, local_project_sync.id)
                    ),
                )
                raise
        if inventory_update.source == 'scm':
            if not source_project:
                raise RuntimeError('Could not find project to run SCM inventory update from.')
            self.sync_and_copy(source_project, private_data_dir)
        else:
            # If source is not SCM make an empty project directory, content is built inside inventory folder
            super(RunInventoryUpdate, self).build_project_dir(inventory_update, private_data_dir)

    def post_run_hook(self, inventory_update, status):
        super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)

@@ -99,16 +99,22 @@ def administrative_workunit_reaper(work_list=None):

    for unit_id, work_data in work_list.items():
        extra_data = work_data.get('ExtraData')
        if (extra_data is None) or (extra_data.get('RemoteWorkType') != 'ansible-runner'):
        if extra_data is None:
            continue  # if this is not ansible-runner work, we do not want to touch it
        params = extra_data.get('RemoteParams', {}).get('params')
        if not params:
            continue
        if not (params == '--worker-info' or params.startswith('cleanup')):
            continue  # if this is not a cleanup or health check, we do not want to touch it
        if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
            continue  # do not want to touch active work units
        logger.info(f'Reaping orphaned work unit {unit_id} with params {params}')
        if isinstance(extra_data, str):
            if not work_data.get('StateName', None) or work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
                continue
        else:
            if extra_data.get('RemoteWorkType') != 'ansible-runner':
                continue
            params = extra_data.get('RemoteParams', {}).get('params')
            if not params:
                continue
            if not (params == '--worker-info' or params.startswith('cleanup')):
                continue  # if this is not a cleanup or health check, we do not want to touch it
            if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
                continue  # do not want to touch active work units
        logger.info(f'Reaping orphaned work unit {unit_id} with params {params}')
        receptor_ctl.simple_command(f"work release {unit_id}")


@@ -10,12 +10,13 @@ from contextlib import redirect_stdout
import shutil
import time
from distutils.version import LooseVersion as Version
from datetime import datetime

# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.timezone import now, timedelta
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
@@ -53,7 +54,8 @@ from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
    ignore_inventory_computed_fields,
    ignore_inventory_group_removal,
    schedule_task_manager,
    ScheduleWorkflowManager,
    ScheduleTaskManager,
)

from awx.main.utils.external_logging import reconfigure_rsyslog
@@ -103,6 +105,8 @@ def dispatch_startup():
    #
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
    reaper.startup_reaping()
    reaper.reap_waiting(grace_period=0)
    m = Metrics()
    m.reset_values()

@@ -114,6 +118,10 @@ def inform_cluster_of_shutdown():
    try:
        this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
        this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
        try:
            reaper.reap_waiting(this_inst, grace_period=0)
        except Exception:
            logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
        logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')
@@ -475,8 +483,8 @@ def inspect_execution_nodes(instance_list):
            execution_node_health_check.apply_async([hostname])

@task(queue=get_local_queuename)
def cluster_node_heartbeat():
@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
    instance_list = list(Instance.objects.all())
@@ -499,12 +507,23 @@ def cluster_node_heartbeat():

    if this_inst:
        startup_event = this_inst.is_lost(ref_time=nowtime)
        last_last_seen = this_inst.last_seen
        this_inst.local_health_check()
        if startup_event and this_inst.capacity != 0:
            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
            logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
            return
        elif not last_last_seen:
            logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
        elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
            logger.warning(f'Heartbeat skew - interval={(nowtime - last_last_seen).total_seconds():.4f}, expected={settings.CLUSTER_NODE_HEARTBEAT_PERIOD}')
    else:
        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
        if settings.AWX_AUTO_DEPROVISION_INSTANCES:
            (changed, this_inst) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
            if changed:
                logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
            this_inst.local_health_check()
        else:
            raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
    # IFF any node has a greater version than we do, then we'll shutdown services
    for other_inst in instance_list:
        if other_inst.node_type in ('execution', 'hop'):
@@ -524,7 +543,9 @@ def cluster_node_heartbeat():

    for other_inst in lost_instances:
        try:
            reaper.reap(other_inst)
            explanation = "Job reaped due to instance shutdown"
            reaper.reap(other_inst, job_explanation=explanation)
            reaper.reap_waiting(other_inst, grace_period=0, job_explanation=explanation)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
        try:
@@ -542,6 +563,15 @@ def cluster_node_heartbeat():
        else:
            logger.exception('Error marking {} as lost'.format(other_inst.hostname))

    # Run local reaper
    if worker_tasks is not None:
        active_task_ids = []
        for task_list in worker_tasks.values():
            active_task_ids.extend(task_list)
        reaper.reap(instance=this_inst, excluded_uuids=active_task_ids)
        if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
            reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))


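The heartbeat bookkeeping above compares the previously recorded last_seen against the current time to flag skew, i.e. a dispatcher that fell behind its expected period. The check reduces to timedelta arithmetic; a sketch, not part of the diff, assuming timezone-aware datetimes:

from datetime import datetime, timedelta, timezone

def heartbeat_skewed(last_seen, period_seconds, grace_seconds=2):
    if last_seen is None:
        return False  # first heartbeat; nothing to compare against
    now = datetime.now(timezone.utc)
    # skewed when the gap exceeds the heartbeat period plus a small grace
    return (now - last_seen) > timedelta(seconds=period_seconds + grace_seconds)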
@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
@@ -589,7 +619,8 @@ def awx_k8s_reaper():
    for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
        logger.debug("Checking for orphaned k8s pods for {}.".format(group))
        pods = PodManager.list_active_jobs(group)
        for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
        time_cutoff = now() - timedelta(seconds=settings.K8S_POD_REAPER_GRACE_PERIOD)
        for job in UnifiedJob.objects.filter(pk__in=pods.keys(), finished__lte=time_cutoff).exclude(status__in=ACTIVE_STATES):
            logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
            try:
                pm = PodManager(job)
@@ -657,6 +688,13 @@ def awx_periodic_scheduler():
        state.save()


def schedule_manager_success_or_error(instance):
    if instance.unifiedjob_blocked_jobs.exists():
        ScheduleTaskManager().schedule()
    if instance.spawned_by_workflow:
        ScheduleWorkflowManager().schedule()


@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    try:
@@ -666,8 +704,7 @@ def handle_work_success(task_actual):
        return
    if not instance:
        return

    schedule_task_manager()
    schedule_manager_success_or_error(instance)


@task(queue=get_local_queuename)
@@ -709,8 +746,7 @@ def handle_work_error(task_id, *args, **kwargs):
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()
        pass
        schedule_manager_success_or_error(first_instance)


@task(queue=get_local_queuename)

@@ -13,7 +13,10 @@ from awx.main.models.workflow import (
    WorkflowJobTemplateNode,
)
from awx.main.models.credential import Credential
from awx.main.scheduler import TaskManager
from awx.main.scheduler import TaskManager, WorkflowManager, DependencyManager

# Django
from django.utils.timezone import now, timedelta


@pytest.fixture
@@ -137,8 +140,9 @@ class TestApprovalNodes:
        post(url, {'name': 'Approve Test', 'description': '', 'timeout': 0}, user=admin_user, expect=201)
        post(reverse('api:workflow_job_template_launch', kwargs={'pk': wfjt.pk}), user=admin_user, expect=201)
        wf_job = WorkflowJob.objects.first()
        DependencyManager().schedule()  # TODO: exclude workflows from this and delete line
        TaskManager().schedule()
        TaskManager().schedule()
        WorkflowManager().schedule()
        wfj_node = wf_job.workflow_nodes.first()
        approval = wfj_node.job
        assert approval.name == 'Approve Test'
@@ -162,8 +166,9 @@ class TestApprovalNodes:
        post(url, {'name': 'Deny Test', 'description': '', 'timeout': 0}, user=admin_user, expect=201)
        post(reverse('api:workflow_job_template_launch', kwargs={'pk': wfjt.pk}), user=admin_user, expect=201)
        wf_job = WorkflowJob.objects.first()
        DependencyManager().schedule()  # TODO: exclude workflows from this and delete line
        TaskManager().schedule()
        TaskManager().schedule()
        WorkflowManager().schedule()
        wfj_node = wf_job.workflow_nodes.first()
        approval = wfj_node.job
        assert approval.name == 'Deny Test'
@@ -216,6 +221,37 @@ class TestApprovalNodes:
        approval.refresh_from_db()
        assert approval.status == 'failed'

    def test_expires_time_on_creation(self):
        now_time = now()
        wa = WorkflowApproval.objects.create(timeout=34)
        # this is fudged, so we assert that the expires time is in a reasonable range
        assert timedelta(seconds=33) < (wa.expires - now_time) < timedelta(seconds=35)

    @pytest.mark.parametrize('with_update_fields', [True, False])
    def test_expires_time_update(self, with_update_fields):
        wa = WorkflowApproval.objects.create()
        assert wa.timeout == 0
        assert wa.expires is None
        wa.timeout = 1234
        if with_update_fields:
            wa.save(update_fields=['timeout'])
        else:
            wa.save()
        assert wa.created + timedelta(seconds=1234) == wa.expires

    @pytest.mark.parametrize('with_update_fields', [True, False])
    def test_reset_timeout_and_expires(self, with_update_fields):
        wa = WorkflowApproval.objects.create()
        wa.timeout = 1234
        wa.save()
        assert wa.expires
        wa.timeout = 0
        if with_update_fields:
            wa.save(update_fields=['timeout'])
        else:
            wa.save()
        assert wa.expires is None


@pytest.mark.django_db
class TestExclusiveRelationshipEnforcement:

40
awx/main/tests/functional/models/test_base.py
Normal file
@@ -0,0 +1,40 @@
from unittest import mock
import pytest

from crum import impersonate

from awx.main.models import Host


@pytest.mark.django_db
def test_modified_by_not_changed(inventory):
    with impersonate(None):
        host = Host.objects.create(name='foo', inventory=inventory)
        assert host.modified_by == None
        host.variables = {'foo': 'bar'}
        with mock.patch('django.db.models.Model.save') as save_mock:
            host.save(update_fields=['variables'])
            save_mock.assert_called_once_with(update_fields=['variables'])


@pytest.mark.django_db
def test_modified_by_changed(inventory, alice):
    with impersonate(None):
        host = Host.objects.create(name='foo', inventory=inventory)
        assert host.modified_by == None
    with impersonate(alice):
        host.variables = {'foo': 'bar'}
        with mock.patch('django.db.models.Model.save') as save_mock:
            host.save(update_fields=['variables'])
            save_mock.assert_called_once_with(update_fields=['variables', 'modified_by'])
        assert host.modified_by == alice


@pytest.mark.django_db
def test_created_by(inventory, alice):
    with impersonate(alice):
        host = Host.objects.create(name='foo', inventory=inventory)
        assert host.created_by == alice
    with impersonate(None):
        host = Host.objects.create(name='bar', inventory=inventory)
        assert host.created_by == None
@@ -252,12 +252,14 @@ class TestTaskImpact:
    def test_limit_task_impact(self, job_host_limit, run_computed_fields_right_away):
        job = job_host_limit(5, 2)
        job.inventory.update_computed_fields()
        job.task_impact = job._get_task_impact()
        assert job.inventory.total_hosts == 5
        assert job.task_impact == 2 + 1  # forks becomes constraint

    def test_host_task_impact(self, job_host_limit, run_computed_fields_right_away):
        job = job_host_limit(3, 5)
        job.inventory.update_computed_fields()
        job.task_impact = job._get_task_impact()
        assert job.task_impact == 3 + 1  # hosts becomes constraint

    def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_away):
@@ -270,9 +272,13 @@ class TestTaskImpact:
        # Even distribution - all jobs run on 1 host
        assert [len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts']) for i in range(3)] == [1, 1, 1]
        jobs[0].inventory.update_computed_fields()
        for j in jobs:
            j.task_impact = j._get_task_impact()
        assert [job.task_impact for job in jobs] == [2, 2, 2]  # plus one base task impact
        # Uneven distribution - first job takes the extra host
        jobs[0].inventory.hosts.create(name='remainder_foo')
        assert [len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts']) for i in range(3)] == [2, 1, 1]
        jobs[0].inventory.update_computed_fields()
        # recalculate task_impact
        jobs[0].task_impact = jobs[0]._get_task_impact()
        assert [job.task_impact for job in jobs] == [3, 2, 2]

6
awx/main/tests/functional/task_management/__init__.py
Normal file
@@ -0,0 +1,6 @@
def create_job(jt, dependencies_processed=True):
    job = jt.create_unified_job()
    job.status = "pending"
    job.dependencies_processed = dependencies_processed
    job.save()
    return job
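The task-management tests below lean on this helper instead of the old jobs=[...] factory argument. A hedged sketch of the intended flow (fixture names are taken from the surrounding tests):

# sketch: create a pending job, let DependencyManager spawn/clear its
# dependencies, then let TaskManager submit it
objects = job_template_factory('jt', organization='org', project='proj', inventory='inv', credential='cred')
job = create_job(objects.job_template, dependencies_processed=False)
DependencyManager().schedule()  # creates project/inventory updates, flips dependencies_processed
TaskManager().schedule()        # starts the job once nothing blocks it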
@@ -1,9 +1,10 @@
|
||||
import pytest
|
||||
from unittest import mock
|
||||
from datetime import timedelta
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.models import InstanceGroup, WorkflowJob
|
||||
from awx.main.scheduler import TaskManager, DependencyManager
|
||||
from awx.main.models import InstanceGroup
|
||||
from awx.main.tasks.system import apply_cluster_membership_policies
|
||||
from . import create_job
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -12,16 +13,12 @@ def test_multi_group_basic_job_launch(instance_factory, controlplane_instance_gr
|
||||
i2 = instance_factory("i2")
|
||||
ig1 = instance_group_factory("ig1", instances=[i1])
|
||||
ig2 = instance_group_factory("ig2", instances=[i2])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
|
||||
objects1.job_template.instance_groups.add(ig1)
|
||||
j1 = objects1.jobs['job_should_start']
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_still_start"])
|
||||
j1 = create_job(objects1.job_template)
|
||||
objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2')
|
||||
objects2.job_template.instance_groups.add(ig2)
|
||||
j2 = objects2.jobs['job_should_still_start']
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
j2 = create_job(objects2.job_template)
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = 500
|
||||
with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
@@ -35,23 +32,26 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
|
||||
i2 = instance_factory("i2")
|
||||
ig1 = instance_group_factory("ig1", instances=[i1])
|
||||
ig2 = instance_group_factory("ig2", instances=[i2])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
|
||||
objects1 = job_template_factory(
|
||||
'jt1',
|
||||
organization='org1',
|
||||
project='proj1',
|
||||
inventory='inv1',
|
||||
credential='cred1',
|
||||
)
|
||||
objects1.job_template.instance_groups.add(ig1)
|
||||
j1 = create_job(objects1.job_template, dependencies_processed=False)
|
||||
p = objects1.project
|
||||
p.scm_update_on_launch = True
|
||||
p.scm_update_cache_timeout = 0
|
||||
p.scm_type = "git"
|
||||
p.scm_url = "http://github.com/ansible/ansible.git"
|
||||
p.save()
|
||||
j1 = objects1.jobs['job_should_start']
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
objects2 = job_template_factory('jt2', organization=objects1.organization, project=p, inventory='inv2', credential='cred2', jobs=["job_should_still_start"])
|
||||
objects2 = job_template_factory('jt2', organization=objects1.organization, project=p, inventory='inv2', credential='cred2')
|
||||
objects2.job_template.instance_groups.add(ig2)
|
||||
j2 = objects2.jobs['job_should_still_start']
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
j2 = create_job(objects2.job_template, dependencies_processed=False)
|
||||
with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
DependencyManager().schedule()
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.first()
|
||||
TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, [j1, j2], controlplane_instance_group.instances.all()[0])
|
||||
@@ -59,6 +59,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
|
||||
pu.status = "successful"
|
||||
pu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
DependencyManager().schedule()
|
||||
TaskManager().schedule()
|
||||
|
||||
TaskManager.start_task.assert_any_call(j1, ig1, [], i1)
|
||||
@@ -69,7 +70,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
|
||||
@pytest.mark.django_db
|
||||
def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlplane_instance_group, mocker):
|
||||
wfjt = workflow_job_template_factory('anicedayforawalk').workflow_job_template
|
||||
wfj = WorkflowJob.objects.create(workflow_job_template=wfjt)
|
||||
wfj = wfjt.create_unified_job()
|
||||
wfj.status = "pending"
|
||||
wfj.save()
|
||||
with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
@@ -85,39 +86,50 @@ def test_overcapacity_blocking_other_groups_unaffected(instance_factory, control
|
||||
i1.capacity = 1020
|
||||
i1.save()
|
||||
i2 = instance_factory("i2")
|
||||
i2.capacity = 1020
|
||||
i2.save()
|
||||
ig1 = instance_group_factory("ig1", instances=[i1])
|
||||
ig2 = instance_group_factory("ig2", instances=[i2])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
|
||||
objects1.job_template.instance_groups.add(ig1)
|
||||
j1 = objects1.jobs['job_should_start']
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
objects2 = job_template_factory(
|
||||
'jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_start", "job_should_also_start"]
|
||||
)
|
||||
j1 = create_job(objects1.job_template)
|
||||
objects2 = job_template_factory('jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2')
|
||||
objects2.job_template.instance_groups.add(ig1)
|
||||
j1_1 = objects2.jobs['job_should_also_start']
|
||||
j1_1.status = 'pending'
|
||||
j1_1.save()
|
||||
objects3 = job_template_factory('jt3', organization='org2', project='proj3', inventory='inv3', credential='cred3', jobs=["job_should_still_start"])
|
||||
j1_1 = create_job(objects2.job_template)
|
||||
objects3 = job_template_factory('jt3', organization='org2', project='proj3', inventory='inv3', credential='cred3')
|
||||
objects3.job_template.instance_groups.add(ig2)
|
||||
j2 = objects3.jobs['job_should_still_start']
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
objects4 = job_template_factory(
|
||||
'jt4', organization=objects3.organization, project='proj4', inventory='inv4', credential='cred4', jobs=["job_should_not_start"]
|
||||
)
|
||||
j2 = create_job(objects3.job_template)
|
||||
objects4 = job_template_factory('jt4', organization=objects3.organization, project='proj4', inventory='inv4', credential='cred4')
|
||||
objects4.job_template.instance_groups.add(ig2)
|
||||
j2_1 = objects4.jobs['job_should_not_start']
|
||||
j2_1.status = 'pending'
|
||||
j2_1.save()
|
||||
tm = TaskManager()
|
||||
j2_1 = create_job(objects4.job_template)
|
||||
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = 500
|
||||
with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
|
||||
tm.schedule()
|
||||
mock_job.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j1_1, ig1, [], i1), mock.call(j2, ig2, [], i2)])
|
||||
assert mock_job.call_count == 3
|
||||
TaskManager().schedule()
|
||||
|
||||
# all jobs should be able to run, plenty of capacity across both instances
|
||||
for j in [j1, j1_1, j2, j2_1]:
|
||||
j.refresh_from_db()
|
||||
assert j.status == "waiting"
|
||||
|
||||
# reset to pending
|
||||
for j in [j1, j1_1, j2, j2_1]:
|
||||
j.status = "pending"
|
||||
j.save()
|
||||
|
||||
# shrink i2 so it can only fit 1 job
|
||||
i2.capacity = 510
|
||||
i2.save()
|
||||
|
||||
TaskManager().schedule()
|
||||
|
||||
for j in [j1, j1_1, j2]:
|
||||
j.refresh_from_db()
|
||||
assert j.status == "waiting"
|
||||
|
||||
j2_1.refresh_from_db()
|
||||
# could not run because i2 is full
|
||||
assert j2_1.status == "pending"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -126,19 +138,13 @@ def test_failover_group_run(instance_factory, controlplane_instance_group, mocke
|
||||
i2 = instance_factory("i2")
|
||||
ig1 = instance_group_factory("ig1", instances=[i1])
|
||||
ig2 = instance_group_factory("ig2", instances=[i2])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
|
||||
objects1.job_template.instance_groups.add(ig1)
|
||||
j1 = objects1.jobs['job_should_start']
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
objects2 = job_template_factory(
|
||||
'jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_start", "job_should_also_start"]
|
||||
)
|
||||
j1 = create_job(objects1.job_template)
|
||||
objects2 = job_template_factory('jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2')
|
||||
objects2.job_template.instance_groups.add(ig1)
|
||||
objects2.job_template.instance_groups.add(ig2)
|
||||
j1_1 = objects2.jobs['job_should_also_start']
|
||||
j1_1.status = 'pending'
|
||||
j1_1.save()
|
||||
j1_1 = create_job(objects2.job_template)
|
||||
tm = TaskManager()
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = 500
|
||||
|
||||
@@ -3,21 +3,19 @@ from unittest import mock
|
||||
import json
|
||||
from datetime import timedelta
|
||||
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.main.models import WorkflowJobTemplate, JobTemplate, Job
|
||||
from awx.main.models.ha import Instance
|
||||
from . import create_job
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_group, job_template_factory, mocker):
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
|
||||
j = objects.jobs["job_should_start"]
|
||||
j.status = 'pending'
|
||||
j.save()
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
j = create_job(objects.job_template)
|
||||
with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
|
||||
@@ -32,10 +30,8 @@ class TestJobLifeCycle:
|
||||
expect_commit - list of expected on_commit calls
|
||||
If any of these are None, then the assertion is not made.
|
||||
"""
|
||||
if expect_schedule and len(expect_schedule) > 1:
|
||||
raise RuntimeError('Task manager should reschedule itself one time, at most.')
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJob.websocket_emit_status') as mock_channel:
|
||||
with mock.patch('awx.main.utils.common._schedule_task_manager') as tm_sch:
|
||||
with mock.patch('awx.main.utils.common.ScheduleManager._schedule') as tm_sch:
|
||||
# Jobs are ultimately submitted in an on_commit hook, but this will not
# actually run, because it waits for the outer transaction, which is the
# test itself in this case
|
||||
@@ -56,22 +52,21 @@ class TestJobLifeCycle:
|
||||
wj = wfjt.create_unified_job()
|
||||
assert wj.workflow_nodes.count() == 2
|
||||
wj.signal_start()
|
||||
tm = TaskManager()
|
||||
|
||||
# Transitions workflow job to running
|
||||
# needs to re-schedule so it spawns jobs next round
|
||||
self.run_tm(tm, [mock.call('running')], [mock.call()])
|
||||
self.run_tm(TaskManager(), [mock.call('running')])
|
||||
|
||||
# Spawns jobs
|
||||
# needs re-schedule to submit jobs next round
|
||||
self.run_tm(tm, [mock.call('pending'), mock.call('pending')], [mock.call()])
|
||||
self.run_tm(WorkflowManager(), [mock.call('pending'), mock.call('pending')])
|
||||
|
||||
assert jt.jobs.count() == 2 # task manager spawned jobs
|
||||
|
||||
# Submits jobs
|
||||
# intermission - jobs will run and reschedule TM when finished
|
||||
self.run_tm(tm, [mock.call('waiting'), mock.call('waiting')], [])
|
||||
|
||||
self.run_tm(DependencyManager()) # flip dependencies_processed to True
|
||||
self.run_tm(TaskManager())
|
||||
# I am the job runner
|
||||
for job in jt.jobs.all():
|
||||
job.status = 'successful'
|
||||
@@ -79,7 +74,7 @@ class TestJobLifeCycle:
|
||||
|
||||
# Finishes workflow
|
||||
# no further action is necessary, so rescheduling should not happen
|
||||
self.run_tm(tm, [mock.call('successful')], [])
|
||||
self.run_tm(WorkflowManager(), [mock.call('successful')])
|
||||
|
||||
def test_task_manager_workflow_workflow_rescheduling(self, controlplane_instance_group):
|
||||
wfjts = [WorkflowJobTemplate.objects.create(name='foo')]
|
||||
@@ -90,16 +85,13 @@ class TestJobLifeCycle:
|
||||
|
||||
wj = wfjts[0].create_unified_job()
|
||||
wj.signal_start()
|
||||
tm = TaskManager()
|
||||
|
||||
while wfjts[0].status != 'successful':
|
||||
wfjts[1].refresh_from_db()
|
||||
if wfjts[1].status == 'successful':
|
||||
# final run, no more work to do
|
||||
self.run_tm(tm, expect_schedule=[])
|
||||
else:
|
||||
self.run_tm(tm, expect_schedule=[mock.call()])
|
||||
attempts = 10
|
||||
while wfjts[0].status != 'successful' and attempts > 0:
|
||||
self.run_tm(TaskManager())
|
||||
self.run_tm(WorkflowManager())
|
||||
wfjts[0].refresh_from_db()
|
||||
attempts -= 1
|
||||
|
||||
def test_control_and_execution_instance(self, project, system_job_template, job_template, inventory_source, control_instance, execution_instance):
|
||||
assert Instance.objects.count() == 2
|
||||
@@ -113,6 +105,7 @@ class TestJobLifeCycle:
|
||||
for uj in all_ujs:
|
||||
uj.signal_start()
|
||||
|
||||
DependencyManager().schedule()
|
||||
tm = TaskManager()
|
||||
self.run_tm(tm)
|
||||
|
||||
@@ -135,6 +128,7 @@ class TestJobLifeCycle:
|
||||
for uj in all_ujs:
|
||||
uj.signal_start()
|
||||
|
||||
DependencyManager().schedule()
|
||||
# There is only enough control capacity to run one of the jobs so one should end up in pending and the other in waiting
|
||||
tm = TaskManager()
|
||||
self.run_tm(tm)
|
||||
@@ -157,6 +151,7 @@ class TestJobLifeCycle:
|
||||
for uj in all_ujs:
|
||||
uj.signal_start()
|
||||
|
||||
DependencyManager().schedule()
|
||||
# There is only enough control capacity to run one of the jobs so one should end up in pending and the other in waiting
|
||||
tm = TaskManager()
|
||||
self.run_tm(tm)
|
||||
@@ -197,63 +192,49 @@ class TestJobLifeCycle:
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_single_jt_multi_job_launch_blocks_last(controlplane_instance_group, job_template_factory, mocker):
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
objects = job_template_factory(
|
||||
'jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start", "job_should_not_start"]
|
||||
)
|
||||
j1 = objects.jobs["job_should_start"]
|
||||
j1.status = 'pending'
|
||||
def test_single_jt_multi_job_launch_blocks_last(job_template_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
j1 = create_job(objects.job_template)
|
||||
j2 = create_job(objects.job_template)
|
||||
|
||||
TaskManager().schedule()
|
||||
j1.refresh_from_db()
|
||||
j2.refresh_from_db()
|
||||
assert j1.status == "waiting"
|
||||
assert j2.status == "pending"
|
||||
|
||||
# mimic running j1 to unblock j2
|
||||
j1.status = "successful"
|
||||
j1.save()
|
||||
j2 = objects.jobs["job_should_not_start"]
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j1, controlplane_instance_group, [], instance)
|
||||
j1.status = "successful"
|
||||
j1.save()
|
||||
with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j2, controlplane_instance_group, [], instance)
|
||||
TaskManager().schedule()
|
||||
|
||||
j2.refresh_from_db()
|
||||
assert j2.status == "waiting"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_single_jt_multi_job_launch_allow_simul_allowed(controlplane_instance_group, job_template_factory, mocker):
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
objects = job_template_factory(
|
||||
'jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start", "job_should_not_start"]
|
||||
)
|
||||
def test_single_jt_multi_job_launch_allow_simul_allowed(job_template_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
jt = objects.job_template
|
||||
jt.allow_simultaneous = True
|
||||
jt.save()
|
||||
|
||||
j1 = objects.jobs["job_should_start"]
|
||||
j1.allow_simultaneous = True
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
j2 = objects.jobs["job_should_not_start"]
|
||||
j2.allow_simultaneous = True
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
|
||||
)
|
||||
j1 = create_job(objects.job_template)
|
||||
j2 = create_job(objects.job_template)
|
||||
TaskManager().schedule()
|
||||
j1.refresh_from_db()
|
||||
j2.refresh_from_db()
|
||||
assert j1.status == "waiting"
|
||||
assert j2.status == "waiting"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocker):
|
||||
instance = hybrid_instance
|
||||
controlplane_instance_group = instance.rampart_groups.first()
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
|
||||
objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_not_start"])
|
||||
j1 = objects1.jobs["job_should_start"]
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
j2 = objects2.jobs["job_should_not_start"]
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
|
||||
objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2')
|
||||
j1 = create_job(objects1.job_template)
|
||||
j2 = create_job(objects2.job_template)
|
||||
tm = TaskManager()
|
||||
with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
|
||||
mock_task_impact.return_value = 505
|
||||
@@ -269,11 +250,9 @@ def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocke
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_single_job_dependencies_project_launch(controlplane_instance_group, job_template_factory, mocker):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
j = objects.jobs["job_should_start"]
|
||||
j.status = 'pending'
|
||||
j.save()
|
||||
j = create_job(objects.job_template, dependencies_processed=False)
|
||||
p = objects.project
|
||||
p.scm_update_on_launch = True
|
||||
p.scm_update_cache_timeout = 0
|
||||
@@ -281,12 +260,13 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
|
||||
p.scm_url = "http://github.com/ansible/ansible.git"
|
||||
p.save(skip_update=True)
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
tm = TaskManager()
|
||||
with mock.patch.object(TaskManager, "create_project_update", wraps=tm.create_project_update) as mock_pu:
|
||||
tm.schedule()
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
|
||||
dm.schedule()
|
||||
mock_pu.assert_called_once_with(j)
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
assert len(pu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, [j], instance)
|
||||
pu[0].status = "successful"
|
||||
pu[0].save()
|
||||
@@ -297,11 +277,9 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_single_job_dependencies_inventory_update_launch(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
j = objects.jobs["job_should_start"]
|
||||
j.status = 'pending'
|
||||
j.save()
|
||||
j = create_job(objects.job_template, dependencies_processed=False)
|
||||
i = objects.inventory
|
||||
ii = inventory_source_factory("ec2")
|
||||
ii.source = "ec2"
|
||||
@@ -310,12 +288,13 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
|
||||
ii.save()
|
||||
i.inventory_sources.add(ii)
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
tm = TaskManager()
|
||||
with mock.patch.object(TaskManager, "create_inventory_update", wraps=tm.create_inventory_update) as mock_iu:
|
||||
tm.schedule()
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
|
||||
dm.schedule()
|
||||
mock_iu.assert_called_once_with(j, ii)
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(iu) == 1
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, [j], instance)
|
||||
iu[0].status = "successful"
|
||||
iu[0].save()
|
||||
@@ -334,19 +313,17 @@ def test_inventory_update_launches_project_update(controlplane_instance_group, s
|
||||
iu.status = "pending"
|
||||
iu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
tm = TaskManager()
|
||||
with mock.patch.object(TaskManager, "create_project_update", wraps=tm.create_project_update) as mock_pu:
|
||||
tm.schedule()
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
|
||||
dm.schedule()
|
||||
mock_pu.assert_called_with(iu, project_id=project.id)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_dependency_with_already_updated(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
j = objects.jobs["job_should_start"]
|
||||
j.status = 'pending'
|
||||
j.save()
|
||||
j = create_job(objects.job_template, dependencies_processed=False)
|
||||
i = objects.inventory
|
||||
ii = inventory_source_factory("ec2")
|
||||
ii.source = "ec2"
|
||||
@@ -359,9 +336,9 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
|
||||
j.start_args = encrypt_field(j, field_name="start_args")
|
||||
j.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
tm = TaskManager()
|
||||
with mock.patch.object(TaskManager, "create_inventory_update", wraps=tm.create_inventory_update) as mock_iu:
|
||||
tm.schedule()
|
||||
dm = DependencyManager()
|
||||
with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
|
||||
dm.schedule()
|
||||
mock_iu.assert_not_called()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
@@ -371,13 +348,11 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
|
||||
@pytest.mark.django_db
|
||||
def test_shared_dependencies_launch(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["first_job", "second_job"])
|
||||
j1 = objects.jobs["first_job"]
|
||||
j1.status = 'pending'
|
||||
j1.save()
|
||||
j2 = objects.jobs["second_job"]
|
||||
j2.status = 'pending'
|
||||
j2.save()
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
objects.job_template.allow_simultaneous = True
|
||||
objects.job_template.save()
|
||||
j1 = create_job(objects.job_template, dependencies_processed=False)
|
||||
j2 = create_job(objects.job_template, dependencies_processed=False)
|
||||
p = objects.project
|
||||
p.scm_update_on_launch = True
|
||||
p.scm_update_cache_timeout = 300
|
||||
@@ -392,8 +367,8 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
ii.update_cache_timeout = 300
|
||||
ii.save()
|
||||
i.inventory_sources.add(ii)
|
||||
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
DependencyManager().schedule()
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.first()
|
||||
iu = ii.inventory_updates.first()
|
||||
@@ -408,12 +383,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
iu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j1, controlplane_instance_group, [], instance)
|
||||
j1.status = "successful"
|
||||
j1.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j2, controlplane_instance_group, [], instance)
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
|
||||
)
|
||||
pu = [x for x in p.project_updates.all()]
|
||||
iu = [x for x in ii.inventory_updates.all()]
|
||||
assert len(pu) == 1
|
||||
@@ -422,30 +394,27 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_not_blocking_project_update(controlplane_instance_group, job_template_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job"])
|
||||
job = objects.jobs["job"]
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
|
||||
job = objects.job_template.create_unified_job()
|
||||
job.instance_group = controlplane_instance_group
|
||||
job.dependencies_processed = True
|
||||
job.status = "running"
|
||||
job.save()
|
||||
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
task_manager = TaskManager()
|
||||
task_manager._schedule()
|
||||
|
||||
proj = objects.project
|
||||
project_update = proj.create_project_update()
|
||||
project_update.instance_group = controlplane_instance_group
|
||||
project_update.status = "pending"
|
||||
project_update.save()
|
||||
assert not task_manager.job_blocked_by(project_update)
|
||||
|
||||
dependency_graph = DependencyGraph()
|
||||
dependency_graph.add_job(job)
|
||||
assert not dependency_graph.task_blocked_by(project_update)
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, [], instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_not_blocking_inventory_update(controlplane_instance_group, job_template_factory, inventory_source_factory):
|
||||
instance = controlplane_instance_group.instances.all()[0]
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job"])
|
||||
job = objects.jobs["job"]
|
||||
job.instance_group = controlplane_instance_group
|
||||
@@ -453,9 +422,6 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp
|
||||
job.save()
|
||||
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
task_manager = TaskManager()
|
||||
task_manager._schedule()
|
||||
|
||||
inv = objects.inventory
|
||||
inv_source = inventory_source_factory("ec2")
|
||||
inv_source.source = "ec2"
|
||||
@@ -465,11 +431,9 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp
|
||||
inventory_update.status = "pending"
|
||||
inventory_update.save()
|
||||
|
||||
assert not task_manager.job_blocked_by(inventory_update)
|
||||
|
||||
dependency_graph = DependencyGraph()
|
||||
dependency_graph.add_job(job)
|
||||
assert not dependency_graph.task_blocked_by(inventory_update)
|
||||
DependencyManager().schedule()
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, [], instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -484,7 +448,7 @@ def test_generate_dependencies_only_once(job_template_factory):
|
||||
# job starts with dependencies_processed as False
|
||||
assert not job.dependencies_processed
|
||||
# run one cycle of ._schedule() to generate dependencies
|
||||
TaskManager()._schedule()
|
||||
DependencyManager().schedule()
|
||||
|
||||
# make sure dependencies_processed is now True
|
||||
job = Job.objects.filter(name="job_gen_dep")[0]
|
||||
@@ -492,7 +456,7 @@ def test_generate_dependencies_only_once(job_template_factory):
|
||||
|
||||
# Run ._schedule() again, but make sure .generate_dependencies() is not
|
||||
# called with job in the argument list
|
||||
tm = TaskManager()
|
||||
tm.generate_dependencies = mock.MagicMock(return_value=[])
|
||||
tm._schedule()
|
||||
tm.generate_dependencies.assert_has_calls([mock.call([]), mock.call([])])
|
||||
dm = DependencyManager()
|
||||
dm.generate_dependencies = mock.MagicMock(return_value=[])
|
||||
dm.schedule()
|
||||
dm.generate_dependencies.assert_not_called()
|
||||
|
||||
@@ -199,9 +199,7 @@ class TestAutoScaling:
|
||||
assert len(self.pool) == 10
|
||||
|
||||
# cleanup should scale down to 8 workers
|
||||
with mock.patch('awx.main.dispatch.reaper.reap') as reap:
|
||||
self.pool.cleanup()
|
||||
reap.assert_called()
|
||||
self.pool.cleanup()
|
||||
assert len(self.pool) == 2
|
||||
|
||||
def test_max_scale_up(self):
|
||||
@@ -249,9 +247,7 @@ class TestAutoScaling:
|
||||
time.sleep(1) # wait a moment for sigterm
|
||||
|
||||
# clean up the dead worker
|
||||
with mock.patch('awx.main.dispatch.reaper.reap') as reap:
|
||||
self.pool.cleanup()
|
||||
reap.assert_called()
|
||||
self.pool.cleanup()
|
||||
assert len(self.pool) == 1
|
||||
assert self.pool.workers[0].pid == alive_pid
|
||||
|
||||
@@ -353,7 +349,7 @@ class TestJobReaper(object):
|
||||
('waiting', '', '', None, False), # waiting, not assigned to the instance
|
||||
('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago
|
||||
('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago
|
||||
('waiting', 'awx', '', yesterday, True), # waiting, assigned to the execution_node, stale
|
||||
('waiting', 'awx', '', yesterday, False), # waiting, managed by another node, ignore
|
||||
('waiting', '', 'awx', yesterday, True), # waiting, assigned to the controller_node, stale
|
||||
],
|
||||
)
|
||||
@@ -372,6 +368,7 @@ class TestJobReaper(object):
|
||||
# (because .save() overwrites it to _now_)
|
||||
Job.objects.filter(id=j.id).update(modified=modified)
|
||||
reaper.reap(i)
|
||||
reaper.reap_waiting(i)
|
||||
job = Job.objects.first()
|
||||
if fail:
|
||||
assert job.status == 'failed'
|
||||
|
||||
@@ -261,5 +261,6 @@ def test_inventory_update_injected_content(this_kind, inventory, fake_credential
|
||||
with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
|
||||
# The point of this test is that we replace run with assertions
|
||||
with mock.patch('awx.main.tasks.receptor.AWXReceptorJob.run', substitute_run):
|
||||
# so this sets up everything for a run and then yields control over to substitute_run
|
||||
task.run(inventory_update.pk)
|
||||
with mock.patch('awx.main.tasks.jobs.create_partition'):
|
||||
# so this sets up everything for a run and then yields control over to substitute_run
|
||||
task.run(inventory_update.pk)
|
||||
|
||||
@@ -4,6 +4,7 @@ import os
|
||||
import tempfile
|
||||
import shutil
|
||||
|
||||
from awx.main.tasks.jobs import RunJob
|
||||
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
|
||||
from awx.main.models import Instance, Job
|
||||
|
||||
@@ -61,3 +62,16 @@ def test_folder_cleanup_running_job(mock_job_folder, mock_me):
|
||||
job.save(update_fields=['status'])
|
||||
_cleanup_images_and_files(grace_period=0)
|
||||
assert not os.path.exists(mock_job_folder) # job is finished and no grace period, should delete
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_does_not_run_reaped_job(mocker, mock_me):
|
||||
job = Job.objects.create(status='failed', job_explanation='This job has been reaped.')
|
||||
mock_run = mocker.patch('awx.main.tasks.jobs.ansible_runner.interface.run')
|
||||
try:
|
||||
RunJob().run(job.id)
|
||||
except Exception:
|
||||
pass
|
||||
job.refresh_from_db()
|
||||
assert job.status == 'failed'
|
||||
mock_run.assert_not_called()
|
||||
|
||||
47
awx/main/tests/functional/utils/test_update_model.py
Normal file
@@ -0,0 +1,47 @@
import pytest

from django.db import DatabaseError

from awx.main.models.jobs import Job
from awx.main.utils.update_model import update_model


@pytest.fixture
def normal_job(deploy_jobtemplate):
    return deploy_jobtemplate.create_unified_job()


class NewException(Exception):
    pass


@pytest.mark.django_db
def test_normal_get(normal_job):
    mod_job = Job.objects.get(pk=normal_job.id)
    mod_job.job_explanation = 'foobar'
    mod_job.save(update_fields=['job_explanation'])
    new_job = update_model(Job, normal_job.pk)
    assert new_job.job_explanation == 'foobar'


@pytest.mark.django_db
def test_exception(normal_job, mocker):
    mocker.patch.object(Job.objects, 'get', side_effect=DatabaseError)
    mocker.patch('awx.main.utils.update_model.time.sleep')
    with pytest.raises(DatabaseError):
        update_model(Job, normal_job.pk)


@pytest.mark.django_db
def test_unknown_exception(normal_job, mocker):
    mocker.patch.object(Job.objects, 'get', side_effect=NewException)
    mocker.patch('awx.main.utils.update_model.time.sleep')
    with pytest.raises(NewException):
        update_model(Job, normal_job.pk)


@pytest.mark.django_db
def test_deleted_job(normal_job):
    job_pk = normal_job.pk
    normal_job.delete()
    assert update_model(Job, job_pk) is None
@@ -90,7 +90,7 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified is None
|
||||
assert hosts[1].ansible_facts == ansible_facts_new
|
||||
hosts[1].save.assert_called_once_with()
|
||||
hosts[1].save.assert_called_once_with(update_fields=['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
|
||||
|
||||
@@ -34,7 +34,7 @@ from awx.main.models import (
|
||||
)
|
||||
from awx.main.models.credential import HIDDEN_PASSWORD, ManagedCredentialType
|
||||
|
||||
from awx.main.tasks import jobs, system
|
||||
from awx.main.tasks import jobs, system, receptor
|
||||
from awx.main.utils import encrypt_field, encrypt_value
|
||||
from awx.main.utils.safe_yaml import SafeLoader
|
||||
from awx.main.utils.execution_environments import CONTAINER_ROOT
|
||||
@@ -42,6 +42,8 @@ from awx.main.utils.execution_environments import CONTAINER_ROOT
|
||||
from awx.main.utils.licensing import Licenser
|
||||
from awx.main.constants import JOB_VARIABLE_PREFIXES
|
||||
|
||||
from receptorctl.socket_interface import ReceptorControl
|
||||
|
||||
|
||||
def to_host_path(path, private_data_dir):
|
||||
"""Given a path inside of the EE container, this gives the absolute path
|
||||
@@ -78,6 +80,12 @@ def patch_Job():
|
||||
yield
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_create_partition():
|
||||
with mock.patch('awx.main.tasks.jobs.create_partition') as cp_mock:
|
||||
yield cp_mock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def patch_Organization():
|
||||
_credentials = []
|
||||
@@ -461,7 +469,7 @@ class TestExtraVarSanitation(TestJobExecution):
|
||||
|
||||
|
||||
class TestGenericRun:
|
||||
def test_generic_failure(self, patch_Job, execution_environment, mock_me):
|
||||
def test_generic_failure(self, patch_Job, execution_environment, mock_me, mock_create_partition):
|
||||
job = Job(status='running', inventory=Inventory(), project=Project(local_path='/projects/_23_foo'))
|
||||
job.websocket_emit_status = mock.Mock()
|
||||
job.execution_environment = execution_environment
|
||||
@@ -472,7 +480,7 @@ class TestGenericRun:
|
||||
task.model.objects.get = mock.Mock(return_value=job)
|
||||
task.build_private_data_files = mock.Mock(side_effect=OSError())
|
||||
|
||||
with mock.patch('awx.main.tasks.jobs.copy_tree'):
|
||||
with mock.patch('awx.main.tasks.jobs.shutil.copytree'):
|
||||
with pytest.raises(Exception):
|
||||
task.run(1)
|
||||
|
||||
@@ -481,7 +489,7 @@ class TestGenericRun:
|
||||
assert update_model_call['status'] == 'error'
|
||||
assert update_model_call['emitted_events'] == 0
|
||||
|
||||
def test_cancel_flag(self, job, update_model_wrapper, execution_environment, mock_me):
|
||||
def test_cancel_flag(self, job, update_model_wrapper, execution_environment, mock_me, mock_create_partition):
|
||||
job.status = 'running'
|
||||
job.cancel_flag = True
|
||||
job.websocket_emit_status = mock.Mock()
|
||||
@@ -494,11 +502,11 @@ class TestGenericRun:
|
||||
task.model.objects.get = mock.Mock(return_value=job)
|
||||
task.build_private_data_files = mock.Mock()
|
||||
|
||||
with mock.patch('awx.main.tasks.jobs.copy_tree'):
|
||||
with mock.patch('awx.main.tasks.jobs.shutil.copytree'):
|
||||
with pytest.raises(Exception):
|
||||
task.run(1)
|
||||
|
||||
for c in [mock.call(1, status='running', start_args=''), mock.call(1, status='canceled')]:
|
||||
for c in [mock.call(1, start_args='', status='canceled')]:
|
||||
assert c in task.update_model.call_args_list
|
||||
|
||||
def test_event_count(self, mock_me):
|
||||
@@ -580,7 +588,7 @@ class TestGenericRun:
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestAdhocRun(TestJobExecution):
|
||||
def test_options_jinja_usage(self, adhoc_job, adhoc_update_model_wrapper, mock_me):
|
||||
def test_options_jinja_usage(self, adhoc_job, adhoc_update_model_wrapper, mock_me, mock_create_partition):
|
||||
ExecutionEnvironment.objects.create(name='Control Plane EE', managed=True)
|
||||
ExecutionEnvironment.objects.create(name='Default Job EE', managed=False)
|
||||
|
||||
@@ -1934,7 +1942,7 @@ def test_managed_injector_redaction(injector_cls):
|
||||
assert 'very_secret_value' not in str(build_safe_env(env))
|
||||
|
||||
|
||||
def test_job_run_no_ee(mock_me):
|
||||
def test_job_run_no_ee(mock_me, mock_create_partition):
|
||||
org = Organization(pk=1)
|
||||
proj = Project(pk=1, organization=org)
|
||||
job = Job(project=proj, organization=org, inventory=Inventory(pk=1))
|
||||
@@ -1944,7 +1952,7 @@ def test_job_run_no_ee(mock_me):
|
||||
task.update_model = mock.Mock(return_value=job)
|
||||
task.model.objects.get = mock.Mock(return_value=job)
|
||||
|
||||
with mock.patch('awx.main.tasks.jobs.copy_tree'):
|
||||
with mock.patch('awx.main.tasks.jobs.shutil.copytree'):
|
||||
with pytest.raises(RuntimeError) as e:
|
||||
task.pre_run_hook(job, private_data_dir)
|
||||
|
||||
@@ -1965,3 +1973,120 @@ def test_project_update_no_ee(mock_me):
|
||||
task.build_env(job, {})
|
||||
|
||||
assert 'The project could not sync because there is no Execution Environment' in str(e.value)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'work_unit_data, expected_function_call',
|
||||
[
|
||||
[
|
||||
# if (extra_data is None): continue
|
||||
{
|
||||
'zpdFi4BX': {
|
||||
'ExtraData': None,
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a string and StateName is None
|
||||
{
|
||||
"y4NgMKKW": {
|
||||
"ExtraData": "Unknown WorkType",
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a string and StateName in RECEPTOR_ACTIVE_STATES
|
||||
{
|
||||
"y4NgMKKW": {
|
||||
"ExtraData": "Unknown WorkType",
|
||||
"StateName": "Running",
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a string and StateName not in RECEPTOR_ACTIVE_STATES
|
||||
{
|
||||
"y4NgMKKW": {
|
||||
"ExtraData": "Unknown WorkType",
|
||||
"StateName": "Succeeded",
|
||||
}
|
||||
},
|
||||
True,
|
||||
],
|
||||
[
|
||||
# Extra data is a dict but RemoteWorkType is not ansible-runner
|
||||
{
|
||||
"y4NgMKKW": {
|
||||
'ExtraData': {
|
||||
'RemoteWorkType': 'not-ansible-runner',
|
||||
},
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a dict and it's an ansible-runner, but we have no params
|
||||
{
|
||||
'zpdFi4BX': {
|
||||
'ExtraData': {
|
||||
'RemoteWorkType': 'ansible-runner',
|
||||
},
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a dict and it's an ansible-runner, but params is not --worker-info
|
||||
{
|
||||
'zpdFi4BX': {
|
||||
'ExtraData': {'RemoteWorkType': 'ansible-runner', 'RemoteParams': {'params': '--not-worker-info'}},
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a dict and it's an ansible-runner, but params does not start with cleanup
|
||||
{
|
||||
'zpdFi4BX': {
|
||||
'ExtraData': {'RemoteWorkType': 'ansible-runner', 'RemoteParams': {'params': 'not cleanup stuff'}},
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a dict and it's an ansible-runner w/ params, but still running
|
||||
{
|
||||
'zpdFi4BX': {
|
||||
'ExtraData': {'RemoteWorkType': 'ansible-runner', 'RemoteParams': {'params': '--worker-info'}},
|
||||
"StateName": "Running",
|
||||
}
|
||||
},
|
||||
False,
|
||||
],
|
||||
[
|
||||
# Extra data is a dict and it's an ansible-runner w/ params, and completed
|
||||
{
|
||||
'zpdFi4BX': {
|
||||
'ExtraData': {'RemoteWorkType': 'ansible-runner', 'RemoteParams': {'params': '--worker-info'}},
|
||||
"StateName": "Succeeded",
|
||||
}
|
||||
},
|
||||
True,
|
||||
],
|
||||
],
|
||||
)
|
||||
def test_administrative_workunit_reaper(work_unit_data, expected_function_call):
|
||||
# Mock the get_receptor_ctl call and let it return a dummy object
|
||||
# It does not matter what file name we return as the socket because we won't actually call receptor (unless something is broken)
|
||||
with mock.patch('awx.main.tasks.receptor.get_receptor_ctl') as mock_get_receptor_ctl:
|
||||
mock_get_receptor_ctl.return_value = ReceptorControl('/var/run/awx-receptor/receptor.sock')
|
||||
with mock.patch('receptorctl.socket_interface.ReceptorControl.simple_command') as simple_command:
|
||||
receptor.administrative_workunit_reaper(work_list=work_unit_data)
|
||||
|
||||
if expected_function_call:
|
||||
simple_command.assert_called()
|
||||
else:
|
||||
simple_command.assert_not_called()
|
||||
|
||||
@@ -6,6 +6,7 @@ from datetime import timedelta
|
||||
import json
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
import os
|
||||
import subprocess
|
||||
import re
|
||||
@@ -78,8 +79,9 @@ __all__ = [
|
||||
'IllegalArgumentError',
|
||||
'get_custom_venv_choices',
|
||||
'get_external_account',
|
||||
'task_manager_bulk_reschedule',
|
||||
'schedule_task_manager',
|
||||
'ScheduleTaskManager',
|
||||
'ScheduleDependencyManager',
|
||||
'ScheduleWorkflowManager',
|
||||
'classproperty',
|
||||
'create_temporary_fifo',
|
||||
'truncate_stdout',
|
||||
@@ -846,6 +848,66 @@ def get_mem_effective_capacity(mem_bytes):
|
||||
|
||||
_inventory_updates = threading.local()
|
||||
_task_manager = threading.local()
|
||||
_dependency_manager = threading.local()
|
||||
_workflow_manager = threading.local()
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def task_manager_bulk_reschedule():
"""Context manager to avoid submitting the task managers multiple times."""
managers = [ScheduleTaskManager(), ScheduleWorkflowManager(), ScheduleDependencyManager()]
|
||||
try:
|
||||
for m in managers:
|
||||
m.previous_flag = getattr(m.manager_threading_local, 'bulk_reschedule', False)
|
||||
m.previous_value = getattr(m.manager_threading_local, 'needs_scheduling', False)
|
||||
m.manager_threading_local.bulk_reschedule = True
|
||||
m.manager_threading_local.needs_scheduling = False
|
||||
yield
|
||||
finally:
|
||||
for m in managers:
|
||||
m.manager_threading_local.bulk_reschedule = m.previous_flag
|
||||
if m.manager_threading_local.needs_scheduling:
|
||||
m.schedule()
|
||||
m.manager_threading_local.needs_scheduling = m.previous_value
|
||||
|
||||
|
||||
class ScheduleManager:
|
||||
def __init__(self, manager, manager_threading_local):
|
||||
self.manager = manager
|
||||
self.manager_threading_local = manager_threading_local
|
||||
|
||||
def _schedule(self):
|
||||
from django.db import connection
|
||||
|
||||
# runs right away if not in transaction
|
||||
connection.on_commit(lambda: self.manager.delay())
|
||||
|
||||
def schedule(self):
|
||||
if getattr(self.manager_threading_local, 'bulk_reschedule', False):
|
||||
self.manager_threading_local.needs_scheduling = True
|
||||
return
|
||||
self._schedule()
|
||||
|
||||
|
||||
class ScheduleTaskManager(ScheduleManager):
|
||||
def __init__(self):
|
||||
from awx.main.scheduler.tasks import task_manager
|
||||
|
||||
super().__init__(task_manager, _task_manager)
|
||||
|
||||
|
||||
class ScheduleDependencyManager(ScheduleManager):
|
||||
def __init__(self):
|
||||
from awx.main.scheduler.tasks import dependency_manager
|
||||
|
||||
super().__init__(dependency_manager, _dependency_manager)
|
||||
|
||||
|
||||
class ScheduleWorkflowManager(ScheduleManager):
|
||||
def __init__(self):
|
||||
from awx.main.scheduler.tasks import workflow_manager
|
||||
|
||||
super().__init__(workflow_manager, _workflow_manager)
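The three Schedule* classes replace the old module-level helpers with one wrapper per manager task, each deferring submission to connection.on_commit and honoring the bulk-reschedule flag above. A hedged sketch of the intended call pattern (the queryset and loop body are illustrative; AWX's real call sites are its signal handlers and task code):

# sketch: coalesce many wake-ups into at most one submission per manager
jobs_to_update = Job.objects.filter(status='new')  # illustrative queryset
with task_manager_bulk_reschedule():
    for job in jobs_to_update:
        job.status = 'pending'
        job.save(update_fields=['status'])
        ScheduleTaskManager().schedule()  # flagged now, submitted once on exit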
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
@@ -861,37 +923,6 @@ def ignore_inventory_computed_fields():
|
||||
_inventory_updates.is_updating = previous_value
|
||||
|
||||
|
||||
def _schedule_task_manager():
|
||||
from awx.main.scheduler.tasks import run_task_manager
|
||||
from django.db import connection
|
||||
|
||||
# runs right away if not in transaction
|
||||
connection.on_commit(lambda: run_task_manager.delay())
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def task_manager_bulk_reschedule():
|
||||
"""Context manager to avoid submitting task multiple times."""
|
||||
try:
|
||||
previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
|
||||
previous_value = getattr(_task_manager, 'needs_scheduling', False)
|
||||
_task_manager.bulk_reschedule = True
|
||||
_task_manager.needs_scheduling = False
|
||||
yield
|
||||
finally:
|
||||
_task_manager.bulk_reschedule = previous_flag
|
||||
if _task_manager.needs_scheduling:
|
||||
_schedule_task_manager()
|
||||
_task_manager.needs_scheduling = previous_value
|
||||
|
||||
|
||||
def schedule_task_manager():
|
||||
if getattr(_task_manager, 'bulk_reschedule', False):
|
||||
_task_manager.needs_scheduling = True
|
||||
return
|
||||
_schedule_task_manager()
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def ignore_inventory_group_removal():
|
||||
"""
|
||||
@@ -1153,3 +1184,19 @@ def cleanup_new_process(func):
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return wrapper_cleanup_new_process
|
||||
|
||||
|
||||
def log_excess_runtime(func_logger, cutoff=5.0):
|
||||
def log_excess_runtime_decorator(func):
|
||||
@wraps(func)
|
||||
def _new_func(*args, **kwargs):
|
||||
start_time = time.time()
|
||||
return_value = func(*args, **kwargs)
|
||||
delta = time.time() - start_time
|
||||
if delta > cutoff:
|
||||
func_logger.info(f'Running {func.__name__!r} took {delta:.2f}s')
|
||||
return return_value
|
||||
|
||||
return _new_func
|
||||
|
||||
return log_excess_runtime_decorator
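A hedged usage sketch of the decorator (the logger name is an assumption; any logging.Logger works):

import logging

example_logger = logging.getLogger('awx.main.example')  # assumed name


@log_excess_runtime(example_logger, cutoff=2.0)
def expensive_sync():
    ...  # logged only when the call takes longer than cutoff seconds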
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from django.db import transaction, DatabaseError, InterfaceError
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
|
||||
import logging
|
||||
import time
|
||||
@@ -32,6 +33,8 @@ def update_model(model, pk, _attempt=0, _max_attempts=5, select_for_update=False
|
||||
update_fields.append('failed')
|
||||
instance.save(update_fields=update_fields)
|
||||
return instance
|
||||
except ObjectDoesNotExist:
|
||||
return None
|
||||
except (DatabaseError, InterfaceError) as e:
|
||||
# Log out the error to the debug logger.
|
||||
logger.debug('Database error updating %s, retrying in 5 seconds (retry #%d): %s', model._meta.object_name, _attempt + 1, e)
|
||||
@@ -45,4 +48,5 @@ def update_model(model, pk, _attempt=0, _max_attempts=5, select_for_update=False
|
||||
raise RuntimeError(f'Could not fetch {pk} because of receiving abort signal')
|
||||
return update_model(model, pk, _attempt=_attempt + 1, _max_attempts=_max_attempts, **updates)
|
||||
else:
|
||||
logger.error('Failed to update %s after %d retries.', model._meta.object_name, _attempt)
|
||||
logger.warning(f'Failed to update {model._meta.object_name} pk={pk} after {_attempt} retries.')
|
||||
raise
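With the ObjectDoesNotExist branch added, callers can distinguish a deleted row (returns None) from a flaky database (retried, then re-raised). A hedged sketch of the calling pattern this enables in task code (the status value is illustrative):

job = update_model(Job, job_pk, status='running')
if job is None:
    # the job was deleted out from under us; nothing left to update
    return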
|
||||
|
||||
@@ -6,6 +6,7 @@ import os
|
||||
import re # noqa
|
||||
import sys
|
||||
import tempfile
|
||||
import socket
|
||||
from datetime import timedelta
|
||||
|
||||
|
||||
@@ -104,7 +105,7 @@ STATICFILES_DIRS = (os.path.join(BASE_DIR, 'ui', 'build', 'static'), os.path.joi
|
||||
|
||||
# Absolute filesystem path to the directory where static files are collected via
|
||||
# the collectstatic command.
|
||||
STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static')
|
||||
STATIC_ROOT = '/var/lib/awx/public/static'
|
||||
|
||||
# Static files (CSS, JavaScript, Images)
|
||||
# https://docs.djangoproject.com/en/dev/howto/static-files/
|
||||
@@ -248,6 +249,11 @@ SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL = 15
|
||||
# The maximum allowed jobs to start on a given task manager cycle
|
||||
START_TASK_LIMIT = 100
|
||||
|
||||
# Time out task managers if they take longer than this many seconds, plus TASK_MANAGER_TIMEOUT_GRACE_PERIOD
|
||||
# We have the grace period so the task manager can bail out before the timeout.
|
||||
TASK_MANAGER_TIMEOUT = 300
|
||||
TASK_MANAGER_TIMEOUT_GRACE_PERIOD = 60
|
||||
|
||||
# Disallow sending session cookies over insecure connections
|
||||
SESSION_COOKIE_SECURE = True
|
||||
|
||||
@@ -373,6 +379,7 @@ AUTHENTICATION_BACKENDS = (
|
||||
'social_core.backends.github_enterprise.GithubEnterpriseOAuth2',
|
||||
'social_core.backends.github_enterprise.GithubEnterpriseOrganizationOAuth2',
|
||||
'social_core.backends.github_enterprise.GithubEnterpriseTeamOAuth2',
|
||||
'social_core.backends.open_id_connect.OpenIdConnectAuth',
|
||||
'social_core.backends.azuread.AzureADOAuth2',
|
||||
'awx.sso.backends.SAMLAuth',
|
||||
'awx.main.backends.AWXModelBackend',
|
||||
@@ -427,12 +434,20 @@ os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
|
||||
|
||||
# heartbeat period can factor into some forms of logic, so it is maintained as a setting here
|
||||
CLUSTER_NODE_HEARTBEAT_PERIOD = 60
|
||||
|
||||
# Number of missed heartbeats until a node gets marked as lost
|
||||
CLUSTER_NODE_MISSED_HEARTBEAT_TOLERANCE = 2
|
||||
|
||||
RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD = 60 # https://github.com/ansible/receptor/blob/aa1d589e154d8a0cb99a220aff8f98faf2273be6/pkg/netceptor/netceptor.go#L34
|
||||
EXECUTION_NODE_REMEDIATION_CHECKS = 60 * 30  # once every 30 minutes, check whether an execution node's errors have been resolved
|
||||
|
||||
# Amount of time the dispatcher will keep trying to reconnect to the database for running jobs and consuming new work
|
||||
DISPATCHER_DB_DOWNTOWN_TOLLERANCE = 40
|
||||
|
||||
# Minimum time to wait after last job finished before scaling down a worker
|
||||
# A lower value will free up memory more aggressively, but a higher value will require less forking
|
||||
DISPATCHER_SCALE_DOWN_WAIT_TIME = 60
|
||||
|
||||
BROKER_URL = 'unix:///var/run/redis/redis.sock'
|
||||
CELERYBEAT_SCHEDULE = {
|
||||
'tower_scheduler': {'task': 'awx.main.tasks.system.awx_periodic_scheduler', 'schedule': timedelta(seconds=30), 'options': {'expires': 20}},
|
||||
@@ -442,7 +457,8 @@ CELERYBEAT_SCHEDULE = {
|
||||
'options': {'expires': 50},
|
||||
},
|
||||
'gather_analytics': {'task': 'awx.main.tasks.system.gather_analytics', 'schedule': timedelta(minutes=5)},
|
||||
'task_manager': {'task': 'awx.main.scheduler.tasks.run_task_manager', 'schedule': timedelta(seconds=20), 'options': {'expires': 20}},
|
||||
'task_manager': {'task': 'awx.main.scheduler.tasks.task_manager', 'schedule': timedelta(seconds=20), 'options': {'expires': 20}},
|
||||
'dependency_manager': {'task': 'awx.main.scheduler.tasks.dependency_manager', 'schedule': timedelta(seconds=20), 'options': {'expires': 20}},
|
||||
'k8s_reaper': {'task': 'awx.main.tasks.system.awx_k8s_reaper', 'schedule': timedelta(seconds=60), 'options': {'expires': 50}},
|
||||
'receptor_reaper': {'task': 'awx.main.tasks.system.awx_receptor_workunit_reaper', 'schedule': timedelta(seconds=60)},
|
||||
'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
|
||||
@@ -807,86 +823,25 @@ LOGGING = {
|
||||
'dispatcher': {'format': '%(asctime)s %(levelname)-8s [%(guid)s] %(name)s PID:%(process)d %(message)s'},
|
||||
'job_lifecycle': {'()': 'awx.main.utils.formatters.JobLifeCycleFormatter'},
|
||||
},
|
||||
# Extended below based on install scenario. You probably don't want to add something directly here.
|
||||
# See 'handler_config' below.
|
||||
'handlers': {
|
||||
'console': {
|
||||
'()': 'logging.StreamHandler',
|
||||
'level': 'DEBUG',
|
||||
'filters': ['require_debug_true_or_test', 'dynamic_level_filter', 'guid'],
|
||||
'filters': ['dynamic_level_filter', 'guid'],
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'null': {'class': 'logging.NullHandler'},
|
||||
'file': {'class': 'logging.NullHandler', 'formatter': 'simple'},
|
||||
'syslog': {'level': 'WARNING', 'filters': ['require_debug_false'], 'class': 'logging.NullHandler', 'formatter': 'simple'},
|
||||
'inventory_import': {'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'timed_import'},
|
||||
'external_logger': {
|
||||
'class': 'awx.main.utils.handlers.RSysLogHandler',
|
||||
'formatter': 'json',
|
||||
'address': '/var/run/awx-rsyslog/rsyslog.sock',
|
||||
'filters': ['external_log_enabled', 'dynamic_level_filter', 'guid'],
|
||||
},
|
||||
'tower_warnings': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'tower.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'callback_receiver': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'callback_receiver.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'dispatcher': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'dispatcher.log'),
|
||||
'formatter': 'dispatcher',
|
||||
},
|
||||
'wsbroadcast': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'wsbroadcast.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'celery.beat': {'class': 'logging.StreamHandler', 'level': 'ERROR'}, # don't log every celerybeat wakeup
|
||||
'inventory_import': {'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'timed_import'},
|
||||
'task_system': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'task_system.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'management_playbooks': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false'],
|
||||
'filename': os.path.join(LOG_ROOT, 'management_playbooks.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'system_tracking_migrations': {
|
||||
'level': 'WARNING',
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false'],
|
||||
'filename': os.path.join(LOG_ROOT, 'tower_system_tracking_migrations.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'rbac_migrations': {
|
||||
'level': 'WARNING',
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false'],
|
||||
'filename': os.path.join(LOG_ROOT, 'tower_rbac_migrations.log'),
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'job_lifecycle': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filename': os.path.join(LOG_ROOT, 'job_lifecycle.log'),
|
||||
'formatter': 'job_lifecycle',
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
'django': {'handlers': ['console']},
|
||||
@@ -919,6 +874,40 @@ LOGGING = {
|
||||
},
|
||||
}
|
||||
|
||||
# Log handler configuration. Keys are the name of the handler. Be mindful when renaming things here.
|
||||
# People might have created custom settings files that augments the behavior of these.
|
||||
# Specify 'filename' (used if the environment variable AWX_LOGGING_MODE is unset or 'file')
|
||||
# and an optional 'formatter'. If no formatter is specified, 'simple' is used.
|
||||
handler_config = {
|
||||
'tower_warnings': {'filename': 'tower.log'},
|
||||
'callback_receiver': {'filename': 'callback_receiver.log'},
|
||||
'dispatcher': {'filename': 'dispatcher.log', 'formatter': 'dispatcher'},
|
||||
'wsbroadcast': {'filename': 'wsbroadcast.log'},
|
||||
'task_system': {'filename': 'task_system.log'},
|
||||
'rbac_migrations': {'filename': 'tower_rbac_migrations.log'},
|
||||
'job_lifecycle': {'filename': 'job_lifecycle.log', 'formatter': 'job_lifecycle'},
|
||||
}
|
||||
|
||||
# If running on a VM, we log to files. When running in a container, we log to stdout.
|
||||
logging_mode = os.getenv('AWX_LOGGING_MODE', 'file')
|
||||
if logging_mode not in ('file', 'stdout'):
|
||||
raise Exception("AWX_LOGGING_MODE must be 'file' or 'stdout'")
|
||||
|
||||
for name, config in handler_config.items():
|
||||
# Common log handler config. Don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
LOGGING['handlers'][name] = {'filters': ['dynamic_level_filter', 'guid'], 'formatter': config.get('formatter', 'simple')}
|
||||
|
||||
if logging_mode == 'file':
|
||||
LOGGING['handlers'][name]['class'] = 'logging.handlers.WatchedFileHandler'
|
||||
LOGGING['handlers'][name]['filename'] = os.path.join(LOG_ROOT, config['filename'])
|
||||
|
||||
if logging_mode == 'stdout':
|
||||
LOGGING['handlers'][name]['class'] = 'logging.NullHandler'
|
||||
|
||||
# Prevents logging to stdout on traditional VM installs
|
||||
if logging_mode == 'file':
|
||||
LOGGING['handlers']['console']['filters'].insert(0, 'require_debug_true_or_test')
|
||||
|
||||
# Apply coloring to messages logged to the console
|
||||
COLOR_LOGS = False
|
||||
|
||||
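
For illustration only (not part of the diff): the loop above expands each handler_config entry into a LOGGING handler whose class depends on AWX_LOGGING_MODE. A minimal sketch of what the 'dispatcher' entry becomes, assuming LOG_ROOT is /var/log/tower:

# Hypothetical illustration of the loop's output above; the LOG_ROOT value is assumed.
import os

LOG_ROOT = '/var/log/tower'  # assumed for the example

# AWX_LOGGING_MODE unset or 'file' (traditional VM install):
file_mode_handler = {
    'filters': ['dynamic_level_filter', 'guid'],
    'formatter': 'dispatcher',
    'class': 'logging.handlers.WatchedFileHandler',
    'filename': os.path.join(LOG_ROOT, 'dispatcher.log'),
}

# AWX_LOGGING_MODE='stdout' (container install): the file handler is muted,
# and output reaches stdout through the 'console' handler instead.
stdout_mode_handler = {
    'filters': ['dynamic_level_filter', 'guid'],
    'formatter': 'dispatcher',
    'class': 'logging.NullHandler',
}
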
@@ -1018,3 +1007,17 @@ DEFAULT_CONTAINER_RUN_OPTIONS = ['--network', 'slirp4netns:enable_ipv6=true']

# Mount exposed paths as hostPath resource in k8s/ocp
AWX_MOUNT_ISOLATED_PATHS_ON_K8S = False

# Time out task managers if they take longer than this many seconds
TASK_MANAGER_TIMEOUT = 300

# Number of seconds _in addition to_ the task manager timeout a job can stay
# in waiting without being reaped
JOB_WAITING_GRACE_PERIOD = 60

# Number of seconds after a container group job's finish time to wait
# before the awx_k8s_reaper task will tear down the pods
K8S_POD_REAPER_GRACE_PERIOD = 60

# This is overridden downstream via /etc/tower/conf.d/cluster_host_id.py
CLUSTER_HOST_ID = socket.gethostname()
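
A quick worked example (not part of the diff) of how these timeouts compose, if the comments above are read literally: a task manager may run for up to TASK_MANAGER_TIMEOUT seconds, and a job may sit in 'waiting' for that long plus JOB_WAITING_GRACE_PERIOD before it is eligible to be reaped.

# Illustrative arithmetic only; values copied from the settings above.
TASK_MANAGER_TIMEOUT = 300
JOB_WAITING_GRACE_PERIOD = 60
reap_waiting_jobs_after = TASK_MANAGER_TIMEOUT + JOB_WAITING_GRACE_PERIOD  # 360 seconds
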
@@ -78,18 +78,6 @@ include(optional('/etc/tower/conf.d/*.py'), scope=locals())
BASE_VENV_PATH = "/var/lib/awx/venv/"
AWX_VENV_PATH = os.path.join(BASE_VENV_PATH, "awx")

# If any local_*.py files are present in awx/settings/, use them to override
# default settings for development. If not present, we can still run using
# only the defaults.
try:
    if os.getenv('AWX_KUBE_DEVEL', False):
        include(optional('minikube.py'), scope=locals())
    else:
        include(optional('local_*.py'), scope=locals())
except ImportError:
    traceback.print_exc()
    sys.exit(1)

# Use SQLite for unit tests instead of PostgreSQL. If the lines below are
# commented out, Django will create the test_awx-dev database in PostgreSQL to
# run unit tests.

@@ -110,5 +98,26 @@ CLUSTER_HOST_ID = socket.gethostname()

AWX_CALLBACK_PROFILE = True

# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# Disable normal scheduled/triggered task managers (DependencyManager, TaskManager, WorkflowManager).
# Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = False
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================

if 'sqlite3' not in DATABASES['default']['ENGINE']:  # noqa
    DATABASES['default'].setdefault('OPTIONS', dict()).setdefault('application_name', f'{CLUSTER_HOST_ID}-{os.getpid()}-{" ".join(sys.argv)}'[:63])  # noqa

# If any local_*.py files are present in awx/settings/, use them to override
# default settings for development. If not present, we can still run using
# only the defaults.
# this needs to stay at the bottom of this file
try:
    if os.getenv('AWX_KUBE_DEVEL', False):
        include(optional('minikube.py'), scope=locals())
    else:
        include(optional('local_*.py'), scope=locals())
except ImportError:
    traceback.print_exc()
    sys.exit(1)
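
As the comments in the development settings note, any local_*.py dropped into awx/settings/ overrides these defaults. A minimal hypothetical local_settings.py (the file name and usage below are assumed, not part of the diff) that disables the periodic task managers for profiling:

# awx/settings/local_settings.py -- hypothetical example
# Only honored when SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = True
# With the periodic managers off, a manager can then be invoked by hand
# from a shell for debugging, e.g. (import path assumed):
#   from awx.main.scheduler import TaskManager
#   TaskManager().schedule()
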
@@ -68,6 +68,7 @@ class LDAPSettings(BaseLDAPSettings):


class LDAPBackend(BaseLDAPBackend):

    """
    Custom LDAP backend for AWX.
    """

@@ -116,7 +117,17 @@ class LDAPBackend(BaseLDAPBackend):
        for setting_name, type_ in [('GROUP_SEARCH', 'LDAPSearch'), ('GROUP_TYPE', 'LDAPGroupType')]:
            if getattr(self.settings, setting_name) is None:
                raise ImproperlyConfigured("{} must be an {} instance.".format(setting_name, type_))
        return super(LDAPBackend, self).authenticate(request, username, password)
        ldap_user = super(LDAPBackend, self).authenticate(request, username, password)
        # If we have an LDAP user and that user we found has an ldap_user internal object and that object has a bound connection
        # Then we can try and force an unbind to close the sticky connection
        if ldap_user and ldap_user.ldap_user and ldap_user.ldap_user._connection_bound:
            logger.debug("Forcing LDAP connection to close")
            try:
                ldap_user.ldap_user._connection.unbind_s()
                ldap_user.ldap_user._connection_bound = False
            except Exception:
                logger.exception(f"Got unexpected LDAP exception when forcing LDAP disconnect for user {ldap_user}, login will still proceed")
        return ldap_user
    except Exception:
        logger.exception("Encountered an error authenticating to LDAP")
        return None
@@ -1215,6 +1215,54 @@ register(
    placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
)

###############################################################################
# Generic OIDC AUTHENTICATION SETTINGS
###############################################################################

register(
    'SOCIAL_AUTH_OIDC_KEY',
    field_class=fields.CharField,
    allow_null=False,
    default=None,
    label=_('OIDC Key'),
    help_text='The OIDC key (Client ID) from your IDP.',
    category=_('Generic OIDC'),
    category_slug='oidc',
)

register(
    'SOCIAL_AUTH_OIDC_SECRET',
    field_class=fields.CharField,
    allow_blank=True,
    default='',
    label=_('OIDC Secret'),
    help_text=_('The OIDC secret (Client Secret) from your IDP.'),
    category=_('Generic OIDC'),
    category_slug='oidc',
    encrypted=True,
)

register(
    'SOCIAL_AUTH_OIDC_OIDC_ENDPOINT',
    field_class=fields.CharField,
    allow_blank=True,
    default='',
    label=_('OIDC Provider URL'),
    help_text=_('The URL for your OIDC provider including the path up to /.well-known/openid-configuration'),
    category=_('Generic OIDC'),
    category_slug='oidc',
)

register(
    'SOCIAL_AUTH_OIDC_VERIFY_SSL',
    field_class=fields.BooleanField,
    default=True,
    label=_('Verify OIDC Provider Certificate'),
    help_text=_('Verify the OIDC provider SSL certificate.'),
    category=_('Generic OIDC'),
    category_slug='oidc',
)

###############################################################################
# SAML AUTHENTICATION SETTINGS
###############################################################################

@@ -1535,12 +1583,12 @@ register(
    category_slug='saml',
    placeholder=[
        ('is_superuser_attr', 'saml_attr'),
        ('is_superuser_value', 'value'),
        ('is_superuser_role', 'saml_role'),
        ('is_superuser_value', ['value']),
        ('is_superuser_role', ['saml_role']),
        ('remove_superusers', True),
        ('is_system_auditor_attr', 'saml_attr'),
        ('is_system_auditor_value', 'value'),
        ('is_system_auditor_role', 'saml_role'),
        ('is_system_auditor_value', ['value']),
        ('is_system_auditor_role', ['saml_role']),
        ('remove_system_auditors', True),
    ],
)
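
For illustration (not part of the diff): with the placeholder change above, the *_value and *_role entries of SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR accept lists, so one setting can match any of several IdP roles. A hypothetical value, with made-up attribute and role names:

# Hypothetical setting value; attribute and role names are invented for the example.
SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR = {
    'is_superuser_attr': 'memberOf',
    'is_superuser_value': ['awx-admins', 'platform-admins'],  # any single match grants the flag
    'is_superuser_role': ['awx-superuser'],
    'remove_superusers': True,
}
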
@@ -149,6 +149,7 @@ class AuthenticationBackendsField(fields.StringListField):
        ('awx.sso.backends.RADIUSBackend', ['RADIUS_SERVER']),
        ('social_core.backends.google.GoogleOAuth2', ['SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET']),
        ('social_core.backends.github.GithubOAuth2', ['SOCIAL_AUTH_GITHUB_KEY', 'SOCIAL_AUTH_GITHUB_SECRET']),
        ('social_core.backends.open_id_connect.OpenIdConnectAuth', ['SOCIAL_AUTH_OIDC_KEY', 'SOCIAL_AUTH_OIDC_SECRET', 'SOCIAL_AUTH_OIDC_OIDC_ENDPOINT']),
        (
            'social_core.backends.github.GithubOrganizationOAuth2',
            ['SOCIAL_AUTH_GITHUB_ORG_KEY', 'SOCIAL_AUTH_GITHUB_ORG_SECRET', 'SOCIAL_AUTH_GITHUB_ORG_NAME'],

@@ -741,12 +742,12 @@ class SAMLTeamAttrField(HybridDictField):
class SAMLUserFlagsAttrField(HybridDictField):

    is_superuser_attr = fields.CharField(required=False, allow_null=True)
    is_superuser_value = fields.CharField(required=False, allow_null=True)
    is_superuser_role = fields.CharField(required=False, allow_null=True)
    is_superuser_value = fields.StringListField(required=False, allow_null=True)
    is_superuser_role = fields.StringListField(required=False, allow_null=True)
    remove_superusers = fields.BooleanField(required=False, allow_null=True)
    is_system_auditor_attr = fields.CharField(required=False, allow_null=True)
    is_system_auditor_value = fields.CharField(required=False, allow_null=True)
    is_system_auditor_role = fields.CharField(required=False, allow_null=True)
    is_system_auditor_value = fields.StringListField(required=False, allow_null=True)
    is_system_auditor_role = fields.StringListField(required=False, allow_null=True)
    remove_system_auditors = fields.BooleanField(required=False, allow_null=True)

    child = _Forbidden()
58 awx/sso/migrations/0003_convert_saml_string_to_list.py Normal file

@@ -0,0 +1,58 @@
from django.db import migrations, connection
import json

_values_to_change = ['is_superuser_value', 'is_superuser_role', 'is_system_auditor_value', 'is_system_auditor_role']


def _get_setting():
    with connection.cursor() as cursor:
        cursor.execute(f'SELECT value FROM conf_setting WHERE key= %s', ['SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR'])
        row = cursor.fetchone()
    if row == None:
        return {}
    existing_setting = row[0]

    try:
        existing_json = json.loads(existing_setting)
    except json.decoder.JSONDecodeError as e:
        print("Failed to decode existing json setting:")
        print(existing_setting)
        raise e

    return existing_json


def _set_setting(value):
    with connection.cursor() as cursor:
        cursor.execute(f'UPDATE conf_setting SET value = %s WHERE key = %s', [json.dumps(value), 'SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR'])


def forwards(app, schema_editor):
    # The Operation should use schema_editor to apply any changes it
    # wants to make to the database.
    existing_json = _get_setting()
    for key in _values_to_change:
        if existing_json.get(key, None) and isinstance(existing_json.get(key), str):
            existing_json[key] = [existing_json.get(key)]
    _set_setting(existing_json)


def backwards(app, schema_editor):
    existing_json = _get_setting()
    for key in _values_to_change:
        if existing_json.get(key, None) and not isinstance(existing_json.get(key), str):
            try:
                existing_json[key] = existing_json.get(key).pop()
            except IndexError:
                existing_json[key] = ""
    _set_setting(existing_json)


class Migration(migrations.Migration):
    dependencies = [
        ('sso', '0002_expand_provider_options'),
    ]

    operations = [
        migrations.RunPython(forwards, backwards),
    ]
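
For illustration, the forwards() pass above wraps any plain-string entry named in _values_to_change in a one-element list, and backwards() pops it back out. A hypothetical before/after for the stored setting:

# Illustrative data only; key names come from _values_to_change above.
before = {'is_superuser_value': 'value', 'is_superuser_attr': 'saml_attr'}
# after forwards() runs:
after = {'is_superuser_value': ['value'], 'is_superuser_attr': 'saml_attr'}
# 'is_superuser_attr' is untouched because it is not listed in _values_to_change.
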
@@ -250,7 +250,25 @@ def update_user_teams_by_saml_attr(backend, details, user=None, *args, **kwargs)
    [t.member_role.members.remove(user) for t in Team.objects.filter(Q(member_role__members=user) & ~Q(id__in=team_ids))]


def _get_matches(list1, list2):
    # Because we are just doing an intersection here we don't really care which list is in which parameter

    # A SAML provider could return either a string or a list of items so we need to coerce the SAML value into a list (if needed)
    if not isinstance(list1, (list, tuple)):
        list1 = [list1]

    # In addition, we used to allow strings in the SAML config instead of lists. The migration should take care of that but, just in case, we will convert our list too
    if not isinstance(list2, (list, tuple)):
        list2 = [list2]

    return set(list1).intersection(set(list2))
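
A quick sketch of the coercion behavior (illustrative calls only): either argument may be a bare string or a list, and the result is the set intersection of the two after coercion.

# Behavior follows the function body above; outputs shown as comments.
_get_matches('admin', ['admin', 'user'])  # {'admin'}
_get_matches(['a', 'b'], 'b')             # {'b'}
_get_matches(['x'], ['y'])                # set()
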

def _check_flag(user, flag, attributes, user_flags_settings):
    '''
    Helper function to set the is_superuser and is_system_auditor flags for the SAML adapter
    Returns the new flag and whether or not it changed the flag
    '''
    new_flag = False
    is_role_key = "is_%s_role" % (flag)
    is_attr_key = "is_%s_attr" % (flag)

@@ -258,37 +276,35 @@ def _check_flag(user, flag, attributes, user_flags_settings):
    remove_setting = "remove_%ss" % (flag)

    # Check to see if we are respecting a role and, if so, does our user have that role?
    role_setting = user_flags_settings.get(is_role_key, None)
    if role_setting:
    required_roles = user_flags_settings.get(is_role_key, None)
    if required_roles:
        matching_roles = _get_matches(required_roles, attributes.get('Role', []))

        # We do a two-layer check here so that we don't spit out the else message if there is no role defined
        if role_setting in attributes.get('Role', []):
            logger.debug("User %s has %s role %s" % (user.username, flag, role_setting))
        if matching_roles:
            logger.debug("User %s has %s role(s) %s" % (user.username, flag, ', '.join(matching_roles)))
            new_flag = True
        else:
            logger.debug("User %s is missing the %s role %s" % (user.username, flag, role_setting))
            logger.debug("User %s is missing the %s role(s) %s" % (user.username, flag, ', '.join(required_roles)))

    # Next, check to see if we are respecting an attribute; this will take priority over the role if it's defined
    attr_setting = user_flags_settings.get(is_attr_key, None)
    if attr_setting and attributes.get(attr_setting, None):
        # Do we have a required value for the attribute
        if user_flags_settings.get(is_value_key, None):
        required_value = user_flags_settings.get(is_value_key, None)
        if required_value:
            # If so, check and see if the value of the attr matches the required value
            attribute_value = attributes.get(attr_setting, None)
            attribute_matches = False
            if isinstance(attribute_value, (list, tuple)):
                if user_flags_settings.get(is_value_key) in attribute_value:
                    attribute_matches = True
            elif attribute_value == user_flags_settings.get(is_value_key):
                attribute_matches = True
            saml_user_attribute_value = attributes.get(attr_setting, None)
            matching_values = _get_matches(required_value, saml_user_attribute_value)

            if attribute_matches:
                logger.debug("Giving %s %s from attribute %s with matching value" % (user.username, flag, attr_setting))
            if matching_values:
                logger.debug("Giving %s %s from attribute %s with matching values %s" % (user.username, flag, attr_setting, ', '.join(matching_values)))
                new_flag = True
            # if they don't match make sure that new_flag is false
            else:
                logger.debug(
                    "For %s on %s attr %s (%s) did not match expected value '%s'"
                    % (flag, user.username, attr_setting, attribute_value, user_flags_settings.get(is_value_key))
                    "Refusing %s for %s because attr %s (%s) did not match value(s) %s"
                    % (flag, user.username, attr_setting, ", ".join(saml_user_attribute_value), ', '.join(required_value))
                )
                new_flag = False
            # If there was no required value then we can just allow them in because of the attribute
@@ -446,6 +446,10 @@ class TestSAMLUserFlags:
            (False, False),
            False,
        ),
        # NOTE: The first handful of tests test role/value as string instead of lists.
        # This was from the initial implementation of these fields but the code should be able to handle this
        # There are a couple tests at the end of this which will validate arrays in these values.
        #
        # In this case we will give the user a group to make them an admin
        (
            {'is_superuser_role': 'test-role-1'},

@@ -518,6 +522,30 @@ class TestSAMLUserFlags:
            (True, False),
            True,
        ),
        # Positive test for multiple values for is_superuser_value
        (
            {'is_superuser_attr': 'is_superuser', 'is_superuser_value': ['junk', 'junk2', 'else', 'junk']},
            (True, True),
            False,
        ),
        # Negative test for multiple values for is_superuser_value
        (
            {'is_superuser_attr': 'is_superuser', 'is_superuser_value': ['junk', 'junk2', 'junk']},
            (False, True),
            True,
        ),
        # Positive test for multiple values of is_superuser_role
        (
            {'is_superuser_role': ['junk', 'junk2', 'something', 'junk']},
            (True, True),
            False,
        ),
        # Negative test for multiple values of is_superuser_role
        (
            {'is_superuser_role': ['junk', 'junk2', 'junk']},
            (False, True),
            True,
        ),
    ],
)
def test__check_flag(self, user_flags_settings, expected, is_superuser):

@@ -121,12 +121,12 @@ class TestSAMLUserFlagsAttrField:
    [
        {},
        {'is_superuser_attr': 'something'},
        {'is_superuser_value': 'value'},
        {'is_superuser_role': 'my_peeps'},
        {'is_superuser_value': ['value']},
        {'is_superuser_role': ['my_peeps']},
        {'remove_superusers': False},
        {'is_system_auditor_attr': 'something_else'},
        {'is_system_auditor_value': 'value2'},
        {'is_system_auditor_role': 'other_peeps'},
        {'is_system_auditor_value': ['value2']},
        {'is_system_auditor_role': ['other_peeps']},
        {'remove_system_auditors': False},
    ],
)

@@ -147,7 +147,13 @@ class TestSAMLUserFlagsAttrField:
            'is_system_auditor_value': 'value2',
            'is_system_auditor_role': 'other_peeps',
        },
        {'junk': ['Invalid field.']},
        {
            'junk': ['Invalid field.'],
            'is_superuser_role': ['Expected a list of items but got type "str".'],
            'is_superuser_value': ['Expected a list of items but got type "str".'],
            'is_system_auditor_role': ['Expected a list of items but got type "str".'],
            'is_system_auditor_value': ['Expected a list of items but got type "str".'],
        },
    ),
    (
        {
90 awx/ui/package-lock.json generated

@@ -11,26 +11,26 @@
        "@patternfly/react-core": "^4.221.3",
        "@patternfly/react-icons": "4.75.1",
        "@patternfly/react-table": "4.93.1",
        "ace-builds": "^1.6.0",
        "ace-builds": "^1.8.1",
        "ansi-to-html": "0.7.2",
        "axios": "0.27.2",
        "codemirror": "^6.0.1",
        "d3": "7.4.4",
        "dagre": "^0.8.4",
        "dompurify": "2.3.8",
        "dompurify": "2.3.10",
        "formik": "2.2.9",
        "has-ansi": "5.0.1",
        "html-entities": "2.3.2",
        "js-yaml": "4.1.0",
        "luxon": "^2.4.0",
        "prop-types": "^15.6.2",
        "luxon": "^3.0.1",
        "prop-types": "^15.8.1",
        "react": "17.0.2",
        "react-ace": "^10.1.0",
        "react-dom": "17.0.2",
        "react-error-boundary": "^3.1.4",
        "react-router-dom": "^5.3.3",
        "react-virtualized": "^9.21.1",
        "rrule": "2.7.0",
        "rrule": "2.7.1",
        "styled-components": "5.3.5"
      },
      "devDependencies": {

@@ -5249,9 +5249,9 @@
      }
    },
    "node_modules/ace-builds": {
      "version": "1.6.0",
      "resolved": "https://registry.npmjs.org/ace-builds/-/ace-builds-1.6.0.tgz",
      "integrity": "sha512-qdkx965G/TA12IK7Zk+iCVDtA9wvhxIGivGc2rsID4UYbY2Bpatwep3ZrBZwj1IB2miU6FodDMqM9Kc1lqDlLg=="
      "version": "1.8.1",
      "resolved": "https://registry.npmjs.org/ace-builds/-/ace-builds-1.8.1.tgz",
      "integrity": "sha512-wjEQ4khMQYg9FfdEDoOtqdoHwcwFL48H0VB3te5b5A3eqHwxsTw8IX6+xzfisgborIb8dYU+1y9tcmtGFrCPIg=="
    },
    "node_modules/acorn": {
      "version": "7.4.1",

@@ -8271,9 +8271,9 @@
      }
    },
    "node_modules/dompurify": {
      "version": "2.3.8",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.3.8.tgz",
      "integrity": "sha512-eVhaWoVibIzqdGYjwsBWodIQIaXFSB+cKDf4cfxLMsK0xiud6SE+/WCVx/Xw/UwQsa4cS3T2eITcdtmTg2UKcw=="
      "version": "2.3.10",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.3.10.tgz",
      "integrity": "sha512-o7Fg/AgC7p/XpKjf/+RC3Ok6k4St5F7Q6q6+Nnm3p2zGWioAY6dh0CbbuwOhH2UcSzKsdniE/YnE2/92JcsA+g=="
    },
    "node_modules/domutils": {
      "version": "2.8.0",

@@ -15448,9 +15448,9 @@
      }
    },
    "node_modules/luxon": {
      "version": "2.4.0",
      "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.4.0.tgz",
      "integrity": "sha512-w+NAwWOUL5hO0SgwOHsMBAmZ15SoknmQXhSO0hIbJCAmPKSsGeK8MlmhYh2w6Iib38IxN2M+/ooXWLbeis7GuA==",
      "version": "3.0.1",
      "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.1.tgz",
      "integrity": "sha512-hF3kv0e5gwHQZKz4wtm4c+inDtyc7elkanAsBq+fundaCdUBNJB1dHEGUZIM6SfSBUlbVFduPwEtNjFK8wLtcw==",
      "engines": {
        "node": ">=12"
      }

@@ -17829,13 +17829,13 @@
      }
    },
    "node_modules/prop-types": {
      "version": "15.7.2",
      "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
      "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
      "version": "15.8.1",
      "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
      "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
      "dependencies": {
        "loose-envify": "^1.4.0",
        "object-assign": "^4.1.1",
        "react-is": "^16.8.1"
        "react-is": "^16.13.1"
      }
    },
    "node_modules/prop-types-exact": {

@@ -19414,13 +19414,18 @@
      }
    },
    "node_modules/rrule": {
      "version": "2.7.0",
      "resolved": "https://registry.npmjs.org/rrule/-/rrule-2.7.0.tgz",
      "integrity": "sha512-PnSvdJLHrETO4qQxm9nlDvSxNfbPdDFbgdz2BSHXTP+IzHbdwSNvTHOeN0O9khiy91GjzWXyiVJhnPDOQvejNg==",
      "version": "2.7.1",
      "resolved": "https://registry.npmjs.org/rrule/-/rrule-2.7.1.tgz",
      "integrity": "sha512-4p20u/1U7WqR3Nb1hOUrm0u1nSI7sO93ZUVZEZ5HeF6Gr5OlJuyhwEGRvUHq8ZfrPsq5gfa5b9dqnUs/kPqpIw==",
      "dependencies": {
        "tslib": "^1.10.0"
        "tslib": "^2.4.0"
      }
    },
    "node_modules/rrule/node_modules/tslib": {
      "version": "2.4.0",
      "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz",
      "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
    },
    "node_modules/rst-selector-parser": {
      "version": "2.2.3",
      "resolved": "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz",

@@ -26302,9 +26307,9 @@
      }
    },
    "ace-builds": {
      "version": "1.6.0",
      "resolved": "https://registry.npmjs.org/ace-builds/-/ace-builds-1.6.0.tgz",
      "integrity": "sha512-qdkx965G/TA12IK7Zk+iCVDtA9wvhxIGivGc2rsID4UYbY2Bpatwep3ZrBZwj1IB2miU6FodDMqM9Kc1lqDlLg=="
      "version": "1.8.1",
      "resolved": "https://registry.npmjs.org/ace-builds/-/ace-builds-1.8.1.tgz",
      "integrity": "sha512-wjEQ4khMQYg9FfdEDoOtqdoHwcwFL48H0VB3te5b5A3eqHwxsTw8IX6+xzfisgborIb8dYU+1y9tcmtGFrCPIg=="
    },
    "acorn": {
      "version": "7.4.1",

@@ -28656,9 +28661,9 @@
      }
    },
    "dompurify": {
      "version": "2.3.8",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.3.8.tgz",
      "integrity": "sha512-eVhaWoVibIzqdGYjwsBWodIQIaXFSB+cKDf4cfxLMsK0xiud6SE+/WCVx/Xw/UwQsa4cS3T2eITcdtmTg2UKcw=="
      "version": "2.3.10",
      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-2.3.10.tgz",
      "integrity": "sha512-o7Fg/AgC7p/XpKjf/+RC3Ok6k4St5F7Q6q6+Nnm3p2zGWioAY6dh0CbbuwOhH2UcSzKsdniE/YnE2/92JcsA+g=="
    },
    "domutils": {
      "version": "2.8.0",

@@ -34178,9 +34183,9 @@
      }
    },
    "luxon": {
      "version": "2.4.0",
      "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.4.0.tgz",
      "integrity": "sha512-w+NAwWOUL5hO0SgwOHsMBAmZ15SoknmQXhSO0hIbJCAmPKSsGeK8MlmhYh2w6Iib38IxN2M+/ooXWLbeis7GuA=="
      "version": "3.0.1",
      "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.1.tgz",
      "integrity": "sha512-hF3kv0e5gwHQZKz4wtm4c+inDtyc7elkanAsBq+fundaCdUBNJB1dHEGUZIM6SfSBUlbVFduPwEtNjFK8wLtcw=="
    },
    "lz-string": {
      "version": "1.4.4",

@@ -35880,13 +35885,13 @@
      }
    },
    "prop-types": {
      "version": "15.7.2",
      "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.7.2.tgz",
      "integrity": "sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ==",
      "version": "15.8.1",
      "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
      "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
      "requires": {
        "loose-envify": "^1.4.0",
        "object-assign": "^4.1.1",
        "react-is": "^16.8.1"
        "react-is": "^16.13.1"
      }
    },
    "prop-types-exact": {

@@ -37007,11 +37012,18 @@
      }
    },
    "rrule": {
      "version": "2.7.0",
      "resolved": "https://registry.npmjs.org/rrule/-/rrule-2.7.0.tgz",
      "integrity": "sha512-PnSvdJLHrETO4qQxm9nlDvSxNfbPdDFbgdz2BSHXTP+IzHbdwSNvTHOeN0O9khiy91GjzWXyiVJhnPDOQvejNg==",
      "version": "2.7.1",
      "resolved": "https://registry.npmjs.org/rrule/-/rrule-2.7.1.tgz",
      "integrity": "sha512-4p20u/1U7WqR3Nb1hOUrm0u1nSI7sO93ZUVZEZ5HeF6Gr5OlJuyhwEGRvUHq8ZfrPsq5gfa5b9dqnUs/kPqpIw==",
      "requires": {
        "tslib": "^1.10.0"
        "tslib": "^2.4.0"
      },
      "dependencies": {
        "tslib": {
          "version": "2.4.0",
          "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz",
          "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
        }
      }
    },
    "rst-selector-parser": {

@@ -11,26 +11,26 @@
    "@patternfly/react-core": "^4.221.3",
    "@patternfly/react-icons": "4.75.1",
    "@patternfly/react-table": "4.93.1",
    "ace-builds": "^1.6.0",
    "ace-builds": "^1.8.1",
    "ansi-to-html": "0.7.2",
    "axios": "0.27.2",
    "codemirror": "^6.0.1",
    "d3": "7.4.4",
    "dagre": "^0.8.4",
    "dompurify": "2.3.8",
    "dompurify": "2.3.10",
    "formik": "2.2.9",
    "has-ansi": "5.0.1",
    "html-entities": "2.3.2",
    "js-yaml": "4.1.0",
    "luxon": "^2.4.0",
    "prop-types": "^15.6.2",
    "luxon": "^3.0.1",
    "prop-types": "^15.8.1",
    "react": "17.0.2",
    "react-ace": "^10.1.0",
    "react-dom": "17.0.2",
    "react-error-boundary": "^3.1.4",
    "react-router-dom": "^5.3.3",
    "react-virtualized": "^9.21.1",
    "rrule": "2.7.0",
    "rrule": "2.7.1",
    "styled-components": "5.3.5"
  },
  "devDependencies": {
@@ -2,8 +2,6 @@ import React from 'react';
import { t } from '@lingui/macro';
import { withFormik, useFormikContext } from 'formik';
import PropTypes from 'prop-types';

import { VERBOSITY } from 'components/VerbositySelectField';
import Wizard from '../Wizard';
import useAdHocLaunchSteps from './useAdHocLaunchSteps';

@@ -62,7 +60,7 @@ const FormikApp = withFormik({
    limit: adHocItemStrings || 'all',
    credentials: [],
    module_args: '',
    verbosity: VERBOSITY()[0],
    verbosity: 0,
    forks: 0,
    diff_mode: false,
    become_enabled: '',
@@ -20,12 +20,7 @@ function NavExpandableGroup(props) {
  if (routes.length === 1 && groupId === 'settings') {
    const [{ path }] = routes;
    return (
      <NavItem
        itemId={groupId}
        isActive={isActivePath(path)}
        key={path}
        // ouiaId={path}
      >
      <NavItem itemId={groupId} isActive={isActivePath(path)} key={path}>
        <Link to={path}>{groupTitle}</Link>
      </NavItem>
    );

@@ -40,12 +35,7 @@ function NavExpandableGroup(props) {
      title={groupTitle}
    >
      {routes.map(({ path, title }) => (
        <NavItem
          groupId={groupId}
          isActive={isActivePath(path)}
          key={path}
          // ouiaId={path}
        >
        <NavItem groupId={groupId} isActive={isActivePath(path)} key={path}>
          <Link to={path}>{title}</Link>
        </NavItem>
      ))}
@@ -24,6 +24,7 @@ const CardBody = styled(PFCardBody)`

const Expandable = styled(PFExpandable)`
  text-align: left;
  max-width: 75vw;

  & .pf-c-expandable__toggle {
    padding-left: 10px;

@@ -54,7 +55,7 @@ function ErrorDetail({ error }) {
        {response?.config?.method.toUpperCase()} {response?.config?.url}{' '}
        <strong>{response?.status}</strong>
      </CardBody>
      <CardBody>
      <CardBody css="max-width: 70vw">
        {Array.isArray(message) ? (
          <ul>
            {message.map((m) =>

@@ -70,9 +71,16 @@ function ErrorDetail({ error }) {
  };

  const renderStack = () => (
    <CardBody css="white-space: pre; font-family: var(--pf-global--FontFamily--monospace)">
      {error.stack}
    </CardBody>
    <>
      <CardBody>
        <strong>
          {error.name}: {error.message}
        </strong>
      </CardBody>
      <CardBody css="white-space: pre; font-family: var(--pf-global--FontFamily--monospace)">
        {error.stack}
      </CardBody>
    </>
  );

  return (
@@ -10,6 +10,11 @@ export const FormColumnLayout = styled.div`
  @media (min-width: 1210px) {
    grid-template-columns: repeat(3, 1fr);
  }

  ${(props) =>
    props.stacked &&
    `border-bottom: 1px solid var(--pf-global--BorderColor--100);
     padding: var(--pf-global--spacer--sm) 0 var(--pf-global--spacer--md) `}
`;

export const FormFullWidthLayout = styled.div`
@@ -15,46 +15,61 @@ function JobCancelButton({
  buttonText,
  style = {},
  job = {},
  isDisabled,
  tooltip,
  cancelationMessage,
  onCancelWorkflow,
}) {
  const [isOpen, setIsOpen] = useState(false);
  const { error: cancelError, request: cancelJob } = useRequest(
    useCallback(async () => {
      setIsOpen(false);
      await getJobModel(job.type).cancel(job.id);
    }, [job.id, job.type]),

      if (onCancelWorkflow) {
        onCancelWorkflow();
      }
    }, [job.id, job.type, onCancelWorkflow]),
    {}
  );
  const { error, dismissError: dismissCancelError } =
    useDismissableError(cancelError);

  const isAlreadyCancelled = cancelError?.response?.status === 405;

  const renderTooltip = () => {
    if (tooltip) {
      return tooltip;
    }
    return isAlreadyCancelled ? null : title;
  };
  return (
    <>
      <Tooltip content={isAlreadyCancelled ? null : title}>
        {showIconButton ? (
          <Button
            isDisabled={isAlreadyCancelled}
            aria-label={title}
            ouiaId="cancel-job-button"
            onClick={() => setIsOpen(true)}
            variant="plain"
            style={style}
          >
            <MinusCircleIcon />
          </Button>
        ) : (
          <Button
            isDisabled={isAlreadyCancelled}
            aria-label={title}
            variant="secondary"
            ouiaId="cancel-job-button"
            onClick={() => setIsOpen(true)}
            style={style}
          >
            {buttonText || t`Cancel Job`}
          </Button>
        )}
      <Tooltip content={renderTooltip()}>
        <div>
          {showIconButton ? (
            <Button
              isDisabled={isDisabled || isAlreadyCancelled}
              aria-label={title}
              ouiaId="cancel-job-button"
              onClick={() => setIsOpen(true)}
              variant="plain"
              style={style}
            >
              <MinusCircleIcon />
            </Button>
          ) : (
            <Button
              isDisabled={isDisabled || isAlreadyCancelled}
              aria-label={title}
              variant="secondary"
              ouiaId="cancel-job-button"
              onClick={() => setIsOpen(true)}
              style={style}
            >
              {buttonText || t`Cancel Job`}
            </Button>
          )}
        </div>
      </Tooltip>
      {isOpen && (
        <AlertModal

@@ -86,7 +101,7 @@ function JobCancelButton({
          </Button>,
        ]}
      >
        {t`Are you sure you want to cancel this job?`}
        {cancelationMessage ?? t`Are you sure you want to cancel this job?`}
      </AlertModal>
    )}
    {error && !isAlreadyCancelled && (
@@ -1,7 +1,22 @@
import React from 'react';
import { shape, string, number, arrayOf, node, oneOfType } from 'prop-types';
import { Tab, Tabs, TabTitleText } from '@patternfly/react-core';
import {
  Tab as PFTab,
  Tabs as PFTabs,
  TabTitleText,
} from '@patternfly/react-core';
import { useHistory, useLocation } from 'react-router-dom';
import styled from 'styled-components';

const Tabs = styled(PFTabs)`
  & > ul {
    flex-grow: 1;
  }
`;

const Tab = styled(PFTab)`
  ${(props) => props.hasstyle && `${props.hasstyle}`}
`;

function RoutedTabs({ tabsArray }) {
  const history = useHistory();

@@ -31,7 +46,6 @@ function RoutedTabs({ tabsArray }) {
      history.push(link);
    }
  };

  return (
    <Tabs
      activeKey={getActiveTabId()}

@@ -43,10 +57,11 @@ function RoutedTabs({ tabsArray }) {
          aria-label={typeof tab.name === 'string' ? tab.name : null}
          eventKey={tab.id}
          key={tab.id}
          href={`#${tab.link}`}
          href={!tab.hasstyle && `#${tab.link}`}
          title={<TabTitleText>{tab.name}</TabTitleText>}
          aria-controls=""
          ouiaId={`${tab.name}-tab`}
          hasstyle={tab.hasstyle}
        />
      ))}
    </Tabs>

@@ -57,7 +72,6 @@ RoutedTabs.propTypes = {
  tabsArray: arrayOf(
    shape({
      id: number.isRequired,
      link: string.isRequired,
      name: oneOfType([string.isRequired, node.isRequired]),
    })
  ).isRequired,
@@ -2,7 +2,6 @@ import React, { useState } from 'react';
import { func, shape } from 'prop-types';

import { useHistory, useLocation } from 'react-router-dom';
import { RRule } from 'rrule';
import { Card } from '@patternfly/react-core';
import yaml from 'js-yaml';
import { parseVariableField } from 'util/yaml';

@@ -12,7 +11,7 @@ import mergeExtraVars from 'util/prompt/mergeExtraVars';
import getSurveyValues from 'util/prompt/getSurveyValues';
import { getAddedAndRemoved } from 'util/lists';
import ScheduleForm from '../shared/ScheduleForm';
import buildRuleObj from '../shared/buildRuleObj';
import buildRuleSet from '../shared/buildRuleSet';
import { CardBody } from '../../Card';

function ScheduleAdd({

@@ -36,21 +35,12 @@ function ScheduleAdd({
  ) => {
    const {
      inventory,
      extra_vars,
      originalCredentials,
      end,
      frequency,
      interval,
      frequencyOptions,
      exceptionFrequency,
      exceptionOptions,
      timezone,
      occurrences,
      runOn,
      runOnTheDay,
      runOnTheMonth,
      runOnDayMonth,
      runOnDayNumber,
      runOnTheOccurrence,
      credentials,
      daysOfWeek,
      ...submitValues
    } = values;
    const { added } = getAddedAndRemoved(

@@ -83,11 +73,13 @@ function ScheduleAdd({
    }

    try {
      const rule = new RRule(buildRuleObj(values));
      const ruleSet = buildRuleSet(values);
      const requestData = {
        ...submitValues,
        rrule: rule.toString().replace(/\n/g, ' '),
        rrule: ruleSet.toString().replace(/\n/g, ' '),
      };
      delete requestData.startDate;
      delete requestData.startTime;

      if (Object.keys(values).includes('daysToKeep')) {
        if (requestData.extra_data) {

@@ -98,10 +90,6 @@ function ScheduleAdd({
          });
        }
      }
      delete requestData.startDate;
      delete requestData.startTime;
      delete requestData.endDate;
      delete requestData.endTime;

      const {
        data: { id: scheduleId },

@@ -80,9 +80,7 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'none',
        interval: 1,
        frequency: [],
        name: 'Run once schedule',
        startDate: '2020-03-25',
        startTime: '10:00 AM',

@@ -98,15 +96,19 @@ describe('<ScheduleAdd />', () => {
    });
  });

  test('Successfully creates a schedule with 10 minute repeat frequency after 10 occurrences', async () => {
  test('Successfully creates a schedule with 10 minute repeat frequency and 10 occurrences', async () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'after',
        frequency: 'minute',
        interval: 10,
        frequency: ['minute'],
        frequencyOptions: {
          minute: {
            end: 'after',
            interval: 10,
            occurrences: 10,
          },
        },
        name: 'Run every 10 minutes 10 times',
        occurrences: 10,
        startDate: '2020-03-25',
        startTime: '10:30 AM',
        timezone: 'America/New_York',

@@ -125,11 +127,15 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'onDate',
        endDate: '2020-03-26',
        endTime: '10:45 AM',
        frequency: 'hour',
        interval: 1,
        frequency: ['hour'],
        frequencyOptions: {
          hour: {
            end: 'onDate',
            interval: 1,
            endDate: '2020-03-26',
            endTime: '10:45 AM',
          },
        },
        name: 'Run every hour until date',
        startDate: '2020-03-25',
        startTime: '10:45 AM',

@@ -141,7 +147,7 @@ describe('<ScheduleAdd />', () => {
        name: 'Run every hour until date',
        extra_data: {},
        rrule:
          'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T104500',
          'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T144500Z',
      });
    });

@@ -149,9 +155,13 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'day',
        interval: 1,
        frequency: ['day'],
        frequencyOptions: {
          day: {
            end: 'never',
            interval: 1,
          },
        },
        name: 'Run daily',
        startDate: '2020-03-25',
        startTime: '10:45 AM',

@@ -170,13 +180,17 @@ describe('<ScheduleAdd />', () => {
  test('Successfully creates a schedule with weekly repeat frequency on mon/wed/fri', async () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        daysOfWeek: [RRule.MO, RRule.WE, RRule.FR],
        description: 'test description',
        end: 'never',
        frequency: 'week',
        interval: 1,
        frequency: ['week'],
        frequencyOptions: {
          week: {
            end: 'never',
            interval: 1,
            occurrences: 1,
            daysOfWeek: [RRule.MO, RRule.WE, RRule.FR],
          },
        },
        name: 'Run weekly on mon/wed/fri',
        occurrences: 1,
        startDate: '2020-03-25',
        startTime: '10:45 AM',
        timezone: 'America/New_York',

@@ -194,13 +208,17 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'month',
        interval: 1,
        frequency: ['month'],
        frequencyOptions: {
          month: {
            end: 'never',
            occurrences: 1,
            interval: 1,
            runOn: 'day',
            runOnDayNumber: 1,
          },
        },
        name: 'Run on the first day of the month',
        occurrences: 1,
        runOn: 'day',
        runOnDayNumber: 1,
        startTime: '10:45 AM',
        startDate: '2020-04-01',
        timezone: 'America/New_York',

@@ -219,16 +237,20 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        endDate: '2020-03-26',
        endTime: '11:00 AM',
        frequency: 'month',
        interval: 1,
        frequency: ['month'],
        frequencyOptions: {
          month: {
            end: 'never',
            endDate: '2020-03-26',
            endTime: '11:00 AM',
            interval: 1,
            occurrences: 1,
            runOn: 'the',
            runOnTheDay: 'tuesday',
            runOnTheOccurrence: -1,
          },
        },
        name: 'Run monthly on the last Tuesday',
        occurrences: 1,
        runOn: 'the',
        runOnTheDay: 'tuesday',
        runOnTheOccurrence: -1,
        startDate: '2020-03-31',
        startTime: '11:00 AM',
        timezone: 'America/New_York',

@@ -242,18 +264,23 @@ describe('<ScheduleAdd />', () => {
          'DTSTART;TZID=America/New_York:20200331T110000 RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=-1;BYDAY=TU',
      });
    });

  test('Successfully creates a schedule with yearly repeat frequency on the first day of March', async () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'year',
        interval: 1,
        frequency: ['year'],
        frequencyOptions: {
          year: {
            end: 'never',
            interval: 1,
            occurrences: 1,
            runOn: 'day',
            runOnDayMonth: 3,
            runOnDayNumber: 1,
          },
        },
        name: 'Yearly on the first day of March',
        occurrences: 1,
        runOn: 'day',
        runOnDayMonth: 3,
        runOnDayNumber: 1,
        startDate: '2020-03-01',
        startTime: '12:00 AM',
        timezone: 'America/New_York',

@@ -272,15 +299,19 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'year',
        interval: 1,
        frequency: ['year'],
        frequencyOptions: {
          year: {
            end: 'never',
            interval: 1,
            occurrences: 1,
            runOn: 'the',
            runOnTheOccurrence: 2,
            runOnTheDay: 'friday',
            runOnTheMonth: 4,
          },
        },
        name: 'Yearly on the second Friday in April',
        occurrences: 1,
        runOn: 'the',
        runOnTheOccurrence: 2,
        runOnTheDay: 'friday',
        runOnTheMonth: 4,
        startDate: '2020-04-10',
        startTime: '11:15 AM',
        timezone: 'America/New_York',

@@ -299,15 +330,19 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'year',
        interval: 1,
        frequency: ['year'],
        frequencyOptions: {
          year: {
            end: 'never',
            interval: 1,
            occurrences: 1,
            runOn: 'the',
            runOnTheOccurrence: 1,
            runOnTheDay: 'weekday',
            runOnTheMonth: 10,
          },
        },
        name: 'Yearly on the first weekday in October',
        occurrences: 1,
        runOn: 'the',
        runOnTheOccurrence: 1,
        runOnTheDay: 'weekday',
        runOnTheMonth: 10,
        startDate: '2020-04-10',
        startTime: '11:15 AM',
        timezone: 'America/New_York',

@@ -376,17 +411,7 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      wrapper.find('Formik').invoke('onSubmit')({
        name: 'Schedule',
        end: 'never',
        endDate: '2021-01-29',
        endTime: '2:15 PM',
        frequency: 'none',
        occurrences: 1,
        runOn: 'day',
        runOnDayMonth: 1,
        runOnDayNumber: 1,
        runOnTheDay: 'sunday',
        runOnTheMonth: 1,
        runOnTheOccurrence: 1,
        frequency: [],
        skip_tags: '',
        inventory: { name: 'inventory', id: 45 },
        credentials: [

@@ -405,7 +430,7 @@ describe('<ScheduleAdd />', () => {
        inventory: 45,
        name: 'Schedule',
        rrule:
          'DTSTART;TZID=America/New_York:20210128T141500 RRULE:COUNT=1;FREQ=MINUTELY',
          'DTSTART;TZID=America/New_York:20210128T141500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
        skip_tags: '',
      });
      expect(SchedulesAPI.associateCredential).toBeCalledWith(3, 10);

@@ -462,9 +487,7 @@ describe('<ScheduleAdd />', () => {
    await act(async () => {
      scheduleSurveyWrapper.find('Formik').invoke('onSubmit')({
        description: 'test description',
        end: 'never',
        frequency: 'none',
        interval: 1,
        frequency: [],
        name: 'Run once schedule',
        startDate: '2020-03-25',
        startTime: '10:00 AM',
@@ -0,0 +1,218 @@
|
||||
import React from 'react';
|
||||
import styled from 'styled-components';
|
||||
import { t, Plural, SelectOrdinal } from '@lingui/macro';
|
||||
import { DateTime } from 'luxon';
|
||||
import { formatDateString } from 'util/dates';
|
||||
import { DetailList, Detail } from '../../DetailList';
|
||||
|
||||
const Label = styled.div`
|
||||
margin-bottom: var(--pf-global--spacer--sm);
|
||||
font-weight: var(--pf-global--FontWeight--bold);
|
||||
`;
|
||||
|
||||
export default function FrequencyDetails({ type, label, options, timezone }) {
|
||||
const getRunEveryLabel = () => {
|
||||
const { interval } = options;
|
||||
switch (type) {
|
||||
case 'minute':
|
||||
return (
|
||||
<Plural
|
||||
value={interval}
|
||||
one="{interval} minute"
|
||||
other="{interval} minutes"
|
||||
/>
|
||||
);
|
||||
case 'hour':
|
||||
return (
|
||||
<Plural
|
||||
value={interval}
|
||||
one="{interval} hour"
|
||||
other="{interval} hours"
|
||||
/>
|
||||
);
|
||||
case 'day':
|
||||
return (
|
||||
<Plural
|
||||
value={interval}
|
||||
one="{interval} day"
|
||||
other="{interval} days"
|
||||
/>
|
||||
);
|
||||
case 'week':
|
||||
return (
|
||||
<Plural
|
||||
value={interval}
|
||||
one="{interval} week"
|
||||
other="{interval} weeks"
|
||||
/>
|
||||
);
|
||||
case 'month':
|
||||
return (
|
||||
<Plural
|
||||
value={interval}
|
||||
one="{interval} month"
|
||||
other="{interval} months"
|
||||
/>
|
||||
);
|
||||
case 'year':
|
||||
return (
|
||||
<Plural
|
||||
value={interval}
|
||||
one="{interval} year"
|
||||
other="{interval} years"
|
||||
/>
|
||||
);
|
||||
default:
|
||||
throw new Error(t`Frequency did not match an expected value`);
|
||||
}
|
||||
};
|
||||
|
||||
const weekdays = {
|
||||
0: t`Monday`,
|
||||
1: t`Tuesday`,
|
||||
2: t`Wednesday`,
|
||||
3: t`Thursday`,
|
||||
4: t`Friday`,
|
||||
5: t`Saturday`,
|
||||
6: t`Sunday`,
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<Label>{label}</Label>
|
||||
<DetailList gutter="sm">
|
||||
<Detail label={t`Run every`} value={getRunEveryLabel()} />
|
||||
{type === 'week' ? (
|
||||
<Detail
|
||||
label={t`On days`}
|
||||
value={options.daysOfWeek
|
||||
.sort(sortWeekday)
|
||||
.map((d) => weekdays[d.weekday])
|
||||
.join(', ')}
|
||||
/>
|
||||
) : null}
|
||||
<RunOnDetail type={type} options={options} />
|
||||
<Detail label={t`End`} value={getEndValue(type, options, timezone)} />
|
||||
</DetailList>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function sortWeekday(a, b) {
|
||||
if (a.weekday === 6) return -1;
|
||||
if (b.weekday === 6) return 1;
|
||||
return a.weekday - b.weekday;
|
||||
}
|
||||
|
||||
function RunOnDetail({ type, options }) {
|
||||
if (type === 'month') {
|
||||
if (options.runOn === 'day') {
|
||||
return (
|
||||
<Detail label={t`Run on`} value={t`Day ${options.runOnDayNumber}`} />
|
||||
);
|
||||
}
|
||||
const dayOfWeek = options.runOnTheDay;
|
||||
return (
|
||||
<Detail
|
||||
label={t`Run on`}
|
||||
value={
|
||||
options.runOnDayNumber === -1 ? (
|
||||
t`The last ${dayOfWeek}`
|
||||
) : (
|
||||
<SelectOrdinal
|
||||
value={options.runOnDayNumber}
|
||||
one={`The first ${dayOfWeek}`}
|
||||
two={`The second ${dayOfWeek}`}
|
||||
_3={`The third ${dayOfWeek}`}
|
||||
_4={`The fourth ${dayOfWeek}`}
|
||||
_5={`The fifth ${dayOfWeek}`}
|
||||
/>
|
||||
)
|
||||
}
|
||||
/>
|
||||
);
|
||||
}
|
||||
if (type === 'year') {
|
||||
const months = {
|
||||
1: t`January`,
|
||||
2: t`February`,
|
||||
3: t`March`,
|
||||
4: t`April`,
|
||||
5: t`May`,
|
||||
6: t`June`,
|
||||
7: t`July`,
|
||||
8: t`August`,
|
||||
9: t`September`,
|
||||
10: t`October`,
|
||||
11: t`November`,
|
||||
12: t`December`,
|
||||
};
|
||||
if (options.runOn === 'day') {
|
||||
return (
|
||||
<Detail
|
||||
label={t`Run on`}
|
||||
value={`${months[options.runOnTheMonth]} ${options.runOnDayMonth}`}
|
||||
/>
|
||||
);
|
||||
}
|
||||
const weekdays = {
|
||||
sunday: t`Sunday`,
|
||||
monday: t`Monday`,
|
||||
tuesday: t`Tuesday`,
|
||||
wednesday: t`Wednesday`,
|
||||
thursday: t`Thursday`,
|
||||
friday: t`Friday`,
|
||||
saturday: t`Saturday`,
|
||||
day: t`day`,
|
||||
weekday: t`weekday`,
|
||||
weekendDay: t`weekend day`,
|
||||
};
|
||||
const weekday = weekdays[options.runOnTheDay];
|
||||
const month = months[options.runOnTheMonth];
|
||||
return (
|
||||
<Detail
|
||||
label={t`Run on`}
|
||||
value={
|
||||
options.runOnTheOccurrence === -1 ? (
|
||||
t`The last ${weekday} of ${month}`
|
||||
) : (
|
||||
<SelectOrdinal
|
||||
value={options.runOnTheOccurrence}
|
||||
one={`The first ${weekday} of ${month}`}
|
||||
two={`The second ${weekday} of ${month}`}
|
||||
_3={`The third ${weekday} of ${month}`}
|
||||
_4={`The fourth ${weekday} of ${month}`}
|
||||
_5={`The fifth ${weekday} of ${month}`}
|
||||
/>
|
||||
)
|
||||
}
|
||||
/>
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}

function getEndValue(type, options, timezone) {
if (options.end === 'never') {
return t`Never`;
}
if (options.end === 'after') {
const numOccurrences = options.occurrences;
return (
<Plural
value={numOccurrences}
one="After {numOccurrences} occurrence"
other="After {numOccurrences} occurrences"
/>
);
}

const date = DateTime.fromFormat(
`${options.endDate} ${options.endTime}`,
'yyyy-MM-dd h:mm a',
{
zone: timezone,
}
);
return formatDateString(date, timezone);
}
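
// A minimal sketch (with assumed sample values) of the luxon parse above:
// DateTime.fromFormat interprets the form's date/time strings in the
// schedule's zone rather than the browser's local zone.
//
//   import { DateTime } from 'luxon';
//   const d = DateTime.fromFormat('2024-03-01 2:15 PM', 'yyyy-MM-dd h:mm a', {
//     zone: 'America/New_York',
//   });
//   d.toISO(); // => '2024-03-01T14:15:00.000-05:00'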

@@ -1,7 +1,6 @@
import 'styled-components/macro';
import React, { useCallback, useEffect } from 'react';
import { Link, useHistory, useLocation } from 'react-router-dom';
import { RRule, rrulestr } from 'rrule';
import styled from 'styled-components';

import { t } from '@lingui/macro';
@@ -12,6 +11,8 @@ import useRequest, { useDismissableError } from 'hooks/useRequest';
import { JobTemplatesAPI, SchedulesAPI, WorkflowJobTemplatesAPI } from 'api';
import { parseVariableField, jsonToYaml } from 'util/yaml';
import { useConfig } from 'contexts/Config';
import parseRuleObj from '../shared/parseRuleObj';
import FrequencyDetails from './FrequencyDetails';
import AlertModal from '../../AlertModal';
import { CardBody, CardActionsRow } from '../../Card';
import ContentError from '../../ContentError';
@@ -41,6 +42,26 @@ const PromptTitle = styled(Title)`
const PromptDetailList = styled(DetailList)`
padding: 0px 20px;
`;

const FrequencyDetailsContainer = styled.div`
background-color: var(--pf-global--palette--black-150);
margin-top: var(--pf-global--spacer--lg);
margin-bottom: var(--pf-global--spacer--lg);
margin-right: calc(var(--pf-c-card--child--PaddingRight) * -1);
margin-left: calc(var(--pf-c-card--child--PaddingLeft) * -1);
padding: var(--pf-c-card--child--PaddingRight);

& > p {
margin-bottom: var(--pf-global--spacer--md);
}

& > *:not(:first-child):not(:last-child) {
margin-bottom: var(--pf-global--spacer--md);
padding-bottom: var(--pf-global--spacer--md);
border-bottom: 1px solid var(--pf-global--palette--black-300);
}
`;

function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
const {
id,
@@ -132,19 +153,18 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
fetchCredentialsAndPreview();
}, [fetchCredentialsAndPreview]);

const rule = rrulestr(rrule);
let repeatFrequency =
rule.options.freq === RRule.MINUTELY && dtstart === dtend
? t`None (Run Once)`
: rule.toText().replace(/^\w/, (c) => c.toUpperCase());
// We should allow rrule to handle this issue, and they have fixed it in version 2.6.8.
// (https://github.com/jakubroztocil/rrule/commit/ab9c564a83de2f9688d6671f2a6df273ceb902bf)
// However, we are unable to upgrade to that version because that
// version throws an unexpected warning.
// (https://github.com/jakubroztocil/rrule/issues/427)
if (repeatFrequency.split(' ')[1] === 'minutes') {
repeatFrequency = t`Every minute for ${rule.options.count} times`;
}
const frequencies = {
minute: t`Minute`,
hour: t`Hour`,
day: t`Day`,
week: t`Week`,
month: t`Month`,
year: t`Year`,
};
const { frequency, frequencyOptions } = parseRuleObj(schedule);
const repeatFrequency = frequency.length
? frequency.map((f) => frequencies[f]).join(', ')
: t`None (Run Once)`;
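
// A hedged sketch (assumed inputs) of the new repeat-frequency label:
// parseRuleObj returns the frequencies present in the stored rule set,
// and the detail view joins their translated names.
//
//   const frequency = ['week', 'month'];
//   frequency.map((f) => frequencies[f]).join(', '); // => 'Week, Month'
//   // An empty array falls back to 'None (Run Once)'.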

const {
ask_credential_on_launch,
@@ -268,6 +288,24 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
helpText={helpText.localTimeZone(config)}
/>
<Detail label={t`Repeat Frequency`} value={repeatFrequency} />
</DetailList>
{frequency.length ? (
<FrequencyDetailsContainer>
<p>
<strong>{t`Frequency Details`}</strong>
</p>
{frequency.map((freq) => (
<FrequencyDetails
key={freq}
type={freq}
label={frequencies[freq]}
options={frequencyOptions[freq]}
timezone={timezone}
/>
))}
</FrequencyDetailsContainer>
) : null}
<DetailList gutter="sm">
{hasDaysToKeepField ? (
<Detail label={t`Days of Data to Keep`} value={daysToKeep} />
) : null}

@@ -1,7 +1,6 @@
import React, { useState } from 'react';

import { useHistory, useLocation } from 'react-router-dom';
import { RRule } from 'rrule';
import { shape } from 'prop-types';
import { Card } from '@patternfly/react-core';
import yaml from 'js-yaml';
@@ -12,7 +11,7 @@ import { parseVariableField } from 'util/yaml';
import mergeExtraVars from 'util/prompt/mergeExtraVars';
import getSurveyValues from 'util/prompt/getSurveyValues';
import ScheduleForm from '../shared/ScheduleForm';
import buildRuleObj from '../shared/buildRuleObj';
import buildRuleSet from '../shared/buildRuleSet';
import { CardBody } from '../../Card';

function ScheduleEdit({
@@ -27,7 +26,7 @@ function ScheduleEdit({
const history = useHistory();
const location = useLocation();
const { pathname } = location;
const pathRoot = pathname.substr(0, pathname.indexOf('schedules'));
const pathRoot = pathname.substring(0, pathname.indexOf('schedules'));

const handleSubmit = async (
values,
@@ -38,18 +37,11 @@ function ScheduleEdit({
const {
inventory,
credentials = [],
end,
frequency,
interval,
frequencyOptions,
exceptionFrequency,
exceptionOptions,
timezone,
occurences,
runOn,
runOnTheDay,
runOnTheMonth,
runOnDayMonth,
runOnDayNumber,
runOnTheOccurence,
daysOfWeek,
...submitValues
} = values;
const { added, removed } = getAddedAndRemoved(
@@ -91,15 +83,13 @@ function ScheduleEdit({
}

try {
const rule = new RRule(buildRuleObj(values));
const ruleSet = buildRuleSet(values);
const requestData = {
...submitValues,
rrule: rule.toString().replace(/\n/g, ' '),
rrule: ruleSet.toString().replace(/\n/g, ' '),
};
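// A minimal sketch (assumed rule set) of why the newline join matters:
// an rrule RRuleSet serializes DTSTART and each RRULE/EXRULE on its own
// line, while the AWX API stores the whole rule as one space-separated
// string.
//
//   ruleSet.toString();
//   // 'DTSTART;TZID=America/New_York:20200325T103000\nRRULE:FREQ=DAILY'
//   ruleSet.toString().replace(/\n/g, ' ');
//   // 'DTSTART;TZID=America/New_York:20200325T103000 RRULE:FREQ=DAILY'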
delete requestData.startDate;
delete requestData.startTime;
delete requestData.endDate;
delete requestData.endTime;

if (Object.keys(values).includes('daysToKeep')) {
if (!requestData.extra_data) {

@@ -195,9 +195,7 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'none',
interval: 1,
frequency: [],
name: 'Run once schedule',
startDate: '2020-03-25',
startTime: '10:00 AM',
@@ -218,11 +216,15 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'after',
frequency: 'minute',
interval: 10,
frequency: ['minute'],
frequencyOptions: {
minute: {
end: 'after',
interval: 10,
occurrences: 10,
},
},
name: 'Run every 10 minutes 10 times',
occurrences: 10,
startDate: '2020-03-25',
startTime: '10:30 AM',
timezone: 'America/New_York',
@@ -232,7 +234,6 @@ describe('<ScheduleEdit />', () => {
description: 'test description',
name: 'Run every 10 minutes 10 times',
extra_data: {},
occurrences: 10,
rrule:
'DTSTART;TZID=America/New_York:20200325T103000 RRULE:INTERVAL=10;FREQ=MINUTELY;COUNT=10',
});
@@ -242,11 +243,15 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'onDate',
endDate: '2020-03-26',
endTime: '10:45 AM',
frequency: 'hour',
interval: 1,
frequency: ['hour'],
frequencyOptions: {
hour: {
end: 'onDate',
endDate: '2020-03-26',
endTime: '10:45 AM',
interval: 1,
},
},
name: 'Run every hour until date',
startDate: '2020-03-25',
startTime: '10:45 AM',
@@ -259,7 +264,7 @@ describe('<ScheduleEdit />', () => {
name: 'Run every hour until date',
extra_data: {},
rrule:
'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T104500',
'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T144500Z',
});
});

@@ -267,9 +272,13 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'day',
interval: 1,
frequency: ['day'],
frequencyOptions: {
day: {
end: 'never',
interval: 1,
},
},
name: 'Run daily',
startDate: '2020-03-25',
startTime: '10:45 AM',
@@ -284,16 +293,21 @@ describe('<ScheduleEdit />', () => {
'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=DAILY',
});
});

test('Successfully creates a schedule with weekly repeat frequency on mon/wed/fri', async () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
daysOfWeek: [RRule.MO, RRule.WE, RRule.FR],
description: 'test description',
end: 'never',
frequency: 'week',
interval: 1,
frequency: ['week'],
frequencyOptions: {
week: {
end: 'never',
daysOfWeek: [RRule.MO, RRule.WE, RRule.FR],
interval: 1,
occurrences: 1,
},
},
name: 'Run weekly on mon/wed/fri',
occurrences: 1,
startDate: '2020-03-25',
startTime: '10:45 AM',
timezone: 'America/New_York',
@@ -303,7 +317,6 @@ describe('<ScheduleEdit />', () => {
description: 'test description',
name: 'Run weekly on mon/wed/fri',
extra_data: {},
occurrences: 1,
rrule: `DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=${RRule.MO},${RRule.WE},${RRule.FR}`,
});
});
@@ -312,13 +325,17 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'month',
interval: 1,
frequency: ['month'],
frequencyOptions: {
month: {
end: 'never',
interval: 1,
occurrences: 1,
runOn: 'day',
runOnDayNumber: 1,
},
},
name: 'Run on the first day of the month',
occurrences: 1,
runOn: 'day',
runOnDayNumber: 1,
startDate: '2020-04-01',
startTime: '10:45 AM',
timezone: 'America/New_York',
@@ -328,7 +345,6 @@ describe('<ScheduleEdit />', () => {
description: 'test description',
name: 'Run on the first day of the month',
extra_data: {},
occurrences: 1,
rrule:
'DTSTART;TZID=America/New_York:20200401T104500 RRULE:INTERVAL=1;FREQ=MONTHLY;BYMONTHDAY=1',
});
@@ -338,15 +354,20 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
endDateTime: '2020-03-26T11:00:00',
frequency: 'month',
interval: 1,
frequency: ['month'],
frequencyOptions: {
month: {
end: 'never',
endDate: '2020-03-26',
endTime: '11:00 AM',
interval: 1,
occurrences: 1,
runOn: 'the',
runOnTheDay: 'tuesday',
runOnTheOccurrence: -1,
},
},
name: 'Run monthly on the last Tuesday',
occurrences: 1,
runOn: 'the',
runOnTheDay: 'tuesday',
runOnTheOccurrence: -1,
startDate: '2020-03-31',
startTime: '11:00 AM',
timezone: 'America/New_York',
@@ -354,11 +375,8 @@ describe('<ScheduleEdit />', () => {
});
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description',
endDateTime: '2020-03-26T11:00:00',
name: 'Run monthly on the last Tuesday',
extra_data: {},
occurrences: 1,
runOnTheOccurrence: -1,
rrule:
'DTSTART;TZID=America/New_York:20200331T110000 RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=-1;BYDAY=TU',
});
@@ -368,14 +386,18 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'year',
interval: 1,
frequency: ['year'],
frequencyOptions: {
year: {
end: 'never',
interval: 1,
occurrences: 1,
runOn: 'day',
runOnDayMonth: 3,
runOnDayNumber: 1,
},
},
name: 'Yearly on the first day of March',
occurrences: 1,
runOn: 'day',
runOnDayMonth: 3,
runOnDayNumber: 1,
startTime: '12:00 AM',
startDate: '2020-03-01',
timezone: 'America/New_York',
@@ -385,7 +407,6 @@ describe('<ScheduleEdit />', () => {
description: 'test description',
name: 'Yearly on the first day of March',
extra_data: {},
occurrences: 1,
rrule:
'DTSTART;TZID=America/New_York:20200301T000000 RRULE:INTERVAL=1;FREQ=YEARLY;BYMONTH=3;BYMONTHDAY=1',
});
@@ -395,15 +416,19 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'year',
interval: 1,
frequency: ['year'],
frequencyOptions: {
year: {
end: 'never',
interval: 1,
occurrences: 1,
runOn: 'the',
runOnTheOccurrence: 2,
runOnTheDay: 'friday',
runOnTheMonth: 4,
},
},
name: 'Yearly on the second Friday in April',
occurrences: 1,
runOn: 'the',
runOnTheOccurrence: 2,
runOnTheDay: 'friday',
runOnTheMonth: 4,
startTime: '11:15 AM',
startDate: '2020-04-10',
timezone: 'America/New_York',
@@ -413,8 +438,6 @@ describe('<ScheduleEdit />', () => {
description: 'test description',
name: 'Yearly on the second Friday in April',
extra_data: {},
occurrences: 1,
runOnTheOccurrence: 2,
rrule:
'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=2;BYDAY=FR;BYMONTH=4',
});
@@ -424,15 +447,19 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'year',
interval: 1,
frequency: ['year'],
frequencyOptions: {
year: {
end: 'never',
interval: 1,
occurrences: 1,
runOn: 'the',
runOnTheOccurrence: 1,
runOnTheDay: 'weekday',
runOnTheMonth: 10,
},
},
name: 'Yearly on the first weekday in October',
occurrences: 1,
runOn: 'the',
runOnTheOccurrence: 1,
runOnTheDay: 'weekday',
runOnTheMonth: 10,
startTime: '11:15 AM',
startDate: '2020-04-10',
timezone: 'America/New_York',
@@ -442,8 +469,6 @@ describe('<ScheduleEdit />', () => {
description: 'test description',
name: 'Yearly on the first weekday in October',
extra_data: {},
occurrences: 1,
runOnTheOccurrence: 1,
rrule:
'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=1;BYDAY=MO,TU,WE,TH,FR;BYMONTH=10',
});
@@ -522,17 +547,7 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
wrapper.find('Formik').invoke('onSubmit')({
name: mockSchedule.name,
end: 'never',
endDate: '2021-01-29',
endTime: '2:15 PM',
frequency: 'none',
occurrences: 1,
runOn: 'day',
runOnDayMonth: 1,
runOnDayNumber: 1,
runOnTheDay: 'sunday',
runOnTheMonth: 1,
runOnTheOccurrence: 1,
frequency: [],
skip_tags: '',
startDate: '2021-01-28',
startTime: '2:15 PM',
@@ -549,10 +564,8 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toBeCalledWith(27, {
extra_data: {},
name: 'mock schedule',
occurrences: 1,
runOnTheOccurrence: 1,
rrule:
'DTSTART;TZID=America/New_York:20210128T141500 RRULE:COUNT=1;FREQ=MINUTELY',
'DTSTART;TZID=America/New_York:20210128T141500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
skip_tags: '',
});
expect(SchedulesAPI.disassociateCredential).toBeCalledWith(27, 75);
@@ -621,8 +634,6 @@ describe('<ScheduleEdit />', () => {
startDateTime: undefined,
description: '',
extra_data: {},
occurrences: 1,
runOnTheOccurrence: 1,
name: 'foo',
inventory: 702,
rrule:
@@ -723,9 +734,8 @@ describe('<ScheduleEdit />', () => {
await act(async () => {
scheduleSurveyWrapper.find('Formik').invoke('onSubmit')({
description: 'test description',
end: 'never',
frequency: 'none',
interval: 1,
frequency: [],
frequencyOptions: {},
name: 'Run once schedule',
startDate: '2020-03-25',
startTime: '10:00 AM',

@@ -16,11 +16,11 @@ const DateTimeGroup = styled.span`
`;
function DateTimePicker({ dateFieldName, timeFieldName, label }) {
const [dateField, dateMeta, dateHelpers] = useField({
name: `${dateFieldName}`,
name: dateFieldName,
validate: combine([required(null), isValidDate]),
});
const [timeField, timeMeta, timeHelpers] = useField({
name: `${timeFieldName}`,
name: timeFieldName,
validate: combine([required(null), validateTime()]),
});

@@ -11,7 +11,7 @@ import {
Radio,
TextInput,
} from '@patternfly/react-core';
import { required } from 'util/validators';
import { required, requiredPositiveInteger } from 'util/validators';
import AnsibleSelect from '../../AnsibleSelect';
import FormField from '../../FormField';
import DateTimePicker from './DateTimePicker';
@@ -45,65 +45,50 @@ const Checkbox = styled(_Checkbox)`
}
`;

export function requiredPositiveInteger() {
return (value) => {
if (typeof value === 'number') {
if (!Number.isInteger(value)) {
return t`This field must be an integer`;
}
if (value < 1) {
return t`This field must be greater than 0`;
}
}
if (!value) {
return t`Select a value for this field`;
}
return undefined;
};
}
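
// A brief sketch of the validator's contract (sample calls assumed):
// Formik treats a returned string as the error and undefined as valid.
//
//   const validate = requiredPositiveInteger();
//   validate(0);    // => 'This field must be greater than 0'
//   validate(1.5);  // => 'This field must be an integer'
//   validate('');   // => 'Select a value for this field'
//   validate(3);    // => undefined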

const FrequencyDetailSubform = () => {
const FrequencyDetailSubform = ({ frequency, prefix }) => {
const id = prefix.replace('.', '-');
const [runOnDayMonth] = useField({
name: 'runOnDayMonth',
name: `${prefix}.runOnDayMonth`,
});
const [runOnDayNumber] = useField({
name: 'runOnDayNumber',
name: `${prefix}.runOnDayNumber`,
});
const [runOnTheOccurrence] = useField({
name: 'runOnTheOccurrence',
name: `${prefix}.runOnTheOccurrence`,
});
const [runOnTheDay] = useField({
name: 'runOnTheDay',
name: `${prefix}.runOnTheDay`,
});
const [runOnTheMonth] = useField({
name: 'runOnTheMonth',
name: `${prefix}.runOnTheMonth`,
});
const [startDate] = useField('startDate');
const [{ name: dateFieldName }] = useField('endDate');
const [{ name: timeFieldName }] = useField('endTime');
const [startDate] = useField(`${prefix}.startDate`);

const [daysOfWeek, daysOfWeekMeta, daysOfWeekHelpers] = useField({
name: 'daysOfWeek',
validate: required(t`Select a value for this field`),
name: `${prefix}.daysOfWeek`,
validate: (val) => {
if (frequency === 'week') {
return required(t`Select a value for this field`)(val?.length > 0);
}
return undefined;
},
});
const [end, endMeta] = useField({
name: 'end',
name: `${prefix}.end`,
validate: required(t`Select a value for this field`),
});
const [interval, intervalMeta] = useField({
name: 'interval',
name: `${prefix}.interval`,
validate: requiredPositiveInteger(),
});
const [runOn, runOnMeta] = useField({
name: 'runOn',
validate: required(t`Select a value for this field`),
});
const [frequency] = useField({
name: 'frequency',
});
useField({
name: 'occurrences',
validate: requiredPositiveInteger(),
name: `${prefix}.runOn`,
validate: (val) => {
if (frequency === 'month' || frequency === 'year') {
return required(t`Select a value for this field`)(val);
}
return undefined;
},
});

const monthOptions = [
@@ -170,7 +155,8 @@ const FrequencyDetailSubform = () => {
];

const updateDaysOfWeek = (day, checked) => {
const newDaysOfWeek = [...daysOfWeek.value];
const newDaysOfWeek = daysOfWeek.value ? [...daysOfWeek.value] : [];
daysOfWeekHelpers.setTouched(true);
if (checked) {
newDaysOfWeek.push(day);
daysOfWeekHelpers.setValue(newDaysOfWeek);
@@ -181,10 +167,29 @@ const FrequencyDetailSubform = () => {
}
};

const getPeriodLabel = () => {
switch (frequency) {
case 'minute':
return t`Minute`;
case 'hour':
return t`Hour`;
case 'day':
return t`Day`;
case 'week':
return t`Week`;
case 'month':
return t`Month`;
case 'year':
return t`Year`;
default:
throw new Error(t`Frequency did not match an expected value`);
}
};

const getRunEveryLabel = () => {
const intervalValue = interval.value;

switch (frequency.value) {
switch (frequency) {
case 'minute':
return <Plural value={intervalValue} one="minute" other="minutes" />;
case 'hour':
@@ -202,12 +207,14 @@ const FrequencyDetailSubform = () => {
}
};

/* eslint-disable no-restricted-globals */
return (
<>
<p css="grid-column: 1/-1">
<b>{getPeriodLabel()}</b>
</p>
<FormGroup
name="interval"
fieldId="schedule-run-every"
name={`${prefix}.interval`}
fieldId={`schedule-run-every-${id}`}
helperTextInvalid={intervalMeta.error}
isRequired
validated={
@@ -218,7 +225,7 @@ const FrequencyDetailSubform = () => {
<div css="display: flex">
<TextInput
css="margin-right: 10px;"
id="schedule-run-every"
id={`schedule-run-every-${id}`}
type="number"
min="1"
step="1"
@@ -230,10 +237,10 @@ const FrequencyDetailSubform = () => {
<RunEveryLabel>{getRunEveryLabel()}</RunEveryLabel>
</div>
</FormGroup>
{frequency?.value === 'week' && (
{frequency === 'week' && (
<FormGroup
name="daysOfWeek"
fieldId="schedule-days-of-week"
name={`${prefix}.daysOfWeek`}
fieldId={`schedule-days-of-week-${id}`}
helperTextInvalid={daysOfWeekMeta.error}
isRequired
validated={
@@ -246,89 +253,89 @@ const FrequencyDetailSubform = () => {
<div css="display: flex">
<Checkbox
label={t`Sun`}
isChecked={daysOfWeek.value.includes(RRule.SU)}
isChecked={daysOfWeek.value?.includes(RRule.SU)}
onChange={(checked) => {
updateDaysOfWeek(RRule.SU, checked);
}}
aria-label={t`Sunday`}
id="schedule-days-of-week-sun"
ouiaId="schedule-days-of-week-sun"
name="daysOfWeek"
id={`schedule-days-of-week-sun-${id}`}
ouiaId={`schedule-days-of-week-sun-${id}`}
name={`${prefix}.daysOfWeek`}
/>
<Checkbox
label={t`Mon`}
isChecked={daysOfWeek.value.includes(RRule.MO)}
isChecked={daysOfWeek.value?.includes(RRule.MO)}
onChange={(checked) => {
updateDaysOfWeek(RRule.MO, checked);
}}
aria-label={t`Monday`}
id="schedule-days-of-week-mon"
ouiaId="schedule-days-of-week-mon"
name="daysOfWeek"
id={`schedule-days-of-week-mon-${id}`}
ouiaId={`schedule-days-of-week-mon-${id}`}
name={`${prefix}.daysOfWeek`}
/>
<Checkbox
label={t`Tue`}
isChecked={daysOfWeek.value.includes(RRule.TU)}
isChecked={daysOfWeek.value?.includes(RRule.TU)}
onChange={(checked) => {
updateDaysOfWeek(RRule.TU, checked);
}}
aria-label={t`Tuesday`}
id="schedule-days-of-week-tue"
ouiaId="schedule-days-of-week-tue"
name="daysOfWeek"
id={`schedule-days-of-week-tue-${id}`}
ouiaId={`schedule-days-of-week-tue-${id}`}
name={`${prefix}.daysOfWeek`}
/>
<Checkbox
label={t`Wed`}
isChecked={daysOfWeek.value.includes(RRule.WE)}
isChecked={daysOfWeek.value?.includes(RRule.WE)}
onChange={(checked) => {
updateDaysOfWeek(RRule.WE, checked);
}}
aria-label={t`Wednesday`}
id="schedule-days-of-week-wed"
ouiaId="schedule-days-of-week-wed"
name="daysOfWeek"
id={`schedule-days-of-week-wed-${id}`}
ouiaId={`schedule-days-of-week-wed-${id}`}
name={`${prefix}.daysOfWeek`}
/>
<Checkbox
label={t`Thu`}
isChecked={daysOfWeek.value.includes(RRule.TH)}
isChecked={daysOfWeek.value?.includes(RRule.TH)}
onChange={(checked) => {
updateDaysOfWeek(RRule.TH, checked);
}}
aria-label={t`Thursday`}
id="schedule-days-of-week-thu"
ouiaId="schedule-days-of-week-thu"
name="daysOfWeek"
id={`schedule-days-of-week-thu-${id}`}
ouiaId={`schedule-days-of-week-thu-${id}`}
name={`${prefix}.daysOfWeek`}
/>
<Checkbox
label={t`Fri`}
isChecked={daysOfWeek.value.includes(RRule.FR)}
isChecked={daysOfWeek.value?.includes(RRule.FR)}
onChange={(checked) => {
updateDaysOfWeek(RRule.FR, checked);
}}
aria-label={t`Friday`}
id="schedule-days-of-week-fri"
ouiaId="schedule-days-of-week-fri"
name="daysOfWeek"
id={`schedule-days-of-week-fri-${id}`}
ouiaId={`schedule-days-of-week-fri-${id}`}
name={`${prefix}.daysOfWeek`}
/>
<Checkbox
label={t`Sat`}
isChecked={daysOfWeek.value.includes(RRule.SA)}
isChecked={daysOfWeek.value?.includes(RRule.SA)}
onChange={(checked) => {
updateDaysOfWeek(RRule.SA, checked);
}}
aria-label={t`Saturday`}
id="schedule-days-of-week-sat"
ouiaId="schedule-days-of-week-sat"
name="daysOfWeek"
id={`schedule-days-of-week-sat-${id}`}
ouiaId={`schedule-days-of-week-sat-${id}`}
name={`${prefix}.daysOfWeek`}
/>
</div>
</FormGroup>
)}
{(frequency?.value === 'month' || frequency?.value === 'year') &&
!isNaN(new Date(startDate.value)) && (
{(frequency === 'month' || frequency === 'year') &&
!Number.isNaN(new Date(startDate.value)) && (
<FormGroup
name="runOn"
fieldId="schedule-run-on"
name={`${prefix}.runOn`}
fieldId={`schedule-run-on-${id}`}
helperTextInvalid={runOnMeta.error}
isRequired
validated={
@@ -337,11 +344,11 @@ const FrequencyDetailSubform = () => {
label={t`Run on`}
>
<RunOnRadio
id="schedule-run-on-day"
name="runOn"
id={`schedule-run-on-day-${id}`}
name={`${prefix}.runOn`}
label={
<div css="display: flex;align-items: center;">
{frequency?.value === 'month' && (
{frequency === 'month' && (
<span
id="radio-schedule-run-on-day"
css="margin-right: 10px;"
@@ -349,9 +356,9 @@ const FrequencyDetailSubform = () => {
<Trans>Day</Trans>
</span>
)}
{frequency?.value === 'year' && (
{frequency === 'year' && (
<AnsibleSelect
id="schedule-run-on-day-month"
id={`schedule-run-on-day-month-${id}`}
css="margin-right: 10px"
isDisabled={runOn.value !== 'day'}
data={monthOptions}
@@ -359,7 +366,7 @@ const FrequencyDetailSubform = () => {
/>
)}
<TextInput
id="schedule-run-on-day-number"
id={`schedule-run-on-day-number-${id}`}
type="number"
min="1"
max="31"
@@ -380,18 +387,18 @@ const FrequencyDetailSubform = () => {
}}
/>
<RunOnRadio
id="schedule-run-on-the"
name="runOn"
id={`schedule-run-on-the-${id}`}
name={`${prefix}.runOn`}
label={
<div css="display: flex;align-items: center;">
<span
id="radio-schedule-run-on-the"
id={`radio-schedule-run-on-the-${id}`}
css="margin-right: 10px;"
>
<Trans>The</Trans>
</span>
<AnsibleSelect
id="schedule-run-on-the-occurrence"
id={`schedule-run-on-the-occurrence-${id}`}
isDisabled={runOn.value !== 'the'}
data={[
{ value: 1, key: 'first', label: t`First` },
@@ -412,7 +419,7 @@ const FrequencyDetailSubform = () => {
{...runOnTheOccurrence}
/>
<AnsibleSelect
id="schedule-run-on-the-day"
id={`schedule-run-on-the-day-${id}`}
isDisabled={runOn.value !== 'the'}
data={[
{
@@ -464,16 +471,16 @@ const FrequencyDetailSubform = () => {
]}
{...runOnTheDay}
/>
{frequency?.value === 'year' && (
{frequency === 'year' && (
<>
<span
id="of-schedule-run-on-the-month"
id={`of-schedule-run-on-the-month-${id}`}
css="margin-left: 10px;"
>
<Trans>of</Trans>
</span>
<AnsibleSelect
id="schedule-run-on-the-month"
id={`schedule-run-on-the-month-${id}`}
isDisabled={runOn.value !== 'the'}
data={monthOptions}
{...runOnTheMonth}
@@ -492,16 +499,16 @@ const FrequencyDetailSubform = () => {
</FormGroup>
)}
<FormGroup
name="end"
fieldId="schedule-end"
name={`${prefix}.end`}
fieldId={`schedule-end-${id}`}
helperTextInvalid={endMeta.error}
isRequired
validated={!endMeta.touched || !endMeta.error ? 'default' : 'error'}
label={t`End`}
>
<Radio
id="end-never"
name="end"
id={`end-never-${id}`}
name={`${prefix}.end`}
label={t`Never`}
value="never"
isChecked={end.value === 'never'}
@@ -509,11 +516,11 @@ const FrequencyDetailSubform = () => {
event.target.value = 'never';
end.onChange(event);
}}
ouiaId="end-never-radio-button"
ouiaId={`end-never-radio-button-${id}`}
/>
<Radio
id="end-after"
name="end"
id={`end-after-${id}`}
name={`${prefix}.end`}
label={t`After number of occurrences`}
value="after"
isChecked={end.value === 'after'}
@@ -521,11 +528,11 @@ const FrequencyDetailSubform = () => {
event.target.value = 'after';
end.onChange(event);
}}
ouiaId="end-after-radio-button"
ouiaId={`end-after-radio-button-${id}`}
/>
<Radio
id="end-on-date"
name="end"
id={`end-on-date-${id}`}
name={`${prefix}.end`}
label={t`On date`}
value="onDate"
isChecked={end.value === 'onDate'}
@@ -533,25 +540,24 @@ const FrequencyDetailSubform = () => {
event.target.value = 'onDate';
end.onChange(event);
}}
ouiaId="end-on-radio-button"
ouiaId={`end-on-radio-button-${id}`}
/>
</FormGroup>
{end?.value === 'after' && (
<FormField
id="schedule-occurrences"
id={`schedule-occurrences-${id}`}
label={t`Occurrences`}
name="occurrences"
name={`${prefix}.occurrences`}
type="number"
min="1"
step="1"
validate={required(null)}
isRequired
/>
)}
{end?.value === 'onDate' && (
<DateTimePicker
dateFieldName={dateFieldName}
timeFieldName={timeFieldName}
dateFieldName={`${prefix}.endDate`}
timeFieldName={`${prefix}.endTime`}
label={t`End date/time`}
/>
)}

awx/ui/src/components/Schedule/shared/FrequencySelect.js (new file, 55 lines)
@@ -0,0 +1,55 @@
import React, { useState } from 'react';
import { arrayOf, string } from 'prop-types';
import { Select, SelectOption, SelectVariant } from '@patternfly/react-core';

export default function FrequencySelect({
id,
value,
onChange,
onBlur,
placeholderText,
children,
}) {
const [isOpen, setIsOpen] = useState(false);

const onSelect = (event, selectedValue) => {
if (selectedValue === 'none') {
onChange([]);
setIsOpen(false);
return;
}
const index = value.indexOf(selectedValue);
if (index === -1) {
onChange(value.concat(selectedValue));
} else {
onChange(value.slice(0, index).concat(value.slice(index + 1)));
}
};

const onToggle = (val) => {
if (!val) {
onBlur();
}
setIsOpen(val);
};

return (
<Select
variant={SelectVariant.checkbox}
onSelect={onSelect}
selections={value}
placeholderText={placeholderText}
onToggle={onToggle}
isOpen={isOpen}
ouiaId={`frequency-select-${id}`}
>
{children}
</Select>
);
}

FrequencySelect.propTypes = {
value: arrayOf(string).isRequired,
};

export { SelectOption, SelectVariant };
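
// A hedged usage sketch for the new multi-select (option set assumed
// from the form's frequency list; selecting 'none' clears everything):
//
//   <FrequencySelect
//     id="schedule-frequency"
//     value={frequency}               // e.g. ['week']
//     onChange={setFrequency}         // toggles the clicked option
//     onBlur={() => helpers.setTouched(true)}
//     placeholderText={t`Select frequency`}
//   >
//     <SelectOption value="none">{t`None (run once)`}</SelectOption>
//     <SelectOption value="minute">{t`Minute`}</SelectOption>
//     <SelectOption value="week">{t`Week`}</SelectOption>
//   </FrequencySelect>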

@@ -3,38 +3,23 @@ import { shape, func } from 'prop-types';

import { DateTime } from 'luxon';
import { t } from '@lingui/macro';
import { Formik, useField } from 'formik';
import { Formik } from 'formik';
import { RRule } from 'rrule';
import {
Button,
Form,
FormGroup,
Title,
ActionGroup,
// To be removed once UI completes complex schedules
Alert,
} from '@patternfly/react-core';
import { Config, useConfig } from 'contexts/Config';
import { Button, Form, ActionGroup } from '@patternfly/react-core';
import { Config } from 'contexts/Config';
import { SchedulesAPI } from 'api';
import { dateToInputDateTime } from 'util/dates';
import useRequest from 'hooks/useRequest';
import { required } from 'util/validators';
import { parseVariableField } from 'util/yaml';
import Popover from '../../Popover';
import AnsibleSelect from '../../AnsibleSelect';
import ContentError from '../../ContentError';
import ContentLoading from '../../ContentLoading';
import FormField, { FormSubmitError } from '../../FormField';
import {
FormColumnLayout,
SubFormLayout,
FormFullWidthLayout,
} from '../../FormLayout';
import FrequencyDetailSubform from './FrequencyDetailSubform';
import { FormSubmitError } from '../../FormField';
import { FormColumnLayout, FormFullWidthLayout } from '../../FormLayout';
import SchedulePromptableFields from './SchedulePromptableFields';
import DateTimePicker from './DateTimePicker';
import ScheduleFormFields from './ScheduleFormFields';
import UnsupportedScheduleForm from './UnsupportedScheduleForm';
import parseRuleObj, { UnsupportedRRuleError } from './parseRuleObj';
import buildRuleObj from './buildRuleObj';
import helpText from '../../../screens/Template/shared/JobTemplate.helptext';

const NUM_DAYS_PER_FREQUENCY = {
week: 7,
@@ -42,173 +27,6 @@ const NUM_DAYS_PER_FREQUENCY = {
year: 365,
};

const generateRunOnTheDay = (days = []) => {
if (
[
RRule.MO,
RRule.TU,
RRule.WE,
RRule.TH,
RRule.FR,
RRule.SA,
RRule.SU,
].every((element) => days.indexOf(element) > -1)
) {
return 'day';
}
if (
[RRule.MO, RRule.TU, RRule.WE, RRule.TH, RRule.FR].every(
(element) => days.indexOf(element) > -1
)
) {
return 'weekday';
}
if ([RRule.SA, RRule.SU].every((element) => days.indexOf(element) > -1)) {
return 'weekendDay';
}
if (days.indexOf(RRule.MO) > -1) {
return 'monday';
}
if (days.indexOf(RRule.TU) > -1) {
return 'tuesday';
}
if (days.indexOf(RRule.WE) > -1) {
return 'wednesday';
}
if (days.indexOf(RRule.TH) > -1) {
return 'thursday';
}
if (days.indexOf(RRule.FR) > -1) {
return 'friday';
}
if (days.indexOf(RRule.SA) > -1) {
return 'saturday';
}
if (days.indexOf(RRule.SU) > -1) {
return 'sunday';
}

return null;
};
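
// A brief sketch of the removed helper's mapping (inputs assumed): it
// collapses an rrule BYDAY list into the form's runOnTheDay choice,
// checking the aggregate buckets before single weekdays.
//
//   generateRunOnTheDay([RRule.SA, RRule.SU]);           // => 'weekendDay'
//   generateRunOnTheDay([RRule.MO, RRule.TU, RRule.WE,
//                        RRule.TH, RRule.FR]);           // => 'weekday'
//   generateRunOnTheDay([RRule.TU]);                     // => 'tuesday'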

function ScheduleFormFields({ hasDaysToKeepField, zoneOptions, zoneLinks }) {
const [timezone, timezoneMeta] = useField({
name: 'timezone',
validate: required(t`Select a value for this field`),
});
const [frequency, frequencyMeta] = useField({
name: 'frequency',
validate: required(t`Select a value for this field`),
});
const [{ name: dateFieldName }] = useField('startDate');
const [{ name: timeFieldName }] = useField('startTime');
const [timezoneMessage, setTimezoneMessage] = useState('');
const warnLinkedTZ = (event, selectedValue) => {
if (zoneLinks[selectedValue]) {
setTimezoneMessage(
`Warning: ${selectedValue} is a link to ${zoneLinks[selectedValue]} and will be saved as that.`
);
} else {
setTimezoneMessage('');
}
timezone.onChange(event, selectedValue);
};

let timezoneValidatedStatus = 'default';
if (timezoneMeta.touched && timezoneMeta.error) {
timezoneValidatedStatus = 'error';
} else if (timezoneMessage) {
timezoneValidatedStatus = 'warning';
}

const config = useConfig();

return (
<>
<FormField
id="schedule-name"
label={t`Name`}
name="name"
type="text"
validate={required(null)}
isRequired
/>
<FormField
id="schedule-description"
label={t`Description`}
name="description"
type="text"
/>
<DateTimePicker
dateFieldName={dateFieldName}
timeFieldName={timeFieldName}
label={t`Start date/time`}
/>
<FormGroup
name="timezone"
fieldId="schedule-timezone"
helperTextInvalid={timezoneMeta.error || timezoneMessage}
isRequired
validated={timezoneValidatedStatus}
label={t`Local time zone`}
helperText={timezoneMessage}
labelIcon={<Popover content={helpText.localTimeZone(config)} />}
>
<AnsibleSelect
id="schedule-timezone"
data={zoneOptions}
{...timezone}
onChange={warnLinkedTZ}
/>
</FormGroup>
<FormGroup
name="frequency"
fieldId="schedule-requency"
helperTextInvalid={frequencyMeta.error}
isRequired
validated={
!frequencyMeta.touched || !frequencyMeta.error ? 'default' : 'error'
}
label={t`Run frequency`}
>
<AnsibleSelect
id="schedule-frequency"
data={[
{ value: 'none', key: 'none', label: t`None (run once)` },
{ value: 'minute', key: 'minute', label: t`Minute` },
{ value: 'hour', key: 'hour', label: t`Hour` },
{ value: 'day', key: 'day', label: t`Day` },
{ value: 'week', key: 'week', label: t`Week` },
{ value: 'month', key: 'month', label: t`Month` },
{ value: 'year', key: 'year', label: t`Year` },
]}
{...frequency}
/>
</FormGroup>
{hasDaysToKeepField ? (
<FormField
id="schedule-days-to-keep"
label={t`Days of Data to Keep`}
name="daysToKeep"
type="number"
validate={required(null)}
isRequired
/>
) : null}
{frequency.value !== 'none' && (
<SubFormLayout>
<Title size="md" headingLevel="h4">
{t`Frequency Details`}
</Title>
<FormColumnLayout>
<FrequencyDetailSubform />
</FormColumnLayout>
</SubFormLayout>
)}
</>
);
}

function ScheduleForm({
hasDaysToKeepField,
handleCancel,
@@ -415,25 +233,72 @@ function ScheduleForm({
const [currentDate, time] = dateToInputDateTime(closestQuarterHour.toISO());

const [tomorrowDate] = dateToInputDateTime(tomorrow.toISO());
const initialFrequencyOptions = {
minute: {
interval: 1,
end: 'never',
occurrences: 1,
endDate: tomorrowDate,
endTime: time,
},
hour: {
interval: 1,
end: 'never',
occurrences: 1,
endDate: tomorrowDate,
endTime: time,
},
day: {
interval: 1,
end: 'never',
occurrences: 1,
endDate: tomorrowDate,
endTime: time,
},
week: {
interval: 1,
end: 'never',
occurrences: 1,
endDate: tomorrowDate,
endTime: time,
daysOfWeek: [],
},
month: {
interval: 1,
end: 'never',
occurrences: 1,
endDate: tomorrowDate,
endTime: time,
runOn: 'day',
runOnTheOccurrence: 1,
runOnTheDay: 'sunday',
runOnDayNumber: 1,
},
year: {
interval: 1,
end: 'never',
occurrences: 1,
endDate: tomorrowDate,
endTime: time,
runOn: 'day',
runOnTheOccurrence: 1,
runOnTheDay: 'sunday',
runOnTheMonth: 1,
runOnDayMonth: 1,
runOnDayNumber: 1,
},
};

const initialValues = {
daysOfWeek: [],
description: schedule.description || '',
end: 'never',
endDate: tomorrowDate,
endTime: time,
frequency: 'none',
interval: 1,
frequency: [],
exceptionFrequency: [],
frequencyOptions: initialFrequencyOptions,
exceptionOptions: initialFrequencyOptions,
name: schedule.name || '',
occurrences: 1,
runOn: 'day',
runOnDayMonth: 1,
runOnDayNumber: 1,
runOnTheDay: 'sunday',
runOnTheMonth: 1,
runOnTheOccurrence: 1,
startDate: currentDate,
startTime: time,
timezone: schedule.timezone || 'America/New_York',
timezone: schedule.timezone || now.zoneName,
};
const submitSchedule = (
values,
@@ -465,132 +330,23 @@ function ScheduleForm({
initialValues.daysToKeep = initialDaysToKeep;
}

const overriddenValues = {};

if (Object.keys(schedule).length > 0) {
if (schedule.rrule) {
if (schedule.rrule.split(/\s+/).length > 2) {
let overriddenValues = {};
if (schedule.rrule) {
try {
overriddenValues = parseRuleObj(schedule);
} catch (error) {
if (error instanceof UnsupportedRRuleError) {
return (
<Form autoComplete="off">
<Alert
variant="danger"
isInline
ouiaId="form-submit-error-alert"
title={t`Complex schedules are not supported in the UI yet, please use the API to manage this schedule.`}
/>
<b>{t`Schedule Rules`}:</b>
<pre css="white-space: pre; font-family: var(--pf-global--FontFamily--monospace)">
{schedule.rrule}
</pre>
<ActionGroup>
<Button
ouiaId="schedule-form-cancel-button"
aria-label={t`Cancel`}
variant="secondary"
type="button"
onClick={handleCancel}
>
{t`Cancel`}
</Button>
</ActionGroup>
</Form>
<UnsupportedScheduleForm
schedule={schedule}
handleCancel={handleCancel}
/>
);
}

try {
const {
origOptions: {
bymonth,
bymonthday,
bysetpos,
byweekday,
count,
dtstart,
freq,
interval,
},
} = RRule.fromString(schedule.rrule.replace(' ', '\n'));

if (dtstart) {
const [startDate, startTime] = dateToInputDateTime(
schedule.dtstart,
schedule.timezone
);

overriddenValues.startDate = startDate;
overriddenValues.startTime = startTime;
}

if (schedule.until) {
overriddenValues.end = 'onDate';

const [endDate, endTime] = dateToInputDateTime(
schedule.until,
schedule.timezone
);

overriddenValues.endDate = endDate;
overriddenValues.endTime = endTime;
} else if (count) {
overriddenValues.end = 'after';
overriddenValues.occurrences = count;
}

if (interval) {
overriddenValues.interval = interval;
}

if (typeof freq === 'number') {
switch (freq) {
case RRule.MINUTELY:
if (schedule.dtstart !== schedule.dtend) {
overriddenValues.frequency = 'minute';
}
break;
case RRule.HOURLY:
overriddenValues.frequency = 'hour';
break;
case RRule.DAILY:
overriddenValues.frequency = 'day';
break;
case RRule.WEEKLY:
overriddenValues.frequency = 'week';
if (byweekday) {
overriddenValues.daysOfWeek = byweekday;
}
break;
case RRule.MONTHLY:
overriddenValues.frequency = 'month';
if (bymonthday) {
overriddenValues.runOnDayNumber = bymonthday;
} else if (bysetpos) {
overriddenValues.runOn = 'the';
overriddenValues.runOnTheOccurrence = bysetpos;
overriddenValues.runOnTheDay = generateRunOnTheDay(byweekday);
}
break;
case RRule.YEARLY:
overriddenValues.frequency = 'year';
if (bymonthday) {
overriddenValues.runOnDayNumber = bymonthday;
overriddenValues.runOnDayMonth = bymonth;
} else if (bysetpos) {
overriddenValues.runOn = 'the';
overriddenValues.runOnTheOccurrence = bysetpos;
overriddenValues.runOnTheDay = generateRunOnTheDay(byweekday);
overriddenValues.runOnTheMonth = bymonth;
}
break;
default:
break;
}
}
} catch (error) {
rruleError = error;
}
} else {
rruleError = new Error(t`Schedule is missing rrule`);
rruleError = error;
}
} else if (schedule.id) {
rruleError = new Error(t`Schedule is missing rrule`);
}

if (contentError || rruleError) {
@@ -601,54 +357,83 @@ function ScheduleForm({
return <ContentLoading />;
}

const validate = (values) => {
const errors = {};

values.frequency.forEach((freq) => {
const options = values.frequencyOptions[freq];
const freqErrors = {};

if (
(freq === 'month' || freq === 'year') &&
options.runOn === 'day' &&
(options.runOnDayNumber < 1 || options.runOnDayNumber > 31)
) {
freqErrors.runOn = t`Please select a day number between 1 and 31.`;
}

if (options.end === 'after' && !options.occurrences) {
freqErrors.occurrences = t`Please enter a number of occurrences.`;
}

if (options.end === 'onDate') {
if (
DateTime.fromISO(values.startDate) >=
DateTime.fromISO(options.endDate)
) {
freqErrors.endDate = t`Please select an end date/time that comes after the start date/time.`;
}

if (
DateTime.fromISO(options.endDate)
.diff(DateTime.fromISO(values.startDate), 'days')
.toObject().days < NUM_DAYS_PER_FREQUENCY[freq]
) {
const rule = new RRule(
buildRuleObj({
startDate: values.startDate,
startTime: values.startTime,
frequency: freq,
...options,
})
);
if (rule.all().length === 0) {
errors.startDate = t`Selected date range must have at least 1 schedule occurrence.`;
freqErrors.endDate = t`Selected date range must have at least 1 schedule occurrence.`;
}
}
}
if (Object.keys(freqErrors).length > 0) {
if (!errors.frequencyOptions) {
errors.frequencyOptions = {};
}
errors.frequencyOptions[freq] = freqErrors;
}
});

return errors;
};
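
// A hedged sketch of the nested Formik error shape the new validate
// returns (field values assumed): per-frequency errors live under
// frequencyOptions so each subform shows its own message.
//
//   validate({
//     startDate: '2024-01-10',
//     frequency: ['month'],
//     frequencyOptions: { month: { runOn: 'day', runOnDayNumber: 42 } },
//   });
//   // => { frequencyOptions: { month: { runOn: 'Please select a day
//   //      number between 1 and 31.' } } }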

return (
<Config>
{() => (
<Formik
initialValues={Object.assign(initialValues, overriddenValues)}
initialValues={{
...initialValues,
...overriddenValues,
frequencyOptions: {
...initialValues.frequencyOptions,
...overriddenValues.frequencyOptions,
},
exceptionOptions: {
...initialValues.exceptionOptions,
...overriddenValues.exceptionOptions,
},
}}
onSubmit={(values) => {
submitSchedule(values, launchConfig, surveyConfig, credentials);
}}
validate={(values) => {
const errors = {};
const {
end,
endDate,
frequency,
runOn,
runOnDayNumber,
startDate,
} = values;

if (
end === 'onDate' &&
DateTime.fromISO(endDate)
.diff(DateTime.fromISO(startDate), 'days')
.toObject().days < NUM_DAYS_PER_FREQUENCY[frequency]
) {
const rule = new RRule(buildRuleObj(values));
if (rule.all().length === 0) {
errors.startDate = t`Selected date range must have at least 1 schedule occurrence.`;
errors.endDate = t`Selected date range must have at least 1 schedule occurrence.`;
}
}

if (
end === 'onDate' &&
DateTime.fromISO(startDate) >= DateTime.fromISO(endDate)
) {
errors.endDate = t`Please select an end date/time that comes after the start date/time.`;
}

if (
(frequency === 'month' || frequency === 'year') &&
runOn === 'day' &&
(runOnDayNumber < 1 || runOnDayNumber > 31)
) {
errors.runOn = t`Please select a day number between 1 and 31.`;
}
return errors;
}}
validate={validate}
>
{(formik) => (
<Form autoComplete="off" onSubmit={formik.handleSubmit}>

@@ -94,7 +94,7 @@ const defaultFieldsVisible = () => {
expect(
wrapper.find('FormGroup[label="Local time zone"]').find('HelpIcon').length
).toBe(1);
expect(wrapper.find('FormGroup[label="Run frequency"]').length).toBe(1);
expect(wrapper.find('FrequencySelect').length).toBe(1);
};

const nonRRuleValuesMatch = () => {
@@ -498,21 +498,19 @@ describe('<ScheduleForm />', () => {
expect(wrapper.find('DatePicker').prop('value')).toMatch(`${date}`);
expect(wrapper.find('TimePicker').prop('time')).toMatch(`${time}`);
expect(wrapper.find('select#schedule-timezone').prop('value')).toBe(
'America/New_York'
);
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
'none'
'UTC'
);
expect(
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
).toEqual([]);
});

test('correct frequency details fields and values shown when frequency changed to minute', async () => {
const runFrequencySelect = wrapper.find(
'FormGroup[label="Run frequency"] FormSelect'
'FrequencySelect#schedule-frequency'
);
await act(async () => {
runFrequencySelect.invoke('onChange')('minute', {
target: { value: 'minute', key: 'minute', label: 'Minute' },
});
runFrequencySelect.invoke('onChange')(['minute']);
});
wrapper.update();
defaultFieldsVisible();
@@ -523,20 +521,30 @@ describe('<ScheduleForm />', () => {
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);

expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
expect(
wrapper
.find('input#schedule-run-every-frequencyOptions-minute')
.prop('value')
).toBe(1);
expect(
wrapper.find('input#end-never-frequencyOptions-minute').prop('checked')
).toBe(true);
expect(
wrapper.find('input#end-after-frequencyOptions-minute').prop('checked')
).toBe(false);
expect(
wrapper
.find('input#end-on-date-frequencyOptions-minute')
.prop('checked')
).toBe(false);
});

test('correct frequency details fields and values shown when frequency changed to hour', async () => {
const runFrequencySelect = wrapper.find(
'FormGroup[label="Run frequency"] FormSelect'
'FrequencySelect#schedule-frequency'
);
await act(async () => {
runFrequencySelect.invoke('onChange')('hour', {
target: { value: 'hour', key: 'hour', label: 'Hour' },
});
runFrequencySelect.invoke('onChange')(['hour']);
});
wrapper.update();
defaultFieldsVisible();
@@ -547,20 +555,28 @@ describe('<ScheduleForm />', () => {
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);

expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
expect(
wrapper
.find('input#schedule-run-every-frequencyOptions-hour')
.prop('value')
).toBe(1);
expect(
wrapper.find('input#end-never-frequencyOptions-hour').prop('checked')
).toBe(true);
expect(
wrapper.find('input#end-after-frequencyOptions-hour').prop('checked')
).toBe(false);
expect(
wrapper.find('input#end-on-date-frequencyOptions-hour').prop('checked')
).toBe(false);
});

test('correct frequency details fields and values shown when frequency changed to day', async () => {
const runFrequencySelect = wrapper.find(
'FormGroup[label="Run frequency"] FormSelect'
'FrequencySelect#schedule-frequency'
);
await act(async () => {
runFrequencySelect.invoke('onChange')('day', {
target: { value: 'day', key: 'day', label: 'Day' },
});
runFrequencySelect.invoke('onChange')(['day']);
});
wrapper.update();
defaultFieldsVisible();
@@ -571,20 +587,28 @@ describe('<ScheduleForm />', () => {
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);

expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
expect(
wrapper
.find('input#schedule-run-every-frequencyOptions-day')
.prop('value')
).toBe(1);
expect(
wrapper.find('input#end-never-frequencyOptions-day').prop('checked')
).toBe(true);
expect(
wrapper.find('input#end-after-frequencyOptions-day').prop('checked')
).toBe(false);
expect(
wrapper.find('input#end-on-date-frequencyOptions-day').prop('checked')
).toBe(false);
});

test('correct frequency details fields and values shown when frequency changed to week', async () => {
const runFrequencySelect = wrapper.find(
'FormGroup[label="Run frequency"] FormSelect'
'FrequencySelect#schedule-frequency'
);
await act(async () => {
runFrequencySelect.invoke('onChange')('week', {
target: { value: 'week', key: 'week', label: 'Week' },
});
runFrequencySelect.invoke('onChange')(['week']);
});
wrapper.update();
defaultFieldsVisible();
@@ -595,20 +619,28 @@ describe('<ScheduleForm />', () => {
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);

expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
expect(
wrapper
.find('input#schedule-run-every-frequencyOptions-week')
.prop('value')
).toBe(1);
expect(
wrapper.find('input#end-never-frequencyOptions-week').prop('checked')
).toBe(true);
expect(
wrapper.find('input#end-after-frequencyOptions-week').prop('checked')
).toBe(false);
expect(
wrapper.find('input#end-on-date-frequencyOptions-week').prop('checked')
).toBe(false);
});

test('correct frequency details fields and values shown when frequency changed to month', async () => {
const runFrequencySelect = wrapper.find(
'FormGroup[label="Run frequency"] FormSelect'
'FrequencySelect#schedule-frequency'
);
await act(async () => {
runFrequencySelect.invoke('onChange')('month', {
target: { value: 'month', key: 'month', label: 'Month' },
});
runFrequencySelect.invoke('onChange')(['month']);
});
wrapper.update();
defaultFieldsVisible();
@@ -619,31 +651,45 @@ describe('<ScheduleForm />', () => {
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);

expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#schedule-run-on-day').prop('checked')).toBe(
|
||||
true
|
||||
);
|
||||
expect(
|
||||
wrapper.find('input#schedule-run-on-day-number').prop('value')
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-month')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(wrapper.find('input#schedule-run-on-the').prop('checked')).toBe(
|
||||
false
|
||||
);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-month').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-month').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-on-date-frequencyOptions-month').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-frequencyOptions-month')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-number-frequencyOptions-month')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-the-frequencyOptions-month')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(wrapper.find('select#schedule-run-on-day-month').length).toBe(0);
|
||||
expect(wrapper.find('select#schedule-run-on-the-month').length).toBe(0);
|
||||
});
|
||||
|
||||
test('correct frequency details fields and values shown when frequency changed to year', async () => {
|
||||
const runFrequencySelect = wrapper.find(
|
||||
'FormGroup[label="Run frequency"] FormSelect'
|
||||
'FrequencySelect#schedule-frequency'
|
||||
);
|
||||
await act(async () => {
|
||||
runFrequencySelect.invoke('onChange')('year', {
|
||||
target: { value: 'year', key: 'year', label: 'Year' },
|
||||
});
|
||||
runFrequencySelect.invoke('onChange')(['year']);
|
||||
});
|
||||
wrapper.update();
|
||||
defaultFieldsVisible();
|
||||
@@ -654,73 +700,125 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
|
||||
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);
|
||||
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#schedule-run-on-day').prop('checked')).toBe(
|
||||
true
|
||||
);
|
||||
expect(
|
||||
wrapper.find('input#schedule-run-on-day-number').prop('value')
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-year')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-year').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-year').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-on-date-frequencyOptions-year').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-frequencyOptions-year')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-number-frequencyOptions-year')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-the-frequencyOptions-year')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('select#schedule-run-on-day-month-frequencyOptions-year')
|
||||
.length
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper.find('select#schedule-run-on-the-month-frequencyOptions-year')
|
||||
.length
|
||||
).toBe(1);
|
||||
expect(wrapper.find('input#schedule-run-on-the').prop('checked')).toBe(
|
||||
false
|
||||
);
|
||||
expect(wrapper.find('select#schedule-run-on-day-month').length).toBe(1);
|
||||
expect(wrapper.find('select#schedule-run-on-the-month').length).toBe(1);
|
||||
});
|
||||
|
||||
test('occurrences field properly shown when end after selection is made', async () => {
|
||||
await act(async () => {
|
||||
wrapper.find('FrequencySelect#schedule-frequency').invoke('onChange')([
|
||||
'minute',
|
||||
]);
|
||||
});
|
||||
wrapper.update();
|
||||
await act(async () => {
|
||||
wrapper
|
||||
.find('FormGroup[label="Run frequency"] FormSelect')
|
||||
.invoke('onChange')('minute', {
|
||||
target: { value: 'minute', key: 'minute', label: 'Minute' },
|
||||
.find('Radio#end-after-frequencyOptions-minute')
|
||||
.invoke('onChange')('after', {
|
||||
target: { name: 'frequencyOptions.minute.end' },
|
||||
});
|
||||
});
|
||||
wrapper.update();
|
||||
await act(async () => {
|
||||
wrapper.find('Radio#end-after').invoke('onChange')('after', {
|
||||
target: { name: 'end' },
|
||||
});
|
||||
});
|
||||
wrapper.update();
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-minute').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-minute').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#end-on-date-frequencyOptions-minute')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(1);
|
||||
expect(wrapper.find('input#schedule-occurrences').prop('value')).toBe(1);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-occurrences-frequencyOptions-minute')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
await act(async () => {
|
||||
wrapper.find('Radio#end-never').invoke('onChange')('never', {
|
||||
target: { name: 'end' },
|
||||
wrapper
|
||||
.find('Radio#end-never-frequencyOptions-minute')
|
||||
.invoke('onChange')('never', {
|
||||
target: { name: 'frequencyOptions.minute.end' },
|
||||
});
|
||||
});
|
||||
wrapper.update();
|
||||
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
|
||||
});
|
||||
|
||||
test('error shown when end date/time comes before start date/time', async () => {
|
||||
await act(async () => {
|
||||
wrapper.find('FrequencySelect#schedule-frequency').invoke('onChange')([
|
||||
'minute',
|
||||
]);
|
||||
});
|
||||
wrapper.update();
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-minute').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-minute').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('FormGroup[label="Run frequency"] FormSelect')
|
||||
.invoke('onChange')('minute', {
|
||||
target: { value: 'minute', key: 'minute', label: 'Minute' },
|
||||
});
|
||||
});
|
||||
wrapper.update();
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
.find('input#end-on-date-frequencyOptions-minute')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
await act(async () => {
|
||||
wrapper.find('Radio#end-on-date').invoke('onChange')('onDate', {
|
||||
target: { name: 'end' },
|
||||
wrapper
|
||||
.find('Radio#end-on-date-frequencyOptions-minute')
|
||||
.invoke('onChange')('onDate', {
|
||||
target: { name: 'frequencyOptions.minute.end' },
|
||||
});
|
||||
});
|
||||
wrapper.update();
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('#schedule-end-datetime-helper').length).toBe(0);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-minute').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-minute').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#end-on-date-frequencyOptions-minute')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
await act(async () => {
|
||||
wrapper.find('DatePicker[aria-label="End date"]').prop('onChange')(
|
||||
'2020-03-14',
|
||||
@@ -739,26 +837,29 @@ describe('<ScheduleForm />', () => {
|
||||
});
|
||||
|
||||
test('error shown when on day number is not between 1 and 31', async () => {
|
||||
act(() => {
|
||||
wrapper.find('select[id="schedule-frequency"]').invoke('onChange')(
|
||||
{
|
||||
currentTarget: { value: 'month', type: 'change' },
|
||||
target: { name: 'frequency', value: 'month' },
|
||||
},
|
||||
'month'
|
||||
);
|
||||
await act(async () => {
|
||||
wrapper.find('FrequencySelect#schedule-frequency').invoke('onChange')([
|
||||
'month',
|
||||
]);
|
||||
});
|
||||
wrapper.update();
|
||||
|
||||
act(() => {
|
||||
wrapper.find('input#schedule-run-on-day-number').simulate('change', {
|
||||
target: { value: 32, name: 'runOnDayNumber' },
|
||||
});
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-number-frequencyOptions-month')
|
||||
.simulate('change', {
|
||||
target: {
|
||||
value: 32,
|
||||
name: 'frequencyOptions.month.runOnDayNumber',
|
||||
},
|
||||
});
|
||||
});
|
||||
wrapper.update();
|
||||
|
||||
expect(
|
||||
wrapper.find('input#schedule-run-on-day-number').prop('value')
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-number-frequencyOptions-month')
|
||||
.prop('value')
|
||||
).toBe(32);
|
||||
|
||||
await act(async () => {
|
||||
@@ -766,9 +867,9 @@ describe('<ScheduleForm />', () => {
|
||||
});
|
||||
wrapper.update();
|
||||
|
||||
expect(wrapper.find('#schedule-run-on-helper').text()).toBe(
|
||||
'Please select a day number between 1 and 31.'
|
||||
);
|
||||
expect(
|
||||
wrapper.find('#schedule-run-on-frequencyOptions-month-helper').text()
|
||||
).toBe('Please select a day number between 1 and 31.');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -928,9 +1029,9 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'none'
|
||||
);
|
||||
expect(
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual([]);
|
||||
});
|
||||
|
||||
test('initially renders expected fields and values with existing schedule that runs every 10 minutes', async () => {
|
||||
@@ -966,13 +1067,25 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'minute'
|
||||
);
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(10);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual(['minute']);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-minute')
|
||||
.prop('value')
|
||||
).toBe(10);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-minute').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-minute').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#end-on-date-frequencyOptions-minute')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
test('initially renders expected fields and values with existing schedule that runs every hour 10 times', async () => {
|
||||
@@ -1009,14 +1122,28 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'hour'
|
||||
);
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#schedule-occurrences').prop('value')).toBe(10);
|
||||
expect(
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual(['hour']);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-hour')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-hour').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-hour').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-on-date-frequencyOptions-hour').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-occurrences-frequencyOptions-hour')
|
||||
.prop('value')
|
||||
).toBe(10);
|
||||
});
|
||||
|
||||
test('initially renders expected fields and values with existing schedule that runs every day', async () => {
|
||||
@@ -1053,13 +1180,23 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="End date/time"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'day'
|
||||
);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
|
||||
expect(
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual(['day']);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-day').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-day').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-on-date-frequencyOptions-day').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-day')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
});
|
||||
|
||||
test('initially renders expected fields and values with existing schedule that runs every week on m/w/f until Jan 1, 2020', async () => {
|
||||
@@ -1096,40 +1233,64 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="Run on"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'week'
|
||||
);
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-sun').prop('checked')
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual(['week']);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-week')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-week').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-mon').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-tue').prop('checked')
|
||||
wrapper.find('input#end-after-frequencyOptions-week').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-wed').prop('checked')
|
||||
wrapper.find('input#end-on-date-frequencyOptions-week').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-thu').prop('checked')
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-sun-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-fri').prop('checked')
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-mon-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#schedule-days-of-week-sat').prop('checked')
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-tue-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-wed-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-thu-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-fri-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-days-of-week-sat-frequencyOptions-week')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('DatePicker[aria-label="End date"]').prop('value')
|
||||
).toBe('2021-01-01');
|
||||
expect(
|
||||
wrapper.find('TimePicker[aria-label="End time"]').prop('value')
|
||||
).toBe('1:00 AM');
|
||||
).toBe('12:00 AM');
|
||||
});
|
||||
|
||||
test('initially renders expected fields and values with existing schedule that runs every month on the last weekday', async () => {
|
||||
@@ -1169,25 +1330,43 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'month'
|
||||
);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
|
||||
expect(wrapper.find('input#schedule-run-on-day').prop('checked')).toBe(
|
||||
false
|
||||
);
|
||||
expect(wrapper.find('input#schedule-run-on-the').prop('checked')).toBe(
|
||||
true
|
||||
);
|
||||
expect(
|
||||
wrapper.find('select#schedule-run-on-the-occurrence').prop('value')
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual(['month']);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-month').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-month').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-on-date-frequencyOptions-month').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-month')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-frequencyOptions-month')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-the-frequencyOptions-month')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('select#schedule-run-on-the-occurrence-frequencyOptions-month')
|
||||
.prop('value')
|
||||
).toBe(-1);
|
||||
expect(wrapper.find('select#schedule-run-on-the-day').prop('value')).toBe(
|
||||
'weekday'
|
||||
);
|
||||
expect(
|
||||
wrapper
|
||||
.find('select#schedule-run-on-the-day-frequencyOptions-month')
|
||||
.prop('value')
|
||||
).toBe('weekday');
|
||||
});
|
||||
|
||||
test('initially renders expected fields and values with existing schedule that runs every year on the May 6', async () => {
|
||||
@@ -1224,24 +1403,42 @@ describe('<ScheduleForm />', () => {
|
||||
expect(wrapper.find('FormGroup[label="Occurrences"]').length).toBe(0);
|
||||
|
||||
nonRRuleValuesMatch();
|
||||
expect(wrapper.find('select#schedule-frequency').prop('value')).toBe(
|
||||
'year'
|
||||
);
|
||||
expect(wrapper.find('input#end-never').prop('checked')).toBe(true);
|
||||
expect(wrapper.find('input#end-after').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#end-on-date').prop('checked')).toBe(false);
|
||||
expect(wrapper.find('input#schedule-run-every').prop('value')).toBe(1);
|
||||
expect(wrapper.find('input#schedule-run-on-day').prop('checked')).toBe(
|
||||
true
|
||||
);
|
||||
expect(wrapper.find('input#schedule-run-on-the').prop('checked')).toBe(
|
||||
false
|
||||
);
|
||||
expect(
|
||||
wrapper.find('select#schedule-run-on-day-month').prop('value')
|
||||
wrapper.find('FrequencySelect#schedule-frequency').prop('value')
|
||||
).toEqual(['year']);
|
||||
expect(
|
||||
wrapper.find('input#end-never-frequencyOptions-year').prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper.find('input#end-after-frequencyOptions-year').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper.find('input#end-on-date-frequencyOptions-year').prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-every-frequencyOptions-year')
|
||||
.prop('value')
|
||||
).toBe(1);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-frequencyOptions-year')
|
||||
.prop('checked')
|
||||
).toBe(true);
|
||||
expect(
|
||||
wrapper
|
||||
.find('input#schedule-run-on-the-frequencyOptions-year')
|
||||
.prop('checked')
|
||||
).toBe(false);
|
||||
expect(
|
||||
wrapper
|
||||
.find('select#schedule-run-on-day-month-frequencyOptions-year')
|
||||
.prop('value')
|
||||
).toBe(5);
|
||||
expect(
|
||||
wrapper.find('input#schedule-run-on-day-number').prop('value')
|
||||
wrapper
|
||||
.find('input#schedule-run-on-day-number-frequencyOptions-year')
|
||||
.prop('value')
|
||||
).toBe(6);
|
||||
});
|
||||
});
|
||||
|
||||
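The new field IDs above follow one pattern: every per-frequency control is suffixed with `-frequencyOptions-<frequency>`, mirroring the Formik field names (`frequencyOptions.minute.end`, `frequencyOptions.month.runOnDayNumber`) used in the simulated change events. A minimal sketch of the form values shape these selectors imply; the specific default values here are illustrative assumptions, not taken from this diff:

// Hypothetical illustration of the Formik values shape implied by the
// `frequencyOptions.<freq>.<field>` targets in the tests above.
const exampleValues = {
  frequency: ['minute', 'month'], // multi-select, kept in canonical order
  frequencyOptions: {
    minute: { interval: 1, end: 'never', occurrences: 1 },
    month: { interval: 1, end: 'never', runOn: 'day', runOnDayNumber: 1 },
  },
};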
awx/ui/src/components/Schedule/shared/ScheduleFormFields.js (Normal file, 194 lines)
@@ -0,0 +1,194 @@
import React, { useState } from 'react';
import { useField } from 'formik';
import { FormGroup, Title } from '@patternfly/react-core';
import { t } from '@lingui/macro';
import styled from 'styled-components';
import FormField from 'components/FormField';
import { required } from 'util/validators';
import { useConfig } from 'contexts/Config';
import Popover from '../../Popover';
import AnsibleSelect from '../../AnsibleSelect';
import FrequencySelect, { SelectOption } from './FrequencySelect';
import helpText from '../../../screens/Template/shared/JobTemplate.helptext';
import { SubFormLayout, FormColumnLayout } from '../../FormLayout';
import FrequencyDetailSubform from './FrequencyDetailSubform';
import DateTimePicker from './DateTimePicker';
import sortFrequencies from './sortFrequencies';

const SelectClearOption = styled(SelectOption)`
  & > input[type='checkbox'] {
    display: none;
  }
`;

export default function ScheduleFormFields({
  hasDaysToKeepField,
  zoneOptions,
  zoneLinks,
}) {
  const [timezone, timezoneMeta] = useField({
    name: 'timezone',
    validate: required(t`Select a value for this field`),
  });
  const [frequency, frequencyMeta, frequencyHelper] = useField({
    name: 'frequency',
    validate: required(t`Select a value for this field`),
  });
  const [timezoneMessage, setTimezoneMessage] = useState('');
  const warnLinkedTZ = (event, selectedValue) => {
    if (zoneLinks[selectedValue]) {
      setTimezoneMessage(
        t`Warning: ${selectedValue} is a link to ${zoneLinks[selectedValue]} and will be saved as that.`
      );
    } else {
      setTimezoneMessage('');
    }
    timezone.onChange(event, selectedValue);
  };
  let timezoneValidatedStatus = 'default';
  if (timezoneMeta.touched && timezoneMeta.error) {
    timezoneValidatedStatus = 'error';
  } else if (timezoneMessage) {
    timezoneValidatedStatus = 'warning';
  }
  const config = useConfig();

  // const [exceptionFrequency, exceptionFrequencyMeta, exceptionFrequencyHelper] =
  //   useField({
  //     name: 'exceptionFrequency',
  //     validate: required(t`Select a value for this field`),
  //   });

  const updateFrequency = (setFrequency) => (values) => {
    setFrequency(values.sort(sortFrequencies));
  };

  return (
    <>
      <FormField
        id="schedule-name"
        label={t`Name`}
        name="name"
        type="text"
        validate={required(null)}
        isRequired
      />
      <FormField
        id="schedule-description"
        label={t`Description`}
        name="description"
        type="text"
      />
      <DateTimePicker
        dateFieldName="startDate"
        timeFieldName="startTime"
        label={t`Start date/time`}
      />
      <FormGroup
        name="timezone"
        fieldId="schedule-timezone"
        helperTextInvalid={timezoneMeta.error || timezoneMessage}
        isRequired
        validated={timezoneValidatedStatus}
        label={t`Local time zone`}
        helperText={timezoneMessage}
        labelIcon={<Popover content={helpText.localTimeZone(config)} />}
      >
        <AnsibleSelect
          id="schedule-timezone"
          data={zoneOptions}
          {...timezone}
          onChange={warnLinkedTZ}
        />
      </FormGroup>
      <FormGroup
        name="frequency"
        fieldId="schedule-frequency"
        helperTextInvalid={frequencyMeta.error}
        validated={
          !frequencyMeta.touched || !frequencyMeta.error ? 'default' : 'error'
        }
        label={t`Repeat frequency`}
      >
        <FrequencySelect
          id="schedule-frequency"
          onChange={updateFrequency(frequencyHelper.setValue)}
          value={frequency.value}
          placeholderText={
            frequency.value.length ? t`Select frequency` : t`None (run once)`
          }
          onBlur={frequencyHelper.setTouched}
        >
          <SelectClearOption value="none">{t`None (run once)`}</SelectClearOption>
          <SelectOption value="minute">{t`Minute`}</SelectOption>
          <SelectOption value="hour">{t`Hour`}</SelectOption>
          <SelectOption value="day">{t`Day`}</SelectOption>
          <SelectOption value="week">{t`Week`}</SelectOption>
          <SelectOption value="month">{t`Month`}</SelectOption>
          <SelectOption value="year">{t`Year`}</SelectOption>
        </FrequencySelect>
      </FormGroup>
      {hasDaysToKeepField ? (
        <FormField
          id="schedule-days-to-keep"
          label={t`Days of Data to Keep`}
          name="daysToKeep"
          type="number"
          validate={required(null)}
          isRequired
        />
      ) : null}
      {frequency.value.length ? (
        <SubFormLayout>
          <Title size="md" headingLevel="h4">
            {t`Frequency Details`}
          </Title>
          {frequency.value.map((val) => (
            <FormColumnLayout key={val} stacked>
              <FrequencyDetailSubform
                frequency={val}
                prefix={`frequencyOptions.${val}`}
              />
            </FormColumnLayout>
          ))}
          {/* <Title size="md" headingLevel="h4">{t`Exceptions`}</Title>
          <FormGroup
            name="exceptions"
            fieldId="exception-frequency"
            helperTextInvalid={exceptionFrequencyMeta.error}
            validated={
              !exceptionFrequencyMeta.touched || !exceptionFrequencyMeta.error
                ? 'default'
                : 'error'
            }
            label={t`Add exceptions`}
          >
            <FrequencySelect
              variant={SelectVariant.checkbox}
              onChange={exceptionFrequencyHelper.setValue}
              value={exceptionFrequency.value}
              placeholderText={t`None`}
              onBlur={exceptionFrequencyHelper.setTouched}
            >
              <SelectClearOption value="none">{t`None`}</SelectClearOption>
              <SelectOption value="minute">{t`Minute`}</SelectOption>
              <SelectOption value="hour">{t`Hour`}</SelectOption>
              <SelectOption value="day">{t`Day`}</SelectOption>
              <SelectOption value="week">{t`Week`}</SelectOption>
              <SelectOption value="month">{t`Month`}</SelectOption>
              <SelectOption value="year">{t`Year`}</SelectOption>
            </FrequencySelect>
          </FormGroup>
          {exceptionFrequency.value.map((val) => (
            <FormColumnLayout key={val} stacked>
              <FrequencyDetailSubform
                frequency={val}
                prefix={`exceptionOptions.${val}`}
              />
            </FormColumnLayout>
          ))} */}
        </SubFormLayout>
      ) : null}
    </>
  );
}
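`updateFrequency` wraps Formik's `setValue` so the multi-select value is always stored in canonical order. A standalone sketch of that behavior; the inline comparator is a stand-in for the `sortFrequencies` module defined later in this changeset, and `console.log` stands in for the Formik field helper:

// Sketch only: inlined equivalent of sortFrequencies, for illustration.
const FREQ_ORDER = ['minute', 'hour', 'day', 'week', 'month', 'year'];
const sortFrequencies = (a, b) => FREQ_ORDER.indexOf(a) - FREQ_ORDER.indexOf(b);

const updateFrequency = (setValue) => (values) =>
  setValue(values.sort(sortFrequencies));

updateFrequency(console.log)(['month', 'minute']); // logs ['minute', 'month']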
@@ -41,20 +41,9 @@ function SchedulePromptableFields({
      resetForm({
        values: {
          ...initialValues,
-          daysOfWeek: values.daysOfWeek,
-          description: values.description,
-          end: values.end,
-          endDateTime: values.endDateTime,
-          frequency: values.frequency,
-          interval: values.interval,
-          name: values.name,
-          occurences: values.occurances,
-          runOn: values.runOn,
-          runOnDayMonth: values.runOnDayMonth,
-          runOnDayNumber: values.runOnDayNumber,
-          runOnTheDay: values.runOnTheDay,
-          runOnTheMonth: values.runOnTheMonth,
-          runOnTheOccurence: values.runOnTheOccurance,
-          startDateTime: values.startDateTime,
-          timezone: values.timezone,
        },
@@ -0,0 +1,32 @@
import React from 'react';
import { t } from '@lingui/macro';
import { Button, Form, ActionGroup, Alert } from '@patternfly/react-core';

export default function UnsupportedScheduleForm({ schedule, handleCancel }) {
  return (
    <Form autoComplete="off">
      <Alert
        variant="danger"
        isInline
        ouiaId="form-submit-error-alert"
        title={t`This schedule uses complex rules that are not supported in the
          UI. Please use the API to manage this schedule.`}
      />
      <b>{t`Schedule Rules`}:</b>
      <pre css="white-space: pre; font-family: var(--pf-global--FontFamily--monospace)">
        {schedule.rrule.split(' ').join('\n')}
      </pre>
      <ActionGroup>
        <Button
          ouiaId="schedule-form-cancel-button"
          aria-label={t`Cancel`}
          variant="secondary"
          type="button"
          onClick={handleCancel}
        >
          {t`Cancel`}
        </Button>
      </ActionGroup>
    </Form>
  );
}
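The `<pre>` block simply re-wraps the stored rule string, one component per line. For illustration (the input string here is an assumed example in the format used elsewhere in this changeset):

// Example input assumed for illustration.
const rrule =
  'DTSTART;TZID=US/Eastern:20220601T123000 RRULE:INTERVAL=1;FREQ=HOURLY';
console.log(rrule.split(' ').join('\n'));
// DTSTART;TZID=US/Eastern:20220601T123000
// RRULE:INTERVAL=1;FREQ=HOURLY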
@@ -3,30 +3,42 @@ import { RRule } from 'rrule';
 import { DateTime } from 'luxon';
 import { getRRuleDayConstants } from 'util/dates';

 window.RRule = RRule;
 window.DateTime = DateTime;

 const parseTime = (time) => [
   DateTime.fromFormat(time, 'h:mm a').hour,
   DateTime.fromFormat(time, 'h:mm a').minute,
 ];

-export default function buildRuleObj(values) {
+export function buildDtStartObj(values) {
   // Dates are formatted like "YYYY-MM-DD"
   const [startYear, startMonth, startDay] = values.startDate.split('-');
   // Times are formatted like "HH:MM:SS" or "HH:MM" if no seconds
   // have been specified
   const [startHour, startMinute] = parseTime(values.startTime);

+  const dateString = `${startYear}${pad(startMonth)}${pad(startDay)}T${pad(
+    startHour
+  )}${pad(startMinute)}00`;
+  const rruleString = values.timezone
+    ? `DTSTART;TZID=${values.timezone}:${dateString}`
+    : `DTSTART:${dateString}Z`;
+  const rule = RRule.fromString(rruleString);
+
+  return rule;
+}
+
+function pad(num) {
+  if (typeof num === 'string') {
+    return num;
+  }
+  return num < 10 ? `0${num}` : num;
+}
+
+export default function buildRuleObj(values) {
   const ruleObj = {
     interval: values.interval,
-    dtstart: new Date(
-      Date.UTC(
-        startYear,
-        parseInt(startMonth, 10) - 1,
-        startDay,
-        startHour,
-        startMinute
-      )
-    ),
     tzid: values.timezone,
   };

   switch (values.frequency) {
@@ -79,22 +91,20 @@ export default function buildRuleObj(values) {
       ruleObj.count = values.occurrences;
       break;
     case 'onDate': {
       const [endYear, endMonth, endDay] = values.endDate.split('-');

       const [endHour, endMinute] = parseTime(values.endTime);
-      ruleObj.until = new Date(
-        Date.UTC(
-          endYear,
-          parseInt(endMonth, 10) - 1,
-          endDay,
-          endHour,
-          endMinute
-        )
-      );
+      const localEndDate = DateTime.fromISO(`${values.endDate}T000000`, {
+        zone: values.timezone,
+      });
+      const localEndTime = localEndDate.set({
+        hour: endHour,
+        minute: endMinute,
+        second: 0,
+      });
+      ruleObj.until = localEndTime.toJSDate();
       break;
     }
     default:
-      throw new Error(t`End did not match an expected value`);
+      throw new Error(t`End did not match an expected value (${values.end})`);
   }
 }
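`buildDtStartObj` now emits the DTSTART portion as its own rule, so the start timestamp stays in the schedule's zone rather than being coerced through `Date.UTC`. A hedged usage sketch, with values mirroring the buildRuleSet tests below (the import path assumes the file layout shown in this diff):

// Sketch only: assumes the buildDtStartObj export shown above.
import { buildDtStartObj } from './buildRuleObj';

const rule = buildDtStartObj({
  startDate: '2022-06-01',
  startTime: '12:30 PM',
  timezone: 'US/Eastern',
});
// Per the tests below, this should print:
// DTSTART;TZID=US/Eastern:20220601T123000
console.log(rule.toString());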
awx/ui/src/components/Schedule/shared/buildRuleSet.js (Normal file, 45 lines)
@@ -0,0 +1,45 @@
import { RRule, RRuleSet } from 'rrule';
import buildRuleObj, { buildDtStartObj } from './buildRuleObj';

window.RRuleSet = RRuleSet;

const frequencies = ['minute', 'hour', 'day', 'week', 'month', 'year'];

export default function buildRuleSet(values) {
  const set = new RRuleSet();

  const startRule = buildDtStartObj({
    startDate: values.startDate,
    startTime: values.startTime,
    timezone: values.timezone,
  });
  set.rrule(startRule);

  if (values.frequency.length === 0) {
    const rule = buildRuleObj({
      startDate: values.startDate,
      startTime: values.startTime,
      timezone: values.timezone,
      frequency: 'none',
      interval: 1,
    });
    set.rrule(new RRule(rule));
  }

  frequencies.forEach((frequency) => {
    if (!values.frequency.includes(frequency)) {
      return;
    }
    const rule = buildRuleObj({
      startDate: values.startDate,
      startTime: values.startTime,
      timezone: values.timezone,
      frequency,
      ...values.frequencyOptions[frequency],
    });
    set.rrule(new RRule(rule));
  });

  // TODO: exclusions

  return set;
}
awx/ui/src/components/Schedule/shared/buildRuleSet.test.js (Normal file, 246 lines)
@@ -0,0 +1,246 @@
import { RRule } from 'rrule';
import buildRuleSet from './buildRuleSet';

import { DateTime } from 'luxon';

describe('buildRuleSet', () => {
  test('should build minutely recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['minute'],
      frequencyOptions: {
        minute: {
          interval: 1,
          end: 'never',
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=MINUTELY'
    );
  });

  test('should build hourly recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['hour'],
      frequencyOptions: {
        hour: {
          interval: 1,
          end: 'never',
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=HOURLY'
    );
  });

  test('should build daily recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['day'],
      frequencyOptions: {
        day: {
          interval: 1,
          end: 'never',
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=DAILY'
    );
  });

  test('should build weekly recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['week'],
      frequencyOptions: {
        week: {
          interval: 1,
          end: 'never',
          daysOfWeek: [RRule.SU],
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=SU'
    );
  });

  test('should build monthly by day recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['month'],
      frequencyOptions: {
        month: {
          interval: 1,
          end: 'never',
          runOn: 'day',
          runOnDayNumber: 15,
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=MONTHLY;BYMONTHDAY=15'
    );
  });

  test('should build monthly by weekday recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['month'],
      frequencyOptions: {
        month: {
          interval: 1,
          end: 'never',
          runOn: 'the',
          runOnTheOccurrence: 2,
          runOnTheDay: 'monday',
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=2;BYDAY=MO'
    );
  });

  test('should build yearly by day recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['year'],
      frequencyOptions: {
        year: {
          interval: 1,
          end: 'never',
          runOn: 'day',
          runOnDayMonth: 3,
          runOnDayNumber: 15,
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=YEARLY;BYMONTH=3;BYMONTHDAY=15'
    );
  });

  test('should build yearly by weekday recurring rrule', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['year'],
      frequencyOptions: {
        year: {
          interval: 1,
          end: 'never',
          runOn: 'the',
          runOnTheOccurrence: 4,
          runOnTheDay: 'monday',
          runOnTheMonth: 6,
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(
      'DTSTART:20220613T123000Z\nRRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=4;BYDAY=MO;BYMONTH=6'
    );
  });

  test('should build combined frequencies', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: ['minute', 'month'],
      frequencyOptions: {
        minute: {
          interval: 1,
          end: 'never',
        },
        month: {
          interval: 1,
          end: 'never',
          runOn: 'the',
          runOnTheOccurrence: 2,
          runOnTheDay: 'monday',
        },
      },
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(`DTSTART:20220613T123000Z
RRULE:INTERVAL=1;FREQ=MINUTELY
RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=2;BYDAY=MO`);
  });

  test('should build combined frequencies with end dates', () => {
    const values = {
      startDate: '2022-06-01',
      startTime: '12:30 PM',
      timezone: 'US/Eastern',
      frequency: ['hour', 'month'],
      frequencyOptions: {
        hour: {
          interval: 2,
          end: 'onDate',
          endDate: '2026-07-02',
          endTime: '1:00 PM',
          occurrences: 1,
        },
        month: {
          interval: 1,
          end: 'onDate',
          runOn: 'the',
          runOnTheOccurrence: 2,
          runOnTheDay: 'monday',
          runOnDayNumber: 1,
          endDate: '2026-06-02',
          endTime: '1:00 PM',
          occurrences: 1,
        },
      },
      exceptionFrequency: [],
      exceptionOptions: {},
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(`DTSTART;TZID=US/Eastern:20220601T123000
RRULE:INTERVAL=2;FREQ=HOURLY;UNTIL=20260702T170000Z
RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=2;BYDAY=MO;UNTIL=20260602T170000Z`);
  });
  test('should build single occurrence', () => {
    const values = {
      startDate: '2022-06-13',
      startTime: '12:30 PM',
      frequency: [],
      frequencyOptions: {},
    };

    const ruleSet = buildRuleSet(values);
    expect(ruleSet.toString()).toEqual(`DTSTART:20220613T123000Z
RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY`);
  });
});
awx/ui/src/components/Schedule/shared/parseRuleObj.js (Normal file, 235 lines)
@@ -0,0 +1,235 @@
import { RRule, rrulestr } from 'rrule';
import { dateToInputDateTime } from 'util/dates';
import { DateTime } from 'luxon';
import sortFrequencies from './sortFrequencies';

export class UnsupportedRRuleError extends Error {
  constructor(message) {
    super(message);
    this.name = 'UnsupportedRRuleError';
  }
}

export default function parseRuleObj(schedule) {
  let values = {
    frequency: [],
    frequencyOptions: {},
    exceptionFrequency: [],
    exceptionOptions: {},
    timezone: schedule.timezone,
  };
  const ruleset = rrulestr(schedule.rrule.replace(' ', '\n'), {
    forceset: true,
  });

  const ruleStrings = ruleset.valueOf();
  ruleStrings.forEach((ruleString) => {
    const type = ruleString.match(/^[A-Z]+/)[0];
    switch (type) {
      case 'DTSTART':
        values = parseDtstart(schedule, values);
        break;
      case 'RRULE':
        values = parseRrule(ruleString, schedule, values);
        break;
      default:
        throw new UnsupportedRRuleError(`Unsupported rrule type: ${type}`);
    }
  });

  if (isSingleOccurrence(values)) {
    values.frequency = [];
    values.frequencyOptions = {};
  }

  return values;
}

function isSingleOccurrence(values) {
  if (values.frequency.length > 1) {
    return false;
  }
  if (values.frequency[0] !== 'minute') {
    return false;
  }
  const options = values.frequencyOptions.minute;
  return options.end === 'after' && options.occurrences === 1;
}

function parseDtstart(schedule, values) {
  // TODO: should this rely on DTSTART in rruleset rather than schedule.dtstart?
  const [startDate, startTime] = dateToInputDateTime(
    schedule.dtstart,
    schedule.timezone
  );
  return {
    ...values,
    startDate,
    startTime,
  };
}

const frequencyTypes = {
  [RRule.MINUTELY]: 'minute',
  [RRule.HOURLY]: 'hour',
  [RRule.DAILY]: 'day',
  [RRule.WEEKLY]: 'week',
  [RRule.MONTHLY]: 'month',
  [RRule.YEARLY]: 'year',
};

function parseRrule(rruleString, schedule, values) {
  const {
    origOptions: {
      bymonth,
      bymonthday,
      bysetpos,
      byweekday,
      count,
      freq,
      interval,
      until,
    },
  } = RRule.fromString(rruleString);

  const now = DateTime.now();
  const closestQuarterHour = DateTime.fromMillis(
    Math.ceil(now.ts / 900000) * 900000
  );
  const tomorrow = closestQuarterHour.plus({ days: 1 });
  const [, time] = dateToInputDateTime(closestQuarterHour.toISO());
  const [tomorrowDate] = dateToInputDateTime(tomorrow.toISO());

  const options = {
    endDate: tomorrowDate,
    endTime: time,
    occurrences: 1,
    interval: 1,
    end: 'never',
  };

  if (until) {
    options.end = 'onDate';
    const end = DateTime.fromISO(until.toISOString());
    const [endDate, endTime] = dateToInputDateTime(end, schedule.timezone);
    options.endDate = endDate;
    options.endTime = endTime;
  } else if (count) {
    options.end = 'after';
    options.occurrences = count;
  }

  if (interval) {
    options.interval = interval;
  }

  if (typeof freq !== 'number') {
    throw new Error(`Unexpected rrule frequency: ${freq}`);
  }
  const frequency = frequencyTypes[freq];
  if (values.frequency.includes(frequency)) {
    throw new Error(`Duplicate frequency types not supported (${frequency})`);
  }

  if (freq === RRule.WEEKLY && byweekday) {
    options.daysOfWeek = byweekday;
  }

  if (freq === RRule.MONTHLY) {
    options.runOn = 'day';
    options.runOnTheOccurrence = 1;
    options.runOnTheDay = 'sunday';
    options.runOnDayNumber = 1;

    if (bymonthday) {
      options.runOnDayNumber = bymonthday;
    }
    if (bysetpos) {
      options.runOn = 'the';
      options.runOnTheOccurrence = bysetpos;
      options.runOnTheDay = generateRunOnTheDay(byweekday);
    }
  }

  if (freq === RRule.YEARLY) {
    options.runOn = 'day';
    options.runOnTheOccurrence = 1;
    options.runOnTheDay = 'sunday';
    options.runOnTheMonth = 1;
    options.runOnDayMonth = 1;
    options.runOnDayNumber = 1;

    if (bymonthday) {
      options.runOnDayNumber = bymonthday;
      options.runOnDayMonth = bymonth;
    }
    if (bysetpos) {
      options.runOn = 'the';
      options.runOnTheOccurrence = bysetpos;
      options.runOnTheDay = generateRunOnTheDay(byweekday);
      options.runOnTheMonth = bymonth;
    }
  }

  if (values.frequencyOptions.frequency) {
    throw new UnsupportedRRuleError('Duplicate frequency types not supported');
  }

  return {
    ...values,
    frequency: [...values.frequency, frequency].sort(sortFrequencies),
    frequencyOptions: {
      ...values.frequencyOptions,
      [frequency]: options,
    },
  };
}

function generateRunOnTheDay(days = []) {
  if (
    [
      RRule.MO,
      RRule.TU,
      RRule.WE,
      RRule.TH,
      RRule.FR,
      RRule.SA,
      RRule.SU,
    ].every((element) => days.indexOf(element) > -1)
  ) {
    return 'day';
  }
  if (
    [RRule.MO, RRule.TU, RRule.WE, RRule.TH, RRule.FR].every(
      (element) => days.indexOf(element) > -1
    )
  ) {
    return 'weekday';
  }
  if ([RRule.SA, RRule.SU].every((element) => days.indexOf(element) > -1)) {
    return 'weekendDay';
  }
  if (days.indexOf(RRule.MO) > -1) {
    return 'monday';
  }
  if (days.indexOf(RRule.TU) > -1) {
    return 'tuesday';
  }
  if (days.indexOf(RRule.WE) > -1) {
    return 'wednesday';
  }
  if (days.indexOf(RRule.TH) > -1) {
    return 'thursday';
  }
  if (days.indexOf(RRule.FR) > -1) {
    return 'friday';
  }
  if (days.indexOf(RRule.SA) > -1) {
    return 'saturday';
  }
  if (days.indexOf(RRule.SU) > -1) {
    return 'sunday';
  }

  return null;
}
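One subtlety worth noting in `parseRuleObj`: `String.prototype.replace` with a string pattern only replaces the first occurrence, so `schedule.rrule.replace(' ', '\n')` converts the legacy single-space `DTSTART ... RRULE:...` format into two lines, while rule sets produced by `buildRuleSet` already join their parts with newlines and pass through unchanged. A small illustration (example string assumed):

// Only the first space is replaced, which is enough for the legacy format.
const legacy =
  'DTSTART;TZID=US/Eastern:20220608T123000 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=MO';
console.log(legacy.replace(' ', '\n'));
// DTSTART;TZID=US/Eastern:20220608T123000
// RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=MO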
244
awx/ui/src/components/Schedule/shared/parseRuleObj.test.js
Normal file
244
awx/ui/src/components/Schedule/shared/parseRuleObj.test.js
Normal file
@@ -0,0 +1,244 @@
|
||||
import { DateTime, Settings } from 'luxon';
|
||||
import { RRule } from 'rrule';
|
||||
import parseRuleObj from './parseRuleObj';
|
||||
import buildRuleSet from './buildRuleSet';
|
||||
|
||||
describe(parseRuleObj, () => {
|
||||
let origNow = Settings.now;
|
||||
beforeEach(() => {
|
||||
const expectedNow = DateTime.local(2022, 6, 1, 13, 0, 0);
|
||||
Settings.now = () => expectedNow.toMillis();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
Settings.now = origNow;
|
||||
});
|
||||
|
||||
test('should parse weekly recurring rrule', () => {
|
||||
const schedule = {
|
||||
rrule:
|
||||
'DTSTART;TZID=US/Eastern:20220608T123000 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=MO',
|
||||
dtstart: '2022-06-13T16:30:00Z',
|
||||
timezone: 'US/Eastern',
|
||||
until: '',
|
||||
dtend: null,
|
||||
};
|
||||
|
||||
const parsed = parseRuleObj(schedule);
|
||||
|
||||
expect(parsed).toEqual({
|
||||
startDate: '2022-06-13',
|
||||
startTime: '12:30 PM',
|
||||
timezone: 'US/Eastern',
|
||||
frequency: ['week'],
|
||||
frequencyOptions: {
|
||||
week: {
|
||||
interval: 1,
|
||||
end: 'never',
|
||||
occurrences: 1,
|
||||
endDate: '2022-06-02',
|
||||
endTime: '1:00 PM',
|
||||
daysOfWeek: [RRule.MO],
|
||||
},
|
||||
},
|
||||
exceptionFrequency: [],
|
||||
exceptionOptions: {},
|
||||
});
|
||||
});
|
||||
|
||||
test('should parse weekly recurring rrule with end date', () => {
|
||||
const schedule = {
|
||||
rrule:
|
||||
'DTSTART;TZID=America/New_York:20200402T144500 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20210101T050000Z',
|
||||
dtstart: '2020-04-02T18:45:00Z',
|
||||
timezone: 'America/New_York',
|
||||
};
|
||||
|
||||
const parsed = parseRuleObj(schedule);
|
||||
|
||||
expect(parsed).toEqual({
|
||||
startDate: '2020-04-02',
|
||||
startTime: '2:45 PM',
|
||||
timezone: 'America/New_York',
|
||||
frequency: ['week'],
|
||||
frequencyOptions: {
|
||||
week: {
|
||||
interval: 1,
|
||||
end: 'onDate',
|
||||
occurrences: 1,
|
||||
endDate: '2021-01-01',
|
||||
endTime: '12:00 AM',
|
||||
daysOfWeek: [RRule.MO, RRule.WE, RRule.FR],
|
||||
},
|
||||
},
|
||||
exceptionFrequency: [],
|
||||
exceptionOptions: {},
|
||||
});
|
||||
});
|
||||
|
||||
test('should parse hourly rule with end date', () => {
|
||||
const schedule = {
|
||||
rrule:
|
||||
'DTSTART;TZID=US/Eastern:20220608T123000 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20230608T170000Z',
|
||||
dtstart: '2022-06-08T16:30:00Z',
|
||||
timezone: 'US/Eastern',
|
||||
};
|
||||
|
||||
const parsed = parseRuleObj(schedule);
|
||||
|
||||
expect(parsed).toEqual({
|
||||
startDate: '2022-06-08',
|
||||
startTime: '12:30 PM',
|
||||
timezone: 'US/Eastern',
|
||||
frequency: ['hour'],
|
||||
frequencyOptions: {
|
||||
hour: {
|
||||
interval: 1,
|
||||
end: 'onDate',
|
||||
occurrences: 1,
|
||||
endDate: '2023-06-08',
|
||||
endTime: '1:00 PM',
|
||||
},
|
||||
},
|
||||
exceptionFrequency: [],
|
||||
exceptionOptions: {},
|
||||
});
|
||||
});
|
||||
|
||||
// TODO: do we need to support this? It's technically invalid RRULE, but the
|
||||
// API has historically supported it as a special case (but cast to UTC?)
|
||||
test.skip('should parse hourly rule with end date in local time', () => {
|
||||
const schedule = {
|
||||
rrule:
|
||||
'DTSTART;TZID=US/Eastern:20220608T123000 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20230608T130000',
|
||||
dtstart: '2022-06-08T16:30:00',
|
||||
timezone: 'US/Eastern',
|
||||
};
|
||||
|
||||
const parsed = parseRuleObj(schedule);
|
||||
|
||||
expect(parsed).toEqual({
|
||||
startDate: '2022-06-08',
|
||||
startTime: '12:30 PM',
|
||||
timezone: 'US/Eastern',
|
||||
frequency: ['hour'],
|
||||
frequencyOptions: {
|
||||
hour: {
|
||||
interval: 1,
|
||||
end: 'onDate',
|
||||
occurrences: 1,
|
||||
endDate: '2023-06-08',
|
||||
endTime: '1:00 PM',
|
||||
},
|
||||
},
|
||||
exceptionFrequency: [],
|
||||
exceptionOptions: {},
|
||||
});
|
||||
});
|
||||
|
||||
  test('should parse non-recurring rrule', () => {
    const schedule = {
      rrule:
        'DTSTART;TZID=America/New_York:20220610T130000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
      dtstart: '2022-06-10T17:00:00Z',
      dtend: '2022-06-10T17:00:00Z',
      timezone: 'US/Eastern',
      until: '',
    };

    expect(parseRuleObj(schedule)).toEqual({
      startDate: '2022-06-10',
      startTime: '1:00 PM',
      timezone: 'US/Eastern',
      frequency: [],
      frequencyOptions: {},
      exceptionFrequency: [],
      exceptionOptions: {},
    });
  });

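  // COUNT=1 yields exactly one occurrence, which parseRuleObj reports as
  // non-recurring (frequency: []). A sketch, assuming the same 'rrule'
  // package these tests import from:
  //   rrulestr(
  //     'DTSTART;TZID=America/New_York:20220610T130000\nRRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY'
  //   ).all().length; // => 1
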
  // buildRuleSet is well-tested; use it to verify this does the inverse
  test('should re-parse built complex schedule', () => {
    const values = {
      startDate: '2022-06-01',
      startTime: '12:30 PM',
      timezone: 'US/Eastern',
      frequency: ['minute', 'month'],
      frequencyOptions: {
        minute: {
          interval: 1,
          end: 'never',
          endDate: '2022-06-02',
          endTime: '1:00 PM',
          occurrences: 1,
        },
        month: {
          interval: 1,
          end: 'never',
          runOn: 'the',
          runOnTheOccurrence: 2,
          runOnTheDay: 'monday',
          runOnDayNumber: 1,
          endDate: '2022-06-02',
          endTime: '1:00 PM',
          occurrences: 1,
        },
      },
      exceptionFrequency: [],
      exceptionOptions: {},
    };

    const ruleSet = buildRuleSet(values);
    const parsed = parseRuleObj({
      rrule: ruleSet.toString(),
      dtstart: '2022-06-01T12:30:00',
      dtend: '2022-06-01T12:30:00',
      timezone: 'US/Eastern',
    });

    expect(parsed).toEqual(values);
  });

  test('should parse built complex schedule with end dates', () => {
    const rulesetString = `DTSTART;TZID=US/Eastern:20220601T123000
RRULE:INTERVAL=2;FREQ=HOURLY;UNTIL=20260702T170000Z
RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=2;BYDAY=MO;UNTIL=20260602T170000Z`;
    const values = {
      startDate: '2022-06-01',
      startTime: '12:30 PM',
      timezone: 'US/Eastern',
      frequency: ['hour', 'month'],
      frequencyOptions: {
        hour: {
          interval: 2,
          end: 'onDate',
          endDate: '2026-07-02',
          endTime: '1:00 PM',
          occurrences: 1,
        },
        month: {
          interval: 1,
          end: 'onDate',
          runOn: 'the',
          runOnTheOccurrence: 2,
          runOnTheDay: 'monday',
          runOnDayNumber: 1,
          endDate: '2026-06-02',
          endTime: '1:00 PM',
          occurrences: 1,
        },
      },
      exceptionFrequency: [],
      exceptionOptions: {},
    };

    const parsed = parseRuleObj({
      rrule: rulesetString,
      dtstart: '2022-06-01T16:30:00Z',
      dtend: '2026-06-07T16:30:00Z',
      timezone: 'US/Eastern',
    });

    expect(parsed).toEqual(values);
  });
});
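Aside: multi-RRULE strings like the one exercised above can be parsed directly with the rrule package's rrulestr helper. A minimal sketch, assuming the same 'rrule' dependency the tests import from (not part of the diff):

import { rrulestr } from 'rrule';

// Parse a DTSTART line plus multiple RRULE lines into one RRuleSet.
const ruleSet = rrulestr(
  `DTSTART;TZID=US/Eastern:20220601T123000
RRULE:INTERVAL=2;FREQ=HOURLY;UNTIL=20260702T170000Z
RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=2;BYDAY=MO;UNTIL=20260602T170000Z`,
  { forceset: true }
);
console.log(ruleSet.all().length); // occurrences across both rules (bounded by UNTIL)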
18
awx/ui/src/components/Schedule/shared/sortFrequencies.js
Normal file
@@ -0,0 +1,18 @@
const ORDER = {
  minute: 1,
  hour: 2,
  day: 3,
  week: 4,
  month: 5,
  year: 6,
};

export default function sortFrequencies(a, b) {
  if (ORDER[a] < ORDER[b]) {
    return -1;
  }
  if (ORDER[a] > ORDER[b]) {
    return 1;
  }
  return 0;
}
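A quick usage sketch (not part of the diff) showing the ordering this comparator produces:

import sortFrequencies from 'components/Schedule/shared/sortFrequencies';

// Sorts frequency keys from smallest to largest time unit.
console.log(['month', 'minute', 'week'].sort(sortFrequencies));
// => ['minute', 'week', 'month']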
@@ -0,0 +1,123 @@
import React, { useState } from 'react';
import { useParams, Link } from 'react-router-dom';
import { t } from '@lingui/macro';
import {
  Select,
  SelectOption,
  SelectGroup,
  SelectVariant,
  Chip,
} from '@patternfly/react-core';
import ChipGroup from 'components/ChipGroup';
import { stringIsUUID } from 'util/strings';

const JOB_URL_SEGMENT_MAP = {
  job: 'playbook',
  project_update: 'project',
  system_job: 'management',
  system: 'system_job',
  inventory_update: 'inventory',
  workflow_job: 'workflow',
};

function WorkflowOutputNavigation({ relatedJobs, parentRef }) {
  const { id } = useParams();

  const relevantResults = relatedJobs.filter(
    ({
      job: jobId,
      summary_fields: {
        unified_job_template: { unified_job_type },
      },
    }) => jobId && `${jobId}` !== id && unified_job_type !== 'workflow_approval'
  );

  const [isOpen, setIsOpen] = useState(false);
  const [filterBy, setFilterBy] = useState();
  const [sortedJobs, setSortedJobs] = useState(relevantResults);

  const handleFilter = (v) => {
    if (filterBy === v) {
      setSortedJobs(relevantResults);
      setFilterBy();
    } else {
      setFilterBy(v);
      setSortedJobs(
        relevantResults.filter(
          (node) =>
            node.summary_fields.job.status === v.toLowerCase() &&
            `${node.job}` !== id
        )
      );
    }
  };

  const numSuccessJobs = relevantResults.filter(
    (node) => node.summary_fields.job.status === 'successful'
  ).length;
  const numFailedJobs = relevantResults.length - numSuccessJobs;

  return (
    <Select
      key={`${id}`}
      variant={SelectVariant.typeaheadMulti}
      menuAppendTo={parentRef?.current}
      onToggle={() => {
        setIsOpen(!isOpen);
      }}
      selections={filterBy}
      onSelect={(e, v) => {
        if (v !== 'Failed' && v !== 'Successful') return;
        handleFilter(v);
      }}
      isOpen={isOpen}
      isGrouped
      hasInlineFilter
      placeholderText={t`Workflow Job 1/${relevantResults.length}`}
      chipGroupComponent={
        <ChipGroup numChips={1} totalChips={1}>
          <Chip key={filterBy} onClick={() => handleFilter(filterBy)}>
            {[filterBy]}
          </Chip>
        </ChipGroup>
      }
    >
      {[
        <SelectGroup label={t`Workflow Statuses`} key="status">
          <SelectOption
            description={t`Filter by failed jobs`}
            key="failed"
            value={t`Failed`}
            itemCount={numFailedJobs}
          />
          <SelectOption
            description={t`Filter by successful jobs`}
            key="successful"
            value={t`Successful`}
            itemCount={numSuccessJobs}
          />
        </SelectGroup>,
        <SelectGroup label={t`Workflow Nodes`} key="nodes">
          {sortedJobs?.map((node) => (
            <SelectOption
              key={node.id}
              to={`/jobs/${
                JOB_URL_SEGMENT_MAP[
                  node.summary_fields.unified_job_template.unified_job_type
                ]
              }/${node.summary_fields.job?.id}/output`}
              component={Link}
              value={node.summary_fields.unified_job_template.name}
            >
              {stringIsUUID(node.identifier)
                ? node.summary_fields.unified_job_template.name
                : node.identifier}
            </SelectOption>
          ))}
        </SelectGroup>,
      ]}
    </Select>
  );
}

export default WorkflowOutputNavigation;
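For reference, this is how JOB_URL_SEGMENT_MAP shapes the output links the component renders; the node below is illustrative, but the field names follow the component above:

// Illustrative node shape (matches the summary_fields accessed above).
const node = {
  summary_fields: {
    unified_job_template: { unified_job_type: 'project_update' },
    job: { id: 42 },
  },
};
const segment =
  JOB_URL_SEGMENT_MAP[node.summary_fields.unified_job_template.unified_job_type];
console.log(`/jobs/${segment}/${node.summary_fields.job.id}/output`);
// => /jobs/project/42/output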
1
awx/ui/src/components/WorkflowOutputNavigation/index.js
Normal file
@@ -0,0 +1 @@
export { default } from './WorkflowOutputNavigation';
@@ -1,4 +1,4 @@
-import React, { useEffect, useCallback } from 'react';
+import React, { useEffect, useCallback, useRef } from 'react';
 import {
   Route,
   Switch,
@@ -18,6 +18,7 @@ import RoutedTabs from 'components/RoutedTabs';
 import { getSearchableKeys } from 'components/PaginatedTable';
 import useRequest from 'hooks/useRequest';
 import { getJobModel } from 'util/jobs';
+import WorkflowOutputNavigation from 'components/WorkflowOutputNavigation';
 import JobDetail from './JobDetail';
 import JobOutput from './JobOutput';
 import { WorkflowOutput } from './WorkflowOutput';
@@ -49,10 +50,12 @@ function Job({ setBreadcrumb }) {
       eventRelatedSearchableKeys,
       eventSearchableKeys,
       inventorySourceChoices,
+      relatedJobs,
     },
   } = useRequest(
     useCallback(async () => {
       let eventOptions = {};
+      let relatedJobData = {};
       const { data: jobDetailData } = await getJobModel(type).readDetail(id);
       if (type !== 'workflow_job') {
         const { data: jobEventOptions } = await getJobModel(
@@ -60,6 +63,14 @@
         ).readEventOptions(id);
         eventOptions = jobEventOptions;
       }
+      if (jobDetailData.related.source_workflow_job) {
+        const {
+          data: { results },
+        } = await getJobModel('workflow_job').readNodes(
+          jobDetailData.summary_fields.source_workflow_job.id
+        );
+        relatedJobData = results;
+      }
       if (
         jobDetailData?.summary_fields?.credentials?.find(
           (cred) => cred.kind === 'vault'
@@ -82,6 +93,7 @@
         inventorySourceChoices:
           choices?.data?.actions?.GET?.source?.choices || [],
         jobDetail: jobDetailData,
+        relatedJobs: relatedJobData,
         eventRelatedSearchableKeys: (
           eventOptions?.related_search_fields || []
         ).map((val) => val.slice(0, -8)),
@@ -93,6 +105,7 @@
       inventorySourceChoices: [],
       eventRelatedSearchableKeys: [],
       eventSearchableKeys: [],
+      relatedJobs: [],
     }
   );
 
@@ -101,7 +114,7 @@
   }, [fetchJob]);
 
   const job = useWsJob(jobDetail);
-
+  const ref = useRef(null);
   const tabsArray = [
     {
       name: (
@@ -117,6 +130,16 @@
     { name: t`Details`, link: `${match.url}/details`, id: 0 },
     { name: t`Output`, link: `${match.url}/output`, id: 1 },
   ];
+  if (relatedJobs?.length > 0) {
+    tabsArray.push({
+      name: (
+        <WorkflowOutputNavigation parentRef={ref} relatedJobs={relatedJobs} />
+      ),
+      link: undefined,
+      id: 2,
+      hasstyle: 'margin-left: auto',
+    });
+  }
 
   if (isLoading) {
     return (
@@ -147,46 +170,53 @@
 
   return (
     <PageSection>
-      <Card>
-        <RoutedTabs tabsArray={tabsArray} />
-        <Switch>
-          <Redirect from="/jobs/system/:id" to="/jobs/management/:id" exact />
-          <Redirect
-            from="/jobs/:typeSegment/:id"
-            to="/jobs/:typeSegment/:id/output"
-            exact
-          />
-          {job && [
-            <Route
-              key={job.type === 'workflow_job' ? 'workflow-details' : 'details'}
-              path="/jobs/:typeSegment/:id/details"
-            >
-              <JobDetail
-                job={job}
-                inventorySourceLabels={inventorySourceChoices}
-              />
-            </Route>,
-            <Route key="output" path="/jobs/:typeSegment/:id/output">
-              {job.type === 'workflow_job' ? (
-                <WorkflowOutput job={job} />
-              ) : (
-                <JobOutput
-                  job={job}
-                  eventRelatedSearchableKeys={eventRelatedSearchableKeys}
-                  eventSearchableKeys={eventSearchableKeys}
-                />
-              )}
-            </Route>,
-            <Route key="not-found" path="*">
-              <ContentError isNotFound>
-                <Link to={`/jobs/${typeSegment}/${id}/details`}>
-                  {t`View Job Details`}
-                </Link>
-              </ContentError>
-            </Route>,
-          ]}
-        </Switch>
-      </Card>
+      <div ref={ref}>
+        <Card>
+          <RoutedTabs
+            isWorkflow={match.url.startsWith('/jobs/workflow')}
+            tabsArray={tabsArray}
+          />
+          <Switch>
+            <Redirect from="/jobs/system/:id" to="/jobs/management/:id" exact />
+            <Redirect
+              from="/jobs/:typeSegment/:id"
+              to="/jobs/:typeSegment/:id/output"
+              exact
+            />
+            {job && [
+              <Route
+                key={
+                  job.type === 'workflow_job' ? 'workflow-details' : 'details'
+                }
+                path="/jobs/:typeSegment/:id/details"
+              >
+                <JobDetail
+                  job={job}
+                  inventorySourceLabels={inventorySourceChoices}
+                />
+              </Route>,
+              <Route key="output" path="/jobs/:typeSegment/:id/output">
+                {job.type === 'workflow_job' ? (
+                  <WorkflowOutput job={job} />
+                ) : (
+                  <JobOutput
+                    job={job}
+                    eventRelatedSearchableKeys={eventRelatedSearchableKeys}
+                    eventSearchableKeys={eventSearchableKeys}
+                  />
+                )}
+              </Route>,
+              <Route key="not-found" path="*">
+                <ContentError isNotFound>
+                  <Link to={`/jobs/${typeSegment}/${id}/details`}>
+                    {t`View Job Details`}
+                  </Link>
+                </ContentError>
+              </Route>,
+            ]}
+          </Switch>
+        </Card>
+      </div>
    </PageSection>
  );
 }
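The new wrapping div exists so the tab-mounted Select can portal its menu into the Card area via menuAppendTo. A minimal standalone sketch of that PatternFly pattern (illustrative names; not part of the diff):

import React, { useRef } from 'react';
import { Select, SelectOption, SelectVariant } from '@patternfly/react-core';

// Minimal sketch: the menu is appended to the wrapping div rather than
// document.body, so it stays positioned with the surrounding layout.
function MenuAnchorExample() {
  const ref = useRef(null);
  return (
    <div ref={ref}>
      <Select
        variant={SelectVariant.single}
        menuAppendTo={ref.current}
        onToggle={() => {}}
        isOpen={false}
      >
        <SelectOption key="one" value="one" />
      </Select>
    </div>
  );
}

export default MenuAnchorExample;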
@@ -346,6 +346,20 @@ function AWXLogin({ alt, isAuthenticated }) {
         </LoginMainFooterLinksItem>
       );
     }
+    if (authKey === 'oidc') {
+      return (
+        <LoginMainFooterLinksItem
+          data-cy="social-auth-oidc"
+          href={loginUrl}
+          key={authKey}
+          onClick={setSessionRedirect}
+        >
+          <Tooltip content={t`Sign in with OIDC`}>
+            <UserCircleIcon size="lg" />
+          </Tooltip>
+        </LoginMainFooterLinksItem>
+      );
+    }
     if (authKey.startsWith('saml')) {
       const samlIDP = authKey.split(':')[1] || null;
       return (
@@ -172,7 +172,11 @@ function NotificationTemplateDetail({ template, defaultMessages }) {
           dataCy="nt-detail-timeout"
         />
         {renderOptionsField && (
-          <Detail label={t`Email Options`} value={renderOptions} />
+          <Detail
+            label={t`Email Options`}
+            value={renderOptions}
+            helpText={helpText.emailOptions}
+          />
         )}
       </>
     )}
@@ -95,6 +95,9 @@ describe('<NotificationTemplateDetail />', () => {
         .find('Detail[label="Email Options"]')
         .containsAllMatchingElements([<li>Use SSL</li>, <li>Use TLS</li>])
     ).toEqual(true);
+    expect(
+      wrapper.find('Detail[label="Email Options"]').prop('helpText')
+    ).toBeDefined();
   });
 
   test('should render Details when defaultMessages is missing', async () => {
@@ -118,5 +121,8 @@ describe('<NotificationTemplateDetail />', () => {
         .find('Detail[label="Email Options"]')
         .containsAllMatchingElements([<li>Use SSL</li>, <li>Use TLS</li>])
     ).toEqual(true);
+    expect(
+      wrapper.find('Detail[label="Email Options"]').prop('helpText')
+    ).toBeDefined();
   });
 });