Mirror of https://github.com/ansible/awx.git (synced 2026-02-11 14:44:44 -03:30)

Compare commits: 399 commits, 21.4.0 ... 12640-Refa
*(Commit table: 399 rows, from d87c091eea through 29702400f1; only the abbreviated SHA column survived extraction, so the author, date, and message cells are omitted here.)*
*(The file header for this first hunk did not survive extraction; its contents match a Docker ignore file such as `.dockerignore`.)*

```diff
@@ -1,3 +1,2 @@
-awx/ui/node_modules
 Dockerfile
 .git
```
.github/ISSUE_TEMPLATE/feature_request.yml (46 changed lines, vendored)

```diff
@@ -20,6 +20,19 @@ body:
         - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
           required: true
 
+  - type: dropdown
+    id: feature-type
+    attributes:
+      label: Feature type
+      description: >-
+        What kind of feature is this?
+      multiple: false
+      options:
+        - "New Feature"
+        - "Enhancement to Existing Feature"
+    validations:
+      required: true
+
   - type: textarea
     id: summary
     attributes:
@@ -40,3 +53,36 @@ body:
         - label: CLI
         - label: Other
 
+  - type: textarea
+    id: steps-to-reproduce
+    attributes:
+      label: Steps to reproduce
+      description: >-
+        Describe the necessary steps to understand the scenario of the requested enhancement.
+        Include all the steps that will help the developer and QE team understand what you are requesting.
+    validations:
+      required: true
+
+  - type: textarea
+    id: current-results
+    attributes:
+      label: Current results
+      description: What is currently happening on the scenario?
+    validations:
+      required: true
+
+  - type: textarea
+    id: sugested-results
+    attributes:
+      label: Sugested feature result
+      description: What is the result this new feature will bring?
+    validations:
+      required: true
+
+  - type: textarea
+    id: additional-information
+    attributes:
+      label: Additional information
+      description: Please provide any other information you think is relevant that could help us understand your feature request.
+    validations:
+      required: false
```
.github/workflows/label_issue.yml (31 changed lines, vendored)

```diff
@@ -19,3 +19,34 @@ jobs:
           not-before: 2021-12-07T07:00:00Z
           configuration-path: .github/issue_labeler.yml
           enable-versioned-regex: 0
+
+  community:
+    runs-on: ubuntu-latest
+    name: Label Issue - Community
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v4
+      - name: Install python requests
+        run: pip install requests
+      - name: Check if user is a member of Ansible org
+        uses: jannekem/run-python-script-action@v1
+        id: check_user
+        with:
+          script: |
+            import requests
+            headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
+            response = requests.get('${{ fromJson(toJson(github.event.issue.user.url)) }}/orgs?per_page=100', headers=headers)
+            is_member = False
+            for org in response.json():
+                if org['login'] == 'ansible':
+                    is_member = True
+            if is_member:
+                print("User is member")
+            else:
+                print("User is community")
+      - name: Add community label if not a member
+        if: contains(steps.check_user.outputs.stdout, 'community')
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "community"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
```
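The inline step above decides "member vs. community" by listing the author's public org memberships. For testing the same check outside Actions, here is a minimal sketch; the function name and the `GITHUB_TOKEN` environment variable are assumptions of this sketch, not part of the workflow:

```python
# Sketch: check whether a GitHub user publicly belongs to the 'ansible'
# org, mirroring the workflow's inline script. Assumes GITHUB_TOKEN is
# exported with a personal access token.
import os
import requests

def is_ansible_member(username: str) -> bool:
    headers = {
        'Accept': 'application/vnd.github+json',
        'Authorization': f"token {os.environ['GITHUB_TOKEN']}",
    }
    # Same endpoint the workflow hits: the user's org list.
    response = requests.get(
        f'https://api.github.com/users/{username}/orgs?per_page=100',
        headers=headers,
    )
    return any(org['login'] == 'ansible' for org in response.json())

if __name__ == '__main__':
    print('member' if is_ansible_member('some-user') else 'community')
```

Note that `/users/{name}/orgs` only returns *public* memberships, so members with private membership would still be labeled "community"; the workflow shares this limitation.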
.github/workflows/label_pr.yml (31 changed lines, vendored). This is the same community-labeling job, applied to pull request authors:

```diff
@@ -18,3 +18,34 @@ jobs:
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
           configuration-path: .github/pr_labeler.yml
+
+  community:
+    runs-on: ubuntu-latest
+    name: Label PR - Community
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v4
+      - name: Install python requests
+        run: pip install requests
+      - name: Check if user is a member of Ansible org
+        uses: jannekem/run-python-script-action@v1
+        id: check_user
+        with:
+          script: |
+            import requests
+            headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
+            response = requests.get('${{ fromJson(toJson(github.event.pull_request.user.url)) }}/orgs?per_page=100', headers=headers)
+            is_member = False
+            for org in response.json():
+                if org['login'] == 'ansible':
+                    is_member = True
+            if is_member:
+                print("User is member")
+            else:
+                print("User is community")
+      - name: Add community label if not a member
+        if: contains(steps.check_user.outputs.stdout, 'community')
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "community"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/update_dependabot_prs.yml (29 lines, vendored, new file)

```diff
@@ -0,0 +1,29 @@
+---
+name: Dependency Pr Update
+on:
+  pull_request:
+    types: [labeled, opened, reopened]
+
+jobs:
+  pr-check:
+    name: Update Dependabot Prs
+    if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout branch
+        uses: actions/checkout@v3
+
+      - name: Update PR Body
+        env:
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+          OWNER: ${{ github.repository_owner }}
+          REPO: ${{ github.event.repository.name }}
+          PR: ${{github.event.pull_request.number}}
+          PR_BODY: ${{github.event.pull_request.body}}
+        run: |
+          gh pr checkout ${{ env.PR }}
+          echo "${{ env.PR_BODY }}" > my_pr_body.txt
+          echo "" >> my_pr_body.txt
+          echo "Bug, Docs Fix or other nominal change" >> my_pr_body.txt
+          gh pr edit ${{env.PR}} --body-file my_pr_body.txt
```
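The same body rewrite can be scripted outside Actions with the GitHub CLI. A minimal sketch, assuming `gh` is installed and authenticated; the function name and PR number 1234 are placeholders, not taken from the workflow:

```python
# Sketch: append a qualifier line to a PR body via the GitHub CLI,
# mirroring the workflow's run step.
import subprocess
import tempfile

def append_to_pr_body(pr_number: int, extra_line: str) -> None:
    # Fetch the current body as plain text.
    body = subprocess.run(
        ['gh', 'pr', 'view', str(pr_number), '--json', 'body', '--jq', '.body'],
        check=True, capture_output=True, text=True,
    ).stdout
    # Write body + qualifier to a temp file, then push it back.
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
        fh.write(body + '\n' + extra_line + '\n')
        path = fh.name
    subprocess.run(['gh', 'pr', 'edit', str(pr_number), '--body-file', path], check=True)

append_to_pr_body(1234, 'Bug, Docs Fix or other nominal change')
```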
.gitignore (3 changed lines, vendored)

```diff
@@ -153,9 +153,6 @@ use_dev_supervisor.txt
 /sanity/
 /awx_collection_build/
 
-# Setup for metrics gathering
-tools/prometheus/prometheus.yml
-
 .idea/*
 *.unison.tmp
 *.#
```
*(File header not captured; the hunk matches a yamllint configuration such as `.yamllint`.)*

```diff
@@ -8,6 +8,8 @@ ignore: |
   awx/ui/test/e2e/tests/smoke-vars.yml
   awx/ui/node_modules
   tools/docker-compose/_sources
+  # django template files
+  awx/api/templates/instance_install_bundle/**
 
 extends: default
 
```
*(File header not captured; the hunk matches `MANIFEST.in`.)*

```diff
@@ -3,7 +3,7 @@ recursive-include awx *.po
 recursive-include awx *.mo
 recursive-include awx/static *
 recursive-include awx/templates *.html
-recursive-include awx/api/templates *.md *.html
+recursive-include awx/api/templates *.md *.html *.yml
 recursive-include awx/ui/build *.html
 recursive-include awx/ui/build *
 recursive-include awx/playbooks *.yml
```
Makefile (99 changed lines)

```diff
@@ -72,7 +72,7 @@ clean-languages:
 	rm -f $(I18N_FLAG_FILE)
 	find ./awx/locale/ -type f -regex ".*\.mo$" -delete
 
-# Remove temporary build files, compiled Python files.
+## Remove temporary build files, compiled Python files.
 clean: clean-ui clean-api clean-awxkit clean-dist
 	rm -rf awx/public
 	rm -rf awx/lib/site-packages
@@ -94,7 +94,7 @@ clean-api:
 clean-awxkit:
 	rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
 
-# convenience target to assert environment variables are defined
+## convenience target to assert environment variables are defined
 guard-%:
 	@if [ "$${$*}" = "" ]; then \
 	    echo "The required environment variable '$*' is not set"; \
@@ -117,7 +117,7 @@ virtualenv_awx:
 	    fi; \
 	fi
 
-# Install third-party requirements needed for AWX's environment.
+## Install third-party requirements needed for AWX's environment.
 # this does not use system site packages intentionally
 requirements_awx: virtualenv_awx
 	if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -136,7 +136,7 @@ requirements_dev: requirements_awx requirements_awx_dev
 
 requirements_test: requirements
 
-# "Install" awx package in development mode.
+## "Install" awx package in development mode.
 develop:
 	@if [ "$(VIRTUAL_ENV)" ]; then \
 	    pip uninstall -y awx; \
@@ -153,21 +153,21 @@ version_file:
 	fi; \
 	$(PYTHON) -c "import awx; print(awx.__version__)" > /var/lib/awx/.awx_version; \
 
-# Refresh development environment after pulling new code.
+## Refresh development environment after pulling new code.
 refresh: clean requirements_dev version_file develop migrate
 
-# Create Django superuser.
+## Create Django superuser.
 adduser:
 	$(MANAGEMENT_COMMAND) createsuperuser
 
-# Create database tables and apply any new migrations.
+## Create database tables and apply any new migrations.
 migrate:
 	if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
 	fi; \
 	$(MANAGEMENT_COMMAND) migrate --noinput
 
-# Run after making changes to the models to create a new migration.
+## Run after making changes to the models to create a new migration.
 dbchange:
 	$(MANAGEMENT_COMMAND) makemigrations
 
@@ -218,7 +218,7 @@ wsbroadcast:
 	fi; \
 	$(PYTHON) manage.py run_wsbroadcast
 
-# Run to start the background task dispatcher for development.
+## Run to start the background task dispatcher for development.
 dispatcher:
 	@if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
@@ -226,7 +226,7 @@ dispatcher:
 	$(PYTHON) manage.py run_dispatcher
 
 
-# Run to start the zeromq callback receiver
+## Run to start the zeromq callback receiver
 receiver:
 	@if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
@@ -278,7 +278,7 @@ awx-link:
 
 TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
 PYTEST_ARGS ?= -n auto
-# Run all API unit tests.
+## Run all API unit tests.
 test:
 	if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
@@ -341,23 +341,24 @@ test_unit:
 	fi; \
 	py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit
 
-# Run all API unit tests with coverage enabled.
+## Run all API unit tests with coverage enabled.
 test_coverage:
 	@if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
 	fi; \
 	py.test --create-db --cov=awx --cov-report=xml --junitxml=./reports/junit.xml $(TEST_DIRS)
 
-# Output test coverage as HTML (into htmlcov directory).
+## Output test coverage as HTML (into htmlcov directory).
 coverage_html:
 	coverage html
 
-# Run API unit tests across multiple Python/Django versions with Tox.
+## Run API unit tests across multiple Python/Django versions with Tox.
 test_tox:
 	tox -v
 
-# Make fake data
 DATA_GEN_PRESET = ""
+## Make fake data
 bulk_data:
 	@if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
@@ -378,9 +379,10 @@ clean-ui:
 	rm -rf $(UI_BUILD_FLAG_FILE)
 
 awx/ui/node_modules:
-	NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn ci
+	NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci
 
-$(UI_BUILD_FLAG_FILE): awx/ui/node_modules
+$(UI_BUILD_FLAG_FILE):
+	$(MAKE) awx/ui/node_modules
 	$(PYTHON) tools/scripts/compilemessages.py
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run build
@@ -451,6 +453,11 @@ COMPOSE_OPTS ?=
 CONTROL_PLANE_NODE_COUNT ?= 1
 EXECUTION_NODE_COUNT ?= 2
 MINIKUBE_CONTAINER_GROUP ?= false
+EXTRA_SOURCES_ANSIBLE_OPTS ?=
+
+ifneq ($(ADMIN_PASSWORD),)
+EXTRA_SOURCES_ANSIBLE_OPTS := -e admin_password=$(ADMIN_PASSWORD) $(EXTRA_SOURCES_ANSIBLE_OPTS)
+endif
 
 docker-compose-sources: .git/hooks/pre-commit
 	@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
@@ -468,7 +475,8 @@ docker-compose-sources: .git/hooks/pre-commit
 	    -e enable_ldap=$(LDAP) \
 	    -e enable_splunk=$(SPLUNK) \
 	    -e enable_prometheus=$(PROMETHEUS) \
-	    -e enable_grafana=$(GRAFANA)
+	    -e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)
+
 
 docker-compose: awx/projects docker-compose-sources
@@ -502,7 +510,7 @@ docker-compose-container-group-clean:
 	fi
 	rm -rf tools/docker-compose-minikube/_sources/
 
-# Base development image build
+## Base development image build
 docker-compose-build:
 	ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
 	DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
@@ -520,7 +528,7 @@ docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
 
 docker-refresh: docker-clean docker-compose
 
-# Docker Development Environment with Elastic Stack Connected
+## Docker Development Environment with Elastic Stack Connected
 docker-compose-elk: awx/projects docker-compose-sources
 	docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
@@ -557,26 +565,34 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
 	    -e template_dest=_build_kube_dev \
 	    -e receptor_image=$(RECEPTOR_IMAGE)
 
+## Build awx_kube_devel image for development on local Kubernetes environment.
 awx-kube-dev-build: Dockerfile.kube-dev
 	DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
 	    --build-arg BUILDKIT_INLINE_CACHE=1 \
 	    --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
 	    -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
 
+## Build awx image for deployment on Kubernetes environment.
+awx-kube-build: Dockerfile
+	DOCKER_BUILDKIT=1 docker build -f Dockerfile \
+	    --build-arg VERSION=$(VERSION) \
+	    --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
+	    --build-arg HEADLESS=$(HEADLESS) \
+	    -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
+
 # Translation TASKS
 # --------------------------------------
 
-# generate UI .pot file, an empty template of strings yet to be translated
+## generate UI .pot file, an empty template of strings yet to be translated
 pot: $(UI_BUILD_FLAG_FILE)
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
 
-# generate UI .po files for each locale (will update translated strings for `en`)
+## generate UI .po files for each locale (will update translated strings for `en`)
 po: $(UI_BUILD_FLAG_FILE)
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
 
-# generate API django .pot .po
-LANG = "en-us"
+LANG = "en_us"
+## generate API django .pot .po
 messages:
 	@if [ "$(VENV_BASE)" ]; then \
 	    . $(VENV_BASE)/awx/bin/activate; \
@@ -585,3 +601,38 @@ messages:
 
 print-%:
 	@echo $($*)
+
+# HELP related targets
+# --------------------------------------
+
+HELP_FILTER=.PHONY
+
+## Display help targets
+help:
+	@printf "Available targets:\n"
+	@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
+
+## Display help for all targets
+help/all:
+	@printf "Available targets:\n"
+	@make -s help/generate
+
+## Generate help output from MAKEFILE_LIST
+help/generate:
+	@awk '/^[-a-zA-Z_0-9%:\\\.\/]+:/ { \
+		helpMessage = match(lastLine, /^## (.*)/); \
+		if (helpMessage) { \
+			helpCommand = $$1; \
+			helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
+			gsub("\\\\", "", helpCommand); \
+			gsub(":+$$", "", helpCommand); \
+			printf "  \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, helpMessage; \
+		} else { \
+			helpCommand = $$1; \
+			gsub("\\\\", "", helpCommand); \
+			gsub(":+$$", "", helpCommand); \
+			printf "  \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, "No help available"; \
+		} \
+	} \
+	{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
+	@printf "\n"
```
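The convention these edits introduce is that a target documents itself with a `##` comment on the line directly above it, and `help/generate` pairs the two with awk. For sanity-checking that pairing outside make, here is a rough Python equivalent; it is a sketch, not part of the Makefile, and like the awk pattern it skips targets whose names fall outside the character class (for example `$(UI_BUILD_FLAG_FILE):`):

```python
# Sketch: pair Makefile targets with the '## ...' doc comment on the
# preceding line, approximating the awk script in `help/generate`.
import re

def makefile_help(path: str = 'Makefile') -> dict:
    help_map, last_line = {}, ''
    # Same spirit as the awk pattern: a target-like token ending in ':'.
    target_re = re.compile(r'^[-a-zA-Z_0-9%:\\./]+:')
    with open(path) as fh:
        for line in fh:
            match = target_re.match(line)
            if match:
                target = match.group(0).rstrip(':')
                doc = last_line[3:].strip() if last_line.startswith('## ') else 'No help available'
                help_map[target] = doc
            last_line = line.rstrip('\n')
    return help_map

for target, doc in sorted(makefile_help().items()):
    print(f'{target:35s} {doc}')
```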
*(File header not captured; the hunk matches `awx/__init__.py`.)*

```diff
@@ -190,7 +190,7 @@ def manage():
         sys.stdout.write('%s\n' % __version__)
     # If running as a user without permission to read settings, display an
     # error message. Allow --help to still work.
-    elif settings.SECRET_KEY == 'permission-denied':
+    elif not os.getenv('SKIP_SECRET_KEY_CHECK', False) and settings.SECRET_KEY == 'permission-denied':
         if len(sys.argv) == 1 or len(sys.argv) >= 2 and sys.argv[1] in ('-h', '--help', 'help'):
             execute_from_command_line(sys.argv)
         sys.stdout.write('\n')
```
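One behavioral detail of the new guard worth noting: `os.getenv` returns the raw environment string, so *any* non-empty value of `SKIP_SECRET_KEY_CHECK` disables the check, including values like `0` or `false`. A small, self-contained demonstration (not AWX code):

```python
# Demonstration: os.getenv truthiness means "set to a non-empty value",
# not a boolean parse -- SKIP_SECRET_KEY_CHECK=0 still skips the check.
import os

for value in (None, '', '1', '0', 'false'):
    if value is None:
        os.environ.pop('SKIP_SECRET_KEY_CHECK', None)  # unset
    else:
        os.environ['SKIP_SECRET_KEY_CHECK'] = value
    skipped = bool(os.getenv('SKIP_SECRET_KEY_CHECK', False))
    print(f'value={value!r:>9}  ->', 'check skipped' if skipped else 'check runs')
```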
*(File header not captured; the hunk matches `awx/api/filters.py`.)*

```diff
@@ -157,7 +157,7 @@ class FieldLookupBackend(BaseFilterBackend):
 
     # A list of fields that we know can be filtered on without the possiblity
     # of introducing duplicates
-    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField)
+    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)
 
     def get_fields_from_lookup(self, model, lookup):
```
*(File header not captured; the hunks match `awx/api/generics.py`. They remove the `DeleteLastUnattachLabelMixin` class, its `__all__` entry, and the `current_user` cookie set in `LoggedLoginView`.)*

```diff
@@ -63,7 +63,6 @@ __all__ = [
     'SubDetailAPIView',
     'ResourceAccessList',
     'ParentMixin',
-    'DeleteLastUnattachLabelMixin',
     'SubListAttachDetachAPIView',
     'CopyAPIView',
     'BaseUsersList',
@@ -98,7 +97,6 @@ class LoggedLoginView(auth_views.LoginView):
         current_user = UserSerializer(self.request.user)
         current_user = smart_str(JSONRenderer().render(current_user.data))
         current_user = urllib.parse.quote('%s' % current_user, '')
-        ret.set_cookie('current_user', current_user, secure=settings.SESSION_COOKIE_SECURE or None)
         ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
 
         return ret
@@ -775,28 +773,6 @@ class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView):
         return {'id': None}
 
 
-class DeleteLastUnattachLabelMixin(object):
-    """
-    Models for which you want the last instance to be deleted from the database
-    when the last disassociate is called should inherit from this class. Further,
-    the model should implement is_detached()
-    """
-
-    def unattach(self, request, *args, **kwargs):
-        (sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
-        if res:
-            return res
-
-        res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id)
-
-        obj = self.model.objects.get(id=sub_id)
-
-        if obj.is_detached():
-            obj.delete()
-
-        return res
-
-
 class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
     pass
```
@@ -154,6 +154,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
|||||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
|
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
|
||||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
||||||
|
'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
|
||||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
|
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
|
||||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||||
@@ -614,7 +615,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
|
|||||||
def validate(self, attrs):
|
def validate(self, attrs):
|
||||||
attrs = super(BaseSerializer, self).validate(attrs)
|
attrs = super(BaseSerializer, self).validate(attrs)
|
||||||
try:
|
try:
|
||||||
# Create/update a model instance and run it's full_clean() method to
|
# Create/update a model instance and run its full_clean() method to
|
||||||
# do any validation implemented on the model class.
|
# do any validation implemented on the model class.
|
||||||
exclusions = self.get_validation_exclusions(self.instance)
|
exclusions = self.get_validation_exclusions(self.instance)
|
||||||
obj = self.instance or self.Meta.model()
|
obj = self.instance or self.Meta.model()
|
||||||
@@ -1470,6 +1471,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
|||||||
'allow_override',
|
'allow_override',
|
||||||
'custom_virtualenv',
|
'custom_virtualenv',
|
||||||
'default_environment',
|
'default_environment',
|
||||||
|
'signature_validation_credential',
|
||||||
) + (
|
) + (
|
||||||
'last_update_failed',
|
'last_update_failed',
|
||||||
'last_updated',
|
'last_updated',
|
||||||
@@ -1678,6 +1680,7 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
|||||||
'total_inventory_sources',
|
'total_inventory_sources',
|
||||||
'inventory_sources_with_failures',
|
'inventory_sources_with_failures',
|
||||||
'pending_deletion',
|
'pending_deletion',
|
||||||
|
'prevent_instance_group_fallback',
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
@@ -2230,6 +2233,7 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
|
|||||||
'source_project_update',
|
'source_project_update',
|
||||||
'custom_virtualenv',
|
'custom_virtualenv',
|
||||||
'instance_group',
|
'instance_group',
|
||||||
|
'scm_revision',
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
@@ -2920,6 +2924,12 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
|||||||
'ask_verbosity_on_launch',
|
'ask_verbosity_on_launch',
|
||||||
'ask_inventory_on_launch',
|
'ask_inventory_on_launch',
|
||||||
'ask_credential_on_launch',
|
'ask_credential_on_launch',
|
||||||
|
'ask_execution_environment_on_launch',
|
||||||
|
'ask_labels_on_launch',
|
||||||
|
'ask_forks_on_launch',
|
||||||
|
'ask_job_slice_count_on_launch',
|
||||||
|
'ask_timeout_on_launch',
|
||||||
|
'ask_instance_groups_on_launch',
|
||||||
'survey_enabled',
|
'survey_enabled',
|
||||||
'become_enabled',
|
'become_enabled',
|
||||||
'diff_mode',
|
'diff_mode',
|
||||||
@@ -2928,6 +2938,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
|||||||
'job_slice_count',
|
'job_slice_count',
|
||||||
'webhook_service',
|
'webhook_service',
|
||||||
'webhook_credential',
|
'webhook_credential',
|
||||||
|
'prevent_instance_group_fallback',
|
||||||
)
|
)
|
||||||
read_only_fields = ('*', 'custom_virtualenv')
|
read_only_fields = ('*', 'custom_virtualenv')
|
||||||
|
|
||||||
@@ -3182,7 +3193,7 @@ class JobRelaunchSerializer(BaseSerializer):
|
|||||||
return attrs
|
return attrs
|
||||||
|
|
||||||
|
|
||||||
class JobCreateScheduleSerializer(BaseSerializer):
|
class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer):
|
||||||
|
|
||||||
can_schedule = serializers.SerializerMethodField()
|
can_schedule = serializers.SerializerMethodField()
|
||||||
prompts = serializers.SerializerMethodField()
|
prompts = serializers.SerializerMethodField()
|
||||||
@@ -3208,14 +3219,17 @@ class JobCreateScheduleSerializer(BaseSerializer):
|
|||||||
try:
|
try:
|
||||||
config = obj.launch_config
|
config = obj.launch_config
|
||||||
ret = config.prompts_dict(display=True)
|
ret = config.prompts_dict(display=True)
|
||||||
if 'inventory' in ret:
|
for field_name in ('inventory', 'execution_environment'):
|
||||||
ret['inventory'] = self._summarize('inventory', ret['inventory'])
|
if field_name in ret:
|
||||||
if 'credentials' in ret:
|
ret[field_name] = self._summarize(field_name, ret[field_name])
|
||||||
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
|
for field_name, singular in (('credentials', 'credential'), ('instance_groups', 'instance_group')):
|
||||||
ret['credentials'] = all_creds
|
if field_name in ret:
|
||||||
|
ret[field_name] = [self._summarize(singular, obj) for obj in ret[field_name]]
|
||||||
|
if 'labels' in ret:
|
||||||
|
ret['labels'] = self._summary_field_labels(config)
|
||||||
return ret
|
return ret
|
||||||
except JobLaunchConfig.DoesNotExist:
|
except JobLaunchConfig.DoesNotExist:
|
||||||
return {'all': _('Unknown, job may have been ran before launch configurations were saved.')}
|
return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
|
||||||
|
|
||||||
|
|
||||||
class AdHocCommandSerializer(UnifiedJobSerializer):
|
class AdHocCommandSerializer(UnifiedJobSerializer):
|
||||||
@@ -3385,6 +3399,9 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
|||||||
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
|
|
||||||
|
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
|
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
model = WorkflowJobTemplate
|
model = WorkflowJobTemplate
|
||||||
fields = (
|
fields = (
|
||||||
@@ -3403,6 +3420,11 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
|||||||
'webhook_service',
|
'webhook_service',
|
||||||
'webhook_credential',
|
'webhook_credential',
|
||||||
'-execution_environment',
|
'-execution_environment',
|
||||||
|
'ask_labels_on_launch',
|
||||||
|
'ask_skip_tags_on_launch',
|
||||||
|
'ask_tags_on_launch',
|
||||||
|
'skip_tags',
|
||||||
|
'job_tags',
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
@@ -3446,7 +3468,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
|||||||
|
|
||||||
# process char_prompts, these are not direct fields on the model
|
# process char_prompts, these are not direct fields on the model
|
||||||
mock_obj = self.Meta.model()
|
mock_obj = self.Meta.model()
|
||||||
for field_name in ('scm_branch', 'limit'):
|
for field_name in ('scm_branch', 'limit', 'skip_tags', 'job_tags'):
|
||||||
if field_name in attrs:
|
if field_name in attrs:
|
||||||
setattr(mock_obj, field_name, attrs[field_name])
|
setattr(mock_obj, field_name, attrs[field_name])
|
||||||
attrs.pop(field_name)
|
attrs.pop(field_name)
|
||||||
@@ -3472,6 +3494,9 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
|||||||
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
|
|
||||||
|
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
|
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
model = WorkflowJob
|
model = WorkflowJob
|
||||||
fields = (
|
fields = (
|
||||||
@@ -3491,6 +3516,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
|||||||
'webhook_service',
|
'webhook_service',
|
||||||
'webhook_credential',
|
'webhook_credential',
|
||||||
'webhook_guid',
|
'webhook_guid',
|
||||||
|
'skip_tags',
|
||||||
|
'job_tags',
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
@@ -3607,6 +3634,9 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
|||||||
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||||
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
|
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
|
||||||
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
|
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
|
||||||
|
forks = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
|
||||||
|
job_slice_count = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
|
||||||
|
timeout = serializers.IntegerField(required=False, allow_null=True, default=None)
|
||||||
exclude_errors = ()
|
exclude_errors = ()
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
@@ -3622,13 +3652,21 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
|||||||
'skip_tags',
|
'skip_tags',
|
||||||
'diff_mode',
|
'diff_mode',
|
||||||
'verbosity',
|
'verbosity',
|
||||||
|
'execution_environment',
|
||||||
|
'forks',
|
||||||
|
'job_slice_count',
|
||||||
|
'timeout',
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
|
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
|
||||||
if obj.inventory_id:
|
if obj.inventory_id:
|
||||||
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
|
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
|
||||||
|
if obj.execution_environment_id:
|
||||||
|
res['execution_environment'] = self.reverse('api:execution_environment_detail', kwargs={'pk': obj.execution_environment_id})
|
||||||
|
res['labels'] = self.reverse('api:{}_labels_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
||||||
res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
||||||
|
res['instance_groups'] = self.reverse('api:{}_instance_groups_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
||||||
return res
|
return res
|
||||||
|
|
||||||
def _build_mock_obj(self, attrs):
|
def _build_mock_obj(self, attrs):
|
||||||
@@ -4080,7 +4118,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
|
|||||||
|
|
||||||
|
|
||||||
class JobLaunchSerializer(BaseSerializer):
|
class JobLaunchSerializer(BaseSerializer):
|
||||||
|
|
||||||
# Representational fields
|
# Representational fields
|
||||||
passwords_needed_to_start = serializers.ReadOnlyField()
|
passwords_needed_to_start = serializers.ReadOnlyField()
|
||||||
can_start_without_user_input = serializers.BooleanField(read_only=True)
|
can_start_without_user_input = serializers.BooleanField(read_only=True)
|
||||||
@@ -4103,6 +4140,12 @@ class JobLaunchSerializer(BaseSerializer):
|
|||||||
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||||
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||||
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
|
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
|
||||||
|
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
|
||||||
|
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
|
||||||
|
forks = serializers.IntegerField(required=False, write_only=True, min_value=0)
|
||||||
|
job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0)
|
||||||
|
timeout = serializers.IntegerField(required=False, write_only=True)
|
||||||
|
instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
model = JobTemplate
|
model = JobTemplate
|
||||||
@@ -4130,6 +4173,12 @@ class JobLaunchSerializer(BaseSerializer):
|
|||||||
'ask_verbosity_on_launch',
|
'ask_verbosity_on_launch',
|
||||||
'ask_inventory_on_launch',
|
'ask_inventory_on_launch',
|
||||||
'ask_credential_on_launch',
|
'ask_credential_on_launch',
|
||||||
|
'ask_execution_environment_on_launch',
|
||||||
|
'ask_labels_on_launch',
|
||||||
|
'ask_forks_on_launch',
|
||||||
|
'ask_job_slice_count_on_launch',
|
||||||
|
'ask_timeout_on_launch',
|
||||||
|
'ask_instance_groups_on_launch',
|
||||||
'survey_enabled',
|
'survey_enabled',
|
||||||
'variables_needed_to_start',
|
'variables_needed_to_start',
|
||||||
'credential_needed_to_start',
|
'credential_needed_to_start',
|
||||||
@@ -4137,6 +4186,12 @@ class JobLaunchSerializer(BaseSerializer):
|
|||||||
'job_template_data',
|
'job_template_data',
|
||||||
'defaults',
|
'defaults',
|
||||||
'verbosity',
|
'verbosity',
|
||||||
|
'execution_environment',
|
||||||
|
'labels',
|
||||||
|
'forks',
|
||||||
|
'job_slice_count',
|
||||||
|
'timeout',
|
||||||
|
'instance_groups',
|
||||||
)
|
)
|
||||||
read_only_fields = (
|
read_only_fields = (
|
||||||
'ask_scm_branch_on_launch',
|
'ask_scm_branch_on_launch',
|
||||||
@@ -4149,6 +4204,12 @@ class JobLaunchSerializer(BaseSerializer):
|
|||||||
'ask_verbosity_on_launch',
|
'ask_verbosity_on_launch',
|
||||||
'ask_inventory_on_launch',
|
'ask_inventory_on_launch',
|
||||||
'ask_credential_on_launch',
|
'ask_credential_on_launch',
|
||||||
|
'ask_execution_environment_on_launch',
|
||||||
|
'ask_labels_on_launch',
|
||||||
|
'ask_forks_on_launch',
|
||||||
|
'ask_job_slice_count_on_launch',
|
||||||
|
'ask_timeout_on_launch',
|
||||||
|
'ask_instance_groups_on_launch',
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_credential_needed_to_start(self, obj):
|
def get_credential_needed_to_start(self, obj):
|
||||||
@@ -4173,6 +4234,17 @@ class JobLaunchSerializer(BaseSerializer):
|
|||||||
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
|
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
|
||||||
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
|
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
|
||||||
defaults_dict.setdefault(field_name, []).append(cred_dict)
|
defaults_dict.setdefault(field_name, []).append(cred_dict)
|
||||||
|
elif field_name == 'execution_environment':
|
||||||
|
if obj.execution_environment_id:
|
||||||
|
defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name}
|
||||||
|
else:
|
||||||
|
defaults_dict[field_name] = {}
|
||||||
|
elif field_name == 'labels':
|
||||||
|
for label in obj.labels.all():
|
||||||
|
label_dict = {'id': label.id, 'name': label.name}
|
||||||
|
defaults_dict.setdefault(field_name, []).append(label_dict)
|
||||||
|
elif field_name == 'instance_groups':
|
||||||
|
defaults_dict[field_name] = []
|
||||||
else:
|
else:
|
||||||
defaults_dict[field_name] = getattr(obj, field_name)
|
defaults_dict[field_name] = getattr(obj, field_name)
|
||||||
return defaults_dict
|
return defaults_dict
|
||||||
@@ -4195,6 +4267,15 @@ class JobLaunchSerializer(BaseSerializer):
|
|||||||
elif template.project.status in ('error', 'failed'):
|
elif template.project.status in ('error', 'failed'):
|
||||||
errors['playbook'] = _("Missing a revision to run due to failed project update.")
|
errors['playbook'] = _("Missing a revision to run due to failed project update.")
|
||||||
|
|
||||||
|
latest_update = template.project.project_updates.last()
|
||||||
|
if latest_update is not None and latest_update.failed:
|
||||||
|
failed_validation_tasks = latest_update.project_update_events.filter(
|
||||||
|
event='runner_on_failed',
|
||||||
|
play="Perform project signature/checksum verification",
|
||||||
|
)
|
||||||
|
if failed_validation_tasks:
|
||||||
|
errors['playbook'] = _("Last project update failed due to signature validation failure.")
|
||||||
|
|
||||||
# cannot run a playbook without an inventory
|
# cannot run a playbook without an inventory
|
||||||
if template.inventory and template.inventory.pending_deletion is True:
|
if template.inventory and template.inventory.pending_deletion is True:
|
||||||
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
|
             errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")

@@ -4271,6 +4352,10 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
     scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
     workflow_job_template_data = serializers.SerializerMethodField()

+    labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
+    skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
+    job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
+
     class Meta:
         model = WorkflowJobTemplate
         fields = (
@@ -4290,8 +4375,22 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
             'workflow_job_template_data',
             'survey_enabled',
             'ask_variables_on_launch',
+            'ask_labels_on_launch',
+            'labels',
+            'ask_skip_tags_on_launch',
+            'ask_tags_on_launch',
+            'skip_tags',
+            'job_tags',
         )
-        read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
+        read_only_fields = (
+            'ask_inventory_on_launch',
+            'ask_variables_on_launch',
+            'ask_skip_tags_on_launch',
+            'ask_labels_on_launch',
+            'ask_limit_on_launch',
+            'ask_scm_branch_on_launch',
+            'ask_tags_on_launch',
+        )

     def get_survey_enabled(self, obj):
         if obj:
@@ -4299,10 +4398,15 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
         return False

     def get_defaults(self, obj):
         defaults_dict = {}
         for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
             if field_name == 'inventory':
                 defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None))
+            elif field_name == 'labels':
+                for label in obj.labels.all():
+                    label_dict = {"id": label.id, "name": label.name}
+                    defaults_dict.setdefault(field_name, []).append(label_dict)
             else:
                 defaults_dict[field_name] = getattr(obj, field_name)
         return defaults_dict
@@ -4311,6 +4415,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
         return dict(name=obj.name, id=obj.id, description=obj.description)

     def validate(self, attrs):
         template = self.instance

         accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
@@ -4328,6 +4433,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
         WFJT_inventory = template.inventory
         WFJT_limit = template.limit
         WFJT_scm_branch = template.scm_branch

         super(WorkflowJobLaunchSerializer, self).validate(attrs)
         template.extra_vars = WFJT_extra_vars
         template.inventory = WFJT_inventory
@@ -4719,6 +4825,8 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
         if isinstance(obj.unified_job_template, SystemJobTemplate):
             summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type

+        # We are not showing instance groups on summary fields because JTs don't either
+
         if 'inventory' in summary_fields:
             return summary_fields

@@ -4753,7 +4861,7 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
 class InstanceLinkSerializer(BaseSerializer):
     class Meta:
         model = InstanceLink
-        fields = ('source', 'target')
+        fields = ('source', 'target', 'link_state')

     source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
     target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
@@ -4762,63 +4870,80 @@ class InstanceLinkSerializer(BaseSerializer):
 class InstanceNodeSerializer(BaseSerializer):
     class Meta:
         model = Instance
-        fields = ('id', 'hostname', 'node_type', 'node_state')
+        fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled')

-    node_state = serializers.SerializerMethodField()
-
-    def get_node_state(self, obj):
-        if not obj.enabled:
-            return "disabled"
-        return "error" if obj.errors else "healthy"
-

 class InstanceSerializer(BaseSerializer):

+    show_capabilities = ['edit']
+
     consumed_capacity = serializers.SerializerMethodField()
     percent_capacity_remaining = serializers.SerializerMethodField()
-    jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance'), read_only=True)
+    jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
     jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
+    health_check_pending = serializers.SerializerMethodField()

     class Meta:
         model = Instance
-        read_only_fields = ('uuid', 'hostname', 'version', 'node_type')
+        read_only_fields = ('ip_address', 'uuid', 'version')
         fields = (
-            "id",
-            "type",
-            "url",
-            "related",
-            "uuid",
-            "hostname",
-            "created",
-            "modified",
-            "last_seen",
-            "last_health_check",
-            "errors",
+            'id',
+            'hostname',
+            'type',
+            'url',
+            'related',
+            'summary_fields',
+            'uuid',
+            'created',
+            'modified',
+            'last_seen',
+            'health_check_started',
+            'health_check_pending',
+            'last_health_check',
+            'errors',
             'capacity_adjustment',
-            "version",
-            "capacity",
-            "consumed_capacity",
-            "percent_capacity_remaining",
-            "jobs_running",
-            "jobs_total",
-            "cpu",
-            "memory",
-            "cpu_capacity",
-            "mem_capacity",
-            "enabled",
-            "managed_by_policy",
-            "node_type",
+            'version',
+            'capacity',
+            'consumed_capacity',
+            'percent_capacity_remaining',
+            'jobs_running',
+            'jobs_total',
+            'cpu',
+            'memory',
+            'cpu_capacity',
+            'mem_capacity',
+            'enabled',
+            'managed_by_policy',
+            'node_type',
+            'node_state',
+            'ip_address',
+            'listener_port',
         )
+        extra_kwargs = {
+            'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
+            'node_state': {'initial': Instance.States.INSTALLED, 'default': Instance.States.INSTALLED},
+        }

     def get_related(self, obj):
         res = super(InstanceSerializer, self).get_related(obj)
         res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
         res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
+        if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
+            res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
+        res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
         if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
             if obj.node_type != 'hop':
                 res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
         return res

+    def get_summary_fields(self, obj):
+        summary = super().get_summary_fields(obj)
+
+        # use this handle to distinguish between a listView and a detailView
+        if self.is_detail_view:
+            summary['links'] = InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source').filter(source=obj), many=True).data
+
+        return summary
+
     def get_consumed_capacity(self, obj):
         return obj.consumed_capacity
@@ -4828,10 +4953,54 @@ class InstanceSerializer(BaseSerializer):
         else:
             return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))

-    def validate(self, attrs):
-        if self.instance.node_type == 'hop':
-            raise serializers.ValidationError(_('Hop node instances may not be changed.'))
-        return attrs
+    def get_health_check_pending(self, obj):
+        return obj.health_check_pending
+
+    def validate(self, data):
+        if self.instance:
+            if self.instance.node_type == Instance.Types.HOP:
+                raise serializers.ValidationError("Hop node instances may not be changed.")
+        else:
+            if not settings.IS_K8S:
+                raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
+        return data
+
+    def validate_node_type(self, value):
+        if not self.instance:
+            if value not in (Instance.Types.EXECUTION,):
+                raise serializers.ValidationError("Can only create execution nodes.")
+        else:
+            if self.instance.node_type != value:
+                raise serializers.ValidationError("Cannot change node type.")
+
+        return value
+
+    def validate_node_state(self, value):
+        if self.instance:
+            if value != self.instance.node_state:
+                if not settings.IS_K8S:
+                    raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
+                if value != Instance.States.DEPROVISIONING:
+                    raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
+                if self.instance.node_type not in (Instance.Types.EXECUTION,):
+                    raise serializers.ValidationError("Can only deprovision execution nodes.")
+        else:
+            if value and value != Instance.States.INSTALLED:
+                raise serializers.ValidationError("Can only create instances in the 'installed' state.")
+
+        return value
+
+    def validate_hostname(self, value):
+        if self.instance and self.instance.hostname != value:
+            raise serializers.ValidationError("Cannot change hostname.")
+
+        return value
+
+    def validate_listener_port(self, value):
+        if self.instance and self.instance.listener_port != value:
+            raise serializers.ValidationError("Cannot change listener port.")
+
+        return value
+

 class InstanceHealthCheckSerializer(BaseSerializer):
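A note on the pattern above: the `validate_<field>` methods follow Django REST Framework's per-field hook convention, where each hook receives the already-deserialized value and runs before the object-level `validate()`. A minimal standalone sketch of the same ordering, using a hypothetical serializer (not part of this diff):

import rest_framework.serializers as serializers

class PortSketchSerializer(serializers.Serializer):
    # hypothetical field, used only to illustrate DRF's hook order
    listener_port = serializers.IntegerField(required=False)

    def validate_listener_port(self, value):
        # field-level hook: runs first, sees only this field's deserialized value
        if self.instance and self.instance.listener_port != value:
            raise serializers.ValidationError("Cannot change listener port.")
        return value

    def validate(self, data):
        # object-level hook: runs after every field-level hook has passed
        return data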
new file: awx/api/templates/instance_install_bundle/group_vars/all.yml (21 lines)
@@ -0,0 +1,21 @@
+receptor_verify: true
+receptor_tls: true
+receptor_work_commands:
+  ansible-runner:
+    command: ansible-runner
+    params: worker
+    allowruntimeparams: true
+    verifysignature: true
+custom_worksign_public_keyfile: receptor/work-public-key.pem
+custom_tls_certfile: receptor/tls/receptor.crt
+custom_tls_keyfile: receptor/tls/receptor.key
+custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
+receptor_user: awx
+receptor_group: awx
+receptor_protocol: 'tcp'
+receptor_listener: true
+receptor_port: {{ instance.listener_port }}
+receptor_dependencies:
+  - podman
+  - crun
+  - python39-pip
new file (18 lines; the filename was not preserved in this view — per generate_playbook() below it is awx/api/templates/instance_install_bundle/install_receptor.yml)
@@ -0,0 +1,18 @@
+{% verbatim %}
+---
+- hosts: all
+  become: yes
+  tasks:
+    - name: Create the receptor user
+      user:
+        name: "{{ receptor_user }}"
+        shell: /bin/bash
+    - name: Enable Copr repo for Receptor
+      command: dnf copr enable ansible-awx/receptor -y
+    - import_role:
+        name: ansible.receptor.setup
+    - name: Install ansible-runner
+      pip:
+        name: ansible-runner
+        executable: pip3.9
+{% endverbatim %}
new file: awx/api/templates/instance_install_bundle/inventory.yml (7 lines)
@@ -0,0 +1,7 @@
+---
+all:
+  hosts:
+    remote-execution:
+      ansible_host: {{ instance.hostname }}
+      ansible_user: <username> # user provided
+      ansible_ssh_private_key_file: ~/.ssh/id_rsa
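Note that these bundle files are Django templates, not static YAML: `{{ instance.hostname }}` and `{{ instance.listener_port }}` are substituted by Django when the bundle is generated, while the `{% verbatim %}` block in the playbook shields Ansible's own `{{ receptor_user }}` from Django's renderer. A hedged sketch of the rendering step, assuming a configured Django environment with awx's templates on the loader path (the stand-in instance object is hypothetical):

from django.template.loader import render_to_string

class FakeInstance:
    # hypothetical stand-in for an awx Instance row
    hostname = "exec1.example.org"
    listener_port = 27199

# renders the inventory template with the instance's values filled in
print(render_to_string("instance_install_bundle/inventory.yml", context=dict(instance=FakeInstance())))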
new file (6 lines; the filename was not preserved in this view — per generate_requirements_yml() below it is awx/api/templates/instance_install_bundle/requirements.yml)
@@ -0,0 +1,6 @@
+---
+collections:
+  - name: ansible.receptor
+    source: https://github.com/ansible/receptor-collection/
+    type: git
+    version: 0.1.1
new file: awx/api/urls/debug.py (17 lines)
@@ -0,0 +1,17 @@
+from django.urls import re_path
+
+from awx.api.views.debug import (
+    DebugRootView,
+    TaskManagerDebugView,
+    DependencyManagerDebugView,
+    WorkflowManagerDebugView,
+)
+
+urls = [
+    re_path(r'^$', DebugRootView.as_view(), name='debug'),
+    re_path(r'^task_manager/$', TaskManagerDebugView.as_view(), name='task_manager'),
+    re_path(r'^dependency_manager/$', DependencyManagerDebugView.as_view(), name='dependency_manager'),
+    re_path(r'^workflow_manager/$', WorkflowManagerDebugView.as_view(), name='workflow_manager'),
+]
+
+__all__ = ['urls']
@@ -3,7 +3,15 @@

 from django.urls import re_path

-from awx.api.views import InstanceList, InstanceDetail, InstanceUnifiedJobsList, InstanceInstanceGroupsList, InstanceHealthCheck
+from awx.api.views import (
+    InstanceList,
+    InstanceDetail,
+    InstanceUnifiedJobsList,
+    InstanceInstanceGroupsList,
+    InstanceHealthCheck,
+    InstanceInstallBundle,
+    InstancePeersList,
+)


 urls = [
@@ -12,6 +20,8 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceUnifiedJobsList.as_view(), name='instance_unified_jobs_list'),
     re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
     re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
+    re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
+    re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
 ]

 __all__ = ['urls']
@@ -3,7 +3,7 @@

 from django.urls import re_path

-from awx.api.views import LabelList, LabelDetail
+from awx.api.views.labels import LabelList, LabelDetail


 urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')]
@@ -3,7 +3,7 @@

 from django.urls import re_path

-from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList
+from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList, ScheduleLabelsList, ScheduleInstanceGroupList


 urls = [
@@ -11,6 +11,8 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'),
     re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'),
     re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'),
+    re_path(r'^(?P<pk>[0-9]+)/labels/$', ScheduleLabelsList.as_view(), name='schedule_labels_list'),
+    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', ScheduleInstanceGroupList.as_view(), name='schedule_instance_groups_list'),
 ]

 __all__ = ['urls']
@@ -2,9 +2,9 @@
 # All Rights Reserved.

 from __future__ import absolute_import, unicode_literals
-from django.conf import settings
 from django.urls import include, re_path

+from awx import MODE
 from awx.api.generics import LoggedLoginView, LoggedLogoutView
 from awx.api.views import (
     ApiRootView,
@@ -145,7 +145,12 @@ urlpatterns = [
     re_path(r'^logout/$', LoggedLogoutView.as_view(next_page='/api/', redirect_field_name='next'), name='logout'),
     re_path(r'^o/', include(oauth2_root_urls)),
 ]
-if settings.SETTINGS_MODULE == 'awx.settings.development':
+if MODE == 'development':
+    # Only include these if we are in the development environment
     from awx.api.swagger import SwaggerSchemaView

     urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
+
+    from awx.api.urls.debug import urls as debug_urls
+
+    urlpatterns += [re_path(r'^debug/', include(debug_urls))]
@@ -10,6 +10,8 @@ from awx.api.views import (
     WorkflowJobNodeFailureNodesList,
     WorkflowJobNodeAlwaysNodesList,
     WorkflowJobNodeCredentialsList,
+    WorkflowJobNodeLabelsList,
+    WorkflowJobNodeInstanceGroupsList,
 )


@@ -20,6 +22,8 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'),
     re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'),
     re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'),
+    re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobNodeLabelsList.as_view(), name='workflow_job_node_labels_list'),
+    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobNodeInstanceGroupsList.as_view(), name='workflow_job_node_instance_groups_list'),
 ]

 __all__ = ['urls']
@@ -11,6 +11,8 @@ from awx.api.views import (
     WorkflowJobTemplateNodeAlwaysNodesList,
     WorkflowJobTemplateNodeCredentialsList,
     WorkflowJobTemplateNodeCreateApproval,
+    WorkflowJobTemplateNodeLabelsList,
+    WorkflowJobTemplateNodeInstanceGroupsList,
 )


@@ -21,6 +23,8 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'),
     re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'),
     re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'),
+    re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobTemplateNodeLabelsList.as_view(), name='workflow_job_template_node_labels_list'),
+    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobTemplateNodeInstanceGroupsList.as_view(), name='workflow_job_template_node_instance_groups_list'),
     re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'),
 ]
@@ -22,6 +22,7 @@ from django.conf import settings
 from django.core.exceptions import FieldError, ObjectDoesNotExist
 from django.db.models import Q, Sum
 from django.db import IntegrityError, ProgrammingError, transaction, connection
+from django.db.models.fields.related import ManyToManyField, ForeignKey
 from django.shortcuts import get_object_or_404
 from django.utils.safestring import mark_safe
 from django.utils.timezone import now
@@ -68,7 +69,6 @@ from awx.api.generics import (
     APIView,
     BaseUsersList,
     CopyAPIView,
-    DeleteLastUnattachLabelMixin,
     GenericAPIView,
     ListAPIView,
     ListCreateAPIView,
@@ -85,6 +85,7 @@ from awx.api.generics import (
     SubListCreateAttachDetachAPIView,
     SubListDestroyAPIView,
 )
+from awx.api.views.labels import LabelSubListCreateAttachDetachView
 from awx.api.versioning import reverse
 from awx.main import models
 from awx.main.utils import (
@@ -93,7 +94,7 @@ from awx.main.utils import (
     get_object_or_400,
     getattrd,
     get_pk_from_dict,
-    schedule_task_manager,
+    ScheduleWorkflowManager,
     ignore_inventory_computed_fields,
 )
 from awx.main.utils.encryption import encrypt_value
@@ -121,6 +122,22 @@ from awx.api.views.mixin import (
     UnifiedJobDeletionMixin,
     NoTruncateMixin,
 )
+from awx.api.views.instance_install_bundle import InstanceInstallBundle  # noqa
+from awx.api.views.inventory import (  # noqa
+    InventoryList,
+    InventoryDetail,
+    InventoryUpdateEventsList,
+    InventoryList,
+    InventoryDetail,
+    InventoryActivityStreamList,
+    InventoryInstanceGroupsList,
+    InventoryAccessList,
+    InventoryObjectRolesList,
+    InventoryJobTemplateList,
+    InventoryLabelList,
+    InventoryCopy,
+)
+from awx.api.views.mesh_visualizer import MeshVisualizer  # noqa
 from awx.api.views.organization import (  # noqa
     OrganizationList,
     OrganizationDetail,
@@ -144,21 +161,6 @@ from awx.api.views.organization import (  # noqa
     OrganizationAccessList,
     OrganizationObjectRolesList,
 )
-from awx.api.views.inventory import (  # noqa
-    InventoryList,
-    InventoryDetail,
-    InventoryUpdateEventsList,
-    InventoryList,
-    InventoryDetail,
-    InventoryActivityStreamList,
-    InventoryInstanceGroupsList,
-    InventoryAccessList,
-    InventoryObjectRolesList,
-    InventoryJobTemplateList,
-    InventoryLabelList,
-    InventoryCopy,
-)
-from awx.api.views.mesh_visualizer import MeshVisualizer
 from awx.api.views.root import (  # noqa
     ApiRootView,
     ApiOAuthAuthorizationRootView,
@@ -173,7 +175,6 @@ from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
 from awx.api.pagination import UnifiedJobEventPagination
 from awx.main.utils import set_environ

-
 logger = logging.getLogger('awx.api.views')
@@ -358,7 +359,7 @@ class DashboardJobsGraphView(APIView):
         return Response(dashboard_data)


-class InstanceList(ListAPIView):
+class InstanceList(ListCreateAPIView):

     name = _("Instances")
     model = models.Instance
@@ -397,6 +398,17 @@ class InstanceUnifiedJobsList(SubListAPIView):
         return qs


+class InstancePeersList(SubListAPIView):
+
+    name = _("Instance Peers")
+    parent_model = models.Instance
+    model = models.Instance
+    serializer_class = serializers.InstanceSerializer
+    parent_access = 'read'
+    search_fields = {'hostname'}
+    relationship = 'peers'
+
+
 class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):

     name = _("Instance's Instance Groups")
@@ -439,40 +451,21 @@ class InstanceHealthCheck(GenericAPIView):

     def post(self, request, *args, **kwargs):
         obj = self.get_object()
-        if obj.node_type == 'execution':
-            from awx.main.tasks.system import execution_node_health_check
-
-            runner_data = execution_node_health_check(obj.hostname)
-            obj.refresh_from_db()
-            data = self.get_serializer(data=request.data).to_representation(obj)
-            # Add in some extra unsaved fields
-            for extra_field in ('transmit_timing', 'run_timing'):
-                if extra_field in runner_data:
-                    data[extra_field] = runner_data[extra_field]
-        else:
-            from awx.main.tasks.system import cluster_node_health_check
-
-            if settings.CLUSTER_HOST_ID == obj.hostname:
-                cluster_node_health_check(obj.hostname)
-            else:
-                cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
-                start_time = time.time()
-                prior_check_time = obj.last_health_check
-                while time.time() - start_time < 50.0:
-                    obj.refresh_from_db(fields=['last_health_check'])
-                    if obj.last_health_check != prior_check_time:
-                        break
-                    if time.time() - start_time < 1.0:
-                        time.sleep(0.1)
-                    else:
-                        time.sleep(1.0)
-                else:
-                    obj.mark_offline(errors=_('Health check initiated by user determined this instance to be unresponsive'))
-            obj.refresh_from_db()
-            data = self.get_serializer(data=request.data).to_representation(obj)
-
-        return Response(data, status=status.HTTP_200_OK)
+        if obj.health_check_pending:
+            return Response({'msg': f"Health check was already in progress for {obj.hostname}."}, status=status.HTTP_200_OK)
+
+        # Note: hop nodes are already excluded by the get_queryset method
+        obj.health_check_started = now()
+        obj.save(update_fields=['health_check_started'])
+        if obj.node_type == models.Instance.Types.EXECUTION:
+            from awx.main.tasks.system import execution_node_health_check
+
+            execution_node_health_check.apply_async([obj.hostname])
+        else:
+            from awx.main.tasks.system import cluster_node_health_check
+
+            cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
+        return Response({'msg': f"Health check is running for {obj.hostname}."}, status=status.HTTP_200_OK)


 class InstanceGroupList(ListCreateAPIView):
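With this change the health check endpoint dispatches the work as a background task and returns immediately, instead of running the check inline and blocking the request for up to ~50 seconds. Callers are now expected to poll; a minimal sketch using the `health_check_pending` field the serializer exposes (the controller URL, instance id, and token are assumptions, not part of the diff):

import time
import requests

AWX = "https://awx.example.org"              # hypothetical controller URL
HEADERS = {"Authorization": "Bearer TOKEN"}  # hypothetical API token

# kick off the now-asynchronous health check for instance 5
r = requests.post(f"{AWX}/api/v2/instances/5/health_check/", headers=HEADERS)
print(r.json()["msg"])  # "Health check is running for <hostname>."

# poll the instance detail until the pending flag clears
while requests.get(f"{AWX}/api/v2/instances/5/", headers=HEADERS).json()["health_check_pending"]:
    time.sleep(2)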
@@ -617,6 +610,19 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase):
     parent_model = models.Schedule


+class ScheduleLabelsList(LabelSubListCreateAttachDetachView):
+
+    parent_model = models.Schedule
+
+
+class ScheduleInstanceGroupList(SubListAttachDetachAPIView):
+
+    model = models.InstanceGroup
+    serializer_class = serializers.InstanceGroupSerializer
+    parent_model = models.Schedule
+    relationship = 'instance_groups'
+
+
 class ScheduleUnifiedJobsList(SubListAPIView):

     model = models.UnifiedJob
@@ -2381,10 +2387,13 @@ class JobTemplateLaunch(RetrieveAPIView):
         for field, ask_field_name in modified_ask_mapping.items():
             if not getattr(obj, ask_field_name):
                 data.pop(field, None)
-            elif field == 'inventory':
+            elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
                 data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
-            elif field == 'credentials':
-                data[field] = [cred.id for cred in obj.credentials.all()]
+            elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
+                if field == 'instance_groups':
+                    data[field] = []
+                    continue
+                data[field] = [item.id for item in getattr(obj, field).all()]
             else:
                 data[field] = getattr(obj, field)
         return data
@@ -2397,9 +2406,8 @@ class JobTemplateLaunch(RetrieveAPIView):
         """
         modern_data = data.copy()

-        id_fd = '{}_id'.format('inventory')
-        if 'inventory' not in modern_data and id_fd in modern_data:
-            modern_data['inventory'] = modern_data[id_fd]
+        if 'inventory' not in modern_data and 'inventory_id' in modern_data:
+            modern_data['inventory'] = modern_data['inventory_id']

         # credential passwords were historically provided as top-level attributes
         if 'credential_passwords' not in modern_data:
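The rewrite above replaces hard-coded field names with introspection: accessing a relational field on the model class (not an instance) yields a descriptor whose `.field` attribute is the `ForeignKey` or `ManyToManyField` instance, so any newly promptable relation is serialized the right way without another `elif`. A small sketch of the descriptor shapes, under a hypothetical model and a configured Django environment:

from django.db import models
from django.db.models.fields.related import ForeignKey, ManyToManyField

class Book(models.Model):
    # hypothetical model, defined only to show the descriptor behavior
    author = models.ForeignKey('Author', on_delete=models.CASCADE)
    tags = models.ManyToManyField('Tag')

    class Meta:
        app_label = 'sketch'

# class-level attribute access returns a descriptor carrying the field instance
assert isinstance(getattr(Book, 'author').field, ForeignKey)
assert isinstance(getattr(Book, 'tags').field, ManyToManyField)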
@@ -2719,28 +2727,9 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
         return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)


-class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
+class JobTemplateLabelList(LabelSubListCreateAttachDetachView):

-    model = models.Label
-    serializer_class = serializers.LabelSerializer
     parent_model = models.JobTemplate
-    relationship = 'labels'
-
-    def post(self, request, *args, **kwargs):
-        # If a label already exists in the database, attach it instead of erroring out
-        # that it already exists
-        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
-            existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
-            if existing.exists():
-                existing = existing[0]
-                request.data['id'] = existing.id
-                del request.data['name']
-                del request.data['organization']
-        if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
-            return Response(
-                dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
-            )
-        return super(JobTemplateLabelList, self).post(request, *args, **kwargs)


 class JobTemplateCallback(GenericAPIView):
@@ -2966,6 +2955,22 @@ class WorkflowJobNodeCredentialsList(SubListAPIView):
     relationship = 'credentials'


+class WorkflowJobNodeLabelsList(SubListAPIView):
+
+    model = models.Label
+    serializer_class = serializers.LabelSerializer
+    parent_model = models.WorkflowJobNode
+    relationship = 'labels'
+
+
+class WorkflowJobNodeInstanceGroupsList(SubListAttachDetachAPIView):
+
+    model = models.InstanceGroup
+    serializer_class = serializers.InstanceGroupSerializer
+    parent_model = models.WorkflowJobNode
+    relationship = 'instance_groups'
+
+
 class WorkflowJobTemplateNodeList(ListCreateAPIView):

     model = models.WorkflowJobTemplateNode
@@ -2984,6 +2989,19 @@ class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
     parent_model = models.WorkflowJobTemplateNode


+class WorkflowJobTemplateNodeLabelsList(LabelSubListCreateAttachDetachView):
+
+    parent_model = models.WorkflowJobTemplateNode
+
+
+class WorkflowJobTemplateNodeInstanceGroupsList(SubListAttachDetachAPIView):
+
+    model = models.InstanceGroup
+    serializer_class = serializers.InstanceGroupSerializer
+    parent_model = models.WorkflowJobTemplateNode
+    relationship = 'instance_groups'
+
+
 class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):

     model = models.WorkflowJobTemplateNode
@@ -3196,13 +3214,17 @@ class WorkflowJobTemplateLaunch(RetrieveAPIView):
         data['extra_vars'] = extra_vars
         modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping()
         modified_ask_mapping.pop('extra_vars')
-        for field_name, ask_field_name in obj.get_ask_mapping().items():
+
+        for field, ask_field_name in modified_ask_mapping.items():
             if not getattr(obj, ask_field_name):
-                data.pop(field_name, None)
-            elif field_name == 'inventory':
-                data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None)
+                data.pop(field, None)
+            elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
+                data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
+            elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
+                data[field] = [item.id for item in getattr(obj, field).all()]
             else:
-                data[field_name] = getattr(obj, field_name)
+                data[field] = getattr(obj, field)

         return data

     def post(self, request, *args, **kwargs):
@@ -3391,7 +3413,7 @@ class WorkflowJobCancel(RetrieveAPIView):
         obj = self.get_object()
         if obj.can_cancel:
             obj.cancel()
-            schedule_task_manager()
+            ScheduleWorkflowManager().schedule()
             return Response(status=status.HTTP_202_ACCEPTED)
         else:
             return self.http_method_not_allowed(request, *args, **kwargs)
@@ -3689,15 +3711,21 @@ class JobCreateSchedule(RetrieveAPIView):
             extra_data=config.extra_data,
             survey_passwords=config.survey_passwords,
             inventory=config.inventory,
+            execution_environment=config.execution_environment,
             char_prompts=config.char_prompts,
             credentials=set(config.credentials.all()),
+            labels=set(config.labels.all()),
+            instance_groups=list(config.instance_groups.all()),
         )
         if not request.user.can_access(models.Schedule, 'add', schedule_data):
             raise PermissionDenied()

-        creds_list = schedule_data.pop('credentials')
+        related_fields = ('credentials', 'labels', 'instance_groups')
+        related = [schedule_data.pop(relationship) for relationship in related_fields]
         schedule = models.Schedule.objects.create(**schedule_data)
-        schedule.credentials.add(*creds_list)
+        for relationship, items in zip(related_fields, related):
+            for item in items:
+                getattr(schedule, relationship).add(item)

         data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data
         data.serializer.instance = None  # hack to avoid permissions.py assuming this is Job model
@@ -3839,7 +3867,7 @@ class JobJobEventsList(BaseJobEventsList):
     def get_queryset(self):
         job = self.get_parent_object()
         self.check_parent_access(job)
-        return job.get_event_queryset().select_related('host').order_by('start_line')
+        return job.get_event_queryset().prefetch_related('job__job_template', 'host').order_by('start_line')
@@ -4428,18 +4456,6 @@ class NotificationDetail(RetrieveAPIView):
     serializer_class = serializers.NotificationSerializer


-class LabelList(ListCreateAPIView):
-
-    model = models.Label
-    serializer_class = serializers.LabelSerializer
-
-
-class LabelDetail(RetrieveUpdateAPIView):
-
-    model = models.Label
-    serializer_class = serializers.LabelSerializer
-
-
 class ActivityStreamList(SimpleListAPIView):

     model = models.ActivityStream
new file: awx/api/views/debug.py (68 lines)
@@ -0,0 +1,68 @@
+from collections import OrderedDict
+
+from django.conf import settings
+
+from rest_framework.permissions import AllowAny
+from rest_framework.response import Response
+from awx.api.generics import APIView
+
+from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
+
+
+class TaskManagerDebugView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+    prefix = 'Task'
+
+    def get(self, request):
+        TaskManager().schedule()
+        if not settings.AWX_DISABLE_TASK_MANAGERS:
+            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
+        else:
+            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
+        return Response(msg)
+
+
+class DependencyManagerDebugView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+    prefix = 'Dependency'
+
+    def get(self, request):
+        DependencyManager().schedule()
+        if not settings.AWX_DISABLE_TASK_MANAGERS:
+            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
+        else:
+            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
+        return Response(msg)
+
+
+class WorkflowManagerDebugView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+    prefix = 'Workflow'
+
+    def get(self, request):
+        WorkflowManager().schedule()
+        if not settings.AWX_DISABLE_TASK_MANAGERS:
+            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
+        else:
+            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
+        return Response(msg)
+
+
+class DebugRootView(APIView):
+    _ignore_model_permissions = True
+    exclude_from_schema = True
+    permission_classes = [AllowAny]
+
+    def get(self, request, format=None):
+        '''List of available debug urls'''
+        data = OrderedDict()
+        data['task_manager'] = '/api/debug/task_manager/'
+        data['dependency_manager'] = '/api/debug/dependency_manager/'
+        data['workflow_manager'] = '/api/debug/workflow_manager/'
+        return Response(data)
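Since these views are only wired up when MODE == 'development' (see the urls change above), they can be exercised from a dev deployment with plain GETs; the host and port below are assumptions:

import requests

base = "http://localhost:8013/api/debug/"  # hypothetical dev server address
print(requests.get(base).json())                  # lists the three manager endpoints
print(requests.get(base + "task_manager/").text)  # runs TaskManager().schedule() inline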
new file: awx/api/views/instance_install_bundle.py (199 lines)
@@ -0,0 +1,199 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# All Rights Reserved.
+
+import datetime
+import io
+import ipaddress
+import os
+import tarfile
+
+import asn1
+from awx.api import serializers
+from awx.api.generics import GenericAPIView, Response
+from awx.api.permissions import IsSystemAdminOrAuditor
+from awx.main import models
+from cryptography import x509
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.x509 import DNSName, IPAddress, ObjectIdentifier, OtherName
+from cryptography.x509.oid import NameOID
+from django.http import HttpResponse
+from django.template.loader import render_to_string
+from django.utils.translation import gettext_lazy as _
+from rest_framework import status
+
+# Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.
+RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
+
+
+# generate install bundle for the instance
+# install bundle directory structure
+# ├── install_receptor.yml (playbook)
+# ├── inventory.yml
+# ├── group_vars
+# │   └── all.yml
+# ├── receptor
+# │   ├── tls
+# │   │   ├── ca
+# │   │   │   └── receptor-ca.crt
+# │   │   ├── receptor.crt
+# │   │   └── receptor.key
+# │   └── work-public-key.pem
+# └── requirements.yml
+class InstanceInstallBundle(GenericAPIView):

+    name = _('Install Bundle')
+    model = models.Instance
+    serializer_class = serializers.InstanceSerializer
+    permission_classes = (IsSystemAdminOrAuditor,)
+
+    def get(self, request, *args, **kwargs):
+        instance_obj = self.get_object()
+
+        if instance_obj.node_type not in ('execution',):
+            return Response(
+                data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
+                status=status.HTTP_400_BAD_REQUEST,
+            )
+
+        with io.BytesIO() as f:
+            with tarfile.open(fileobj=f, mode='w:gz') as tar:
+                # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
+                tar.add(
+                    os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
+                )
+
+                # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
+                tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")
+
+                # generate and write the receptor key to receptor/tls/receptor.key in the tar file
+                key, cert = generate_receptor_tls(instance_obj)
+
+                key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
+                key_tarinfo.size = len(key)
+                tar.addfile(key_tarinfo, io.BytesIO(key))
+
+                cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
+                cert_tarinfo.size = len(cert)
+                tar.addfile(cert_tarinfo, io.BytesIO(cert))
+
+                # generate and write install_receptor.yml to the tar file
+                playbook = generate_playbook().encode('utf-8')
+                playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
+                playbook_tarinfo.size = len(playbook)
+                tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
+
+                # generate and write inventory.yml to the tar file
+                inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
+                inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
+                inventory_yml_tarinfo.size = len(inventory_yml)
+                tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
+
+                # generate and write group_vars/all.yml to the tar file
+                group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
+                group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
+                group_vars_tarinfo.size = len(group_vars)
+                tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
+
+                # generate and write requirements.yml to the tar file
+                requirements_yml = generate_requirements_yml().encode('utf-8')
+                requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
+                requirements_yml_tarinfo.size = len(requirements_yml)
+                tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
+
+            # respond with the tarfile
+            f.seek(0)
+            response = HttpResponse(f.read(), status=status.HTTP_200_OK)
+            response['Content-Disposition'] = f"attachment; filename={instance_obj.hostname}_install_bundle.tar.gz"
+            return response
+
+
+def generate_playbook():
+    return render_to_string("instance_install_bundle/install_receptor.yml")
+
+
+def generate_requirements_yml():
+    return render_to_string("instance_install_bundle/requirements.yml")
+
+
+def generate_inventory_yml(instance_obj):
+    return render_to_string("instance_install_bundle/inventory.yml", context=dict(instance=instance_obj))
+
+
+def generate_group_vars_all_yml(instance_obj):
+    return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
+
+
+def generate_receptor_tls(instance_obj):
+    # generate private key for the receptor
+    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
+
+    # encode receptor hostname to asn1
+    hostname = instance_obj.hostname
+    encoder = asn1.Encoder()
+    encoder.start()
+    encoder.write(hostname.encode(), nr=asn1.Numbers.UTF8String)
+    hostname_asn1 = encoder.output()
+
+    san_params = [
+        DNSName(hostname),
+        OtherName(ObjectIdentifier(RECEPTOR_OID), hostname_asn1),
+    ]
+
+    try:
+        san_params.append(IPAddress(ipaddress.IPv4Address(hostname)))
+    except ipaddress.AddressValueError:
+        pass
+
+    # generate certificate for the receptor
+    csr = (
+        x509.CertificateSigningRequestBuilder()
+        .subject_name(
+            x509.Name(
+                [
+                    x509.NameAttribute(NameOID.COMMON_NAME, hostname),
+                ]
+            )
+        )
+        .add_extension(
+            x509.SubjectAlternativeName(san_params),
+            critical=False,
+        )
+        .sign(key, hashes.SHA256())
+    )
+
+    # sign csr with the receptor ca key from /etc/receptor/ca/receptor-ca.key
+    with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
+        ca_key = serialization.load_pem_private_key(
+            f.read(),
+            password=None,
+        )
+
+    with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
+        ca_cert = x509.load_pem_x509_certificate(f.read())
+
+    cert = (
+        x509.CertificateBuilder()
+        .subject_name(csr.subject)
+        .issuer_name(ca_cert.issuer)
+        .public_key(csr.public_key())
+        .serial_number(x509.random_serial_number())
+        .not_valid_before(datetime.datetime.utcnow())
+        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10))
+        .add_extension(
+            csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,
+            critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,
+        )
+        .sign(ca_key, hashes.SHA256())
+    )
+
+    key = key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.TraditionalOpenSSL,
+        encryption_algorithm=serialization.NoEncryption(),
+    )
+
+    cert = cert.public_bytes(
+        encoding=serialization.Encoding.PEM,
+    )
+
+    return key, cert
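Putting the pieces together, a superuser can download and unpack the bundle in a few lines; a hedged sketch (controller URL, instance id, and token are assumptions, not part of the diff):

import io
import tarfile

import requests

AWX = "https://awx.example.org"              # hypothetical controller URL
HEADERS = {"Authorization": "Bearer TOKEN"}  # hypothetical admin token

# fetch the gzipped tar produced by InstanceInstallBundle.get()
r = requests.get(f"{AWX}/api/v2/instances/5/install_bundle/", headers=HEADERS)
with tarfile.open(fileobj=io.BytesIO(r.content), mode="r:gz") as tar:
    # yields <hostname>_install_bundle/ with the playbook, TLS material, and inventory
    tar.extractall(".")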
@@ -18,8 +18,6 @@ from rest_framework import status
 # AWX
 from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
-
-from awx.main.models.label import Label
 from awx.api.generics import (
     ListCreateAPIView,
     RetrieveUpdateDestroyAPIView,
@@ -27,9 +25,8 @@ from awx.api.generics import (
     SubListAttachDetachAPIView,
     ResourceAccessList,
     CopyAPIView,
-    DeleteLastUnattachLabelMixin,
-    SubListCreateAttachDetachAPIView,
 )
+from awx.api.views.labels import LabelSubListCreateAttachDetachView


 from awx.api.serializers import (
@@ -39,7 +36,6 @@ from awx.api.serializers import (
     InstanceGroupSerializer,
     InventoryUpdateEventSerializer,
     JobTemplateSerializer,
-    LabelSerializer,
 )
 from awx.api.views.mixin import RelatedJobsPreventDeleteMixin

@@ -157,28 +153,9 @@ class InventoryJobTemplateList(SubListAPIView):
         return qs.filter(inventory=parent)


-class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView):
+class InventoryLabelList(LabelSubListCreateAttachDetachView):

-    model = Label
-    serializer_class = LabelSerializer
     parent_model = Inventory
-    relationship = 'labels'
-
-    def post(self, request, *args, **kwargs):
-        # If a label already exists in the database, attach it instead of erroring out
-        # that it already exists
-        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
-            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
-            if existing.exists():
-                existing = existing[0]
-                request.data['id'] = existing.id
-                del request.data['name']
-                del request.data['organization']
-        if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() > 100:
-            return Response(
-                dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
-            )
-        return super(InventoryLabelList, self).post(request, *args, **kwargs)


 class InventoryCopy(CopyAPIView):
awx/api/views/labels.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+# AWX
+from awx.api.generics import SubListCreateAttachDetachAPIView, RetrieveUpdateAPIView, ListCreateAPIView
+from awx.main.models import Label
+from awx.api.serializers import LabelSerializer
+
+# Django
+from django.utils.translation import gettext_lazy as _
+
+# Django REST Framework
+from rest_framework.response import Response
+from rest_framework.status import HTTP_400_BAD_REQUEST
+
+
+class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView):
+    """
+    For related labels lists like /api/v2/inventories/N/labels/
+
+    We want the last instance to be deleted from the database
+    when the last disassociate happens.
+
+    Subclasses need to define parent_model
+    """
+
+    model = Label
+    serializer_class = LabelSerializer
+    relationship = 'labels'
+
+    def unattach(self, request, *args, **kwargs):
+        (sub_id, res) = super().unattach_validate(request)
+        if res:
+            return res
+
+        res = super().unattach_by_id(request, sub_id)
+
+        obj = self.model.objects.get(id=sub_id)
+
+        if obj.is_detached():
+            obj.delete()
+
+        return res
+
+    def post(self, request, *args, **kwargs):
+        # If a label already exists in the database, attach it instead of erroring out
+        # that it already exists
+        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
+            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
+            if existing.exists():
+                existing = existing[0]
+                request.data['id'] = existing.id
+                del request.data['name']
+                del request.data['organization']
+
+        # Give a 400 error if we have attached too many labels to this object
+        label_filter = self.parent_model._meta.get_field(self.relationship).remote_field.name
+        if Label.objects.filter(**{label_filter: self.kwargs['pk']}).count() > 100:
+            return Response(dict(msg=_(f'Maximum number of labels for {self.parent_model._meta.verbose_name_raw} reached.')), status=HTTP_400_BAD_REQUEST)
+
+        return super().post(request, *args, **kwargs)
+
+
+class LabelDetail(RetrieveUpdateAPIView):
+
+    model = Label
+    serializer_class = LabelSerializer
+
+
+class LabelList(ListCreateAPIView):
+
+    name = _("Labels")
+    model = Label
+    serializer_class = LabelSerializer
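A hedged usage sketch of the behavior the new view centralizes: POSTing a name/organization pair to a related labels list either creates the label or silently attaches the existing one. The URL and credentials here are placeholders:

```python
# Sketch: attach-or-create against /api/v2/inventories/<pk>/labels/
import requests

resp = requests.post(
    'https://awx.example.com/api/v2/inventories/42/labels/',
    json={'name': 'prod', 'organization': 1},
    auth=('admin', 'secret'),
)
# Expect a 400 with a "Maximum number of labels ... reached." message once the
# parent object already carries more than 100 labels.
print(resp.status_code, resp.text)
```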
@@ -80,7 +80,7 @@ def _ctit_db_wrapper(trans_safe=False):
         yield
     except DBError as exc:
         if trans_safe:
-            level = logger.exception
+            level = logger.warning
             if isinstance(exc, ProgrammingError):
                 if 'relation' in str(exc) and 'does not exist' in str(exc):
                     # this generally means we can't fetch Tower configuration
@@ -89,7 +89,7 @@ def _ctit_db_wrapper(trans_safe=False):
                     # has come up *before* the database has finished migrating, and
                     # especially that the conf.settings table doesn't exist yet
                     level = logger.debug
-            level('Database settings are not available, using defaults.')
+            level(f'Database settings are not available, using defaults. error: {str(exc)}')
         else:
             logger.exception('Error modifying something related to database settings.')
     finally:
@@ -12,7 +12,7 @@ from django.conf import settings
 from django.db.models import Q, Prefetch
 from django.contrib.auth.models import User
 from django.utils.translation import gettext_lazy as _
-from django.core.exceptions import ObjectDoesNotExist
+from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
 
 # Django REST Framework
 from rest_framework.exceptions import ParseError, PermissionDenied
@@ -281,13 +281,23 @@ class BaseAccess(object):
         """
         return True
 
+    def assure_relationship_exists(self, obj, relationship):
+        if '.' in relationship:
+            return  # not attempting validation for complex relationships now
+        try:
+            obj._meta.get_field(relationship)
+        except FieldDoesNotExist:
+            raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}')
+
     def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
+        self.assure_relationship_exists(obj, relationship)
         if skip_sub_obj_read_check:
             return self.can_change(obj, None)
         else:
             return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj))
 
     def can_unattach(self, obj, sub_obj, relationship, data=None):
+        self.assure_relationship_exists(obj, relationship)
         return self.can_change(obj, data)
 
     def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False):
@@ -328,6 +338,8 @@ class BaseAccess(object):
         role = getattr(resource, role_field, None)
         if role is None:
             # Handle special case where resource does not have direct roles
+            if role_field == 'read_role':
+                return self.user.can_access(type(resource), 'read', resource)
             access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
             return self.user.can_access(type(resource), access_method_type, resource, None)
         return self.user in role
@@ -499,6 +511,21 @@ class BaseAccess(object):
         return False
 
 
+class UnifiedCredentialsMixin(BaseAccess):
+    """
+    The credentials many-to-many is a standard relationship for JT, jobs, and others
+    Permission to attach is always use permission, and permission to unattach is admin to the parent object
+    """
+
+    @check_superuser
+    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
+        if relationship == 'credentials':
+            if not isinstance(sub_obj, Credential):
+                raise RuntimeError(f'Can only attach credentials to credentials relationship, got {type(sub_obj)}')
+            return self.can_change(obj, None) and (self.user in sub_obj.use_role)
+        return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
+
+
 class NotificationAttachMixin(BaseAccess):
     """For models that can have notifications attached
 
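A sketch of how an access class opts into the shared rule set, assuming this PR is merged (the subclass here is hypothetical; the real adopters in this diff are InventorySourceAccess, JobTemplateAccess, JobLaunchConfigAccess, WorkflowJobTemplateNodeAccess, and ScheduleAccess):

```python
# Sketch: inheriting the shared credentials attach/unattach rules.
from awx.main.access import BaseAccess, UnifiedCredentialsMixin
from awx.main.models import Schedule  # stand-in parent model


class MyScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
    model = Schedule
    # can_attach('credentials', ...): change on the parent AND use_role on the credential
    # can_unattach(...): falls through to BaseAccess, i.e. change on the parent
```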
@@ -552,7 +579,8 @@ class InstanceAccess(BaseAccess):
         return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, relationship, data=data)
 
     def can_add(self, data):
-        return False
+        return self.user.is_superuser
 
     def can_change(self, obj, data):
         return False
@@ -1031,7 +1059,7 @@ class GroupAccess(BaseAccess):
         return bool(obj and self.user in obj.inventory.admin_role)
 
 
-class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
+class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
     """
     I can see inventory sources whenever I can see their inventory.
     I can change inventory sources whenever I can change their inventory.
@@ -1075,18 +1103,6 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
             return self.user in obj.inventory.update_role
         return False
 
-    @check_superuser
-    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        if relationship == 'credentials' and isinstance(sub_obj, Credential):
-            return obj and obj.inventory and self.user in obj.inventory.admin_role and self.user in sub_obj.use_role
-        return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
-
-    @check_superuser
-    def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
-        if relationship == 'credentials' and isinstance(sub_obj, Credential):
-            return obj and obj.inventory and self.user in obj.inventory.admin_role
-        return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
-
 
 class InventoryUpdateAccess(BaseAccess):
     """
@@ -1485,7 +1501,7 @@ class ProjectUpdateAccess(BaseAccess):
         return obj and self.user in obj.project.admin_role
 
 
-class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
+class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
     """
     I can see job templates when:
      - I have read role for the job template.
@@ -1549,8 +1565,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
             if self.user not in inventory.use_role:
                 return False
 
-        ee = get_value(ExecutionEnvironment, 'execution_environment')
-        if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
+        if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
             return False
 
         project = get_value(Project, 'project')
@@ -1600,10 +1615,8 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
             if self.changes_are_non_sensitive(obj, data):
                 return True
 
-        if data.get('execution_environment'):
-            ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
-            if not self.user.can_access(ExecutionEnvironment, 'read', ee):
-                return False
+        if not self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'):
+            return False
 
         for required_field, cls in (('inventory', Inventory), ('project', Project)):
             is_mandatory = True
@@ -1667,17 +1680,13 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
             if not obj.organization:
                 return False
             return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
-        if relationship == 'credentials' and isinstance(sub_obj, Credential):
-            return self.user in obj.admin_role and self.user in sub_obj.use_role
         return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
 
     @check_superuser
     def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
         if relationship == "instance_groups":
             return self.can_attach(obj, sub_obj, relationship, *args, **kwargs)
-        if relationship == 'credentials' and isinstance(sub_obj, Credential):
-            return self.user in obj.admin_role
-        return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
+        return super(JobTemplateAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs)
 
 
 class JobAccess(BaseAccess):
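The hand-rolled execution environment checks above are folded into `check_related` with `role_field='read_role'`. Roughly, the rule it enforces looks like this (a simplified sketch, not the actual implementation, which also skips the check when the value is unchanged on the existing object):

```python
# Simplified sketch of check_related(..., role_field='read_role')
from awx.main.models import ExecutionEnvironment


def user_can_reference_ee(user, data):
    ee = (data or {}).get('execution_environment')
    if not ee:
        return True  # field absent or null: nothing new to check
    return user.can_access(ExecutionEnvironment, 'read', ee)
```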
@@ -1824,7 +1833,7 @@ class SystemJobAccess(BaseAccess):
         return False  # no relaunching of system jobs
 
 
-class JobLaunchConfigAccess(BaseAccess):
+class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
     """
     Launch configs must have permissions checked for
     - relaunching
@@ -1832,63 +1841,69 @@ class JobLaunchConfigAccess(BaseAccess):
 
     In order to create a new object with a copy of this launch config, I need:
      - use access to related inventory (if present)
+     - read access to Execution Environment (if present), unless the specified ee is already in the template
      - use role to many-related credentials (if any present)
+     - read access to many-related labels (if any present), unless the specified label is already in the template
+     - read access to many-related instance groups (if any present), unless the specified instance group is already in the template
     """
 
     model = JobLaunchConfig
     select_related = 'job'
     prefetch_related = ('credentials', 'inventory')
 
-    def _unusable_creds_exist(self, qs):
-        return qs.exclude(pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')).exists()
+    M2M_CHECKS = {'credentials': Credential, 'labels': Label, 'instance_groups': InstanceGroup}
 
-    def has_credentials_access(self, obj):
-        # user has access if no related credentials exist that the user lacks use role for
-        return not self._unusable_creds_exist(obj.credentials)
+    def _related_filtered_queryset(self, cls):
+        if cls is Label:
+            return LabelAccess(self.user).filtered_queryset()
+        elif cls is InstanceGroup:
+            return InstanceGroupAccess(self.user).filtered_queryset()
+        else:
+            return cls._accessible_pk_qs(cls, self.user, 'use_role')
+
+    def has_obj_m2m_access(self, obj):
+        for relationship, cls in self.M2M_CHECKS.items():
+            if getattr(obj, relationship).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
+                return False
+        return True
 
     @check_superuser
     def can_add(self, data, template=None):
         # This is a special case, we don't check related many-to-many elsewhere
         # launch RBAC checks use this
-        if 'credentials' in data and data['credentials'] or 'reference_obj' in data:
-            if 'reference_obj' in data:
-                prompted_cred_qs = data['reference_obj'].credentials.all()
-            else:
-                # If given model objects, only use the primary key from them
-                cred_pks = [cred.pk for cred in data['credentials']]
-                if template:
-                    for cred in template.credentials.all():
-                        if cred.pk in cred_pks:
-                            cred_pks.remove(cred.pk)
-                prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
-            if self._unusable_creds_exist(prompted_cred_qs):
+        if 'reference_obj' in data:
+            if not self.has_obj_m2m_access(data['reference_obj']):
                 return False
-        return self.check_related('inventory', Inventory, data, role_field='use_role')
+        else:
+            for relationship, cls in self.M2M_CHECKS.items():
+                if relationship in data and data[relationship]:
+                    # If given model objects, only use the primary key from them
+                    sub_obj_pks = [sub_obj.pk for sub_obj in data[relationship]]
+                    if template:
+                        for sub_obj in getattr(template, relationship).all():
+                            if sub_obj.pk in sub_obj_pks:
+                                sub_obj_pks.remove(sub_obj.pk)
+                    if cls.objects.filter(pk__in=sub_obj_pks).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
+                        return False
+        return self.check_related('inventory', Inventory, data, role_field='use_role') and self.check_related(
+            'execution_environment', ExecutionEnvironment, data, role_field='read_role'
+        )
 
     @check_superuser
     def can_use(self, obj):
-        return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj)
+        return (
+            self.has_obj_m2m_access(obj)
+            and self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True)
+            and self.check_related('execution_environment', ExecutionEnvironment, {}, obj=obj, role_field='read_role')
+        )
 
     def can_change(self, obj, data):
-        return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role')
-
-    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        if isinstance(sub_obj, Credential) and relationship == 'credentials':
-            return self.user in sub_obj.use_role
-        else:
-            raise NotImplementedError('Only credentials can be attached to launch configurations.')
-
-    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        if isinstance(sub_obj, Credential) and relationship == 'credentials':
-            if skip_sub_obj_read_check:
-                return True
-            else:
-                return self.user in sub_obj.read_role
-        else:
-            raise NotImplementedError('Only credentials can be attached to launch configurations.')
+        return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') and self.check_related(
+            'execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'
+        )
 
 
-class WorkflowJobTemplateNodeAccess(BaseAccess):
+class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
     """
     I can see/use a WorkflowJobTemplateNode if I have read permission
     to associated Workflow Job Template
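The launch-config rework generalizes the old credentials-only rule to every prompted many-to-many. One detail worth calling out is the "already in the template" carve-out in `can_add` above: only the delta between what was prompted and what the template already carries is RBAC-checked. A self-contained conceptual sketch of that subtraction:

```python
# Conceptual sketch of the prompted-resource rule introduced above: objects the
# template itself already carries are exempt from re-checking against the
# user's accessible querysets.
def pks_needing_check(prompted_objs, template_objs):
    prompted = {o.pk for o in prompted_objs}
    on_template = {o.pk for o in template_objs}
    return prompted - on_template
```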
@@ -1911,7 +1926,7 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
     """
 
     model = WorkflowJobTemplateNode
-    prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'credentials', 'workflow_job_template')
+    prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'workflow_job_template')
 
     def filtered_queryset(self):
         return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role'))
@@ -1923,7 +1938,8 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
         return (
             self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True)
             and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role')
-            and JobLaunchConfigAccess(self.user).can_add(data)
+            and self.check_related('inventory', Inventory, data, role_field='use_role')
+            and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
         )
 
     def wfjt_admin(self, obj):
@@ -1932,17 +1948,14 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
         else:
             return self.user in obj.workflow_job_template.admin_role
 
-    def ujt_execute(self, obj):
+    def ujt_execute(self, obj, data=None):
         if not obj.unified_job_template:
             return True
-        return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj, role_field='execute_role', mandatory=True)
+        return self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True)
 
     def can_change(self, obj, data):
-        if not data:
-            return True
-
         # should not be able to edit the prompts if lacking access to UJT or WFJT
-        return self.ujt_execute(obj) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)
+        return self.ujt_execute(obj, data=data) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)
 
     def can_delete(self, obj):
         return self.wfjt_admin(obj)
@@ -1955,29 +1968,14 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
         return True
 
     def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        if not self.wfjt_admin(obj):
-            return False
-        if relationship == 'credentials':
-            # Need permission to related template to attach a credential
-            if not self.ujt_execute(obj):
-                return False
-            return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
-        elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
-            return self.check_same_WFJT(obj, sub_obj)
-        else:
-            raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
+        if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
+            return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj)
+        return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
 
-    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        if not self.wfjt_admin(obj):
-            return False
-        if relationship == 'credentials':
-            if not self.ujt_execute(obj):
-                return False
-            return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
-        elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
-            return self.check_same_WFJT(obj, sub_obj)
-        else:
-            raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
+    def can_unattach(self, obj, sub_obj, relationship, data=None):
+        if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
+            return self.wfjt_admin(obj)
+        return super().can_unattach(obj, sub_obj, relationship, data=None)
 
 
 class WorkflowJobNodeAccess(BaseAccess):
@@ -2052,13 +2050,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
         if not data:  # So the browseable API will work
             return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
 
-        if data.get('execution_environment'):
-            ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
-            if not self.user.can_access(ExecutionEnvironment, 'read', ee):
-                return False
-
-        return self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and self.check_related(
-            'inventory', Inventory, data, role_field='use_role'
+        return bool(
+            self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True)
+            and self.check_related('inventory', Inventory, data, role_field='use_role')
+            and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
         )
 
     def can_copy(self, obj):
@@ -2104,14 +2099,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
         if self.user.is_superuser:
             return True
 
-        if data and data.get('execution_environment'):
-            ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
-            if not self.user.can_access(ExecutionEnvironment, 'read', ee):
-                return False
-
         return (
             self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj)
             and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj)
+            and self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role')
             and self.user in obj.admin_role
         )
 
@@ -2518,7 +2509,7 @@ class UnifiedJobAccess(BaseAccess):
         return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True)
 
 
-class ScheduleAccess(BaseAccess):
+class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
     """
     I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access
     """
@@ -2559,12 +2550,6 @@ class ScheduleAccess(BaseAccess):
     def can_delete(self, obj):
         return self.can_change(obj, {})
 
-    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
-
-    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
-        return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
-
 
 class NotificationTemplateAccess(BaseAccess):
     """
@@ -16,6 +16,7 @@ from awx.conf.license import get_license
 from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
 from awx.main import models
 from awx.main.analytics import register
+from awx.main.scheduler.task_manager_models import TaskManagerInstances
 
 """
 This module is used to define metrics collected by awx.main.analytics.gather()
@@ -235,25 +236,25 @@ def projects_by_scm_type(since, **kwargs):
 @register('instance_info', '1.2', description=_('Cluster topology and capacity'))
 def instance_info(since, include_hostnames=False, **kwargs):
     info = {}
-    instances = models.Instance.objects.values_list('hostname').values(
-        'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'enabled'
-    )
-    for instance in instances:
-        consumed_capacity = sum(x.task_impact for x in models.UnifiedJob.objects.filter(execution_node=instance['hostname'], status__in=('running', 'waiting')))
+    # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
+    active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
+    tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
+    for tm_instance in tm_instances.instances_by_hostname.values():
+        instance = tm_instance.obj
         instance_info = {
-            'uuid': instance['uuid'],
-            'version': instance['version'],
-            'capacity': instance['capacity'],
-            'cpu': instance['cpu'],
-            'memory': instance['memory'],
-            'managed_by_policy': instance['managed_by_policy'],
-            'enabled': instance['enabled'],
-            'consumed_capacity': consumed_capacity,
-            'remaining_capacity': instance['capacity'] - consumed_capacity,
+            'uuid': instance.uuid,
+            'version': instance.version,
+            'capacity': instance.capacity,
+            'cpu': instance.cpu,
+            'memory': instance.memory,
+            'managed_by_policy': instance.managed_by_policy,
+            'enabled': instance.enabled,
+            'consumed_capacity': tm_instance.consumed_capacity,
+            'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
         }
         if include_hostnames is True:
-            instance_info['hostname'] = instance['hostname']
-        info[instance['uuid']] = instance_info
+            instance_info['hostname'] = instance.hostname
+        info[instance.uuid] = instance_info
     return info
 
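The analytics change swaps one `UnifiedJob` query per instance for a single bulk query over all active tasks. The consumed-capacity aggregation it relies on is conceptually just a grouped sum, as in this simplified sketch (the real `TaskManagerInstances` also distinguishes controller and execution nodes):

```python
# Sketch: bulk consumed-capacity roll-up over active tasks.
from collections import defaultdict


def consumed_capacity_by_node(active_tasks):
    totals = defaultdict(int)
    for task in active_tasks:  # queryset restricted with .only('task_impact', 'controller_node', 'execution_node')
        totals[task.execution_node] += task.task_impact
    return totals
```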
@@ -3,6 +3,7 @@ from prometheus_client import CollectorRegistry, Gauge, Info, generate_latest
 
 from awx.conf.license import get_license
 from awx.main.utils import get_awx_version
+from awx.main.models import UnifiedJob
 from awx.main.analytics.collectors import (
     counts,
     instance_info,
@@ -169,8 +170,9 @@ def metrics():
 
     all_job_data = job_counts(None)
     statuses = all_job_data.get('status', {})
-    for status, value in statuses.items():
-        STATUS.labels(status=status).set(value)
+    states = set(dict(UnifiedJob.STATUS_CHOICES).keys()) - set(['new'])
+    for state in states:
+        STATUS.labels(status=state).set(statuses.get(state, 0))
 
     RUNNING_JOBS.set(current_counts['running_jobs'])
     PENDING_JOBS.set(current_counts['pending_jobs'])
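Iterating over every known state, rather than only the states present in the query result, matters for Prometheus gauges: a labeled gauge keeps exporting its last value, so a status that drops to zero must be reset explicitly. A standalone illustration with `prometheus_client` (metric name hypothetical):

```python
from prometheus_client import CollectorRegistry, Gauge

registry = CollectorRegistry()
status_gauge = Gauge('awx_status_total', 'Job counts by status', ['status'], registry=registry)

counts = {'successful': 5}  # 'failed' reported a value last scrape but is absent now
for state in ('successful', 'failed', 'running'):
    status_gauge.labels(status=state).set(counts.get(state, 0))  # zero-fill absent states
```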
@@ -166,7 +166,11 @@ class Metrics:
         elif settings.IS_TESTING():
             self.instance_name = "awx_testing"
         else:
-            self.instance_name = Instance.objects.me().hostname
+            try:
+                self.instance_name = Instance.objects.me().hostname
+            except Exception as e:
+                self.instance_name = settings.CLUSTER_HOST_ID
+                logger.info(f'Instance {self.instance_name} seems to be unregistered, error: {e}')
 
         # metric name, help_text
         METRICSLIST = [
@@ -184,19 +188,29 @@ class Metrics:
             FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
             IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
             FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
-            SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading all tasks from db'),
+            SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
             SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
             SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
             SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
-            SetFloatM('task_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
-            SetFloatM('task_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow jobs'),
             SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
-            IntM('task_manager_schedule_calls', 'Number of calls to task manager schedule'),
+            IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
             SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
             SetIntM('task_manager_tasks_started', 'Number of tasks started'),
             SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
             SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
             SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
+            SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
+            SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
+            SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
+            SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
+            SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
+            SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
         ]
         # turn metric list into dictionary with the metric name as a key
         self.METRICS = {}
@@ -303,7 +317,12 @@ class Metrics:
             self.previous_send_metrics.set(current_time)
             self.previous_send_metrics.store_value(self.conn)
         finally:
-            lock.release()
+            try:
+                lock.release()
+            except Exception as exc:
+                # After system failures, we might throw redis.exceptions.LockNotOwnedError
+                # this is to avoid print a Traceback, and importantly, avoid raising an exception into parent context
+                logger.warning(f'Error releasing subsystem metrics redis lock, error: {str(exc)}')
 
     def load_other_metrics(self, request):
         # data received from other nodes are stored in their own keys
@@ -446,7 +446,7 @@ register(
     label=_('Default Job Idle Timeout'),
     help_text=_(
         'If no output is detected from ansible in this number of seconds the execution will be terminated. '
-        'Use value of 0 to used default idle_timeout is 600s.'
+        'Use value of 0 to indicate that no idle timeout should be imposed.'
     ),
     category=_('Jobs'),
     category_slug='jobs',
@@ -4,6 +4,7 @@ import select
 from contextlib import contextmanager
 
 from django.conf import settings
+from django.db import connection as pg_connection
 
 
 NOT_READY = ([], [], [])
@@ -15,7 +16,6 @@ def get_local_queuename():
 
 class PubSub(object):
     def __init__(self, conn):
-        assert conn.autocommit, "Connection must be in autocommit mode."
         self.conn = conn
 
     def listen(self, channel):
@@ -31,6 +31,9 @@ class PubSub(object):
             cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))
 
     def events(self, select_timeout=5, yield_timeouts=False):
+        if not self.conn.autocommit:
+            raise RuntimeError('Listening for events can only be done in autocommit mode')
+
         while True:
             if select.select([self.conn], [], [], select_timeout) == NOT_READY:
                 if yield_timeouts:
@@ -45,11 +48,32 @@ class PubSub(object):
 
 
 @contextmanager
-def pg_bus_conn():
-    conf = settings.DATABASES['default']
-    conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {}))
-    # Django connection.cursor().connection doesn't have autocommit=True on
-    conn.set_session(autocommit=True)
+def pg_bus_conn(new_connection=False):
+    '''
+    Any listeners probably want to establish a new database connection,
+    separate from the Django connection used for queries, because that will prevent
+    losing connection to the channel whenever a .close() happens.
+
+    Any publishers probably want to use the existing connection
+    so that messages follow postgres transaction rules
+    https://www.postgresql.org/docs/current/sql-notify.html
+    '''
+
+    if new_connection:
+        conf = settings.DATABASES['default']
+        conn = psycopg2.connect(
+            dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
+        )
+        # Django connection.cursor().connection doesn't have autocommit=True on by default
+        conn.set_session(autocommit=True)
+    else:
+        if pg_connection.connection is None:
+            pg_connection.connect()
+        if pg_connection.connection is None:
+            raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
+        conn = pg_connection.connection
 
     pubsub = PubSub(conn)
     yield pubsub
-    conn.close()
+    if new_connection:
+        conn.close()
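A hedged usage sketch of the new contract: listeners hold a dedicated autocommit connection so Django closing its own connection cannot drop the LISTEN channel, while publishers ride the shared connection so NOTIFY respects the surrounding transaction:

```python
from awx.main.dispatch import pg_bus_conn


def listen_forever(channel):
    with pg_bus_conn(new_connection=True) as conn:  # dedicated autocommit connection
        conn.listen(channel)
        for event in conn.events(select_timeout=5, yield_timeouts=True):
            if event is not None:
                print(event)


def publish(channel, payload):
    with pg_bus_conn() as conn:  # reuses Django's connection; NOTIFY follows its transaction
        conn.notify(channel, payload)
```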
@@ -37,18 +37,24 @@ class Control(object):
     def running(self, *args, **kwargs):
         return self.control_with_reply('running', *args, **kwargs)
 
+    def cancel(self, task_ids, *args, **kwargs):
+        return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
+
     @classmethod
     def generate_reply_queue_name(cls):
         return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"
 
-    def control_with_reply(self, command, timeout=5):
+    def control_with_reply(self, command, timeout=5, extra_data=None):
         logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename))
         reply_queue = Control.generate_reply_queue_name()
         self.result = None
 
-        with pg_bus_conn() as conn:
+        with pg_bus_conn(new_connection=True) as conn:
             conn.listen(reply_queue)
-            conn.notify(self.queuename, json.dumps({'control': command, 'reply_to': reply_queue}))
+            send_data = {'control': command, 'reply_to': reply_queue}
+            if extra_data:
+                send_data.update(extra_data)
+            conn.notify(self.queuename, json.dumps(send_data))
 
             for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
                 if reply is None:
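A sketch of driving the new control message (service name and reply plumbing as in the hunk above; the UUID is a placeholder):

```python
from awx.main.dispatch.control import Control

# Broadcasts {'control': 'cancel', 'task_ids': [...], 'reply_to': ...} over
# pg_notify and collects replies until the timeout elapses.
result = Control('dispatcher').cancel(task_ids=['0c0c7f0a-0000-0000-0000-000000000000'])
```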
@@ -16,13 +16,14 @@ from queue import Full as QueueFull, Empty as QueueEmpty
 from django.conf import settings
 from django.db import connection as django_connection, connections
 from django.core.cache import cache as django_cache
+from django.utils.timezone import now as tz_now
 from django_guid import set_guid
 from jinja2 import Template
 import psutil
 
 from awx.main.models import UnifiedJob
 from awx.main.dispatch import reaper
-from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
+from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity, log_excess_runtime
 
 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -328,12 +329,16 @@ class AutoscalePool(WorkerPool):
             # Get same number as max forks based on memory, this function takes memory as bytes
             self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
 
+        # add magic prime number of extra workers to ensure
+        # we have a few extra workers to run the heartbeat
+        self.max_workers += 7
+
         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)
 
-    def debug(self, *args, **kwargs):
-        self.cleanup()
-        return super(AutoscalePool, self).debug(*args, **kwargs)
+        # the task manager enforces settings.TASK_MANAGER_TIMEOUT on its own
+        # but if the task takes longer than the time defined here, we will force it to stop here
+        self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD
 
     @property
     def should_grow(self):
@@ -351,6 +356,7 @@ class AutoscalePool(WorkerPool):
     def debug_meta(self):
         return 'min={} max={}'.format(self.min_workers, self.max_workers)
 
+    @log_excess_runtime(logger)
     def cleanup(self):
         """
         Perform some internal account and cleanup. This is run on
@@ -359,8 +365,6 @@ class AutoscalePool(WorkerPool):
         1. Discover worker processes that exited, and recover messages they
            were handling.
         2. Clean up unnecessary, idle workers.
-        3. Check to see if the database says this node is running any tasks
-           that aren't actually running.  If so, reap them.
 
         IMPORTANT: this function is one of the few places in the dispatcher
         (aside from setting lookups) where we talk to the database.  As such,
@@ -401,13 +405,15 @@ class AutoscalePool(WorkerPool):
                     # the task manager to never do more work
                     current_task = w.current_task
                     if current_task and isinstance(current_task, dict):
-                        if current_task.get('task', '').endswith('tasks.run_task_manager'):
+                        endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
+                        current_task_name = current_task.get('task', '')
+                        if any(current_task_name.endswith(e) for e in endings):
                             if 'started' not in current_task:
                                 w.managed_tasks[current_task['uuid']]['started'] = time.time()
                             age = time.time() - current_task['started']
                             w.managed_tasks[current_task['uuid']]['age'] = age
-                            if age > (60 * 5):
-                                logger.error(f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}')  # noqa
+                            if age > self.task_manager_timeout:
+                                logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
                                 os.kill(w.pid, signal.SIGTERM)
 
         for m in orphaned:
@@ -417,13 +423,17 @@ class AutoscalePool(WorkerPool):
             idx = random.choice(range(len(self.workers)))
             self.write(idx, m)
 
-        # if the database says a job is running on this node, but it's *not*,
-        # then reap it
-        running_uuids = []
-        for worker in self.workers:
-            worker.calculate_managed_tasks()
-            running_uuids.extend(list(worker.managed_tasks.keys()))
-        reaper.reap(excluded_uuids=running_uuids)
+    def add_bind_kwargs(self, body):
+        bind_kwargs = body.pop('bind_kwargs', [])
+        body.setdefault('kwargs', {})
+        if 'dispatch_time' in bind_kwargs:
+            body['kwargs']['dispatch_time'] = tz_now().isoformat()
+        if 'worker_tasks' in bind_kwargs:
+            worker_tasks = {}
+            for worker in self.workers:
+                worker.calculate_managed_tasks()
+                worker_tasks[worker.pid] = list(worker.managed_tasks.keys())
+            body['kwargs']['worker_tasks'] = worker_tasks
 
     def up(self):
         if self.full:
@@ -438,6 +448,8 @@ class AutoscalePool(WorkerPool):
         if 'guid' in body:
             set_guid(body['guid'])
         try:
+            if isinstance(body, dict) and body.get('bind_kwargs'):
+                self.add_bind_kwargs(body)
             # when the cluster heartbeat occurs, clean up internally
             if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
                 self.cleanup()
@@ -452,6 +464,10 @@ class AutoscalePool(WorkerPool):
                     w.put(body)
                     break
             else:
+                task_name = 'unknown'
+                if isinstance(body, dict):
+                    task_name = body.get('task')
+                logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
                 return super(AutoscalePool, self).write(preferred_queue, body)
         except Exception:
             for conn in connections.all():
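The message rewrite performed by `add_bind_kwargs` is easiest to see on a concrete body (shape only; the task path, timestamp, and pids below are illustrative, not taken from the diff):

```python
body = {'task': 'awx.main.tasks.print_time', 'bind_kwargs': ['dispatch_time']}
# AutoscalePool.add_bind_kwargs(body) pops 'bind_kwargs' and injects the values:
# {'task': 'awx.main.tasks.print_time',
#  'kwargs': {'dispatch_time': '2022-08-02T12:00:00+00:00'}}
```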
@@ -2,6 +2,7 @@ import inspect
 import logging
 import sys
 import json
+import time
 from uuid import uuid4
 
 from django.conf import settings
@@ -49,13 +50,21 @@ class task:
         @task(queue='tower_broadcast')
         def announce():
             print("Run this everywhere!")
 
+        # The special parameter bind_kwargs tells the main dispatcher process to add certain kwargs
+
+        @task(bind_kwargs=['dispatch_time'])
+        def print_time(dispatch_time=None):
+            print(f"Time I was dispatched: {dispatch_time}")
     """
 
-    def __init__(self, queue=None):
+    def __init__(self, queue=None, bind_kwargs=None):
         self.queue = queue
+        self.bind_kwargs = bind_kwargs
 
     def __call__(self, fn=None):
         queue = self.queue
+        bind_kwargs = self.bind_kwargs
 
 class PublisherMixin(object):
 
@@ -75,10 +84,12 @@ class task:
             msg = f'{cls.name}: Queue value required and may not be None'
             logger.error(msg)
             raise ValueError(msg)
-        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name}
+        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
         guid = get_guid()
         if guid:
             obj['guid'] = guid
+        if bind_kwargs:
+            obj['bind_kwargs'] = bind_kwargs
         obj.update(**kw)
         if callable(queue):
             queue = queue()
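Tying the publish-side changes together: a task opts into injected kwargs at declaration time, and every published message now carries a time_pub stamp. A sketch using the decorator as documented in the docstring above (the delay() call assumes the PublisherMixin interface this module attaches; treat the exact call as illustrative):

    from awx.main.dispatch.publish import task

    @task(queue='tower_broadcast', bind_kwargs=['dispatch_time'])
    def print_time(dispatch_time=None):
        # dispatch_time is filled in by the dispatcher main process, not the caller
        print(f"Time I was dispatched: {dispatch_time}")

    # The published body now looks roughly like:
    # {'uuid': ..., 'args': [], 'kwargs': {}, 'task': '...print_time',
    #  'time_pub': 1662992875.3, 'bind_kwargs': ['dispatch_time']}
    print_time.delay()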
@@ -2,6 +2,7 @@ from datetime import timedelta
 import logging
 
 from django.db.models import Q
+from django.conf import settings
 from django.utils.timezone import now as tz_now
 from django.contrib.contenttypes.models import ContentType
 
@@ -15,44 +16,71 @@ def startup_reaping():
     If this particular instance is starting, then we know that any running jobs are invalid
     so we will reap those jobs as a special action here
     """
-    me = Instance.objects.me()
+    try:
+        me = Instance.objects.me()
+    except RuntimeError as e:
+        logger.warning(f'Local instance is not registered, not running startup reaper: {e}')
+        return
     jobs = UnifiedJob.objects.filter(status='running', controller_node=me.hostname)
     job_ids = []
     for j in jobs:
         job_ids.append(j.id)
-        j.status = 'failed'
-        j.start_args = ''
-        j.job_explanation += 'Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.'
-        j.save(update_fields=['status', 'start_args', 'job_explanation'])
-        if hasattr(j, 'send_notification_templates'):
-            j.send_notification_templates('failed')
-        j.websocket_emit_status('failed')
+        reap_job(
+            j,
+            'failed',
+            job_explanation='Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.',
+        )
     if job_ids:
         logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup')
 
 
-def reap_job(j, status):
-    if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
+def reap_job(j, status, job_explanation=None):
+    j.refresh_from_db(fields=['status', 'job_explanation'])
+    status_before = j.status
+    if status_before not in ('running', 'waiting'):
         # just in case, don't reap jobs that aren't running
         return
     j.status = status
     j.start_args = ''  # blank field to remove encrypted passwords
-    j.job_explanation += ' '.join(
-        (
-            'Task was marked as running but was not present in',
-            'the job queue, so it has been marked as failed.',
-        )
-    )
+    if j.job_explanation:
+        j.job_explanation += ' '  # Separate messages for readability
+    if job_explanation is None:
+        j.job_explanation += 'Task was marked as running but was not present in the job queue, so it has been marked as failed.'
+    else:
+        j.job_explanation += job_explanation
     j.save(update_fields=['status', 'start_args', 'job_explanation'])
     if hasattr(j, 'send_notification_templates'):
         j.send_notification_templates('failed')
     j.websocket_emit_status(status)
-    logger.error('{} is no longer running; reaping'.format(j.log_format))
+    logger.error(f'{j.log_format} is no longer {status_before}; reaping')
 
 
-def reap(instance=None, status='failed', excluded_uuids=[]):
+def reap_waiting(instance=None, status='failed', job_explanation=None, grace_period=None, excluded_uuids=None, ref_time=None):
     """
-    Reap all jobs in waiting|running for this instance.
+    Reap all jobs in waiting for this instance.
+    """
+    if grace_period is None:
+        grace_period = settings.JOB_WAITING_GRACE_PERIOD + settings.TASK_MANAGER_TIMEOUT
+
+    me = instance
+    if me is None:
+        try:
+            me = Instance.objects.me()
+        except RuntimeError as e:
+            logger.warning(f'Local instance is not registered, not running reaper: {e}')
+            return
+    if ref_time is None:
+        ref_time = tz_now()
+    jobs = UnifiedJob.objects.filter(status='waiting', modified__lte=ref_time - timedelta(seconds=grace_period), controller_node=me.hostname)
+    if excluded_uuids:
+        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
+    for j in jobs:
+        reap_job(j, status, job_explanation=job_explanation)
+
+
+def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
+    """
+    Reap all jobs in running for this instance.
     """
     me = instance
     if me is None:
@@ -61,12 +89,11 @@ def reap(instance=None, status='failed', excluded_uuids=[]):
         except RuntimeError as e:
             logger.warning(f'Local instance is not registered, not running reaper: {e}')
             return
-    now = tz_now()
     workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
     jobs = UnifiedJob.objects.filter(
-        (Q(status='running') | Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
-        & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
-        & ~Q(polymorphic_ctype_id=workflow_ctype_id)
-    ).exclude(celery_task_id__in=excluded_uuids)
+        Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
+    )
+    if excluded_uuids:
+        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
     for j in jobs:
-        reap_job(j, status)
+        reap_job(j, status, job_explanation=job_explanation)
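reap_waiting() above deliberately leaves recently-modified waiting jobs alone: only jobs whose modified time predates ref_time minus the grace period are candidates. The cutoff arithmetic in isolation (the two setting names come from the diff; the numeric values here are assumptions):

    from datetime import datetime, timedelta

    JOB_WAITING_GRACE_PERIOD = 60  # assumed value; really settings.JOB_WAITING_GRACE_PERIOD
    TASK_MANAGER_TIMEOUT = 300     # assumed value; really settings.TASK_MANAGER_TIMEOUT

    def waiting_cutoff(ref_time=None, grace_period=None):
        if grace_period is None:
            grace_period = JOB_WAITING_GRACE_PERIOD + TASK_MANAGER_TIMEOUT
        if ref_time is None:
            ref_time = datetime.utcnow()
        # jobs with modified <= this cutoff are old enough to reap
        return ref_time - timedelta(seconds=grace_period)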
@@ -17,6 +17,7 @@ from django.conf import settings
 
 from awx.main.dispatch.pool import WorkerPool
 from awx.main.dispatch import pg_bus_conn
+from awx.main.utils.common import log_excess_runtime
 
 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -62,7 +63,7 @@ class AWXConsumerBase(object):
     def control(self, body):
         logger.warning(f'Received control signal:\n{body}')
         control = body.get('control')
-        if control in ('status', 'running'):
+        if control in ('status', 'running', 'cancel'):
             reply_queue = body['reply_to']
             if control == 'status':
                 msg = '\n'.join([self.listening_on, self.pool.debug()])
@@ -71,6 +72,17 @@ class AWXConsumerBase(object):
                 for worker in self.pool.workers:
                     worker.calculate_managed_tasks()
                     msg.extend(worker.managed_tasks.keys())
+            elif control == 'cancel':
+                msg = []
+                task_ids = set(body['task_ids'])
+                for worker in self.pool.workers:
+                    task = worker.current_task
+                    if task and task['uuid'] in task_ids:
+                        logger.warn(f'Sending SIGTERM to task id={task["uuid"]}, task={task.get("task")}, args={task.get("args")}')
+                        os.kill(worker.pid, signal.SIGTERM)
+                        msg.append(task['uuid'])
+                if task_ids and not msg:
+                    logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
 
             with pg_bus_conn() as conn:
                 conn.notify(reply_queue, json.dumps(msg))
@@ -81,6 +93,9 @@ class AWXConsumerBase(object):
             logger.error('unrecognized control message: {}'.format(control))
 
     def process_task(self, body):
+        if isinstance(body, dict):
+            body['time_ack'] = time.time()
+
         if 'control' in body:
             try:
                 return self.control(body)
@@ -101,6 +116,7 @@ class AWXConsumerBase(object):
         self.total_messages += 1
         self.record_statistics()
 
+    @log_excess_runtime(logger)
     def record_statistics(self):
         if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
             try:
@@ -149,7 +165,7 @@ class AWXConsumerPG(AWXConsumerBase):
 
         while True:
             try:
-                with pg_bus_conn() as conn:
+                with pg_bus_conn(new_connection=True) as conn:
                     for queue in self.queues:
                         conn.listen(queue)
                     if init is False:
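The new 'cancel' control branch lets another process ask a dispatcher to SIGTERM whichever workers are currently running the named task uuids; the reply is the list of uuids actually found and signaled. The round-trip from the caller's side, using the Control helper exactly as run_dispatcher invokes it further down (the uuid is a placeholder):

    from awx.main.dispatch.control import Control

    canceled = Control('dispatcher').cancel(['3e9c4c68-5a9e-4b5a-8fd4-d53a37b6d34b'])
    print(canceled)  # e.g. ['3e9c4c68-...'] if found, [] if nothing matched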
@@ -167,17 +167,27 @@ class CallbackBrokerWorker(BaseWorker):
                 try:
                     cls.objects.bulk_create(events)
                     metrics_bulk_events_saved += len(events)
-                except Exception:
+                except Exception as exc:
+                    logger.warning(f'Error in events bulk_create, will try individually up to 5 errors, error {str(exc)}')
                     # if an exception occurs, we should re-attempt to save the
                     # events one-by-one, because something in the list is
                     # broken/stale
+                    consecutive_errors = 0
+                    events_saved = 0
                     metrics_events_batch_save_errors += 1
                     for e in events:
                         try:
                             e.save()
-                            metrics_singular_events_saved += 1
-                        except Exception:
-                            logger.exception('Database Error Saving Job Event')
+                            events_saved += 1
+                            consecutive_errors = 0
+                        except Exception as exc_indv:
+                            consecutive_errors += 1
+                            logger.info(f'Database Error Saving individual Job Event, error {str(exc_indv)}')
+                            if consecutive_errors >= 5:
+                                raise
+                    metrics_singular_events_saved += events_saved
+                    if events_saved == 0:
+                        raise
             metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
             for e in events:
                 if not getattr(e, '_skip_websocket_message', False):
@@ -257,17 +267,18 @@ class CallbackBrokerWorker(BaseWorker):
             try:
                 self.flush(force=flush)
                 break
-            except (OperationalError, InterfaceError, InternalError):
+            except (OperationalError, InterfaceError, InternalError) as exc:
                 if retries >= self.MAX_RETRIES:
                     logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                     return
                 delay = 60 * retries
-                logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(i=retries + 1, delay=delay))
+                logger.warning(f'Database Error Flushing Job Events, retry #{retries + 1} in {delay} seconds: {str(exc)}')
                 django_connection.close()
                 time.sleep(delay)
                 retries += 1
             except DatabaseError:
-                logger.exception('Database Error Saving Job Event')
+                logger.exception('Database Error Flushing Job Events')
+                django_connection.close()
                 break
             except Exception as exc:
                 tb = traceback.format_exc()
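The flush path above now degrades in two stages: one bulk_create, then per-event saves that tolerate isolated bad rows but re-raise after five consecutive failures or when nothing saved at all, so the outer retry loop can reconnect and try the batch again. The same control flow, reduced to a generic sketch (not the AWX models):

    def save_with_fallback(objects, bulk_save, single_save, max_consecutive=5):
        try:
            bulk_save(objects)
            return len(objects)
        except Exception:
            saved = 0
            consecutive = 0
            for obj in objects:
                try:
                    single_save(obj)
                    saved += 1
                    consecutive = 0  # any success resets the error streak
                except Exception:
                    consecutive += 1
                    if consecutive >= max_consecutive:
                        raise  # persistent failure: let the caller retry the batch
            if saved == 0:
                raise  # nothing saved individually either; re-raise the bulk error
            return saved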
@@ -3,6 +3,7 @@ import logging
 import importlib
 import sys
 import traceback
+import time
 
 from kubernetes.config import kube_config
 
@@ -60,8 +61,19 @@ class TaskWorker(BaseWorker):
             # the callable is a class, e.g., RunJob; instantiate and
             # return its `run()` method
             _call = _call().run
+
+        log_extra = ''
+        logger_method = logger.debug
+        if ('time_ack' in body) and ('time_pub' in body):
+            time_publish = body['time_ack'] - body['time_pub']
+            time_waiting = time.time() - body['time_ack']
+            if time_waiting > 5.0 or time_publish > 5.0:
+                # If task took a very long time to process, add this information to the log
+                log_extra = f' took {time_publish:.4f} to ack, {time_waiting:.4f} in local dispatcher'
+                logger_method = logger.info
         # don't print kwargs, they often contain launch-time secrets
-        logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
+        logger_method(f'task {uuid} starting {task}(*{args}){log_extra}')
 
         return _call(*args, **kwargs)
 
     def perform_work(self, body):
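With time_pub stamped at publish (publish.py above) and time_ack stamped when the dispatcher main process receives the message (worker base above), the task worker can split total latency into time on the bus and time waiting locally. The arithmetic on its own:

    import time

    def dispatch_latencies(body):
        time_publish = body['time_ack'] - body['time_pub']  # publish -> acknowledgment
        time_waiting = time.time() - body['time_ack']       # acknowledgment -> worker start
        return time_publish, time_waiting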
@@ -862,7 +862,7 @@ class Command(BaseCommand):
                 overwrite_vars=bool(options.get('overwrite_vars', False)),
             )
             inventory_update = inventory_source.create_inventory_update(
-                _eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
+                _eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
             )
 
         data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
@@ -54,7 +54,7 @@ class Command(BaseCommand):
 
             capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
             version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
-            heartbeat = f' heartbeat="{x.modified:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
+            heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
             print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
 
         print()
@@ -27,7 +27,9 @@ class Command(BaseCommand):
         )
 
     def handle(self, **options):
+        # provides a mapping of hostname to Instance objects
         nodes = Instance.objects.in_bulk(field_name='hostname')
 
         if options['source'] not in nodes:
             raise CommandError(f"Host {options['source']} is not a registered instance.")
         if not (options['peers'] or options['disconnect'] or options['exact'] is not None):
@@ -57,7 +59,9 @@ class Command(BaseCommand):
 
         results = 0
         for target in options['peers']:
-            _, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
+            _, created = InstanceLink.objects.update_or_create(
+                source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
+            )
             if created:
                 results += 1
 
@@ -80,7 +84,9 @@ class Command(BaseCommand):
         links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
         removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
         for target in peers - links:
-            _, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
+            _, created = InstanceLink.objects.update_or_create(
+                source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
+            )
             if created:
                 additions += 1
 
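Switching get_or_create to update_or_create is what forces already-registered links back to ESTABLISHED instead of leaving them in whatever state they were in; get_or_create would return an existing row unmodified. General form of the call (assuming source_instance and target_instance are resolved Instance rows):

    link, created = InstanceLink.objects.update_or_create(
        source=source_instance,
        target=target_instance,
        defaults={'link_state': InstanceLink.States.ESTABLISHED},  # applied even when the row already exists
    )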
@@ -1,13 +1,14 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.
 import logging
+import yaml
 
 from django.conf import settings
 from django.core.cache import cache as django_cache
 from django.core.management.base import BaseCommand
 from django.db import connection as django_connection
 
-from awx.main.dispatch import get_local_queuename, reaper
+from awx.main.dispatch import get_local_queuename
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.pool import AutoscalePool
 from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -30,7 +31,16 @@ class Command(BaseCommand):
             '--reload',
             dest='reload',
             action='store_true',
-            help=('cause the dispatcher to recycle all of its worker processes;' 'running jobs will run to completion first'),
+            help=('cause the dispatcher to recycle all of its worker processes; running jobs will run to completion first'),
+        )
+        parser.add_argument(
+            '--cancel',
+            dest='cancel',
+            help=(
+                'Cancel a particular task id. Takes either a single id string, or a JSON list of multiple ids. '
+                'Can take in output from the --running argument as input to cancel all tasks. '
+                'Only running tasks can be canceled, queued tasks must be started before they can be canceled.'
+            ),
         )
 
     def handle(self, *arg, **options):
@@ -42,6 +52,16 @@ class Command(BaseCommand):
             return
         if options.get('reload'):
             return Control('dispatcher').control({'control': 'reload'})
+        if options.get('cancel'):
+            cancel_str = options.get('cancel')
+            try:
+                cancel_data = yaml.safe_load(cancel_str)
+            except Exception:
+                cancel_data = [cancel_str]
+            if not isinstance(cancel_data, list):
+                cancel_data = [cancel_str]
+            print(Control('dispatcher').cancel(cancel_data))
+            return
 
         # It's important to close these because we're _about_ to fork, and we
         # don't want the forked processes to inherit the open sockets
@@ -53,7 +73,6 @@ class Command(BaseCommand):
         # (like the node heartbeat)
         periodic.run_continuously()
 
-        reaper.startup_reaping()
         consumer = None
 
         try:
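Because the --cancel value goes through yaml.safe_load, and YAML is a superset of JSON, the flag accepts either one bare id or a JSON list. A sketch of the parsing rule the handler applies (the assertions use placeholder ids):

    import yaml

    def parse_cancel_argument(cancel_str):
        # Anything that does not parse to a list is treated as a single task id.
        try:
            cancel_data = yaml.safe_load(cancel_str)
        except Exception:
            return [cancel_str]
        if not isinstance(cancel_data, list):
            return [cancel_str]
        return cancel_data

    assert parse_cancel_argument('abc123') == ['abc123']
    assert parse_cancel_argument('["abc123", "def456"]') == ['abc123', 'def456']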
@@ -95,8 +95,13 @@ class Command(BaseCommand):
     # database migrations are still running
     from awx.main.models.ha import Instance
 
-    executor = MigrationExecutor(connection)
-    migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
+    try:
+        executor = MigrationExecutor(connection)
+        migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
+    except Exception as exc:
+        logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
+        time.sleep(10)
+        return
 
     # In containerized deployments, migrations happen in the task container,
     # and the services running there don't start until migrations are
@@ -129,10 +129,13 @@ class InstanceManager(models.Manager):
         # if instance was not retrieved by uuid and hostname was, use the hostname
         instance = self.filter(hostname=hostname)
 
+        from awx.main.models import Instance
+
         # Return existing instance
         if instance.exists():
             instance = instance.first()  # in the unusual occasion that there is more than one, only get one
-            update_fields = []
+            instance.node_state = Instance.States.INSTALLED  # Wait for it to show up on the mesh
+            update_fields = ['node_state']
             # if instance was retrieved by uuid and hostname has changed, update hostname
             if instance.hostname != hostname:
                 logger.warning("passed in hostname {0} is different from the original hostname {1}, updating to {0}".format(hostname, instance.hostname))
@@ -141,6 +144,7 @@ class InstanceManager(models.Manager):
             # if any other fields are to be updated
             if instance.ip_address != ip_address:
                 instance.ip_address = ip_address
+                update_fields.append('ip_address')
             if instance.node_type != node_type:
                 instance.node_type = node_type
                 update_fields.append('node_type')
@@ -151,12 +155,12 @@ class InstanceManager(models.Manager):
             return (False, instance)
 
         # Create new instance, and fill in default values
-        create_defaults = dict(capacity=0)
+        create_defaults = {'node_state': Instance.States.INSTALLED, 'capacity': 0}
         if defaults is not None:
             create_defaults.update(defaults)
         uuid_option = {}
         if uuid is not None:
-            uuid_option = dict(uuid=uuid)
+            uuid_option = {'uuid': uuid}
         if node_type == 'execution' and 'version' not in create_defaults:
             create_defaults['version'] = RECEPTOR_PENDING
         instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
awx/main/migrations/0165_task_manager_refactor.py (new file, 35 additions)
@@ -0,0 +1,35 @@
# Generated by Django 3.2.13 on 2022-08-10 14:03

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0164_remove_inventorysource_update_on_project_update'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='preferred_instance_groups_cache',
            field=models.JSONField(
                blank=True, default=None, editable=False, help_text='A cached list with pk values from preferred instance groups.', null=True
            ),
        ),
        migrations.AddField(
            model_name='unifiedjob',
            name='task_impact',
            field=models.PositiveIntegerField(default=0, editable=False, help_text='Number of forks an instance consumes when running this job.'),
        ),
        migrations.AddField(
            model_name='workflowapproval',
            name='expires',
            field=models.DateTimeField(
                default=None,
                editable=False,
                help_text='The time this approval will expire. This is the created time plus timeout, used for filtering.',
                null=True,
            ),
        ),
    ]
awx/main/migrations/0166_alter_jobevent_host.py (new file, 40 additions)
@@ -0,0 +1,40 @@
# Generated by Django 3.2.13 on 2022-07-06 13:19

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0165_task_manager_refactor'),
    ]

    operations = [
        migrations.AlterField(
            model_name='adhoccommandevent',
            name='host',
            field=models.ForeignKey(
                db_constraint=False,
                default=None,
                editable=False,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='ad_hoc_command_events',
                to='main.host',
            ),
        ),
        migrations.AlterField(
            model_name='jobevent',
            name='host',
            field=models.ForeignKey(
                db_constraint=False,
                default=None,
                editable=False,
                null=True,
                on_delete=django.db.models.deletion.DO_NOTHING,
                related_name='job_events_as_primary_host',
                to='main.host',
            ),
        ),
    ]
awx/main/migrations/0167_project_signature_validation_credential.py (new file, 57 additions; name as referenced by 0168 below)
@@ -0,0 +1,57 @@
# Generated by Django 3.2.13 on 2022-08-24 14:02

from django.db import migrations, models
import django.db.models.deletion

from awx.main.models import CredentialType
from awx.main.utils.common import set_current_apps


def setup_tower_managed_defaults(apps, schema_editor):
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0166_alter_jobevent_host'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='signature_validation_credential',
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='projects_signature_validation',
                to='main.credential',
                help_text='An optional credential used for validating files in the project against unexpected changes.',
            ),
        ),
        migrations.AlterField(
            model_name='credentialtype',
            name='kind',
            field=models.CharField(
                choices=[
                    ('ssh', 'Machine'),
                    ('vault', 'Vault'),
                    ('net', 'Network'),
                    ('scm', 'Source Control'),
                    ('cloud', 'Cloud'),
                    ('registry', 'Container Registry'),
                    ('token', 'Personal Access Token'),
                    ('insights', 'Insights'),
                    ('external', 'External'),
                    ('kubernetes', 'Kubernetes'),
                    ('galaxy', 'Galaxy/Automation Hub'),
                    ('cryptography', 'Cryptography'),
                ],
                max_length=32,
            ),
        ),
        migrations.RunPython(setup_tower_managed_defaults),
    ]
awx/main/migrations/0168_inventoryupdate_scm_revision.py (new file, 25 additions)
@@ -0,0 +1,25 @@
# Generated by Django 3.2.13 on 2022-09-08 16:03

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0167_project_signature_validation_credential'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventoryupdate',
            name='scm_revision',
            field=models.CharField(
                blank=True,
                default='',
                editable=False,
                help_text='The SCM Revision from the Project used for this inventory update. Only applicable to inventories sourced from scm',
                max_length=1024,
                verbose_name='SCM Revision',
            ),
        ),
    ]
awx/main/migrations/0169_jt_prompt_everything_on_launch.py (new file, 225 additions)
@@ -0,0 +1,225 @@
# Generated by Django 3.2.13 on 2022-09-15 14:07

import awx.main.fields
import awx.main.utils.polymorphic
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0168_inventoryupdate_scm_revision'),
    ]

    operations = [
        migrations.AddField(
            model_name='joblaunchconfig',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='joblaunchconfig_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='joblaunchconfig',
            name='labels',
            field=models.ManyToManyField(related_name='joblaunchconfig_labels', to='main.Label'),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_execution_environment_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_forks_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_instance_groups_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_job_slice_count_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_labels_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='ask_timeout_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='schedule',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='schedule_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='schedule',
            name='labels',
            field=models.ManyToManyField(related_name='schedule_labels', to='main.Label'),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='workflowjobnode_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='labels',
            field=models.ManyToManyField(related_name='workflowjobnode_labels', to='main.Label'),
        ),
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_labels_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_skip_tags_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_tags_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='execution_environment',
            field=models.ForeignKey(
                blank=True,
                default=None,
                help_text='The container image to be used for execution.',
                null=True,
                on_delete=awx.main.utils.polymorphic.SET_NULL,
                related_name='workflowjobtemplatenode_as_prompt',
                to='main.executionenvironment',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='labels',
            field=models.ManyToManyField(related_name='workflowjobtemplatenode_labels', to='main.Label'),
        ),
        migrations.CreateModel(
            name='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('workflowjobtemplatenode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobtemplatenode')),
            ],
        ),
        migrations.CreateModel(
            name='WorkflowJobNodeBaseInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobnode')),
            ],
        ),
        migrations.CreateModel(
            name='WorkflowJobInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjob')),
            ],
        ),
        migrations.CreateModel(
            name='ScheduleInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.schedule')),
            ],
        ),
        migrations.CreateModel(
            name='JobLaunchConfigInstanceGroupMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
                ('joblaunchconfig', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.joblaunchconfig')),
            ],
        ),
        migrations.AddField(
            model_name='joblaunchconfig',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True, editable=False, related_name='joblaunchconfigs', through='main.JobLaunchConfigInstanceGroupMembership', to='main.InstanceGroup'
            ),
        ),
        migrations.AddField(
            model_name='schedule',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True, editable=False, related_name='schedule_instance_groups', through='main.ScheduleInstanceGroupMembership', to='main.InstanceGroup'
            ),
        ),
        migrations.AddField(
            model_name='workflowjob',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                editable=False,
                related_name='workflow_job_instance_groups',
                through='main.WorkflowJobInstanceGroupMembership',
                to='main.InstanceGroup',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                editable=False,
                related_name='workflow_job_node_instance_groups',
                through='main.WorkflowJobNodeBaseInstanceGroupMembership',
                to='main.InstanceGroup',
            ),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='instance_groups',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                editable=False,
                related_name='workflow_job_template_node_instance_groups',
                through='main.WorkflowJobTemplateNodeBaseInstanceGroupMembership',
                to='main.InstanceGroup',
            ),
        ),
    ]
awx/main/migrations/0170_node_and_link_state.py (new file, 79 additions)
@@ -0,0 +1,79 @@
# Generated by Django 3.2.13 on 2022-08-02 17:53

import django.core.validators
from django.db import migrations, models


def forwards(apps, schema_editor):
    # All existing InstanceLink objects need to be in the state
    # 'Established', which is the default, so nothing needs to be done
    # for that.

    Instance = apps.get_model('main', 'Instance')
    for instance in Instance.objects.all():
        instance.node_state = 'ready' if not instance.errors else 'unavailable'
        instance.save(update_fields=['node_state'])


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0169_jt_prompt_everything_on_launch'),
    ]

    operations = [
        migrations.AddField(
            model_name='instance',
            name='listener_port',
            field=models.PositiveIntegerField(
                blank=True,
                default=27199,
                help_text='Port that Receptor will listen for incoming connections on.',
                validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)],
            ),
        ),
        migrations.AddField(
            model_name='instance',
            name='node_state',
            field=models.CharField(
                choices=[
                    ('provisioning', 'Provisioning'),
                    ('provision-fail', 'Provisioning Failure'),
                    ('installed', 'Installed'),
                    ('ready', 'Ready'),
                    ('unavailable', 'Unavailable'),
                    ('deprovisioning', 'De-provisioning'),
                    ('deprovision-fail', 'De-provisioning Failure'),
                ],
                default='ready',
                help_text='Indicates the current life cycle stage of this instance.',
                max_length=16,
            ),
        ),
        migrations.AddField(
            model_name='instancelink',
            name='link_state',
            field=models.CharField(
                choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
                default='established',
                help_text='Indicates the current life cycle stage of this peer link.',
                max_length=16,
            ),
        ),
        migrations.AlterField(
            model_name='instance',
            name='node_type',
            field=models.CharField(
                choices=[
                    ('control', 'Control plane node'),
                    ('execution', 'Execution plane node'),
                    ('hybrid', 'Controller and execution'),
                    ('hop', 'Message-passing node, no execution capability'),
                ],
                default='hybrid',
                help_text='Role that this node plays in the mesh.',
                max_length=16,
            ),
        ),
        migrations.RunPython(forwards, reverse_code=migrations.RunPython.noop),
    ]
awx/main/migrations/0171_add_health_check_started.py (new file, 18 additions)
@@ -0,0 +1,18 @@
# Generated by Django 3.2.13 on 2022-09-26 20:54

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0170_node_and_link_state'),
    ]

    operations = [
        migrations.AddField(
            model_name='instance',
            name='health_check_started',
            field=models.DateTimeField(editable=False, help_text='The last time a health check was initiated on this instance.', null=True),
        ),
    ]
awx/main/migrations/0172_prevent_instance_fallback.py (new file, 29 additions)
@@ -0,0 +1,29 @@
# Generated by Django 3.2.13 on 2022-09-29 18:10

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0171_add_health_check_started'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventory',
            name='prevent_instance_group_fallback',
            field=models.BooleanField(
                default=False,
                help_text='If enabled, the inventory will prevent adding any organization instance groups to the list of preferred instances groups to run associated job templates on. If this setting is enabled and you provided an empty list, the global instance groups will be applied.',
            ),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='prevent_instance_group_fallback',
            field=models.BooleanField(
                default=False,
                help_text='If enabled, the job template will prevent adding any inventory or organization instance groups to the list of preferred instances groups to run on. If this setting is enabled and you provided an empty list, the global instance groups will be applied.',
            ),
        ),
    ]
@@ -36,7 +36,7 @@ def create_clearsessions_jt(apps, schema_editor):
     if created:
         sched = Schedule(
             name='Cleanup Expired Sessions',
-            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' % schedule_time,
+            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1' % schedule_time,
             description='Cleans out expired browser sessions',
             enabled=True,
             created=now_dt,
@@ -69,7 +69,7 @@ def create_cleartokens_jt(apps, schema_editor):
     if created:
         sched = Schedule(
             name='Cleanup Expired OAuth 2 Tokens',
-            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1' % schedule_time,
+            rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1' % schedule_time,
             description='Removes expired OAuth 2 access and refresh tokens',
             enabled=True,
             created=now_dt,
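Dropping ;COUNT=1 is the functional change here: with COUNT=1 these "weekly" schedules fired exactly once and were then exhausted, so the cleanup jobs never ran again. A quick check with dateutil (start date assumed):

    from datetime import datetime
    from dateutil import rrule

    start = datetime(2022, 1, 1)
    one_shot = rrule.rrulestr('FREQ=WEEKLY;INTERVAL=1;COUNT=1', dtstart=start)
    weekly = rrule.rrulestr('FREQ=WEEKLY;INTERVAL=1', dtstart=start)

    print(len(list(one_shot)))                 # 1 -- a single occurrence, then nothing
    print(weekly.after(datetime(2023, 1, 1)))  # the unbounded rule keeps yielding dates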
@@ -90,6 +90,9 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
 
     extra_vars_dict = VarsDictProperty('extra_vars', True)
 
+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     def clean_inventory(self):
         inv = self.inventory
         if not inv:
@@ -178,12 +181,12 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
     def get_passwords_needed_to_start(self):
         return self.passwords_needed_to_start
 
-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         # NOTE: We sorta have to assume the host count matches and that forks default to 5
-        from awx.main.models.inventory import Host
-        count_hosts = Host.objects.filter(enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
+        if self.inventory:
+            count_hosts = self.inventory.total_hosts
+        else:
+            count_hosts = 5
         return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
 
     def copy(self):
@@ -207,23 +210,32 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
 
     def save(self, *args, **kwargs):
         update_fields = kwargs.get('update_fields', [])
+
+        def add_to_update_fields(name):
+            if name not in update_fields:
+                update_fields.append(name)
+
+        if not self.preferred_instance_groups_cache:
+            self.preferred_instance_groups_cache = self._get_preferred_instance_group_cache()
+            add_to_update_fields("preferred_instance_groups_cache")
         if not self.name:
             self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
-            if 'name' not in update_fields:
-                update_fields.append('name')
+            add_to_update_fields("name")
+        if self.task_impact == 0:
+            self.task_impact = self._get_task_impact()
+            add_to_update_fields("task_impact")
         super(AdHocCommand, self).save(*args, **kwargs)
 
     @property
     def preferred_instance_groups(self):
-        if self.inventory is not None and self.inventory.organization is not None:
-            organization_groups = [x for x in self.inventory.organization.instance_groups.all()]
-        else:
-            organization_groups = []
+        selected_groups = []
         if self.inventory is not None:
-            inventory_groups = [x for x in self.inventory.instance_groups.all()]
-        else:
-            inventory_groups = []
-        selected_groups = inventory_groups + organization_groups
+            for instance_group in self.inventory.instance_groups.all():
+                selected_groups.append(instance_group)
+            if not self.inventory.prevent_instance_group_fallback and self.inventory.organization is not None:
+                for instance_group in self.inventory.organization.instance_groups.all():
+                    selected_groups.append(instance_group)
+
         if not selected_groups:
             return self.global_instance_groups
         return selected_groups
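The rewritten preferred_instance_groups encodes a strict fallback chain: the inventory's own groups, then (unless prevent_instance_group_fallback blocks it) the organization's groups, and finally the global instance groups when nothing matched. The decision order, reduced to plain lists rather than ORM managers:

    def resolve_instance_groups(inventory, global_groups):
        # inventory may be None; attribute names mirror the model fields above
        selected = []
        if inventory is not None:
            selected.extend(inventory.instance_groups)
            if not inventory.prevent_instance_group_fallback and inventory.organization is not None:
                selected.extend(inventory.organization.instance_groups)
        return selected or global_groups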
@@ -316,16 +316,17 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
         user = get_current_user()
         if user and not user.id:
             user = None
-        if not self.pk and not self.created_by:
+        if (not self.pk) and (user is not None) and (not self.created_by):
             self.created_by = user
             if 'created_by' not in update_fields:
                 update_fields.append('created_by')
         # Update modified_by if any editable fields have changed
         new_values = self._get_fields_snapshot()
         if (not self.pk and not self.modified_by) or self._values_have_edits(new_values):
-            self.modified_by = user
-            if 'modified_by' not in update_fields:
-                update_fields.append('modified_by')
+            if self.modified_by != user:
+                self.modified_by = user
+                if 'modified_by' not in update_fields:
+                    update_fields.append('modified_by')
         super(PrimordialModel, self).save(*args, **kwargs)
         self._prior_values_store = new_values
 
@@ -336,6 +336,7 @@ class CredentialType(CommonModelNameNotUnique):
         ('external', _('External')),
         ('kubernetes', _('Kubernetes')),
         ('galaxy', _('Galaxy/Automation Hub')),
+        ('cryptography', _('Cryptography')),
     )
 
     kind = models.CharField(max_length=32, choices=KIND_CHOICES)
@@ -1171,6 +1172,25 @@ ManagedCredentialType(
     },
 )
 
+ManagedCredentialType(
+    namespace='gpg_public_key',
+    kind='cryptography',
+    name=gettext_noop('GPG Public Key'),
+    inputs={
+        'fields': [
+            {
+                'id': 'gpg_public_key',
+                'label': gettext_noop('GPG Public Key'),
+                'type': 'string',
+                'secret': True,
+                'multiline': True,
+                'help_text': gettext_noop('GPG Public Key used to validate content signatures.'),
+            },
+        ],
+        'required': ['gpg_public_key'],
+    },
+)
+
+
 class CredentialInputSource(PrimordialModel):
     class Meta:
@@ -25,7 +25,6 @@ analytics_logger = logging.getLogger('awx.analytics.job_events')

 logger = logging.getLogger('awx.main.models.events')

-
 __all__ = ['JobEvent', 'ProjectUpdateEvent', 'AdHocCommandEvent', 'InventoryUpdateEvent', 'SystemJobEvent']

@@ -486,13 +485,18 @@ class JobEvent(BasePlaybookEvent):
         editable=False,
         db_index=False,
     )
+    # When we partitioned the table we accidentally "lost" the foreign key constraint.
+    # However this is good because the cascade on delete at the django layer was causing DB issues
+    # We are going to leave this as a foreign key but mark it as not having a DB relation and
+    # prevent cascading on delete.
     host = models.ForeignKey(
         'Host',
         related_name='job_events_as_primary_host',
         null=True,
         default=None,
-        on_delete=models.SET_NULL,
+        on_delete=models.DO_NOTHING,
         editable=False,
+        db_constraint=False,
     )
     host_name = models.CharField(
         max_length=1024,
@@ -794,6 +798,10 @@ class AdHocCommandEvent(BaseCommandEvent):
         editable=False,
         db_index=False,
     )
+    # We need to keep this as a FK in the model because AdHocCommand uses a ManyToMany field
+    # to hosts through adhoc_events. But in https://github.com/ansible/awx/pull/8236/ we
+    # removed the nulling of the field in case of a host going away before an event is saved
+    # so this needs to stay SET_NULL on the ORM level
     host = models.ForeignKey(
         'Host',
         related_name='ad_hoc_command_events',
@@ -801,6 +809,7 @@ class AdHocCommandEvent(BaseCommandEvent):
         default=None,
         on_delete=models.SET_NULL,
         editable=False,
+        db_constraint=False,
     )
     host_name = models.CharField(
         max_length=1024,
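Reviewer note: both event models now carry `db_constraint=False`, so Django keeps the ORM-level relation (joins, `related_name` lookups) without emitting a REFERENCES clause in the DDL, which matters on the partitioned event tables. A minimal sketch of the pattern outside AWX (assumes a configured Django app; `Host` is a stand-in model):

```python
# Minimal sketch, not AWX code: an ORM-only foreign key.
from django.db import models

class Event(models.Model):
    host = models.ForeignKey(
        'Host',
        null=True,
        default=None,
        on_delete=models.DO_NOTHING,  # no Python-side cascade when a Host is deleted
        db_constraint=False,          # no FK constraint in the generated schema
    )
```

The JobEvent side also switches to `DO_NOTHING` so host deletion never cascades into the event partitions, while AdHocCommandEvent keeps `SET_NULL` for the reason given in its comment.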
@@ -5,13 +5,14 @@ from decimal import Decimal
 import logging
 import os

-from django.core.validators import MinValueValidator
+from django.core.validators import MinValueValidator, MaxValueValidator
 from django.db import models, connection
 from django.db.models.signals import post_save, post_delete
 from django.dispatch import receiver
 from django.utils.translation import gettext_lazy as _
 from django.conf import settings
 from django.utils.timezone import now, timedelta
+from django.db.models import Sum

 import redis
 from solo.models import SingletonModel
@@ -58,6 +59,15 @@ class InstanceLink(BaseModel):
     source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+')
     target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers')

+    class States(models.TextChoices):
+        ADDING = 'adding', _('Adding')
+        ESTABLISHED = 'established', _('Established')
+        REMOVING = 'removing', _('Removing')
+
+    link_state = models.CharField(
+        choices=States.choices, default=States.ESTABLISHED, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
+    )
+
     class Meta:
         unique_together = ('source', 'target')
@@ -104,6 +114,11 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         editable=False,
         help_text=_('Last time instance ran its heartbeat task for main cluster nodes. Last known connection to receptor mesh for execution nodes.'),
     )
+    health_check_started = models.DateTimeField(
+        null=True,
+        editable=False,
+        help_text=_("The last time a health check was initiated on this instance."),
+    )
     last_health_check = models.DateTimeField(
         null=True,
         editable=False,
@@ -126,13 +141,33 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         default=0,
         editable=False,
     )
-    NODE_TYPE_CHOICES = [
-        ("control", "Control plane node"),
-        ("execution", "Execution plane node"),
-        ("hybrid", "Controller and execution"),
-        ("hop", "Message-passing node, no execution capability"),
-    ]
-    node_type = models.CharField(default='hybrid', choices=NODE_TYPE_CHOICES, max_length=16)
+
+    class Types(models.TextChoices):
+        CONTROL = 'control', _("Control plane node")
+        EXECUTION = 'execution', _("Execution plane node")
+        HYBRID = 'hybrid', _("Controller and execution")
+        HOP = 'hop', _("Message-passing node, no execution capability")
+
+    node_type = models.CharField(default=Types.HYBRID, choices=Types.choices, max_length=16, help_text=_("Role that this node plays in the mesh."))
+
+    class States(models.TextChoices):
+        PROVISIONING = 'provisioning', _('Provisioning')
+        PROVISION_FAIL = 'provision-fail', _('Provisioning Failure')
+        INSTALLED = 'installed', _('Installed')
+        READY = 'ready', _('Ready')
+        UNAVAILABLE = 'unavailable', _('Unavailable')
+        DEPROVISIONING = 'deprovisioning', _('De-provisioning')
+        DEPROVISION_FAIL = 'deprovision-fail', _('De-provisioning Failure')
+
+    node_state = models.CharField(
+        choices=States.choices, default=States.READY, max_length=16, help_text=_("Indicates the current life cycle stage of this instance.")
+    )
+    listener_port = models.PositiveIntegerField(
+        blank=True,
+        default=27199,
+        validators=[MinValueValidator(1), MaxValueValidator(65535)],
+        help_text=_("Port that Receptor will listen for incoming connections on."),
+    )

     peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'))
@@ -149,10 +184,13 @@ class Instance(HasPolicyEditsMixin, BaseModel):
     def consumed_capacity(self):
         capacity_consumed = 0
         if self.node_type in ('hybrid', 'execution'):
-            capacity_consumed += sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')))
+            capacity_consumed += (
+                UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')).aggregate(Sum("task_impact"))["task_impact__sum"]
+                or 0
+            )
         if self.node_type in ('hybrid', 'control'):
-            capacity_consumed += sum(
-                settings.AWX_CONTROL_NODE_TASK_IMPACT for x in UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting'))
+            capacity_consumed += (
+                settings.AWX_CONTROL_NODE_TASK_IMPACT * UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting')).count()
             )
         return capacity_consumed
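Reviewer note: the rewrite moves the summation into the database. A sketch of the difference, assuming a queryset `qs` over a model with a `task_impact` column:

```python
from django.db.models import Sum

# Before: instantiates every matching row, then sums in Python.
total = sum(j.task_impact for j in qs)

# After: a single SELECT SUM(...). aggregate() yields None for an empty
# queryset, hence the `or 0` in the diff above.
total = qs.aggregate(Sum('task_impact'))['task_impact__sum'] or 0
```

The control-plane branch is the same trick: a constant per-job impact times a `COUNT(*)` instead of a generator over fetched rows.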
@@ -174,6 +212,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
     def jobs_total(self):
         return UnifiedJob.objects.filter(execution_node=self.hostname).count()

+    @property
+    def health_check_pending(self):
+        if self.health_check_started is None:
+            return False
+        if self.last_health_check is None:
+            return True
+        return self.health_check_started > self.last_health_check
+
     def get_cleanup_task_kwargs(self, **kwargs):
         """
         Produce options to use for the command: ansible-runner worker cleanup
@@ -203,24 +249,28 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             return True
         if ref_time is None:
             ref_time = now()
-        grace_period = settings.CLUSTER_NODE_HEARTBEAT_PERIOD * 2
+        grace_period = settings.CLUSTER_NODE_HEARTBEAT_PERIOD * settings.CLUSTER_NODE_MISSED_HEARTBEAT_TOLERANCE
         if self.node_type in ('execution', 'hop'):
             grace_period += settings.RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD
         return self.last_seen < ref_time - timedelta(seconds=grace_period)

     def mark_offline(self, update_last_seen=False, perform_save=True, errors=''):
-        if self.cpu_capacity == 0 and self.mem_capacity == 0 and self.capacity == 0 and self.errors == errors and (not update_last_seen):
-            return
+        if self.node_state not in (Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
+            return []
+        if self.node_state == Instance.States.UNAVAILABLE and self.errors == errors and (not update_last_seen):
+            return []
+        self.node_state = Instance.States.UNAVAILABLE
         self.cpu_capacity = self.mem_capacity = self.capacity = 0
         self.errors = errors
         if update_last_seen:
             self.last_seen = now()

+        update_fields = ['node_state', 'capacity', 'cpu_capacity', 'mem_capacity', 'errors']
+        if update_last_seen:
+            update_fields += ['last_seen']
         if perform_save:
-            update_fields = ['capacity', 'cpu_capacity', 'mem_capacity', 'errors']
-            if update_last_seen:
-                update_fields += ['last_seen']
             self.save(update_fields=update_fields)
+        return update_fields

     def set_capacity_value(self):
         """Sets capacity according to capacity adjustment rule (no save)"""
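Reviewer note: `mark_offline()` now reports the fields it touched, so a caller that defers the save (`perform_save=False`) can fold them into its own `update_fields`, as `save_health_data()` does in the next hunk. Usage sketch (variable names illustrative):

```python
# Sketch of the new calling convention.
update_fields = ['cpu_capacity', 'mem_capacity', 'capacity']
fields_touched = instance.mark_offline(perform_save=False, errors='node unreachable')
update_fields.extend(fields_touched)
instance.save(update_fields=update_fields)
```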
@@ -274,8 +324,12 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         if not errors:
             self.refresh_capacity_fields()
             self.errors = ''
+            if self.node_state in (Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
+                self.node_state = Instance.States.READY
+                update_fields.append('node_state')
         else:
-            self.mark_offline(perform_save=False, errors=errors)
+            fields_to_update = self.mark_offline(perform_save=False, errors=errors)
+            update_fields.extend(fields_to_update)
         update_fields.extend(['cpu_capacity', 'mem_capacity', 'capacity'])

         # disabling activity stream will avoid extra queries, which is important for heatbeat actions
@@ -292,7 +346,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             # playbook event data; we should consider this a zero capacity event
             redis.Redis.from_url(settings.BROKER_URL).ping()
         except redis.ConnectionError:
-            errors = _('Failed to connect ot Redis')
+            errors = _('Failed to connect to Redis')

         self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(), update_last_seen=True, errors=errors)
@@ -384,6 +438,20 @@ def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs

 @receiver(post_save, sender=Instance)
 def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
+    if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION,):
+        if instance.node_state == Instance.States.DEPROVISIONING:
+            from awx.main.tasks.receptor import remove_deprovisioned_node  # prevents circular import
+
+            # wait for jobs on the node to complete, then delete the
+            # node and kick off write_receptor_config
+            connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))
+
+        if instance.node_state == Instance.States.INSTALLED:
+            from awx.main.tasks.receptor import write_receptor_config  # prevents circular import
+
+            # broadcast to all control instances to update their receptor configs
+            connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
+
     if created or instance.has_policy_changes():
         schedule_policy_task()
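Reviewer note: both receptor tasks are queued through `on_commit`, Django's standard hook for deferring side effects until the surrounding transaction commits, so a worker can never pick up a node row that might still roll back. The general shape of the pattern:

```python
# General Django pattern (transaction.on_commit is the documented entry point;
# the diff uses the equivalent hook on the current connection object).
from django.db import transaction

def enqueue_after_commit(task, *args):
    transaction.on_commit(lambda: task.apply_async([*args]))
```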
@@ -430,3 +498,58 @@ class InventoryInstanceGroupMembership(models.Model):
         default=None,
         db_index=True,
     )
+
+
+class JobLaunchConfigInstanceGroupMembership(models.Model):
+
+    joblaunchconfig = models.ForeignKey('JobLaunchConfig', on_delete=models.CASCADE)
+    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
+    position = models.PositiveIntegerField(
+        null=True,
+        default=None,
+        db_index=True,
+    )
+
+
+class ScheduleInstanceGroupMembership(models.Model):
+
+    schedule = models.ForeignKey('Schedule', on_delete=models.CASCADE)
+    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
+    position = models.PositiveIntegerField(
+        null=True,
+        default=None,
+        db_index=True,
+    )
+
+
+class WorkflowJobTemplateNodeBaseInstanceGroupMembership(models.Model):
+
+    workflowjobtemplatenode = models.ForeignKey('WorkflowJobTemplateNode', on_delete=models.CASCADE)
+    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
+    position = models.PositiveIntegerField(
+        null=True,
+        default=None,
+        db_index=True,
+    )
+
+
+class WorkflowJobNodeBaseInstanceGroupMembership(models.Model):
+
+    workflowjobnode = models.ForeignKey('WorkflowJobNode', on_delete=models.CASCADE)
+    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
+    position = models.PositiveIntegerField(
+        null=True,
+        default=None,
+        db_index=True,
+    )
+
+
+class WorkflowJobInstanceGroupMembership(models.Model):
+
+    workflowjobnode = models.ForeignKey('WorkflowJob', on_delete=models.CASCADE)
+    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
+    position = models.PositiveIntegerField(
+        null=True,
+        default=None,
+        db_index=True,
+    )
@@ -63,7 +63,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
     an inventory source contains lists and hosts.
     """

-    FIELDS_TO_PRESERVE_AT_COPY = ['hosts', 'groups', 'instance_groups']
+    FIELDS_TO_PRESERVE_AT_COPY = ['hosts', 'groups', 'instance_groups', 'prevent_instance_group_fallback']
     KIND_CHOICES = [
         ('', _('Hosts have a direct link to this inventory.')),
         ('smart', _('Hosts for inventory generated using the host_filter property.')),
@@ -175,6 +175,16 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         related_name='inventory_labels',
         help_text=_('Labels associated with this inventory.'),
     )
+    prevent_instance_group_fallback = models.BooleanField(
+        default=False,
+        help_text=(
+            "If enabled, the inventory will prevent adding any organization "
+            "instance groups to the list of preferred instances groups to run "
+            "associated job templates on."
+            "If this setting is enabled and you provided an empty list, the global instance "
+            "groups will be applied."
+        ),
+    )

     def get_absolute_url(self, request=None):
         return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
@@ -236,6 +246,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
             raise ParseError(_('Slice number must be 1 or higher.'))
         return (number, step)

+    def get_sliced_hosts(self, host_queryset, slice_number, slice_count):
+        if slice_count > 1 and slice_number > 0:
+            offset = slice_number - 1
+            host_queryset = host_queryset[offset::slice_count]
+        return host_queryset
+
     def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
         hosts_kw = dict()
         if not show_all:
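Reviewer note: the extracted `get_sliced_hosts()` relies on extended-slice semantics (a step slice on a queryset forces evaluation and slices the fetched rows). The same arithmetic in plain Python:

```python
# Slice N of M takes every M-th host starting at offset N-1, so the slices
# partition the ordered host list without overlap.
hosts = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7']
slice_count = 3
for slice_number in range(1, slice_count + 1):
    print(slice_number, hosts[slice_number - 1::slice_count])
# 1 ['h1', 'h4', 'h7']
# 2 ['h2', 'h5']
# 3 ['h3', 'h6']
```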
@@ -243,10 +259,8 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         fetch_fields = ['name', 'id', 'variables', 'inventory_id']
         if towervars:
             fetch_fields.append('enabled')
-        hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
-        if slice_count > 1 and slice_number > 0:
-            offset = slice_number - 1
-            hosts = hosts[offset::slice_count]
+        host_queryset = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
+        hosts = self.get_sliced_hosts(host_queryset, slice_number, slice_count)

         data = dict()
         all_group = data.setdefault('all', dict())
@@ -337,9 +351,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         else:
             active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
         failed_inventory_sources = active_inventory_sources.filter(last_job_failed=True)
+        total_hosts = active_hosts.count()
+        # if total_hosts has changed, set update_task_impact to True
+        update_task_impact = total_hosts != self.total_hosts
         computed_fields = {
             'has_active_failures': bool(failed_hosts.count()),
-            'total_hosts': active_hosts.count(),
+            'total_hosts': total_hosts,
             'hosts_with_active_failures': failed_hosts.count(),
             'total_groups': active_groups.count(),
             'has_inventory_sources': bool(active_inventory_sources.count()),
@@ -357,6 +374,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
             computed_fields.pop(field)
         if computed_fields:
             iobj.save(update_fields=computed_fields.keys())
+        if update_task_impact:
+            # if total hosts count has changed, re-calculate task_impact for any
+            # job that is still in pending for this inventory, since task_impact
+            # is cached on task creation and used in task management system
+            tasks = self.jobs.filter(status="pending")
+            for t in tasks:
+                t.task_impact = t._get_task_impact()
+            UnifiedJob.objects.bulk_update(tasks, ['task_impact'])
         logger.debug("Finished updating inventory computed fields, pk={0}, in " "{1:.3f} seconds".format(self.pk, time.time() - start_time))

     def websocket_emit_status(self, status):
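Reviewer note: recomputing `task_impact` for pending jobs uses the read-modify-`bulk_update` pattern, one UPDATE round trip instead of a save per row. Generic sketch (model, filter, and `compute_impact` are illustrative stand-ins):

```python
# Sketch of the pattern used above.
pending = list(MyJob.objects.filter(status='pending'))
for job in pending:
    job.task_impact = compute_impact(job)  # stand-in for _get_task_impact()
MyJob.objects.bulk_update(pending, ['task_impact'])
```

Worth noting that `bulk_update` bypasses `save()` and signals, which is acceptable here since `task_impact` is only a cached scheduling hint.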
@@ -1176,6 +1201,14 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
         default=None,
         null=True,
     )
+    scm_revision = models.CharField(
+        max_length=1024,
+        blank=True,
+        default='',
+        editable=False,
+        verbose_name=_('SCM Revision'),
+        help_text=_('The SCM Revision from the Project used for this inventory update. Only applicable to inventories source from scm'),
+    )

     @property
     def is_container_group_task(self):
@@ -1220,8 +1253,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
             return UnpartitionedInventoryUpdateEvent
         return InventoryUpdateEvent

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 1

     # InventoryUpdate credential required
@@ -1246,15 +1278,19 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,

     @property
     def preferred_instance_groups(self):
-        if self.inventory_source.inventory is not None and self.inventory_source.inventory.organization is not None:
-            organization_groups = [x for x in self.inventory_source.inventory.organization.instance_groups.all()]
-        else:
-            organization_groups = []
+        selected_groups = []
         if self.inventory_source.inventory is not None:
-            inventory_groups = [x for x in self.inventory_source.inventory.instance_groups.all()]
-        else:
-            inventory_groups = []
-        selected_groups = inventory_groups + organization_groups
+            # Add the inventory sources IG to the selected IGs first
+            for instance_group in self.inventory_source.inventory.instance_groups.all():
+                selected_groups.append(instance_group)
+            # If the inventory allows for fallback and we have an organization then also append the orgs IGs to the end of the list
+            if (
+                not getattr(self.inventory_source.inventory, 'prevent_instance_group_fallback', False)
+                and self.inventory_source.inventory.organization is not None
+            ):
+                for instance_group in self.inventory_source.inventory.organization.instance_groups.all():
+                    selected_groups.append(instance_group)
+
         if not selected_groups:
             return self.global_instance_groups
         return selected_groups
@@ -43,8 +43,8 @@ from awx.main.models.notifications import (
     NotificationTemplate,
     JobNotificationMixin,
 )
-from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField
-from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob
+from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
+from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
 from awx.main.models.mixins import (
     ResourceMixin,
     SurveyJobTemplateMixin,
@@ -203,7 +203,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
     playbook) to an inventory source with a given credential.
     """

-    FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials', 'survey_spec']
+    FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials', 'survey_spec', 'prevent_instance_group_fallback']
     FIELDS_TO_DISCARD_AT_COPY = ['vault_credential', 'credential']
     SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]

@@ -227,15 +227,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         blank=True,
         default=False,
     )
-    ask_limit_on_launch = AskForField(
-        blank=True,
-        default=False,
-    )
-    ask_tags_on_launch = AskForField(blank=True, default=False, allows_field='job_tags')
-    ask_skip_tags_on_launch = AskForField(
-        blank=True,
-        default=False,
-    )
     ask_job_type_on_launch = AskForField(
         blank=True,
         default=False,
@@ -244,12 +235,27 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         blank=True,
         default=False,
     )
-    ask_inventory_on_launch = AskForField(
+    ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials')
+    ask_execution_environment_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_forks_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_job_slice_count_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_timeout_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_instance_groups_on_launch = AskForField(
         blank=True,
         default=False,
     )
-    ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials')
-    ask_scm_branch_on_launch = AskForField(blank=True, default=False, allows_field='scm_branch')
     job_slice_count = models.PositiveIntegerField(
         blank=True,
         default=1,
@@ -268,6 +274,15 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
             'admin_role',
         ],
     )
+    prevent_instance_group_fallback = models.BooleanField(
+        default=False,
+        help_text=(
+            "If enabled, the job template will prevent adding any inventory or organization "
+            "instance groups to the list of preferred instances groups to run on."
+            "If this setting is enabled and you provided an empty list, the global instance "
+            "groups will be applied."
+        ),
+    )

     @classmethod
     def _get_unified_job_class(cls):
@@ -276,7 +291,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
     @classmethod
     def _get_unified_job_field_names(cls):
         return set(f.name for f in JobOptions._meta.fields) | set(
-            ['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials', 'job_slice_number', 'job_slice_count', 'execution_environment']
+            [
+                'name',
+                'description',
+                'organization',
+                'survey_passwords',
+                'labels',
+                'credentials',
+                'job_slice_number',
+                'job_slice_count',
+                'execution_environment',
+            ]
         )

     @property
@@ -314,10 +339,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         actual_inventory = self.inventory
         if self.ask_inventory_on_launch and 'inventory' in kwargs:
             actual_inventory = kwargs['inventory']
+        actual_slice_count = self.job_slice_count
+        if self.ask_job_slice_count_on_launch and 'job_slice_count' in kwargs:
+            actual_slice_count = kwargs['job_slice_count']
         if actual_inventory:
-            return min(self.job_slice_count, actual_inventory.hosts.count())
+            return min(actual_slice_count, actual_inventory.hosts.count())
         else:
-            return self.job_slice_count
+            return actual_slice_count

     def save(self, *args, **kwargs):
         update_fields = kwargs.get('update_fields', [])
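Reviewer note: with `job_slice_count` now promptable, the effective count is resolved from the prompt when the template allows it, then capped by the host count of the effective inventory. A plain-Python paraphrase of `get_effective_slice_ct`:

```python
def effective_slice_ct(template_count, prompted_count, ask_on_launch, host_count):
    count = prompted_count if (ask_on_launch and prompted_count is not None) else template_count
    return min(count, host_count)

print(effective_slice_ct(1, 5, True, 3))   # 3 -- prompt accepted, capped by hosts
print(effective_slice_ct(1, 5, False, 3))  # 1 -- prompt ignored, template value wins
```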
@@ -425,10 +453,15 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour

             field = self._meta.get_field(field_name)
             if isinstance(field, models.ManyToManyField):
-                old_value = set(old_value.all())
-                new_value = set(kwargs[field_name]) - old_value
-                if not new_value:
-                    continue
+                if field_name == 'instance_groups':
+                    # Instance groups are ordered so we can't make a set out of them
+                    old_value = old_value.all()
+                elif field_name == 'credentials':
+                    # Credentials have a weird pattern because of how they are layered
+                    old_value = set(old_value.all())
+                    new_value = set(kwargs[field_name]) - old_value
+                    if not new_value:
+                        continue

             if new_value == old_value:
                 # no-op case: Fields the same as template's value
@@ -449,6 +482,10 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
                 rejected_data[field_name] = new_value
                 errors_dict[field_name] = _('Project does not allow override of branch.')
                 continue
+            elif field_name == 'job_slice_count' and (new_value > 1) and (self.get_effective_slice_ct(kwargs) <= 1):
+                rejected_data[field_name] = new_value
+                errors_dict[field_name] = _('Job inventory does not have enough hosts for slicing')
+                continue
             # accepted prompt
             prompted_data[field_name] = new_value
         else:
@@ -600,6 +637,19 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def get_ui_url(self):
         return urljoin(settings.TOWER_URL_BASE, "/#/jobs/playbook/{}".format(self.pk))

+    def _set_default_dependencies_processed(self):
+        """
+        This sets the initial value of dependencies_processed
+        and here we use this as a shortcut to avoid the DependencyManager for jobs that do not need it
+        """
+        if (not self.project) or self.project.scm_update_on_launch:
+            self.dependencies_processed = False
+        elif (not self.inventory) or self.inventory.inventory_sources.filter(update_on_launch=True).exists():
+            self.dependencies_processed = False
+        else:
+            # No dependencies to process
+            self.dependencies_processed = True
+
     @property
     def event_class(self):
         if self.has_unpartitioned_events:
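Reviewer note: `_set_default_dependencies_processed()` is a fast-path classifier; only jobs whose project or inventory sources update on launch go through the DependencyManager. As a truth table:

```python
# Plain-Python paraphrase of the shortcut above.
def dependencies_processed(project_updates_on_launch, any_inv_source_updates_on_launch):
    # True means the task manager can skip spawning dependency updates.
    return not (project_updates_on_launch or any_inv_source_updates_on_launch)

print(dependencies_processed(False, False))  # True  -- nothing to spawn first
print(dependencies_processed(True, False))   # False -- project update must run first
```

(A missing project or inventory is treated conservatively as needing the dependency pass in the real method.)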
@@ -644,8 +694,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
             raise ParseError(_('{status_value} is not a valid status option.').format(status_value=status))
         return self._get_hosts(**kwargs)

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         if self.launch_type == 'callback':
             count_hosts = 2
         else:
@@ -755,19 +804,15 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

     @property
     def preferred_instance_groups(self):
-        if self.organization is not None:
-            organization_groups = [x for x in self.organization.instance_groups.all()]
-        else:
-            organization_groups = []
-        if self.inventory is not None:
-            inventory_groups = [x for x in self.inventory.instance_groups.all()]
-        else:
-            inventory_groups = []
-        if self.job_template is not None:
-            template_groups = [x for x in self.job_template.instance_groups.all()]
-        else:
-            template_groups = []
-        selected_groups = template_groups + inventory_groups + organization_groups
+        # If the user specified instance groups those will be handled by the unified_job.create_unified_job
+        # This function handles only the defaults for a template w/o user specification
+        selected_groups = []
+        for obj_type in ['job_template', 'inventory', 'organization']:
+            if getattr(self, obj_type) is not None:
+                for instance_group in getattr(self, obj_type).instance_groups.all():
+                    selected_groups.append(instance_group)
+                if getattr(getattr(self, obj_type), 'prevent_instance_group_fallback', False):
+                    break
         if not selected_groups:
             return self.global_instance_groups
         return selected_groups
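Reviewer note: default instance-group resolution is now an ordered walk, template then inventory then organization, and any level that sets `prevent_instance_group_fallback` contributes its own groups and stops the walk. Sketch:

```python
# Paraphrase of the loop above; each level is (groups, prevent_fallback).
def preferred_groups(levels):
    selected = []
    for groups, prevent_fallback in levels:
        selected.extend(groups)
        if prevent_fallback:
            break
    return selected

levels = [(['jt-ig'], False), (['inv-ig'], True), (['org-ig'], False)]
print(preferred_groups(levels))  # ['jt-ig', 'inv-ig'] -- inventory blocked org fallback
```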
@@ -802,7 +847,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
     def _get_inventory_hosts(self, only=['name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id']):
         if not self.inventory:
             return []
-        return self.inventory.hosts.only(*only)
+        host_queryset = self.inventory.hosts.only(*only)
+        return self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)

     def start_job_fact_cache(self, destination, modification_times, timeout=None):
         self.log_lifecycle("start_job_fact_cache")
@@ -847,7 +893,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                 continue
             host.ansible_facts = ansible_facts
             host.ansible_facts_modified = now()
-            host.save()
+            host.save(update_fields=['ansible_facts', 'ansible_facts_modified'])
             system_tracking_logger.info(
                 'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                 extra=dict(
@@ -893,10 +939,36 @@ class LaunchTimeConfigBase(BaseModel):
     # This is a solution to the nullable CharField problem, specific to prompting
     char_prompts = JSONBlob(default=dict, blank=True)

+    # Define fields that are not really fields, but alias to char_prompts lookups
+    limit = NullablePromptPseudoField('limit')
+    scm_branch = NullablePromptPseudoField('scm_branch')
+    job_tags = NullablePromptPseudoField('job_tags')
+    skip_tags = NullablePromptPseudoField('skip_tags')
+    diff_mode = NullablePromptPseudoField('diff_mode')
+    job_type = NullablePromptPseudoField('job_type')
+    verbosity = NullablePromptPseudoField('verbosity')
+    forks = NullablePromptPseudoField('forks')
+    job_slice_count = NullablePromptPseudoField('job_slice_count')
+    timeout = NullablePromptPseudoField('timeout')
+
+    # NOTE: additional fields are assumed to exist but must be defined in subclasses
+    # due to technical limitations
+    SUBCLASS_FIELDS = (
+        'instance_groups',  # needs a through model defined
+        'extra_vars',  # alternates between extra_vars and extra_data
+        'credentials',  # already a unified job and unified JT field
+        'labels',  # already a unified job and unified JT field
+        'execution_environment',  # already a unified job and unified JT field
+    )
+
-    def prompts_dict(self, display=False):
+    def prompts_dict(self, display=False, for_cls=None):
         data = {}
+        if for_cls:
+            cls = for_cls
+        else:
+            cls = JobTemplate
         # Some types may have different prompts, but always subset of JT prompts
-        for prompt_name in JobTemplate.get_ask_mapping().keys():
+        for prompt_name in cls.get_ask_mapping().keys():
             try:
                 field = self._meta.get_field(prompt_name)
             except FieldDoesNotExist:
@@ -904,18 +976,23 @@ class LaunchTimeConfigBase(BaseModel):
             if isinstance(field, models.ManyToManyField):
                 if not self.pk:
                     continue  # unsaved object can't have related many-to-many
-                prompt_val = set(getattr(self, prompt_name).all())
-                if len(prompt_val) > 0:
-                    data[prompt_name] = prompt_val
+                prompt_values = list(getattr(self, prompt_name).all())
+                # Many to manys can't distinguish between None and []
+                # Because of this, from a config perspective, we assume [] is none and we don't save [] into the config
+                if len(prompt_values) > 0:
+                    data[prompt_name] = prompt_values
             elif prompt_name == 'extra_vars':
                 if self.extra_vars:
+                    extra_vars = {}
                     if display:
-                        data[prompt_name] = self.display_extra_vars()
+                        extra_vars = self.display_extra_vars()
                     else:
-                        data[prompt_name] = self.extra_vars
+                        extra_vars = self.extra_vars
                     # Depending on model, field type may save and return as string
-                    if isinstance(data[prompt_name], str):
-                        data[prompt_name] = parse_yaml_or_json(data[prompt_name])
+                    if isinstance(extra_vars, str):
+                        extra_vars = parse_yaml_or_json(extra_vars)
+                    if extra_vars:
+                        data['extra_vars'] = extra_vars
                 if self.survey_passwords and not display:
                     data['survey_passwords'] = self.survey_passwords
             else:
@@ -925,15 +1002,6 @@ class LaunchTimeConfigBase(BaseModel):
         return data


-for field_name in JobTemplate.get_ask_mapping().keys():
-    if field_name == 'extra_vars':
-        continue
-    try:
-        LaunchTimeConfigBase._meta.get_field(field_name)
-    except FieldDoesNotExist:
-        setattr(LaunchTimeConfigBase, field_name, NullablePromptPseudoField(field_name))
-
-
 class LaunchTimeConfig(LaunchTimeConfigBase):
     """
     Common model for all objects that save details of a saved launch config
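Reviewer note: the explicit pseudo-field attributes replace the removed module-level `setattr` loop in the previous hunk. The underlying idea is a descriptor that proxies an attribute into the `char_prompts` JSON blob; a generic sketch of that mechanism (not the real `NullablePromptPseudoField`, which lives in `awx.main.utils`):

```python
# Generic descriptor sketch: obj.limit reads/writes obj.char_prompts['limit'].
class PromptAlias:
    def __init__(self, name):
        self.name = name

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return obj.char_prompts.get(self.name)

    def __set__(self, obj, value):
        if value is None:
            obj.char_prompts.pop(self.name, None)
        else:
            obj.char_prompts[self.name] = value

class Config:
    limit = PromptAlias('limit')

    def __init__(self):
        self.char_prompts = {}

c = Config()
c.limit = 'webservers'
print(c.char_prompts)  # {'limit': 'webservers'}
```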
@@ -952,8 +1020,18 @@ class LaunchTimeConfig(LaunchTimeConfigBase):
             blank=True,
         )
     )
-    # Credentials needed for non-unified job / unified JT models
+    # Fields needed for non-unified job / unified JT models, because they are defined on unified models
     credentials = models.ManyToManyField('Credential', related_name='%(class)ss')
+    labels = models.ManyToManyField('Label', related_name='%(class)s_labels')
+    execution_environment = models.ForeignKey(
+        'ExecutionEnvironment',
+        null=True,
+        blank=True,
+        default=None,
+        on_delete=polymorphic.SET_NULL,
+        related_name='%(class)s_as_prompt',
+        help_text="The container image to be used for execution.",
+    )

     @property
     def extra_vars(self):
@@ -997,6 +1075,11 @@ class JobLaunchConfig(LaunchTimeConfig):
         editable=False,
     )

+    # Instance Groups needed for non-unified job / unified JT models
+    instance_groups = OrderedManyToManyField(
+        'InstanceGroup', related_name='%(class)ss', blank=True, editable=False, through='JobLaunchConfigInstanceGroupMembership'
+    )
+
     def has_user_prompts(self, template):
         """
         Returns True if any fields exist in the launch config that are
@@ -1213,6 +1296,9 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

     extra_vars_dict = VarsDictProperty('extra_vars', True)

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     @classmethod
     def _get_parent_field_name(cls):
         return 'system_job_template'
@@ -1238,8 +1324,7 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):
             return UnpartitionedSystemJobEvent
         return SystemJobEvent

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 5

     @property
@@ -10,6 +10,8 @@ from awx.api.versioning import reverse
 from awx.main.models.base import CommonModelNameNotUnique
 from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
 from awx.main.models.inventory import Inventory
+from awx.main.models.schedules import Schedule
+from awx.main.models.workflow import WorkflowJobTemplateNode, WorkflowJobNode

 __all__ = ('Label',)

@@ -34,16 +36,22 @@ class Label(CommonModelNameNotUnique):
     def get_absolute_url(self, request=None):
         return reverse('api:label_detail', kwargs={'pk': self.pk}, request=request)

-    @staticmethod
-    def get_orphaned_labels():
-        return Label.objects.filter(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
-
     def is_detached(self):
-        return Label.objects.filter(id=self.id, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True).exists()
+        return Label.objects.filter(
+            id=self.id,
+            unifiedjob_labels__isnull=True,
+            unifiedjobtemplate_labels__isnull=True,
+            inventory_labels__isnull=True,
+            schedule_labels__isnull=True,
+            workflowjobtemplatenode_labels__isnull=True,
+            workflowjobnode_labels__isnull=True,
+        ).exists()

     def is_candidate_for_detach(self):
-        c1 = UnifiedJob.objects.filter(labels__in=[self.id]).count()
-        c2 = UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count()
-        c3 = Inventory.objects.filter(labels__in=[self.id]).count()
-        return (c1 + c2 + c3 - 1) == 0
+        count = UnifiedJob.objects.filter(labels__in=[self.id]).count()  # Both Jobs and WFJobs
+        count += UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count()  # Both JTs and WFJT
+        count += Inventory.objects.filter(labels__in=[self.id]).count()
+        count += Schedule.objects.filter(labels__in=[self.id]).count()
+        count += WorkflowJobTemplateNode.objects.filter(labels__in=[self.id]).count()
+        count += WorkflowJobNode.objects.filter(labels__in=[self.id]).count()
+        return (count - 1) == 0
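Reviewer note: `is_candidate_for_detach` now totals attachments across every label-bearing model and subtracts the one attachment being removed. The arithmetic:

```python
def is_candidate_for_detach(total_attachments):
    # -1 discounts the attachment we are about to remove.
    return (total_attachments - 1) == 0

print(is_candidate_for_detach(1))  # True  -- only the current object holds it
print(is_candidate_for_detach(3))  # False -- still referenced elsewhere
```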
@@ -104,6 +104,33 @@ class SurveyJobTemplateMixin(models.Model):
         default=False,
     )
     survey_spec = prevent_search(JSONBlob(default=dict, blank=True))

+    ask_inventory_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_limit_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_scm_branch_on_launch = AskForField(
+        blank=True,
+        default=False,
+        allows_field='scm_branch',
+    )
+    ask_labels_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
+    ask_tags_on_launch = AskForField(
+        blank=True,
+        default=False,
+        allows_field='job_tags',
+    )
+    ask_skip_tags_on_launch = AskForField(
+        blank=True,
+        default=False,
+    )
     ask_variables_on_launch = AskForField(blank=True, default=False, allows_field='extra_vars')

     def survey_password_variables(self):
@@ -412,6 +439,11 @@ class TaskManagerJobMixin(TaskManagerUnifiedJobMixin):
     class Meta:
         abstract = True

+    def get_jobs_fail_chain(self):
+        if self.project_update_id:
+            return [self.project_update]
+        return []
+

 class TaskManagerUpdateOnLaunchMixin(TaskManagerUnifiedJobMixin):
     class Meta:
@@ -284,6 +284,17 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
         help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'),
     )

+    # credential (keys) used to validate content signature
+    signature_validation_credential = models.ForeignKey(
+        'Credential',
+        related_name='%(class)ss_signature_validation',
+        blank=True,
+        null=True,
+        default=None,
+        on_delete=models.SET_NULL,
+        help_text=_('An optional credential used for validating files in the project against unexpected changes.'),
+    )
+
     scm_revision = models.CharField(
         max_length=1024,
         blank=True,
@@ -513,6 +524,9 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
         help_text=_('The SCM Revision discovered by this update for the given project and branch.'),
     )

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     def _get_parent_field_name(self):
         return 'project'

@@ -560,8 +574,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
             return UnpartitionedProjectUpdateEvent
         return ProjectUpdateEvent

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 0 if self.job_type == 'run' else 1

     @property
@@ -618,6 +631,10 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
         added_update_fields = []
         if not self.job_tags:
             job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections']
+            if self.project.signature_validation_credential is not None:
+                credential_type = self.project.signature_validation_credential.credential_type.namespace
+                job_tags.append(f'validation_{credential_type}')
+                job_tags.append('validation_checksum_manifest')
             self.job_tags = ','.join(job_tags)
             added_update_fields.append('job_tags')
         if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':
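Reviewer note: with a signature-validation credential attached, a project update now carries two extra tags. Worked example of the string built above, assuming a git project with the GPG credential type from earlier in this diff:

```python
scm_type = 'git'
credential_namespace = 'gpg_public_key'  # namespace of the attached credential type
job_tags = [f'update_{scm_type}', 'install_roles', 'install_collections']
job_tags.append(f'validation_{credential_namespace}')
job_tags.append('validation_checksum_manifest')
print(','.join(job_tags))
# update_git,install_roles,install_collections,validation_gpg_public_key,validation_checksum_manifest
```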
@@ -18,6 +18,7 @@ from django.utils.translation import gettext_lazy as _

 # AWX
 from awx.api.versioning import reverse
+from awx.main.fields import OrderedManyToManyField
 from awx.main.models.base import PrimordialModel
 from awx.main.models.jobs import LaunchTimeConfig
 from awx.main.utils import ignore_inventory_computed_fields
@@ -83,6 +84,13 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
     )
     rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule."))
     next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run."))
+    instance_groups = OrderedManyToManyField(
+        'InstanceGroup',
+        related_name='schedule_instance_groups',
+        blank=True,
+        editable=False,
+        through='ScheduleInstanceGroupMembership',
+    )

     @classmethod
     def get_zoneinfo(cls):
@@ -45,7 +45,8 @@ from awx.main.utils.common import (
|
|||||||
get_type_for_model,
|
get_type_for_model,
|
||||||
parse_yaml_or_json,
|
parse_yaml_or_json,
|
||||||
getattr_dne,
|
getattr_dne,
|
||||||
schedule_task_manager,
|
ScheduleDependencyManager,
|
||||||
|
ScheduleTaskManager,
|
||||||
get_event_partition_epoch,
|
get_event_partition_epoch,
|
||||||
get_capacity_type,
|
get_capacity_type,
|
||||||
)
|
)
|
||||||
@@ -331,10 +332,11 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
|||||||
|
|
||||||
return NotificationTemplate.objects.none()
|
return NotificationTemplate.objects.none()
|
||||||
|
|
||||||
def create_unified_job(self, **kwargs):
|
def create_unified_job(self, instance_groups=None, **kwargs):
|
||||||
"""
|
"""
|
||||||
Create a new unified job based on this unified job template.
|
Create a new unified job based on this unified job template.
|
||||||
"""
|
"""
|
||||||
|
# TODO: rename kwargs to prompts, to set expectation that these are runtime values
|
||||||
new_job_passwords = kwargs.pop('survey_passwords', {})
|
new_job_passwords = kwargs.pop('survey_passwords', {})
|
||||||
eager_fields = kwargs.pop('_eager_fields', None)
|
eager_fields = kwargs.pop('_eager_fields', None)
|
||||||
|
|
||||||
@@ -381,6 +383,14 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
|||||||
unified_job.survey_passwords = new_job_passwords
|
unified_job.survey_passwords = new_job_passwords
|
||||||
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
|
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
|
||||||
|
|
||||||
|
if instance_groups:
|
||||||
|
unified_job.preferred_instance_groups_cache = [ig.id for ig in instance_groups]
|
||||||
|
else:
|
||||||
|
unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache()
|
||||||
|
|
||||||
|
unified_job._set_default_dependencies_processed()
|
||||||
|
unified_job.task_impact = unified_job._get_task_impact()
|
||||||
|
|
||||||
from awx.main.signals import disable_activity_stream, activity_stream_create
|
from awx.main.signals import disable_activity_stream, activity_stream_create
|
||||||
|
|
||||||
with disable_activity_stream():
|
with disable_activity_stream():
|
||||||
@@ -406,13 +416,17 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
|||||||
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
|
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
|
||||||
|
|
||||||
# Create record of provided prompts for relaunch and rescheduling
|
# Create record of provided prompts for relaunch and rescheduling
|
||||||
unified_job.create_config_from_prompts(kwargs, parent=self)
|
config = unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||||
|
if instance_groups:
|
||||||
|
for ig in instance_groups:
|
||||||
|
config.instance_groups.add(ig)
|
||||||
|
|
||||||
# manually issue the create activity stream entry _after_ M2M relations
|
# manually issue the create activity stream entry _after_ M2M relations
|
||||||
# have been associated to the UJ
|
# have been associated to the UJ
|
||||||
if unified_job.__class__ in activity_stream_registrar.models:
|
if unified_job.__class__ in activity_stream_registrar.models:
|
||||||
activity_stream_create(None, unified_job, True)
|
activity_stream_create(None, unified_job, True)
|
||||||
unified_job.log_lifecycle("created")
|
unified_job.log_lifecycle("created")
|
||||||
|
|
||||||
return unified_job
|
return unified_job
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
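Reviewer note on `create_unified_job`: explicitly prompted instance groups take precedence, and the chosen pk list is frozen onto the job at creation time; otherwise the template's computed preference is cached. A rough stand-alone sketch of that selection rule, with hypothetical names and plain ints standing in for model objects:

```python
# Sketch of the preferred-instance-group caching decision (not AWX API):
# prompts win and keep their order; otherwise fall back to the template's
# computed preference, frozen at creation time.
def preferred_ig_cache(prompted_igs, template_preference):
    if prompted_igs:
        return [ig_id for ig_id in prompted_igs]
    return list(template_preference)


assert preferred_ig_cache([3, 1], [2]) == [3, 1]   # prompts win, order kept
assert preferred_ig_cache([], [2, 5]) == [2, 5]    # fall back to template
```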
@@ -693,6 +707,14 @@ class UnifiedJob(
         on_delete=polymorphic.SET_NULL,
         help_text=_('The Instance group the job was run under'),
     )
+    preferred_instance_groups_cache = models.JSONField(
+        blank=True,
+        null=True,
+        default=None,
+        editable=False,
+        help_text=_("A cached list with pk values from preferred instance groups."),
+    )
+    task_impact = models.PositiveIntegerField(default=0, editable=False, help_text=_("Number of forks an instance consumes when running this job."))
     organization = models.ForeignKey(
         'Organization',
         blank=True,

@@ -754,6 +776,9 @@ class UnifiedJob(
     def _get_parent_field_name(self):
         return 'unified_job_template'  # Override in subclasses.

+    def _get_preferred_instance_group_cache(self):
+        return [ig.pk for ig in self.preferred_instance_groups]
+
     @classmethod
     def _get_unified_job_template_class(cls):
         """

@@ -808,6 +833,9 @@ class UnifiedJob(
             update_fields = self._update_parent_instance_no_save(parent_instance)
             parent_instance.save(update_fields=update_fields)

+    def _set_default_dependencies_processed(self):
+        pass
+
     def save(self, *args, **kwargs):
         """Save the job, with current status, to the database.
         Ensure that all data is consistent before doing so.

@@ -821,7 +849,8 @@ class UnifiedJob(

         # If this job already exists in the database, retrieve a copy of
         # the job in its prior state.
-        if self.pk:
+        # If update_fields are given without status, then that indicates no change
+        if self.pk and ((not update_fields) or ('status' in update_fields)):
             self_before = self.__class__.objects.get(pk=self.pk)
             if self_before.status != self.status:
                 status_before = self_before.status
@@ -952,22 +981,38 @@ class UnifiedJob(
             valid_fields.extend(['survey_passwords', 'extra_vars'])
         else:
             kwargs.pop('survey_passwords', None)
+        many_to_many_fields = []
         for field_name, value in kwargs.items():
             if field_name not in valid_fields:
                 raise Exception('Unrecognized launch config field {}.'.format(field_name))
-            if field_name == 'credentials':
+            field = None
+            # may use extra_data as a proxy for extra_vars
+            if field_name in config.SUBCLASS_FIELDS and field_name != 'extra_vars':
+                field = config._meta.get_field(field_name)
+            if isinstance(field, models.ManyToManyField):
+                many_to_many_fields.append(field_name)
                 continue
-            key = field_name
-            if key == 'extra_vars':
-                key = 'extra_data'
-            setattr(config, key, value)
+            if isinstance(field, (models.ForeignKey)) and (value is None):
+                continue  # the null value indicates not-provided for ForeignKey case
+            setattr(config, field_name, value)
         config.save()

-        job_creds = set(kwargs.get('credentials', []))
-        if 'credentials' in [field.name for field in parent._meta.get_fields()]:
-            job_creds = job_creds - set(parent.credentials.all())
-        if job_creds:
-            config.credentials.add(*job_creds)
+        for field_name in many_to_many_fields:
+            prompted_items = kwargs.get(field_name, [])
+            if not prompted_items:
+                continue
+            if field_name == 'instance_groups':
+                # Here we are doing a loop to make sure we preserve order for this Ordered field
+                # also do not merge IGs with parent, so this saves the literal list
+                for item in prompted_items:
+                    getattr(config, field_name).add(item)
+            else:
+                # Assuming this field merges prompts with parent, save just the diff
+                if field_name in [field.name for field in parent._meta.get_fields()]:
+                    prompted_items = set(prompted_items) - set(getattr(parent, field_name).all())
+                if prompted_items:
+                    getattr(config, field_name).add(*prompted_items)

         return config

     @property
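The loop above treats prompted many-to-many fields in two ways: `instance_groups` saves the literal ordered list, while other prompted relations save only the difference from the parent template. A small plain-Python sketch of the distinction, simplified from the code above:

```python
# Sketch of the two save strategies for prompted many-to-many fields
# (plain values stand in for the config/parent model objects):
def items_to_save(field_name, prompted, parent_items):
    if field_name == 'instance_groups':
        # ordered field: store the literal prompted list, never merged with parent
        return list(prompted)
    # merging fields such as credentials: store only the diff against the parent
    return set(prompted) - set(parent_items)


assert items_to_save('instance_groups', [2, 1], [1]) == [2, 1]
assert items_to_save('credentials', [1, 2], [1]) == {2}
```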
@@ -1026,7 +1071,6 @@ class UnifiedJob(
             event_qs = self.get_event_queryset()
         except NotImplementedError:
             return True  # Model without events, such as WFJT
-        self.log_lifecycle("event_processing_finished")
         return self.emitted_events == event_qs.count()

     def result_stdout_raw_handle(self, enforce_max_bytes=True):

@@ -1241,9 +1285,8 @@ class UnifiedJob(
         except JobLaunchConfig.DoesNotExist:
             return False

-    @property
-    def task_impact(self):
-        raise NotImplementedError  # Implement in subclass.
+    def _get_task_impact(self):
+        return self.task_impact  # return default, should implement in subclass.

     def websocket_emit_data(self):
         '''Return extra data that should be included when submitting data to the browser over the websocket connection'''

@@ -1255,7 +1298,7 @@ class UnifiedJob(
     def _websocket_emit_status(self, status):
         try:
             status_data = dict(unified_job_id=self.id, status=status)
-            if status == 'waiting':
+            if status == 'running':
                 if self.instance_group:
                     status_data['instance_group_name'] = self.instance_group.name
                 else:

@@ -1358,7 +1401,10 @@ class UnifiedJob(
         self.update_fields(start_args=json.dumps(kwargs), status='pending')
         self.websocket_emit_status("pending")

-        schedule_task_manager()
+        if self.dependencies_processed:
+            ScheduleTaskManager().schedule()
+        else:
+            ScheduleDependencyManager().schedule()

         # Each type of unified job has a different Task class; get the
         # appropirate one.
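The branch above is the heart of the scheduler split: jobs whose dependencies are already processed skip straight to the task manager, everything else passes through the dependency manager first. A toy sketch of the dispatch decision with stub bodies (the class names follow the diff's imports from `awx.main.utils.common`; the print statements are illustrative only):

```python
# Sketch of the two-stage scheduling decision in signal_start.
class ScheduleTaskManager:
    def schedule(self):
        print("kick the task manager")


class ScheduleDependencyManager:
    def schedule(self):
        print("kick the dependency manager")


def on_signal_start(dependencies_processed: bool):
    # resolved dependencies -> task manager; otherwise resolve them first
    if dependencies_processed:
        ScheduleTaskManager().schedule()
    else:
        ScheduleDependencyManager().schedule()


on_signal_start(True)   # -> kick the task manager
on_signal_start(False)  # -> kick the dependency manager
```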
@@ -1373,22 +1419,6 @@ class UnifiedJob(
         # Done!
         return True

-    @property
-    def actually_running(self):
-        # returns True if the job is running in the appropriate dispatcher process
-        running = False
-        if all([self.status == 'running', self.celery_task_id, self.execution_node]):
-            # If the job is marked as running, but the dispatcher
-            # doesn't know about it (or the dispatcher doesn't reply),
-            # then cancel the job
-            timeout = 5
-            try:
-                running = self.celery_task_id in ControlDispatcher('dispatcher', self.controller_node or self.execution_node).running(timeout=timeout)
-            except (socket.timeout, RuntimeError):
-                logger.error('could not reach dispatcher on {} within {}s'.format(self.execution_node, timeout))
-                running = False
-        return running
-
     @property
     def can_cancel(self):
         return bool(self.status in CAN_CANCEL)

@@ -1398,27 +1428,61 @@ class UnifiedJob(
             return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (self.model_to_str(), self.name, self.id)
         return None

+    def fallback_cancel(self):
+        if not self.celery_task_id:
+            self.refresh_from_db(fields=['celery_task_id'])
+        self.cancel_dispatcher_process()
+
+    def cancel_dispatcher_process(self):
+        """Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
+        if not self.celery_task_id:
+            return
+        canceled = []
+        try:
+            # Use control and reply mechanism to cancel and obtain confirmation
+            timeout = 5
+            canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
+        except socket.timeout:
+            logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
+        except Exception:
+            logger.exception("error encountered when checking task status")
+        return bool(self.celery_task_id in canceled)  # True or False, whether confirmation was obtained
+
     def cancel(self, job_explanation=None, is_chain=False):
         if self.can_cancel:
             if not is_chain:
                 for x in self.get_jobs_fail_chain():
                     x.cancel(job_explanation=self._build_job_explanation(), is_chain=True)

+            cancel_fields = []
             if not self.cancel_flag:
                 self.cancel_flag = True
                 self.start_args = ''  # blank field to remove encrypted passwords
-                cancel_fields = ['cancel_flag', 'start_args']
-                if self.status in ('pending', 'waiting', 'new'):
-                    self.status = 'canceled'
-                    cancel_fields.append('status')
-                if self.status == 'running' and not self.actually_running:
-                    self.status = 'canceled'
-                    cancel_fields.append('status')
+                cancel_fields.extend(['cancel_flag', 'start_args'])
+                connection.on_commit(lambda: self.websocket_emit_status("canceled"))
             if job_explanation is not None:
                 self.job_explanation = job_explanation
                 cancel_fields.append('job_explanation')
-                self.save(update_fields=cancel_fields)
-                self.websocket_emit_status("canceled")
+
+            controller_notified = False
+            if self.celery_task_id:
+                controller_notified = self.cancel_dispatcher_process()
+            else:
+                # Avoid race condition where we have stale model from pending state but job has already started,
+                # its checking signal but not cancel_flag, so re-send signal after this database commit
+                connection.on_commit(self.fallback_cancel)
+
+            # If a SIGTERM signal was sent to the control process, and acked by the dispatcher
+            # then we want to let its own cleanup change status, otherwise change status now
+            if not controller_notified:
+                if self.status != 'canceled':
+                    self.status = 'canceled'
+                    cancel_fields.append('status')
+
+            self.save(update_fields=cancel_fields)
+
         return self.cancel_flag

     @property
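The new cancel flow asks the dispatcher to SIGTERM the task and only flips status locally when no acknowledgement arrives; the `fallback_cancel` hook re-sends the signal after the database commit to close the stale-model race. A simplified sketch of that handshake, with dicts and a stub dispatcher standing in for the real models and `ControlDispatcher`:

```python
# Sketch of the cancel handshake (illustrative, not AWX API).
def cancel(job, dispatcher):
    job['cancel_flag'] = True
    acked = False
    if job.get('celery_task_id'):
        # the dispatcher returns the set of task ids it confirmed canceling
        acked = job['celery_task_id'] in dispatcher.cancel([job['celery_task_id']])
    if not acked:
        # no acknowledgement: the dispatcher will not clean up, so flip status here
        job['status'] = 'canceled'
    return job


class UnreachableDispatcher:
    def cancel(self, ids):
        return set()  # simulates a dispatcher that never acknowledges


job = cancel({'celery_task_id': 'abc123', 'status': 'running'}, UnreachableDispatcher())
assert job['status'] == 'canceled'
```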
@@ -1515,8 +1579,8 @@ class UnifiedJob(
             'state': state,
             'work_unit_id': self.work_unit_id,
         }
-        if self.unified_job_template:
-            extra["template_name"] = self.unified_job_template.name
+        if self.name:
+            extra["task_name"] = self.name
         if state == "blocked" and blocked_by:
             blocked_by_msg = f"{blocked_by._meta.model_name}-{blocked_by.id}"
             msg = f"{self._meta.model_name}-{self.id} blocked by {blocked_by_msg}"

@@ -1528,7 +1592,7 @@ class UnifiedJob(
             extra["controller_node"] = self.controller_node or "NOT_SET"
         elif state == "execution_node_chosen":
             extra["execution_node"] = self.execution_node or "NOT_SET"
-        logger_job_lifecycle.debug(msg, extra=extra)
+        logger_job_lifecycle.info(msg, extra=extra)

     @property
     def launched_by(self):
@@ -13,6 +13,7 @@ from django.db import connection, models
 from django.conf import settings
 from django.utils.translation import gettext_lazy as _
 from django.core.exceptions import ObjectDoesNotExist
+from django.utils.timezone import now, timedelta

 # from django import settings as tower_settings

@@ -28,7 +29,7 @@ from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, Un
 from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin
 from awx.main.models.base import CreatedModifiedModel, VarsDictProperty
 from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
-from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob
+from awx.main.fields import ImplicitRoleField, JSONBlob, OrderedManyToManyField
 from awx.main.models.mixins import (
     ResourceMixin,
     SurveyJobTemplateMixin,

@@ -40,7 +41,7 @@ from awx.main.models.mixins import (
 from awx.main.models.jobs import LaunchTimeConfigBase, LaunchTimeConfig, JobTemplate
 from awx.main.models.credential import Credential
 from awx.main.redact import REPLACE_STR
-from awx.main.utils import schedule_task_manager
+from awx.main.utils import ScheduleWorkflowManager


 __all__ = [

@@ -113,6 +114,9 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
         'credentials',
         'char_prompts',
         'all_parents_must_converge',
+        'labels',
+        'instance_groups',
+        'execution_environment',
     ]

     def create_workflow_job_node(self, **kwargs):

@@ -121,7 +125,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
         """
         create_kwargs = {}
         for field_name in self._get_workflow_job_field_names():
-            if field_name == 'credentials':
+            if field_name in ['credentials', 'labels', 'instance_groups']:
                 continue
             if field_name in kwargs:
                 create_kwargs[field_name] = kwargs[field_name]

@@ -131,10 +135,20 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
         new_node = WorkflowJobNode.objects.create(**create_kwargs)
         if self.pk:
             allowed_creds = self.credentials.all()
+            allowed_labels = self.labels.all()
+            allowed_instance_groups = self.instance_groups.all()
         else:
             allowed_creds = []
+            allowed_labels = []
+            allowed_instance_groups = []
         for cred in allowed_creds:
             new_node.credentials.add(cred)
+
+        for label in allowed_labels:
+            new_node.labels.add(label)
+        for instance_group in allowed_instance_groups:
+            new_node.instance_groups.add(instance_group)
+
         return new_node


@@ -152,6 +166,9 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
         'char_prompts',
         'all_parents_must_converge',
         'identifier',
+        'labels',
+        'execution_environment',
+        'instance_groups',
     ]
     REENCRYPTION_BLOCKLIST_AT_COPY = ['extra_data', 'survey_passwords']

@@ -166,6 +183,13 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
         blank=False,
         help_text=_('An identifier for this node that is unique within its workflow. ' 'It is copied to workflow job nodes corresponding to this node.'),
     )
+    instance_groups = OrderedManyToManyField(
+        'InstanceGroup',
+        related_name='workflow_job_template_node_instance_groups',
+        blank=True,
+        editable=False,
+        through='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
+    )

     class Meta:
         app_label = 'main'

@@ -210,7 +234,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
         approval_template = WorkflowApprovalTemplate(**kwargs)
         approval_template.save()
         self.unified_job_template = approval_template
-        self.save()
+        self.save(update_fields=['unified_job_template'])
         return approval_template

@@ -249,6 +273,9 @@ class WorkflowJobNode(WorkflowNodeBase):
         blank=True,  # blank denotes pre-migration job nodes
         help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'),
     )
+    instance_groups = OrderedManyToManyField(
+        'InstanceGroup', related_name='workflow_job_node_instance_groups', blank=True, editable=False, through='WorkflowJobNodeBaseInstanceGroupMembership'
+    )

     class Meta:
         app_label = 'main'

@@ -264,19 +291,6 @@ class WorkflowJobNode(WorkflowNodeBase):
     def get_absolute_url(self, request=None):
         return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)

-    def prompts_dict(self, *args, **kwargs):
-        r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs)
-        # Explanation - WFJT extra_vars still break pattern, so they are not
-        # put through prompts processing, but inventory and others are only accepted
-        # if JT prompts for it, so it goes through this mechanism
-        if self.workflow_job:
-            if self.workflow_job.inventory_id:
-                # workflow job inventory takes precedence
-                r['inventory'] = self.workflow_job.inventory
-            if self.workflow_job.char_prompts:
-                r.update(self.workflow_job.char_prompts)
-        return r
-
     def get_job_kwargs(self):
         """
         In advance of creating a new unified job as part of a workflow,

@@ -286,16 +300,38 @@ class WorkflowJobNode(WorkflowNodeBase):
         """
         # reject/accept prompted fields
         data = {}
+        wj_special_vars = {}
+        wj_special_passwords = {}
         ujt_obj = self.unified_job_template
         if ujt_obj is not None:
-            # MERGE note: move this to prompts_dict method on node when merging
-            # with the workflow inventory branch
-            prompts_data = self.prompts_dict()
-            if isinstance(ujt_obj, WorkflowJobTemplate):
-                if self.workflow_job.extra_vars:
-                    prompts_data.setdefault('extra_vars', {})
-                    prompts_data['extra_vars'].update(self.workflow_job.extra_vars_dict)
-            accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_data)
+            node_prompts_data = self.prompts_dict(for_cls=ujt_obj.__class__)
+            wj_prompts_data = self.workflow_job.prompts_dict(for_cls=ujt_obj.__class__)
+            # Explanation - special historical case
+            # WFJT extra_vars ignored JobTemplate.ask_variables_on_launch, bypassing _accept_or_ignore_job_kwargs
+            # inventory and others are only accepted if JT prompts for it with related ask_ field
+            # this is inconsistent, but maintained
+            if not isinstance(ujt_obj, WorkflowJobTemplate):
+                wj_special_vars = wj_prompts_data.pop('extra_vars', {})
+                wj_special_passwords = wj_prompts_data.pop('survey_passwords', {})
+            elif 'extra_vars' in node_prompts_data:
+                # Follow the vars combination rules
+                node_prompts_data['extra_vars'].update(wj_prompts_data.pop('extra_vars', {}))
+            elif 'survey_passwords' in node_prompts_data:
+                node_prompts_data['survey_passwords'].update(wj_prompts_data.pop('survey_passwords', {}))
+
+            # Follow the credential combination rules
+            if ('credentials' in wj_prompts_data) and ('credentials' in node_prompts_data):
+                wj_pivoted_creds = Credential.unique_dict(wj_prompts_data['credentials'])
+                node_pivoted_creds = Credential.unique_dict(node_prompts_data['credentials'])
+                node_pivoted_creds.update(wj_pivoted_creds)
+                wj_prompts_data['credentials'] = [cred for cred in node_pivoted_creds.values()]
+
+            # NOTE: no special rules for instance_groups, because they do not merge
+            # or labels, because they do not propogate WFJT-->node at all
+
+            # Combine WFJT prompts with node here, WFJT at higher level
+            node_prompts_data.update(wj_prompts_data)
+            accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**node_prompts_data)
             if errors:
                 logger.info(
                     _('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format(
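The credential rule above pivots both lists by credential type so that a workflow-level credential overrides a node credential of the same kind while unrelated kinds pass through. A sketch of the merge, where `unique_dict` is a simplified stand-in for `Credential.unique_dict`:

```python
# Sketch of the credential combination rule (plain dicts, not Credential objects).
def unique_dict(creds):
    # simplified stand-in: keep one credential per kind
    return {c['kind']: c for c in creds}


node_creds = [{'kind': 'ssh', 'name': 'node-machine'}, {'kind': 'vault', 'name': 'node-vault'}]
wj_creds = [{'kind': 'ssh', 'name': 'wj-machine'}]

pivoted = unique_dict(node_creds)
pivoted.update(unique_dict(wj_creds))  # workflow-level credential wins per kind
assert sorted(c['name'] for c in pivoted.values()) == ['node-vault', 'wj-machine']
```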
@@ -303,15 +339,6 @@ class WorkflowJobNode(WorkflowNodeBase):
                 )
             )
         data.update(accepted_fields)  # missing fields are handled in the scheduler
-        try:
-            # config saved on the workflow job itself
-            wj_config = self.workflow_job.launch_config
-        except ObjectDoesNotExist:
-            wj_config = None
-        if wj_config:
-            accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
-            accepted_fields.pop('extra_vars', None)  # merge handled with other extra_vars later
-            data.update(accepted_fields)
         # build ancestor artifacts, save them to node model for later
         aa_dict = {}
         is_root_node = True

@@ -324,15 +351,12 @@ class WorkflowJobNode(WorkflowNodeBase):
             self.ancestor_artifacts = aa_dict
             self.save(update_fields=['ancestor_artifacts'])
         # process password list
-        password_dict = {}
+        password_dict = data.get('survey_passwords', {})
         if '_ansible_no_log' in aa_dict:
             for key in aa_dict:
                 if key != '_ansible_no_log':
                     password_dict[key] = REPLACE_STR
-        if self.workflow_job.survey_passwords:
-            password_dict.update(self.workflow_job.survey_passwords)
-        if self.survey_passwords:
-            password_dict.update(self.survey_passwords)
+        password_dict.update(wj_special_passwords)
         if password_dict:
             data['survey_passwords'] = password_dict
         # process extra_vars

@@ -342,12 +366,12 @@ class WorkflowJobNode(WorkflowNodeBase):
             functional_aa_dict = copy(aa_dict)
             functional_aa_dict.pop('_ansible_no_log', None)
             extra_vars.update(functional_aa_dict)
-        if ujt_obj and isinstance(ujt_obj, JobTemplate):
-            # Workflow Job extra_vars higher precedence than ancestor artifacts
-            if self.workflow_job and self.workflow_job.extra_vars:
-                extra_vars.update(self.workflow_job.extra_vars_dict)
+        # Workflow Job extra_vars higher precedence than ancestor artifacts
+        extra_vars.update(wj_special_vars)
         if extra_vars:
             data['extra_vars'] = extra_vars

         # ensure that unified jobs created by WorkflowJobs are marked
         data['_eager_fields'] = {'launch_type': 'workflow'}
         if self.workflow_job and self.workflow_job.created_by:

@@ -373,6 +397,10 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
             )
         )
     )
+    # Workflow jobs are used for sliced jobs, and thus, must be a conduit for any JT prompts
+    instance_groups = OrderedManyToManyField(
+        'InstanceGroup', related_name='workflow_job_instance_groups', blank=True, editable=False, through='WorkflowJobInstanceGroupMembership'
+    )
     allow_simultaneous = models.BooleanField(default=False)

     extra_vars_dict = VarsDictProperty('extra_vars', True)
@@ -384,7 +412,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
     @classmethod
     def _get_unified_job_field_names(cls):
         r = set(f.name for f in WorkflowJobOptions._meta.fields) | set(
-            ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch']
+            ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch', 'job_tags', 'skip_tags']
         )
         r.remove('char_prompts')  # needed due to copying launch config to launch config
         return r

@@ -424,26 +452,29 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
 class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin, WebhookTemplateMixin):

     SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
-    FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'organization', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec']
+    FIELDS_TO_PRESERVE_AT_COPY = [
+        'labels',
+        'organization',
+        'instance_groups',
+        'workflow_job_template_nodes',
+        'credentials',
+        'survey_spec',
+        'skip_tags',
+        'job_tags',
+        'execution_environment',
+    ]

     class Meta:
         app_label = 'main'

-    ask_inventory_on_launch = AskForField(
+    notification_templates_approvals = models.ManyToManyField(
+        "NotificationTemplate",
         blank=True,
-        default=False,
+        related_name='%(class)s_notification_templates_for_approvals',
     )
-    ask_limit_on_launch = AskForField(
-        blank=True,
-        default=False,
+    admin_role = ImplicitRoleField(
+        parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'],
     )
-    ask_scm_branch_on_launch = AskForField(
-        blank=True,
-        default=False,
-    )
-    notification_templates_approvals = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_approvals')
-
-    admin_role = ImplicitRoleField(parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'])
     execute_role = ImplicitRoleField(
         parent_role=[
             'admin_role',

@@ -622,6 +653,9 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
     )
     is_sliced_job = models.BooleanField(default=False)

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     @property
     def workflow_nodes(self):
         return self.workflow_job_nodes

@@ -668,8 +702,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
         )
         return result

-    @property
-    def task_impact(self):
+    def _get_task_impact(self):
         return 0

     def get_ancestor_workflows(self):
@@ -710,6 +743,25 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
                 artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set))
         return artifacts

+    def prompts_dict(self, *args, **kwargs):
+        if self.job_template_id:
+            # HACK: Exception for sliced jobs here, this is bad
+            # when sliced jobs were introduced, workflows did not have all the prompted JT fields
+            # so to support prompting with slicing, we abused the workflow job launch config
+            # these would be more properly saved on the workflow job, but it gets the wrong fields now
+            try:
+                wj_config = self.launch_config
+                r = wj_config.prompts_dict(*args, **kwargs)
+            except ObjectDoesNotExist:
+                r = {}
+        else:
+            r = super().prompts_dict(*args, **kwargs)
+            # Workflow labels and job labels are treated separately
+            # that means that they do not propogate from WFJT / workflow job to jobs in workflow
+            r.pop('labels', None)
+
+        return r
+
     def get_notification_templates(self):
         return self.workflow_job_template.notification_templates

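The `prompts_dict` override above encodes the sliced-job exception: a workflow job standing in for a sliced job template reads its prompts from the saved launch config, while an ordinary workflow job uses its own fields but drops labels, which never propagate into spawned jobs. A plain-dict sketch of that control flow (illustrative keys, not the model API):

```python
# Sketch of the sliced-job prompts_dict branching.
def prompts_dict(workflow_job):
    if workflow_job.get('job_template_id'):
        # sliced workflow job: read prompts from the saved launch config
        r = dict(workflow_job.get('launch_config') or {})
    else:
        r = dict(workflow_job.get('own_prompts') or {})
        # workflow labels never propagate into spawned jobs
        r.pop('labels', None)
    return r


sliced = {'job_template_id': 42, 'launch_config': {'limit': 'webservers'}}
regular = {'own_prompts': {'limit': 'db', 'labels': ['x']}}
assert prompts_dict(sliced) == {'limit': 'webservers'}
assert prompts_dict(regular) == {'limit': 'db'}
```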
@@ -720,11 +772,10 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
     def preferred_instance_groups(self):
         return []

-    @property
-    def actually_running(self):
+    def cancel_dispatcher_process(self):
         # WorkflowJobs don't _actually_ run anything in the dispatcher, so
         # there's no point in asking the dispatcher if it knows about this task
-        return self.status == 'running'
+        return True


 class WorkflowApprovalTemplate(UnifiedJobTemplate, RelatedJobsMixin):

@@ -783,6 +834,12 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         default=0,
         help_text=_("The amount of time (in seconds) before the approval node expires and fails."),
     )
+    expires = models.DateTimeField(
+        default=None,
+        null=True,
+        editable=False,
+        help_text=_("The time this approval will expire. This is the created time plus timeout, used for filtering."),
+    )
     timed_out = models.BooleanField(default=False, help_text=_("Shows when an approval node (with a timeout assigned to it) has timed out."))
     approved_or_denied_by = models.ForeignKey(
         'auth.User',

@@ -793,6 +850,9 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         on_delete=models.SET_NULL,
     )

+    def _set_default_dependencies_processed(self):
+        self.dependencies_processed = True
+
     @classmethod
     def _get_unified_job_template_class(cls):
         return WorkflowApprovalTemplate
@@ -810,13 +870,32 @@
     def _get_parent_field_name(self):
         return 'workflow_approval_template'

+    def save(self, *args, **kwargs):
+        update_fields = list(kwargs.get('update_fields', []))
+        if self.timeout != 0 and ((not self.pk) or (not update_fields) or ('timeout' in update_fields)):
+            if not self.created:  # on creation, created will be set by parent class, so we fudge it here
+                created = now()
+            else:
+                created = self.created
+            new_expires = created + timedelta(seconds=self.timeout)
+            if new_expires != self.expires:
+                self.expires = new_expires
+                if update_fields and 'expires' not in update_fields:
+                    update_fields.append('expires')
+        elif self.timeout == 0 and ((not update_fields) or ('timeout' in update_fields)):
+            if self.expires:
+                self.expires = None
+                if update_fields and 'expires' not in update_fields:
+                    update_fields.append('expires')
+        super(WorkflowApproval, self).save(*args, **kwargs)
+
     def approve(self, request=None):
         self.status = 'successful'
         self.approved_or_denied_by = get_current_user()
         self.save()
         self.send_approval_notification('approved')
         self.websocket_emit_status(self.status)
-        schedule_task_manager()
+        ScheduleWorkflowManager().schedule()
         return reverse('api:workflow_approval_approve', kwargs={'pk': self.pk}, request=request)

     def deny(self, request=None):

@@ -825,7 +904,7 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         self.save()
         self.send_approval_notification('denied')
         self.websocket_emit_status(self.status)
-        schedule_task_manager()
+        ScheduleWorkflowManager().schedule()
         return reverse('api:workflow_approval_deny', kwargs={'pk': self.pk}, request=request)

     def signal_start(self, **kwargs):
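The save override above boils down to `expires = created + timeout`, recomputed whenever the timeout changes and cleared when the timeout is 0. A worked example of the arithmetic:

```python
# Worked example of the expires bookkeeping for workflow approvals.
from datetime import datetime, timedelta


def compute_expires(created, timeout_seconds):
    if timeout_seconds == 0:
        return None  # no timeout: the approval never expires
    return created + timedelta(seconds=timeout_seconds)


created = datetime(2022, 8, 1, 12, 0, 0)
assert compute_expires(created, 3600) == datetime(2022, 8, 1, 13, 0, 0)
assert compute_expires(created, 0) is None
```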
@@ -1,6 +1,6 @@
 # Copyright (c) 2017 Ansible, Inc.
 #

-from .task_manager import TaskManager
+from .task_manager import TaskManager, DependencyManager, WorkflowManager

-__all__ = ['TaskManager']
+__all__ = ['TaskManager', 'DependencyManager', 'WorkflowManager']

@@ -7,6 +7,11 @@ from awx.main.models import (
     WorkflowJob,
 )

+import logging
+
+
+logger = logging.getLogger('awx.main.scheduler.dependency_graph')
+

 class DependencyGraph(object):
     PROJECT_UPDATES = 'project_updates'

@@ -36,6 +41,9 @@ class DependencyGraph(object):
         self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {}

     def mark_if_no_key(self, job_type, id, job):
+        if id is None:
+            logger.warning(f'Null dependency graph key from {job}, could be integrity error or bug, ignoring')
+            return
         # only mark first occurrence of a task. If 10 of JobA are launched
         # (concurrent disabled), the dependency graph should return that jobs
         # 2 through 10 are blocked by job1
@@ -66,7 +74,10 @@ class DependencyGraph(object):
         self.mark_if_no_key(self.JOB_TEMPLATE_JOBS, job.job_template_id, job)

     def mark_workflow_job(self, job):
-        self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)
+        if job.workflow_job_template_id:
+            self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)
+        elif job.unified_job_template_id:  # for sliced jobs
+            self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.unified_job_template_id, job)

     def project_update_blocked_by(self, job):
         return self.get_item(self.PROJECT_UPDATES, job.project_id)

@@ -85,7 +96,13 @@

     def workflow_job_blocked_by(self, job):
         if job.allow_simultaneous is False:
-            return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
+            if job.workflow_job_template_id:
+                return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
+            elif job.unified_job_template_id:
+                # Sliced jobs can be either Job or WorkflowJob type, and either should block a sliced WorkflowJob
+                return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.unified_job_template_id) or self.get_item(
+                    self.JOB_TEMPLATE_JOBS, job.unified_job_template_id
+                )
         return None

     def system_job_blocked_by(self, job):
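With slicing, a workflow job may carry only a `unified_job_template_id`, so the blocked-by lookup above has to consult both job buckets. A plain-dict sketch of the lookup (illustrative structure, not the real `DependencyGraph` internals):

```python
# Sketch of workflow_job_blocked_by for sliced workflow jobs.
def workflow_job_blocked_by(graph, job):
    if job.get('allow_simultaneous'):
        return None
    if job.get('workflow_job_template_id'):
        return graph['wfjt_jobs'].get(job['workflow_job_template_id'])
    if job.get('unified_job_template_id'):
        ujt = job['unified_job_template_id']
        # a sliced workflow job can be blocked by either kind of sibling
        return graph['wfjt_jobs'].get(ujt) or graph['jt_jobs'].get(ujt)
    return None


graph = {'wfjt_jobs': {}, 'jt_jobs': {7: 'job-101'}}
sliced = {'unified_job_template_id': 7, 'allow_simultaneous': False}
assert workflow_job_blocked_by(graph, sliced) == 'job-101'
```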
@@ -11,31 +11,35 @@ import sys
 import signal

 # Django
-from django.db import transaction, connection
+from django.db import transaction
 from django.utils.translation import gettext_lazy as _, gettext_noop
 from django.utils.timezone import now as tz_now
 from django.conf import settings
+from django.contrib.contenttypes.models import ContentType

 # AWX
 from awx.main.dispatch.reaper import reap_job
 from awx.main.models import (
-    AdHocCommand,
     Instance,
     InventorySource,
     InventoryUpdate,
     Job,
     Project,
     ProjectUpdate,
-    SystemJob,
     UnifiedJob,
     WorkflowApproval,
     WorkflowJob,
+    WorkflowJobNode,
     WorkflowJobTemplate,
 )
 from awx.main.scheduler.dag_workflow import WorkflowDAG
 from awx.main.utils.pglock import advisory_lock
-from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
-from awx.main.utils.common import create_partition
+from awx.main.utils import (
+    get_type_for_model,
+    ScheduleTaskManager,
+    ScheduleWorkflowManager,
+)
+from awx.main.utils.common import task_manager_bulk_reschedule
 from awx.main.signals import disable_activity_stream
 from awx.main.constants import ACTIVE_STATES
 from awx.main.scheduler.dependency_graph import DependencyGraph
@@ -53,167 +57,101 @@ def timeit(func):
|
|||||||
t_now = time.perf_counter()
|
t_now = time.perf_counter()
|
||||||
result = func(*args, **kwargs)
|
result = func(*args, **kwargs)
|
||||||
dur = time.perf_counter() - t_now
|
dur = time.perf_counter() - t_now
|
||||||
args[0].subsystem_metrics.inc("task_manager_" + func.__name__ + "_seconds", dur)
|
args[0].subsystem_metrics.inc(f"{args[0].prefix}_{func.__name__}_seconds", dur)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
return inner
|
return inner
|
||||||
|
|
||||||
|
|
||||||
class TaskManager:
|
class TaskBase:
|
||||||
def __init__(self):
|
def __init__(self, prefix=""):
|
||||||
"""
|
self.prefix = prefix
|
||||||
Do NOT put database queries or other potentially expensive operations
|
|
||||||
in the task manager init. The task manager object is created every time a
|
|
||||||
job is created, transitions state, and every 30 seconds on each tower node.
|
|
||||||
More often then not, the object is destroyed quickly because the NOOP case is hit.
|
|
||||||
|
|
||||||
The NOOP case is short-circuit logic. If the task manager realizes that another instance
|
|
||||||
of the task manager is already running, then it short-circuits and decides not to run.
|
|
||||||
"""
|
|
||||||
# start task limit indicates how many pending jobs can be started on this
|
|
||||||
# .schedule() run. Starting jobs is expensive, and there is code in place to reap
|
|
||||||
# the task manager after 5 minutes. At scale, the task manager can easily take more than
|
|
||||||
# 5 minutes to start pending jobs. If this limit is reached, pending jobs
|
|
||||||
# will no longer be started and will be started on the next task manager cycle.
|
|
||||||
self.start_task_limit = settings.START_TASK_LIMIT
|
|
||||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
|
||||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
|
||||||
# initialize each metric to 0 and force metric_has_changed to true. This
|
# initialize each metric to 0 and force metric_has_changed to true. This
|
||||||
# ensures each task manager metric will be overridden when pipe_execute
|
# ensures each task manager metric will be overridden when pipe_execute
|
||||||
# is called later.
|
# is called later.
|
||||||
|
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||||
|
self.start_time = time.time()
|
||||||
|
self.start_task_limit = settings.START_TASK_LIMIT
|
||||||
for m in self.subsystem_metrics.METRICS:
|
for m in self.subsystem_metrics.METRICS:
|
||||||
if m.startswith("task_manager"):
|
if m.startswith(self.prefix):
|
||||||
self.subsystem_metrics.set(m, 0)
|
self.subsystem_metrics.set(m, 0)
|
||||||
|
|
||||||
def after_lock_init(self, all_sorted_tasks):
|
def timed_out(self):
|
||||||
"""
|
"""Return True/False if we have met or exceeded the timeout for the task manager."""
|
||||||
Init AFTER we know this instance of the task manager will run because the lock is acquired.
|
elapsed = time.time() - self.start_time
|
||||||
"""
|
if elapsed >= settings.TASK_MANAGER_TIMEOUT:
|
||||||
self.dependency_graph = DependencyGraph()
|
logger.warning(f"{self.prefix} manager has run for {elapsed} which is greater than TASK_MANAGER_TIMEOUT of {settings.TASK_MANAGER_TIMEOUT}.")
|
||||||
self.instances = TaskManagerInstances(all_sorted_tasks)
|
return True
|
||||||
self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
|
return False
|
||||||
self.controlplane_ig = self.instance_groups.controlplane_ig
|
|
||||||
|
|
||||||
def job_blocked_by(self, task):
|
|
||||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
|
||||||
# in the old task manager this was handled as a method on each task object outside of the graph and
|
|
||||||
# probably has the side effect of cutting down *a lot* of the logic from this task manager class
|
|
||||||
blocked_by = self.dependency_graph.task_blocked_by(task)
|
|
||||||
if blocked_by:
|
|
||||||
return blocked_by
|
|
||||||
|
|
||||||
for dep in task.dependent_jobs.all():
|
|
||||||
if dep.status in ACTIVE_STATES:
|
|
||||||
return dep
|
|
||||||
# if we detect a failed or error dependency, go ahead and fail this
|
|
||||||
# task. The errback on the dependency takes some time to trigger,
|
|
||||||
# and we don't want the task to enter running state if its
|
|
||||||
# dependency has failed or errored.
|
|
||||||
elif dep.status in ("error", "failed"):
|
|
||||||
task.status = 'failed'
|
|
||||||
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
|
||||||
get_type_for_model(type(dep)),
|
|
||||||
dep.name,
|
|
||||||
dep.id,
|
|
||||||
)
|
|
||||||
task.save(update_fields=['status', 'job_explanation'])
|
|
||||||
task.websocket_emit_status('failed')
|
|
||||||
return dep
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
@timeit
|
@timeit
|
||||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
def get_tasks(self, filter_args):
|
||||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
wf_approval_ctype_id = ContentType.objects.get_for_model(WorkflowApproval).id
|
||||||
inventory_updates_qs = (
|
qs = (
|
||||||
InventoryUpdate.objects.filter(status__in=status_list).exclude(source='file').prefetch_related('inventory_source', 'instance_group')
|
UnifiedJob.objects.filter(**filter_args)
|
||||||
|
.exclude(launch_type='sync')
|
||||||
|
.exclude(polymorphic_ctype_id=wf_approval_ctype_id)
|
||||||
|
.order_by('created')
|
||||||
|
.prefetch_related('dependent_jobs')
|
||||||
)
|
)
|
||||||
inventory_updates = [i for i in inventory_updates_qs]
|
self.all_tasks = [t for t in qs]
|
||||||
# Notice the job_type='check': we want to prevent implicit project updates from blocking our jobs.
|
|
||||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list, job_type='check').prefetch_related('instance_group')]
|
|
||||||
system_jobs = [s for s in SystemJob.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
|
||||||
ad_hoc_commands = [a for a in AdHocCommand.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
|
||||||
workflow_jobs = [w for w in WorkflowJob.objects.filter(status__in=status_list)]
|
|
||||||
all_tasks = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands + workflow_jobs, key=lambda task: task.created)
|
|
||||||
return all_tasks
|
|
||||||
|
|
||||||
-    def get_running_workflow_jobs(self):
-        graph_workflow_jobs = [wf for wf in WorkflowJob.objects.filter(status='running')]
-        return graph_workflow_jobs
-
-    def get_inventory_source_tasks(self, all_sorted_tasks):
-        inventory_ids = set()
-        for task in all_sorted_tasks:
-            if isinstance(task, Job):
-                inventory_ids.add(task.inventory_id)
-        return [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
+    def record_aggregate_metrics(self, *args):
+        if not settings.IS_TESTING():
+            # increment task_manager_schedule_calls regardless if the other
+            # metrics are recorded
+            s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
+            # Only record metrics if the last time recording was more
+            # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
+            # Prevents a short-duration task manager that runs directly after a
+            # long task manager to override useful metrics.
+            current_time = time.time()
+            time_last_recorded = current_time - self.subsystem_metrics.decode(f"{self.prefix}_recorded_timestamp")
+            if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
+                logger.debug(f"recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
+                self.subsystem_metrics.set(f"{self.prefix}_recorded_timestamp", current_time)
+                self.subsystem_metrics.pipe_execute()
+            else:
+                logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
+
+    def record_aggregate_metrics_and_exit(self, *args):
+        self.record_aggregate_metrics()
+        sys.exit(1)
+
+    def schedule(self):
+        # Lock
+        with task_manager_bulk_reschedule():
+            with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
+                with transaction.atomic():
+                    if acquired is False:
+                        logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
+                        return
+                    logger.debug(f"Starting {self.prefix} Scheduler")
+                    # if sigterm due to timeout, still record metrics
+                    signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
+                    self._schedule()
+                    commit_start = time.time()
+
+            if self.prefix == "task_manager":
+                self.subsystem_metrics.set(f"{self.prefix}_commit_seconds", time.time() - commit_start)
+            self.record_aggregate_metrics()
+            logger.debug(f"Finishing {self.prefix} Scheduler")
+
+
+class WorkflowManager(TaskBase):
+    def __init__(self):
+        super().__init__(prefix="workflow_manager")
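
Every manager now funnels through the same `schedule()` wrapper, so only one copy per prefix runs cluster-wide. A minimal sketch of the short-circuit, assuming the `advisory_lock` context manager from django-pglocks (the `run_once` helper itself is hypothetical):

    from django_pglocks import advisory_lock

    def run_once(prefix, body):
        # wait=False returns immediately instead of blocking; acquired is False
        # when another process already holds the lock, i.e. the NOOP case
        with advisory_lock(f"{prefix}_lock", wait=False) as acquired:
            if acquired is False:
                return False  # skip this cycle; the lock holder does the work
            body()
            return True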
     @timeit
-    def spawn_workflow_graph_jobs(self, workflow_jobs):
-        for workflow_job in workflow_jobs:
-            if workflow_job.cancel_flag:
-                logger.debug('Not spawning jobs for %s because it is pending cancelation.', workflow_job.log_format)
-                continue
-            dag = WorkflowDAG(workflow_job)
-            spawn_nodes = dag.bfs_nodes_to_run()
-            if spawn_nodes:
-                logger.debug('Spawning jobs for %s', workflow_job.log_format)
-            else:
-                logger.debug('No nodes to spawn for %s', workflow_job.log_format)
-            for spawn_node in spawn_nodes:
-                if spawn_node.unified_job_template is None:
-                    continue
-                kv = spawn_node.get_job_kwargs()
-                job = spawn_node.unified_job_template.create_unified_job(**kv)
-                spawn_node.job = job
-                spawn_node.save()
-                logger.debug('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
-                can_start = True
-                if isinstance(spawn_node.unified_job_template, WorkflowJobTemplate):
-                    workflow_ancestors = job.get_ancestor_workflows()
-                    if spawn_node.unified_job_template in set(workflow_ancestors):
-                        can_start = False
-                        logger.info(
-                            'Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
-                                job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
-                            )
-                        )
-                        display_list = [spawn_node.unified_job_template] + workflow_ancestors
-                        job.job_explanation = gettext_noop(
-                            "Workflow Job spawned from workflow could not start because it " "would result in recursion (spawn order, most recent first: {})"
-                        ).format(', '.join(['<{}>'.format(tmp) for tmp in display_list]))
-                    else:
-                        logger.debug(
-                            'Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
-                                job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
-                            )
-                        )
-                if not job._resources_sufficient_for_launch():
-                    can_start = False
-                    job.job_explanation = gettext_noop(
-                        "Job spawned from workflow could not start because it " "was missing a related resource such as project or inventory"
-                    )
-                if can_start:
-                    if workflow_job.start_args:
-                        start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
-                    else:
-                        start_args = {}
-                    can_start = job.signal_start(**start_args)
-                    if not can_start:
-                        job.job_explanation = gettext_noop(
-                            "Job spawned from workflow could not start because it " "was not in the right state or required manual credentials"
-                        )
-                if not can_start:
-                    job.status = 'failed'
-                    job.save(update_fields=['status', 'job_explanation'])
-                    job.websocket_emit_status('failed')
-
-            # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
-            # emit_websocket_notification('/socket.io/jobs', '', dict(id=))
-
-    def process_finished_workflow_jobs(self, workflow_jobs):
+    def spawn_workflow_graph_jobs(self):
         result = []
-        for workflow_job in workflow_jobs:
+        for workflow_job in self.all_tasks:
+            if self.timed_out():
+                logger.warning("Workflow manager has reached time out while processing running workflows, exiting loop early")
+                ScheduleWorkflowManager().schedule()
+                # Do not process any more workflow jobs. Stop here.
+                # Maybe we should schedule another WorkflowManager run
+                break
             dag = WorkflowDAG(workflow_job)
             status_changed = False
             if workflow_job.cancel_flag:
@@ -228,99 +166,106 @@ class TaskManager:
                 status_changed = True
             else:
                 workflow_nodes = dag.mark_dnr_nodes()
-                for n in workflow_nodes:
-                    n.save(update_fields=['do_not_run'])
-            is_done = dag.is_workflow_done()
-            if not is_done:
-                continue
-            has_failed, reason = dag.has_workflow_failed()
-            logger.debug('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
-            result.append(workflow_job.id)
-            new_status = 'failed' if has_failed else 'successful'
-            logger.debug("Transitioning {} to {} status.".format(workflow_job.log_format, new_status))
-            update_fields = ['status', 'start_args']
-            workflow_job.status = new_status
-            if reason:
-                logger.info(f'Workflow job {workflow_job.id} failed due to reason: {reason}')
-                workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
-                update_fields.append('job_explanation')
-            workflow_job.start_args = ''  # blank field to remove encrypted passwords
-            workflow_job.save(update_fields=update_fields)
-            status_changed = True
+                WorkflowJobNode.objects.bulk_update(workflow_nodes, ['do_not_run'])
+                # If workflow is now done, we do special things to mark it as done.
+                is_done = dag.is_workflow_done()
+                if is_done:
+                    has_failed, reason = dag.has_workflow_failed()
+                    logger.debug('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
+                    result.append(workflow_job.id)
+                    new_status = 'failed' if has_failed else 'successful'
+                    logger.debug("Transitioning {} to {} status.".format(workflow_job.log_format, new_status))
+                    update_fields = ['status', 'start_args']
+                    workflow_job.status = new_status
+                    if reason:
+                        logger.info(f'Workflow job {workflow_job.id} failed due to reason: {reason}')
+                        workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
+                        update_fields.append('job_explanation')
+                    workflow_job.start_args = ''  # blank field to remove encrypted passwords
+                    workflow_job.save(update_fields=update_fields)
+                    status_changed = True

             if status_changed:
                 if workflow_job.spawned_by_workflow:
-                    schedule_task_manager()
+                    ScheduleWorkflowManager().schedule()
                 workflow_job.websocket_emit_status(workflow_job.status)
                 # Operations whose queries rely on modifications made during the atomic scheduling session
                 workflow_job.send_notification_templates('succeeded' if workflow_job.status == 'successful' else 'failed')

+            if workflow_job.status == 'running':
+                spawn_nodes = dag.bfs_nodes_to_run()
+                if spawn_nodes:
+                    logger.debug('Spawning jobs for %s', workflow_job.log_format)
+                else:
+                    logger.debug('No nodes to spawn for %s', workflow_job.log_format)
+                for spawn_node in spawn_nodes:
+                    if spawn_node.unified_job_template is None:
+                        continue
+                    kv = spawn_node.get_job_kwargs()
+                    job = spawn_node.unified_job_template.create_unified_job(**kv)
+                    spawn_node.job = job
+                    spawn_node.save()
+                    logger.debug('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
+                    can_start = True
+                    if isinstance(spawn_node.unified_job_template, WorkflowJobTemplate):
+                        workflow_ancestors = job.get_ancestor_workflows()
+                        if spawn_node.unified_job_template in set(workflow_ancestors):
+                            can_start = False
+                            logger.info(
+                                'Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
+                                    job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
+                                )
+                            )
+                            display_list = [spawn_node.unified_job_template] + workflow_ancestors
+                            job.job_explanation = gettext_noop(
+                                "Workflow Job spawned from workflow could not start because it "
+                                "would result in recursion (spawn order, most recent first: {})"
+                            ).format(', '.join('<{}>'.format(tmp) for tmp in display_list))
+                        else:
+                            logger.debug(
+                                'Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
+                                    job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]
+                                )
+                            )
+                    if not job._resources_sufficient_for_launch():
+                        can_start = False
+                        job.job_explanation = gettext_noop(
+                            "Job spawned from workflow could not start because it was missing a related resource such as project or inventory"
+                        )
+                    if can_start:
+                        if workflow_job.start_args:
+                            start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
+                        else:
+                            start_args = {}
+                        can_start = job.signal_start(**start_args)
+                        if not can_start:
+                            job.job_explanation = gettext_noop(
+                                "Job spawned from workflow could not start because it was not in the right state or required manual credentials"
+                            )
+                    if not can_start:
+                        job.status = 'failed'
+                        job.save(update_fields=['status', 'job_explanation'])
+                        job.websocket_emit_status('failed')
+
+                # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
+                # emit_websocket_notification('/socket.io/jobs', '', dict(id=))
+
         return result
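
Marking do-not-run nodes moves from one `save()` per node to a single bulk statement via Django's `bulk_update`. A small sketch of the trade-off, using the model name shown in the diff:

    # before: one UPDATE round trip per node, N statements for N nodes
    for n in workflow_nodes:
        n.save(update_fields=['do_not_run'])

    # after: roughly one bulk UPDATE covering every node at once
    WorkflowJobNode.objects.bulk_update(workflow_nodes, ['do_not_run'])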
     @timeit
-    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
-        self.subsystem_metrics.inc("task_manager_tasks_started", 1)
-        self.start_task_limit -= 1
-        if self.start_task_limit == 0:
-            # schedule another run immediately after this task manager
-            schedule_task_manager()
-        from awx.main.tasks.system import handle_work_error, handle_work_success
-
-        dependent_tasks = dependent_tasks or []
-
-        task_actual = {
-            'type': get_type_for_model(type(task)),
-            'id': task.id,
-        }
-        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
-
-        task.status = 'waiting'
-
-        (start_status, opts) = task.pre_start()
-        if not start_status:
-            task.status = 'failed'
-            if task.job_explanation:
-                task.job_explanation += ' '
-            task.job_explanation += 'Task failed pre-start check.'
-            task.save()
-            # TODO: run error handler to fail sub-tasks and send notifications
-        else:
-            if type(task) is WorkflowJob:
-                task.status = 'running'
-                task.send_notification_templates('running')
-                logger.debug('Transitioning %s to running status.', task.log_format)
-                schedule_task_manager()
-            # at this point we already have control/execution nodes selected for the following cases
-            else:
-                task.instance_group = instance_group
-                execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
-                logger.debug(
-                    f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
-                )
-            with disable_activity_stream():
-                task.celery_task_id = str(uuid.uuid4())
-                task.save()
-                task.log_lifecycle("waiting")
-
-        def post_commit():
-            if task.status != 'failed' and type(task) is not WorkflowJob:
-                # Before task is dispatched, ensure that job_event partitions exist
-                create_partition(task.event_class._meta.db_table, start=task.created)
-                task_cls = task._get_task_class()
-                task_cls.apply_async(
-                    [task.pk],
-                    opts,
-                    queue=task.get_queue_name(),
-                    uuid=task.celery_task_id,
-                    callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
-                    errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
-                )
-
-        task.websocket_emit_status(task.status)  # adds to on_commit
-        connection.on_commit(post_commit)
+    def get_tasks(self, filter_args):
+        self.all_tasks = [wf for wf in WorkflowJob.objects.filter(**filter_args)]

     @timeit
-    def process_running_tasks(self, running_tasks):
-        for task in running_tasks:
-            self.dependency_graph.add_job(task)
+    def _schedule(self):
+        self.get_tasks(dict(status__in=["running"], dependencies_processed=True))
+        if len(self.all_tasks) > 0:
+            self.spawn_workflow_graph_jobs()
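
Taken together with the DependencyManager and TaskManager hunks below, each manager now selects only the rows it owns through `get_tasks(filter_args)`. A hedged summary of the three filters as they appear in this diff:

    # WorkflowManager: running workflows whose dependencies are already handled
    workflow_filter = dict(status__in=["running"], dependencies_processed=True)

    # DependencyManager: pending jobs that still need dependencies generated
    dependency_filter = dict(status__in=["pending"], dependencies_processed=False)

    # TaskManager: everything ready to be blocked, started, or tracked
    task_filter = dict(status__in=["pending", "waiting", "running"], dependencies_processed=True)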
|
+
+class DependencyManager(TaskBase):
+    def __init__(self):
+        super().__init__(prefix="dependency_manager")

     def create_project_update(self, task, project_id=None):
         if project_id is None:
@@ -341,14 +286,20 @@ class TaskManager:
             inventory_task.status = 'pending'
             inventory_task.save()
             logger.debug('Spawned {} as dependency of {}'.format(inventory_task.log_format, task.log_format))
-            # inventory_sources = self.get_inventory_source_tasks([task])
-            # self.process_inventory_sources(inventory_sources)
             return inventory_task

     def add_dependencies(self, task, dependencies):
         with disable_activity_stream():
             task.dependent_jobs.add(*dependencies)

+    def get_inventory_source_tasks(self):
+        inventory_ids = set()
+        for task in self.all_tasks:
+            if isinstance(task, Job):
+                inventory_ids.add(task.inventory_id)
+        self.all_inventory_sources = [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
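
The new `get_inventory_source_tasks` gathers, in a single query, every `update_on_launch` source for the inventories referenced by the pending jobs. A standalone sketch of the same two-step pattern (helper name hypothetical):

    def sources_for(tasks):
        # collect the distinct inventories the pending jobs touch ...
        inventory_ids = {t.inventory_id for t in tasks if isinstance(t, Job)}
        # ... then fetch all auto-updating sources for them in one query
        return list(InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True))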
     def get_latest_inventory_update(self, inventory_source):
         latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
         if not latest_inventory_update.exists():
@@ -481,16 +432,167 @@ class TaskManager:
         return created_dependencies

+    def process_tasks(self):
+        deps = self.generate_dependencies(self.all_tasks)
+        self.generate_dependencies(deps)
+        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(deps))
+
+    @timeit
+    def _schedule(self):
+        self.get_tasks(dict(status__in=["pending"], dependencies_processed=False))
+
+        if len(self.all_tasks) > 0:
+            self.get_inventory_source_tasks()
+            self.process_tasks()
+            ScheduleTaskManager().schedule()
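
`process_tasks` runs dependency generation twice because a freshly created dependency may itself need a dependency (for example, an inventory update spawned for a job may in turn require a project update). A sketch of why the second pass exists:

    def process(manager):
        deps = manager.generate_dependencies(manager.all_tasks)  # deps of the pending jobs
        manager.generate_dependencies(deps)                      # deps of those deps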
+
+class TaskManager(TaskBase):
+    def __init__(self):
+        """
+        Do NOT put database queries or other potentially expensive operations
+        in the task manager init. The task manager object is created every time a
+        job is created, transitions state, and every 30 seconds on each tower node.
+        More often then not, the object is destroyed quickly because the NOOP case is hit.
+
+        The NOOP case is short-circuit logic. If the task manager realizes that another instance
+        of the task manager is already running, then it short-circuits and decides not to run.
+        """
+        # start task limit indicates how many pending jobs can be started on this
+        # .schedule() run. Starting jobs is expensive, and there is code in place to reap
+        # the task manager after 5 minutes. At scale, the task manager can easily take more than
+        # 5 minutes to start pending jobs. If this limit is reached, pending jobs
+        # will no longer be started and will be started on the next task manager cycle.
+        self.time_delta_job_explanation = timedelta(seconds=30)
+        super().__init__(prefix="task_manager")
+
+    def after_lock_init(self):
+        """
+        Init AFTER we know this instance of the task manager will run because the lock is acquired.
+        """
+        self.dependency_graph = DependencyGraph()
+        self.instances = TaskManagerInstances(self.all_tasks)
+        self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
+        self.controlplane_ig = self.instance_groups.controlplane_ig
+
+    def job_blocked_by(self, task):
+        # TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
+        # in the old task manager this was handled as a method on each task object outside of the graph and
+        # probably has the side effect of cutting down *a lot* of the logic from this task manager class
+        blocked_by = self.dependency_graph.task_blocked_by(task)
+        if blocked_by:
+            return blocked_by
+
+        for dep in task.dependent_jobs.all():
+            if dep.status in ACTIVE_STATES:
+                return dep
+            # if we detect a failed or error dependency, go ahead and fail this
+            # task. The errback on the dependency takes some time to trigger,
+            # and we don't want the task to enter running state if its
+            # dependency has failed or errored.
+            elif dep.status in ("error", "failed"):
+                task.status = 'failed'
+                task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
+                    get_type_for_model(type(dep)),
+                    dep.name,
+                    dep.id,
+                )
+                task.save(update_fields=['status', 'job_explanation'])
+                task.websocket_emit_status('failed')
+                return dep
+
+        return None
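
`job_blocked_by` layers two checks: the in-memory dependency graph built this cycle, then the task's persisted `dependent_jobs`. A condensed sketch of the decision, with a hypothetical helper name:

    def is_startable(manager, task):
        # blocked if the graph says so, or if any persisted dependency is
        # still active; failed/errored dependencies fail the task instead
        return manager.job_blocked_by(task) is None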
|
+    @timeit
+    def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
+        self.dependency_graph.add_job(task)
+        self.subsystem_metrics.inc(f"{self.prefix}_tasks_started", 1)
+        self.start_task_limit -= 1
+        if self.start_task_limit == 0:
+            # schedule another run immediately after this task manager
+            ScheduleTaskManager().schedule()
+        from awx.main.tasks.system import handle_work_error, handle_work_success
+
+        # update capacity for control node and execution node
+        if task.controller_node:
+            self.instances[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
+        if task.execution_node:
+            self.instances[task.execution_node].consume_capacity(task.task_impact)
+
+        dependent_tasks = dependent_tasks or []
+
+        task_actual = {
+            'type': get_type_for_model(type(task)),
+            'id': task.id,
+        }
+        dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
+
+        task.status = 'waiting'
+
+        (start_status, opts) = task.pre_start()
+        if not start_status:
+            task.status = 'failed'
+            if task.job_explanation:
+                task.job_explanation += ' '
+            task.job_explanation += 'Task failed pre-start check.'
+            task.save()
+            # TODO: run error handler to fail sub-tasks and send notifications
+        else:
+            if type(task) is WorkflowJob:
+                task.status = 'running'
+                task.send_notification_templates('running')
+                logger.debug('Transitioning %s to running status.', task.log_format)
+                # Call this to ensure Workflow nodes get spawned in timely manner
+                ScheduleWorkflowManager().schedule()
+            # at this point we already have control/execution nodes selected for the following cases
+            else:
+                task.instance_group = instance_group
+                execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
+                logger.debug(
+                    f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
+                )
+            with disable_activity_stream():
+                task.celery_task_id = str(uuid.uuid4())
+                task.save()
+                task.log_lifecycle("waiting")
+
+        # apply_async does a NOTIFY to the channel dispatcher is listening to
+        # postgres will treat this as part of the transaction, which is what we want
+        if task.status != 'failed' and type(task) is not WorkflowJob:
+            task_cls = task._get_task_class()
+            task_cls.apply_async(
+                [task.pk],
+                opts,
+                queue=task.get_queue_name(),
+                uuid=task.celery_task_id,
+                callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
+                errbacks=[{'task': handle_work_error.name, 'args': [task.celery_task_id], 'kwargs': {'subtasks': [task_actual] + dependencies}}],
+            )
+
+        # In exception cases, like a job failing pre-start checks, we send the websocket status message
+        # for jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
+        if task.status != 'waiting':
+            task.websocket_emit_status(task.status)  # adds to on_commit
+
+    @timeit
+    def process_running_tasks(self, running_tasks):
+        for task in running_tasks:
+            if type(task) is WorkflowJob:
+                ScheduleWorkflowManager().schedule()
+            self.dependency_graph.add_job(task)
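
`start_task` now charges capacity at start time: the controller pays a fixed per-job overhead while the execution node pays the task's own impact, and a hybrid node can be charged both at once. A sketch of the accounting, with `control_impact` standing in for `settings.AWX_CONTROL_NODE_TASK_IMPACT` (helper name hypothetical):

    def charge_capacity(instances, task, control_impact):
        # controller overhead is flat per job; execution cost scales with the task
        if task.controller_node:
            instances[task.controller_node].consume_capacity(control_impact)
        if task.execution_node:
            instances[task.execution_node].consume_capacity(task.task_impact)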
     @timeit
     def process_pending_tasks(self, pending_tasks):
-        running_workflow_templates = {wf.unified_job_template_id for wf in self.get_running_workflow_jobs()}
         tasks_to_update_job_explanation = []
         for task in pending_tasks:
             if self.start_task_limit <= 0:
                 break
+            if self.timed_out():
+                logger.warning("Task manager has reached time out while processing pending jobs, exiting loop early")
+                break
             blocked_by = self.job_blocked_by(task)
             if blocked_by:
-                self.subsystem_metrics.inc("task_manager_tasks_blocked", 1)
+                self.subsystem_metrics.inc(f"{self.prefix}_tasks_blocked", 1)
                 task.log_lifecycle("blocked", blocked_by=blocked_by)
                 job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
                 if task.job_explanation != job_explanation:
@@ -499,19 +601,14 @@ class TaskManager:
                     tasks_to_update_job_explanation.append(task)
                 continue

-            found_acceptable_queue = False
-            preferred_instance_groups = task.preferred_instance_groups
-
             if isinstance(task, WorkflowJob):
-                if task.unified_job_template_id in running_workflow_templates:
-                    if not task.allow_simultaneous:
-                        logger.debug("{} is blocked from running, workflow already running".format(task.log_format))
-                        continue
-                else:
-                    running_workflow_templates.add(task.unified_job_template_id)
+                # Previously we were tracking allow_simultaneous blocking both here and in DependencyGraph.
+                # Double check that using just the DependencyGraph works for Workflows and Sliced Jobs.
                 self.start_task(task, None, task.get_jobs_fail_chain(), None)
                 continue
+
+            found_acceptable_queue = False

             # Determine if there is control capacity for the task
             if task.capacity_type == 'control':
                 control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
@@ -530,8 +627,6 @@ class TaskManager:
                 # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
                 if task.capacity_type == 'control':
                     task.execution_node = control_instance.hostname
-                    control_instance.consume_capacity(control_impact)
-                    self.dependency_graph.add_job(task)
                     execution_instance = self.instances[control_instance.hostname].obj
                     task.log_lifecycle("controller_node_chosen")
                     task.log_lifecycle("execution_node_chosen")
@@ -539,17 +634,12 @@ class TaskManager:
                     found_acceptable_queue = True
                     continue

-            for instance_group in preferred_instance_groups:
+            for instance_group in self.instance_groups.get_instance_groups_from_task_cache(task):
                 if instance_group.is_container_group:
-                    self.dependency_graph.add_job(task)
                     self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
                     found_acceptable_queue = True
                     break

-                # TODO: remove this after we have confidence that OCP control nodes are reporting node_type=control
-                if settings.IS_K8S and task.capacity_type == 'execution':
-                    logger.debug("Skipping group {}, task cannot run on control plane".format(instance_group.name))
-                    continue
                 # at this point we know the instance group is NOT a container group
                 # because if it was, it would have started the task and broke out of the loop.
                 execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
@@ -563,9 +653,7 @@ class TaskManager:
                     control_instance = execution_instance
                     task.controller_node = execution_instance.hostname
-                    control_instance.consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
                     task.log_lifecycle("controller_node_chosen")
-                    execution_instance.consume_capacity(task.task_impact)
                     task.log_lifecycle("execution_node_chosen")
                     logger.debug(
                         "Starting {} in group {} instance {} (remaining_capacity={})".format(
@@ -573,7 +661,6 @@ class TaskManager:
                         )
                     )
                     execution_instance = self.instances[execution_instance.hostname].obj
-                    self.dependency_graph.add_job(task)
                     self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
                     found_acceptable_queue = True
                     break
@@ -599,25 +686,6 @@ class TaskManager:
                 tasks_to_update_job_explanation.append(task)
             logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))

-    def timeout_approval_node(self):
-        workflow_approvals = WorkflowApproval.objects.filter(status='pending')
-        now = tz_now()
-        for task in workflow_approvals:
-            approval_timeout_seconds = timedelta(seconds=task.timeout)
-            if task.timeout == 0:
-                continue
-            if (now - task.created) >= approval_timeout_seconds:
-                timeout_message = _("The approval node {name} ({pk}) has expired after {timeout} seconds.").format(
-                    name=task.name, pk=task.pk, timeout=task.timeout
-                )
-                logger.warning(timeout_message)
-                task.timed_out = True
-                task.status = 'failed'
-                task.send_approval_notification('timed_out')
-                task.websocket_emit_status(task.status)
-                task.job_explanation = timeout_message
-                task.save(update_fields=['status', 'job_explanation', 'timed_out'])

     def reap_jobs_from_orphaned_instances(self):
         # discover jobs that are in running state but aren't on an execution node
         # that we know about; this is a fairly rare event, but it can occur if you,
@@ -630,92 +698,45 @@ class TaskManager:
             logger.error(f'{j.execution_node} is not a registered instance; reaping {j.log_format}')
             reap_job(j, 'failed')

-    def process_tasks(self, all_sorted_tasks):
-        running_tasks = [t for t in all_sorted_tasks if t.status in ['waiting', 'running']]
+    def process_tasks(self):
+        running_tasks = [t for t in self.all_tasks if t.status in ['waiting', 'running']]
         self.process_running_tasks(running_tasks)
-        self.subsystem_metrics.inc("task_manager_running_processed", len(running_tasks))
+        self.subsystem_metrics.inc(f"{self.prefix}_running_processed", len(running_tasks))

-        pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
-
-        undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
-        dependencies = self.generate_dependencies(undeped_tasks)
-        deps_of_deps = self.generate_dependencies(dependencies)
-        dependencies += deps_of_deps
-        self.process_pending_tasks(dependencies)
-        self.subsystem_metrics.inc("task_manager_pending_processed", len(dependencies))
+        pending_tasks = [t for t in self.all_tasks if t.status == 'pending']

         self.process_pending_tasks(pending_tasks)
-        self.subsystem_metrics.inc("task_manager_pending_processed", len(pending_tasks))
+        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(pending_tasks))

+    def timeout_approval_node(self, task):
+        if self.timed_out():
+            logger.warning("Task manager has reached time out while processing approval nodes, exiting loop early")
+            # Do not process any more workflow approval nodes. Stop here.
+            # Maybe we should schedule another TaskManager run
+            return
+        timeout_message = _("The approval node {name} ({pk}) has expired after {timeout} seconds.").format(name=task.name, pk=task.pk, timeout=task.timeout)
+        logger.warning(timeout_message)
+        task.timed_out = True
+        task.status = 'failed'
+        task.send_approval_notification('timed_out')
+        task.websocket_emit_status(task.status)
+        task.job_explanation = timeout_message
+        task.save(update_fields=['status', 'job_explanation', 'timed_out'])
+
+    def get_expired_workflow_approvals(self):
+        # timeout of 0 indicates that it never expires
+        qs = WorkflowApproval.objects.filter(status='pending').exclude(timeout=0).filter(expires__lt=tz_now())
+        return qs

     @timeit
     def _schedule(self):
-        finished_wfjs = []
-        all_sorted_tasks = self.get_tasks()
+        self.get_tasks(dict(status__in=["pending", "waiting", "running"], dependencies_processed=True))

-        self.after_lock_init(all_sorted_tasks)
+        self.after_lock_init()
+        self.reap_jobs_from_orphaned_instances()

-        if len(all_sorted_tasks) > 0:
-            # TODO: Deal with
-            # latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks)
-            # self.process_latest_project_updates(latest_project_updates)
-
-            # latest_inventory_updates = self.get_latest_inventory_update_tasks(all_sorted_tasks)
-            # self.process_latest_inventory_updates(latest_inventory_updates)
-
-            self.all_inventory_sources = self.get_inventory_source_tasks(all_sorted_tasks)
-
-            running_workflow_tasks = self.get_running_workflow_jobs()
-            finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
-
-            previously_running_workflow_tasks = running_workflow_tasks
-            running_workflow_tasks = []
-            for workflow_job in previously_running_workflow_tasks:
-                if workflow_job.status == 'running':
-                    running_workflow_tasks.append(workflow_job)
-                else:
-                    logger.debug('Removed %s from job spawning consideration.', workflow_job.log_format)
-
-            self.spawn_workflow_graph_jobs(running_workflow_tasks)
-
-            self.timeout_approval_node()
-            self.reap_jobs_from_orphaned_instances()
-
-            self.process_tasks(all_sorted_tasks)
-        return finished_wfjs
-
-    def record_aggregate_metrics(self, *args):
-        if not settings.IS_TESTING():
-            # increment task_manager_schedule_calls regardless if the other
-            # metrics are recorded
-            s_metrics.Metrics(auto_pipe_execute=True).inc("task_manager_schedule_calls", 1)
-            # Only record metrics if the last time recording was more
-            # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
-            # Prevents a short-duration task manager that runs directly after a
-            # long task manager to override useful metrics.
-            current_time = time.time()
-            time_last_recorded = current_time - self.subsystem_metrics.decode("task_manager_recorded_timestamp")
-            if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
-                logger.debug(f"recording metrics, last recorded {time_last_recorded} seconds ago")
-                self.subsystem_metrics.set("task_manager_recorded_timestamp", current_time)
-                self.subsystem_metrics.pipe_execute()
-            else:
-                logger.debug(f"skipping recording metrics, last recorded {time_last_recorded} seconds ago")
-
-    def record_aggregate_metrics_and_exit(self, *args):
-        self.record_aggregate_metrics()
-        sys.exit(1)
-
-    def schedule(self):
-        # Lock
-        with advisory_lock('task_manager_lock', wait=False) as acquired:
-            with transaction.atomic():
-                if acquired is False:
-                    logger.debug("Not running scheduler, another task holds lock")
-                    return
-                logger.debug("Starting Scheduler")
-                with task_manager_bulk_reschedule():
-                    # if sigterm due to timeout, still record metrics
-                    signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
-                    self._schedule()
-                    self.record_aggregate_metrics()
-                logger.debug("Finishing Scheduler")
+        if len(self.all_tasks) > 0:
+            self.process_tasks()
+
+        for workflow_approval in self.get_expired_workflow_approvals():
+            self.timeout_approval_node(workflow_approval)
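
Approval timeouts also change shape: the old loop loaded every pending approval and compared `created + timeout` in Python, while the new code pushes the comparison into SQL against a precomputed `expires` column. Roughly equivalent, assuming `expires` is maintained as `created + timeout`:

    # old: O(n) Python-side comparison over all pending approvals
    expired = [a for a in WorkflowApproval.objects.filter(status='pending')
               if a.timeout != 0 and (tz_now() - a.created) >= timedelta(seconds=a.timeout)]

    # new: one indexed filter in the database (timeout == 0 never expires)
    expired_qs = WorkflowApproval.objects.filter(status='pending').exclude(timeout=0).filter(expires__lt=tz_now())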
@@ -34,11 +34,13 @@ class TaskManagerInstance:


 class TaskManagerInstances:
-    def __init__(self, active_tasks, instances=None):
+    def __init__(self, active_tasks, instances=None, instance_fields=('node_type', 'capacity', 'hostname', 'enabled')):
         self.instances_by_hostname = dict()
         if instances is None:
             instances = (
-                Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop').only('node_type', 'capacity', 'hostname', 'enabled')
+                Instance.objects.filter(hostname__isnull=False, node_state=Instance.States.READY, enabled=True)
+                .exclude(node_type='hop')
+                .only('node_type', 'node_state', 'capacity', 'hostname', 'enabled')
             )
         for instance in instances:
             self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
@@ -67,6 +69,7 @@ class TaskManagerInstanceGroups:
     def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
         self.instance_groups = dict()
         self.controlplane_ig = None
+        self.pk_ig_map = dict()

         if instance_groups is not None:  # for testing
             self.instance_groups = instance_groups
@@ -81,6 +84,7 @@ class TaskManagerInstanceGroups:
                     instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
                 ],
             )
+            self.pk_ig_map[instance_group.pk] = instance_group

     def get_remaining_capacity(self, group_name):
         instances = self.instance_groups[group_name]['instances']
@@ -121,3 +125,17 @@ class TaskManagerInstanceGroups:
         elif i.capacity > largest_instance.capacity:
             largest_instance = i
         return largest_instance
+
+    def get_instance_groups_from_task_cache(self, task):
+        igs = []
+        if task.preferred_instance_groups_cache:
+            for pk in task.preferred_instance_groups_cache:
+                ig = self.pk_ig_map.get(pk, None)
+                if ig:
+                    igs.append(ig)
+                else:
+                    logger.warn(f"Unknown instance group with pk {pk} for task {task}")
+        if len(igs) == 0:
+            logger.warn(f"No instance groups in cache exist, defaulting to global instance groups for task {task}")
+            return task.global_instance_groups
+        return igs
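
`pk_ig_map` lets the scheduler resolve a task's cached instance-group pks back to in-memory objects without extra queries, falling back to the global groups when the cache is stale or empty. A compact sketch of the lookup (helper name hypothetical):

    def resolve_cached_igs(pk_ig_map, task):
        cached = task.preferred_instance_groups_cache or []
        igs = [pk_ig_map[pk] for pk in cached if pk in pk_ig_map]
        # an empty result means the cache is stale; fall back to global groups
        return igs or task.global_instance_groups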
@@ -1,15 +1,35 @@
 # Python
 import logging

+# Django
+from django.conf import settings
+
 # AWX
-from awx.main.scheduler import TaskManager
+from awx import MODE
+from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
 from awx.main.dispatch.publish import task
 from awx.main.dispatch import get_local_queuename

 logger = logging.getLogger('awx.main.scheduler')


+def run_manager(manager, prefix):
+    if MODE == 'development' and settings.AWX_DISABLE_TASK_MANAGERS:
+        logger.debug(f"Not running {prefix} manager, AWX_DISABLE_TASK_MANAGERS is True. Trigger with GET to /api/debug/{prefix}_manager/")
+        return
+    manager().schedule()
+
+
 @task(queue=get_local_queuename)
-def run_task_manager():
-    logger.debug("Running task manager.")
-    TaskManager().schedule()
+def task_manager():
+    run_manager(TaskManager, "task")
+
+
+@task(queue=get_local_queuename)
+def dependency_manager():
+    run_manager(DependencyManager, "dependency")
+
+
+@task(queue=get_local_queuename)
+def workflow_manager():
+    run_manager(WorkflowManager, "workflow")
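
With `AWX_DISABLE_TASK_MANAGERS` set in development, the periodic tasks become no-ops and each manager is triggered via the debug endpoint named in the log line. One can also run a single cycle by hand; a minimal example, assuming a dev shell such as `awx-manage shell_plus`:

    from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager

    DependencyManager().schedule()  # generate dependencies for new pending jobs
    TaskManager().schedule()        # place and start jobs that are ready
    WorkflowManager().schedule()    # advance running workflow graphs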
@@ -6,17 +6,16 @@ import os
 import stat

 # Django
-from django.utils.timezone import now
 from django.conf import settings
 from django_guid import get_guid
 from django.utils.functional import cached_property
+from django.db import connections

 # AWX
 from awx.main.redact import UriCleaner
 from awx.main.constants import MINIMAL_EVENTS, ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE
 from awx.main.utils.update_model import update_model
 from awx.main.queue import CallbackQueueDispatcher
-from awx.main.tasks.signals import signal_callback

 logger = logging.getLogger('awx.main.tasks.callback')

@@ -175,28 +174,6 @@ class RunnerCallback:

         return False

-    def cancel_callback(self):
-        """
-        Ansible runner callback to tell the job when/if it is canceled
-        """
-        unified_job_id = self.instance.pk
-        if signal_callback():
-            return True
-        try:
-            self.instance = self.update_model(unified_job_id)
-        except Exception:
-            logger.exception(f'Encountered error during cancel check for {unified_job_id}, canceling now')
-            return True
-        if not self.instance:
-            logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
-            return True
-        if self.instance.cancel_flag or self.instance.status == 'canceled':
-            cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
-            if cancel_wait > 5:
-                logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
-            return True
-        return False
-
     def finished_callback(self, runner_obj):
         """
         Ansible runner callback triggered on finished run
@@ -227,6 +204,8 @@ class RunnerCallback:

         with disable_activity_stream():
             self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
+            # We opened a connection just for that save, close it here now
+            connections.close_all()
         elif status_data['status'] == 'failed':
             # For encrypted ssh_key_data, ansible-runner worker will open and write the
             # ssh_key_data to a named pipe. Then, once the podman container starts, ssh-agent will
|||||||
@@ -1,6 +1,5 @@
|
|||||||
# Python
|
# Python
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
from distutils.dir_util import copy_tree
|
|
||||||
import errno
|
import errno
|
||||||
import functools
|
import functools
|
||||||
import fcntl
|
import fcntl
|
||||||
@@ -15,7 +14,6 @@ import tempfile
|
|||||||
import traceback
|
import traceback
|
||||||
import time
|
import time
|
||||||
import urllib.parse as urlparse
|
import urllib.parse as urlparse
|
||||||
from uuid import uuid4
|
|
||||||
|
|
||||||
# Django
|
# Django
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
@@ -38,6 +36,7 @@ from awx.main.constants import (
|
|||||||
JOB_FOLDER_PREFIX,
|
JOB_FOLDER_PREFIX,
|
||||||
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
||||||
CONTAINER_VOLUMES_MOUNT_TYPES,
|
CONTAINER_VOLUMES_MOUNT_TYPES,
|
||||||
|
ACTIVE_STATES,
|
||||||
)
|
)
|
||||||
from awx.main.models import (
|
from awx.main.models import (
|
||||||
Instance,
|
Instance,
|
||||||
@@ -146,7 +145,7 @@ class BaseTask(object):
|
|||||||
"""
|
"""
|
||||||
Return params structure to be executed by the container runtime
|
Return params structure to be executed by the container runtime
|
||||||
"""
|
"""
|
||||||
if settings.IS_K8S:
|
if settings.IS_K8S and instance.instance_group.is_container_group:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
image = instance.execution_environment.image
|
image = instance.execution_environment.image
|
||||||
@@ -211,14 +210,22 @@ class BaseTask(object):
|
|||||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||||
if settings.AWX_CLEANUP_PATHS:
|
if settings.AWX_CLEANUP_PATHS:
|
||||||
self.cleanup_paths.append(path)
|
self.cleanup_paths.append(path)
|
||||||
# Ansible runner requires that project exists,
|
# We will write files in these folders later
|
||||||
# and we will write files in the other folders without pre-creating the folder
|
for subfolder in ('inventory', 'env'):
|
||||||
for subfolder in ('project', 'inventory', 'env'):
|
|
||||||
runner_subfolder = os.path.join(path, subfolder)
|
runner_subfolder = os.path.join(path, subfolder)
|
||||||
if not os.path.exists(runner_subfolder):
|
if not os.path.exists(runner_subfolder):
|
||||||
os.mkdir(runner_subfolder)
|
os.mkdir(runner_subfolder)
|
||||||
return path
|
return path
|
||||||
|
|
||||||
|
def build_project_dir(self, instance, private_data_dir):
|
||||||
|
"""
|
||||||
|
Create the ansible-runner project subdirectory. In many cases this is the source checkout.
|
||||||
|
In cases that do not even need the source checkout, we create an empty dir to be the workdir.
|
||||||
|
"""
|
||||||
|
project_dir = os.path.join(private_data_dir, 'project')
|
||||||
|
if not os.path.exists(project_dir):
|
||||||
|
os.mkdir(project_dir)
|
||||||
|
|
||||||
def build_private_data_files(self, instance, private_data_dir):
|
def build_private_data_files(self, instance, private_data_dir):
|
||||||
"""
|
"""
|
||||||
Creates temporary files containing the private data.
|
Creates temporary files containing the private data.
|
||||||
@@ -354,12 +361,65 @@ class BaseTask(object):
|
|||||||
expect_passwords[k] = passwords.get(v, '') or ''
|
expect_passwords[k] = passwords.get(v, '') or ''
|
||||||
return expect_passwords
|
return expect_passwords
|
||||||
|
|
||||||
|
def release_lock(self, project):
|
||||||
|
try:
|
||||||
|
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
|
||||||
|
except IOError as e:
|
||||||
|
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, project.get_lock_file(), e.strerror))
|
||||||
|
os.close(self.lock_fd)
|
||||||
|
raise
|
||||||
|
|
||||||
|
os.close(self.lock_fd)
|
||||||
|
self.lock_fd = None
|
||||||
|
|
||||||
|
def acquire_lock(self, project, unified_job_id=None):
|
||||||
|
if not os.path.exists(settings.PROJECTS_ROOT):
|
||||||
|
os.mkdir(settings.PROJECTS_ROOT)
|
||||||
|
|
||||||
|
lock_path = project.get_lock_file()
|
||||||
|
if lock_path is None:
|
||||||
|
# If from migration or someone blanked local_path for any other reason, recoverable by save
|
||||||
|
project.save()
|
||||||
|
lock_path = project.get_lock_file()
|
||||||
|
if lock_path is None:
|
||||||
|
raise RuntimeError(u'Invalid lock file path')
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
|
||||||
|
except OSError as e:
|
||||||
|
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
|
||||||
|
raise
|
||||||
|
|
||||||
|
start_time = time.time()
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||||
|
break
|
||||||
|
except IOError as e:
|
||||||
|
if e.errno not in (errno.EAGAIN, errno.EACCES):
|
||||||
|
os.close(self.lock_fd)
|
||||||
|
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
|
||||||
|
raise
|
||||||
|
else:
|
||||||
|
time.sleep(1.0)
|
||||||
|
self.instance.refresh_from_db(fields=['cancel_flag'])
|
||||||
|
if self.instance.cancel_flag or signal_callback():
|
||||||
|
logger.debug(f"Unified job {self.instance.id} was canceled while waiting for project file lock")
|
||||||
|
return
|
||||||
|
waiting_time = time.time() - start_time
|
||||||
|
|
||||||
|
if waiting_time > 1.0:
|
||||||
|
logger.info(f'Job {unified_job_id} waited {waiting_time} to acquire lock for local source tree for path {lock_path}.')
|
||||||
|
|
||||||
def pre_run_hook(self, instance, private_data_dir):
|
def pre_run_hook(self, instance, private_data_dir):
|
||||||
"""
|
"""
|
||||||
Hook for any steps to run before the job/task starts
|
Hook for any steps to run before the job/task starts
|
||||||
"""
|
"""
|
||||||
instance.log_lifecycle("pre_run")
|
instance.log_lifecycle("pre_run")
|
||||||
|
|
||||||
|
# Before task is started, ensure that job_event partitions exist
|
||||||
|
create_partition(instance.event_class._meta.db_table, start=instance.created)
|
||||||
|
|
||||||
def post_run_hook(self, instance, status):
|
def post_run_hook(self, instance, status):
|
||||||
"""
|
"""
|
||||||
Hook for any steps to run before job/task is marked as complete.
|
Hook for any steps to run before job/task is marked as complete.
|
||||||
@@ -372,15 +432,9 @@ class BaseTask(object):
|
|||||||
"""
|
"""
|
||||||
instance.log_lifecycle("finalize_run")
|
instance.log_lifecycle("finalize_run")
|
||||||
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
|
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
|
||||||
job_profiling_dir = os.path.join(artifact_dir, 'playbook_profiling')
|
|
||||||
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
|
|
||||||
collections_info = os.path.join(artifact_dir, 'collections.json')
|
collections_info = os.path.join(artifact_dir, 'collections.json')
|
||||||
ansible_version_file = os.path.join(artifact_dir, 'ansible_version.txt')
|
ansible_version_file = os.path.join(artifact_dir, 'ansible_version.txt')
|
||||||
|
|
||||||
if not os.path.exists(awx_profiling_dir):
|
|
||||||
os.mkdir(awx_profiling_dir)
|
|
||||||
if os.path.isdir(job_profiling_dir):
|
|
||||||
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
|
|
||||||
if os.path.exists(collections_info):
|
if os.path.exists(collections_info):
|
||||||
with open(collections_info) as ee_json_info:
|
with open(collections_info) as ee_json_info:
|
||||||
ee_collections_info = json.loads(ee_json_info.read())
|
ee_collections_info = json.loads(ee_json_info.read())
|
||||||
@@ -399,6 +453,11 @@ class BaseTask(object):
|
|||||||
Run the job/task and capture its output.
|
Run the job/task and capture its output.
|
||||||
"""
|
"""
|
||||||
self.instance = self.model.objects.get(pk=pk)
|
self.instance = self.model.objects.get(pk=pk)
|
||||||
|
if self.instance.status != 'canceled' and self.instance.cancel_flag:
|
||||||
|
self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
|
||||||
|
if self.instance.status not in ACTIVE_STATES:
|
||||||
|
# Prevent starting the job if it has been reaped or handled by another process.
|
||||||
|
raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')
|
||||||
|
|
||||||
if self.instance.execution_environment_id is None:
|
if self.instance.execution_environment_id is None:
|
||||||
from awx.main.signals import disable_activity_stream
|
from awx.main.signals import disable_activity_stream
|
||||||
@@ -424,9 +483,11 @@ class BaseTask(object):
|
|||||||
self.instance.send_notification_templates("running")
|
self.instance.send_notification_templates("running")
|
||||||
private_data_dir = self.build_private_data_dir(self.instance)
|
private_data_dir = self.build_private_data_dir(self.instance)
|
||||||
self.pre_run_hook(self.instance, private_data_dir)
|
self.pre_run_hook(self.instance, private_data_dir)
|
||||||
|
self.build_project_dir(self.instance, private_data_dir)
|
||||||
self.instance.log_lifecycle("preparing_playbook")
|
self.instance.log_lifecycle("preparing_playbook")
|
||||||
if self.instance.cancel_flag or signal_callback():
|
if self.instance.cancel_flag or signal_callback():
|
||||||
self.instance = self.update_model(self.instance.pk, status='canceled')
|
self.instance = self.update_model(self.instance.pk, status='canceled')
|
||||||
|
|
||||||
if self.instance.status != 'running':
|
if self.instance.status != 'running':
|
||||||
# Stop the task chain and prevent starting the job if it has
|
# Stop the task chain and prevent starting the job if it has
|
||||||
# already been canceled.
|
# already been canceled.
|
||||||
@@ -529,7 +590,7 @@ class BaseTask(object):
|
|||||||
event_handler=self.runner_callback.event_handler,
|
event_handler=self.runner_callback.event_handler,
|
||||||
finished_callback=self.runner_callback.finished_callback,
|
finished_callback=self.runner_callback.finished_callback,
|
||||||
status_handler=self.runner_callback.status_handler,
|
status_handler=self.runner_callback.status_handler,
|
||||||
cancel_callback=self.runner_callback.cancel_callback,
|
cancel_callback=signal_callback,
|
||||||
**params,
|
**params,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
@@ -549,8 +610,12 @@ class BaseTask(object):
|
|||||||
status = 'failed'
|
status = 'failed'
|
||||||
elif status == 'canceled':
|
elif status == 'canceled':
|
||||||
self.instance = self.update_model(pk)
|
self.instance = self.update_model(pk)
|
||||||
if (getattr(self.instance, 'cancel_flag', False) is False) and signal_callback():
|
cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
|
||||||
self.runner_callback.delay_update(job_explanation="Task was canceled due to receiving a shutdown signal.")
|
if (cancel_flag_value is False) and signal_callback():
|
||||||
|
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
|
||||||
|
status = 'failed'
|
||||||
|
elif cancel_flag_value is False:
|
||||||
|
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
|
||||||
status = 'failed'
|
status = 'failed'
|
||||||
except ReceptorNodeNotFound as exc:
|
except ReceptorNodeNotFound as exc:
|
||||||
self.runner_callback.delay_update(job_explanation=str(exc))
|
self.runner_callback.delay_update(job_explanation=str(exc))
|
||||||
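
The canceled-status branch above distinguishes a user cancel (the model's cancel_flag is set) from a dispatcher shutdown (only the process-level signal flag is set). A minimal sketch of that disambiguation, with illustrative names standing in for AWX's actual callback plumbing:

```python
import signal

_shutdown = False

def _on_sigterm(signum, frame):
    global _shutdown
    _shutdown = True

signal.signal(signal.SIGTERM, _on_sigterm)

def signal_callback():
    return _shutdown

def resolve_canceled(cancel_flag):
    # A job that lands in 'canceled' without its cancel_flag set was not
    # canceled by a user, so attribute it to shutdown and mark it failed.
    if cancel_flag is False and signal_callback():
        return 'failed', 'Task was canceled due to receiving a shutdown signal.'
    if cancel_flag is False:
        return 'failed', 'The running ansible process received a shutdown signal.'
    return 'canceled', None
```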
@@ -593,8 +658,141 @@ class BaseTask(object):
             raise AwxTaskError.TaskError(self.instance, rc)
 
 
+class SourceControlMixin(BaseTask):
+    """Utility methods for tasks that run use content from source control"""
+
+    def get_sync_needs(self, project, scm_branch=None):
+        project_path = project.get_project_path(check_if_exists=False)
+        job_revision = project.scm_revision
+        sync_needs = []
+        source_update_tag = 'update_{}'.format(project.scm_type)
+        branch_override = bool(scm_branch and scm_branch != project.scm_branch)
+        # TODO: skip syncs for inventory updates. Now, UI needs a link added so clients can link to project
+        # source_project is only a field on inventory sources.
+        if isinstance(self.instance, InventoryUpdate):
+            sync_needs.append(source_update_tag)
+        elif not project.scm_type:
+            pass  # manual projects are not synced, user has responsibility for that
+        elif not os.path.exists(project_path):
+            logger.debug(f'Performing fresh clone of {project.id} for unified job {self.instance.id} on this instance.')
+            sync_needs.append(source_update_tag)
+        elif project.scm_type == 'git' and project.scm_revision and (not branch_override):
+            try:
+                git_repo = git.Repo(project_path)
+
+                if job_revision == git_repo.head.commit.hexsha:
+                    logger.debug(f'Skipping project sync for {self.instance.id} because commit is locally available')
+                else:
+                    sync_needs.append(source_update_tag)
+            except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
+                logger.debug(f'Needed commit for {self.instance.id} not in local source tree, will sync with remote')
+                sync_needs.append(source_update_tag)
+        else:
+            logger.debug(f'Project not available locally, {self.instance.id} will sync with remote')
+            sync_needs.append(source_update_tag)
+
+        has_cache = os.path.exists(os.path.join(project.get_cache_path(), project.cache_id))
+        # Galaxy requirements are not supported for manual projects
+        if project.scm_type and ((not has_cache) or branch_override):
+            sync_needs.extend(['install_roles', 'install_collections'])
+
+        return sync_needs
+
+    def spawn_project_sync(self, project, sync_needs, scm_branch=None):
+        pu_ig = self.instance.instance_group
+        pu_en = Instance.objects.me().hostname
+
+        sync_metafields = dict(
+            launch_type="sync",
+            job_type='run',
+            job_tags=','.join(sync_needs),
+            status='running',
+            instance_group=pu_ig,
+            execution_node=pu_en,
+            controller_node=pu_en,
+            celery_task_id=self.instance.celery_task_id,
+        )
+        if scm_branch and scm_branch != project.scm_branch:
+            sync_metafields['scm_branch'] = scm_branch
+            sync_metafields['scm_clean'] = True  # to accomidate force pushes
+        if 'update_' not in sync_metafields['job_tags']:
+            sync_metafields['scm_revision'] = project.scm_revision
+        local_project_sync = project.create_project_update(_eager_fields=sync_metafields)
+        local_project_sync.log_lifecycle("controller_node_chosen")
+        local_project_sync.log_lifecycle("execution_node_chosen")
+        return local_project_sync
+
+    def sync_and_copy_without_lock(self, project, private_data_dir, scm_branch=None):
+        sync_needs = self.get_sync_needs(project, scm_branch=scm_branch)
+
+        if sync_needs:
+            local_project_sync = self.spawn_project_sync(project, sync_needs, scm_branch=scm_branch)
+            # save the associated job before calling run() so that a
+            # cancel() call on the job can cancel the project update
+            if isinstance(self.instance, Job):
+                self.instance = self.update_model(self.instance.pk, project_update=local_project_sync)
+            else:
+                self.instance = self.update_model(self.instance.pk, source_project_update=local_project_sync)
+
+            try:
+                # the job private_data_dir is passed so sync can download roles and collections there
+                sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
+                sync_task.run(local_project_sync.id)
+                local_project_sync.refresh_from_db()
+                self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
+            except Exception:
+                local_project_sync.refresh_from_db()
+                if local_project_sync.status != 'canceled':
+                    self.instance = self.update_model(
+                        self.instance.pk,
+                        status='failed',
+                        job_explanation=(
+                            'Previous Task Failed: {"job_type": "project_update", '
+                            f'"job_name": "{local_project_sync.name}", "job_id": "{local_project_sync.id}"}}'
+                        ),
+                    )
+                raise
+            self.instance.refresh_from_db()
+            if self.instance.cancel_flag:
+                return
+        else:
+            # Case where a local sync is not needed, meaning that local tree is
+            # up-to-date with project, job is running project current version
+            self.instance = self.update_model(self.instance.pk, scm_revision=project.scm_revision)
+            # Project update does not copy the folder, so copy here
+            RunProjectUpdate.make_local_copy(project, private_data_dir)
+
+    def sync_and_copy(self, project, private_data_dir, scm_branch=None):
+        self.acquire_lock(project, self.instance.id)
+
+        try:
+            original_branch = None
+            project_path = project.get_project_path(check_if_exists=False)
+            if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):
+                if os.path.exists(project_path):
+                    git_repo = git.Repo(project_path)
+                    if git_repo.head.is_detached:
+                        original_branch = git_repo.head.commit
+                    else:
+                        original_branch = git_repo.active_branch
+
+            return self.sync_and_copy_without_lock(project, private_data_dir, scm_branch=scm_branch)
+        finally:
+            # We have made the copy so we can set the tree back to its normal state
+            if original_branch:
+                # for git project syncs, non-default branches can be problems
+                # restore to branch the repo was on before this run
+                try:
+                    original_branch.checkout()
+                except Exception:
+                    # this could have failed due to dirty tree, but difficult to predict all cases
+                    logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')
+
+            self.release_lock(project)
+
+
 @task(queue=get_local_queuename)
-class RunJob(BaseTask):
+class RunJob(SourceControlMixin, BaseTask):
     """
     Run a job using ansible-playbook.
     """
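
Condensed, the branch logic in get_sync_needs reduces to a small decision table. The sketch below is a simplified stand-in (hypothetical signature, not the real method) for when a source update is required:

```python
import os

def needs_source_update(scm_type, project_path, local_revision, wanted_revision, branch_override):
    # Simplified stand-in for get_sync_needs above (illustrative signature).
    if not scm_type:
        return False  # manual projects are never synced
    if not os.path.exists(project_path):
        return True   # no local checkout: a fresh clone is required
    if scm_type == 'git' and local_revision and not branch_override:
        return local_revision != wanted_revision  # sync only if the commit is missing locally
    return True       # any other state: sync with the remote to be safe
```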
@@ -863,98 +1061,14 @@ class RunJob(BaseTask):
             job = self.update_model(job.pk, status='failed', job_explanation=msg)
             raise RuntimeError(msg)
 
-        project_path = job.project.get_project_path(check_if_exists=False)
-        job_revision = job.project.scm_revision
-        sync_needs = []
-        source_update_tag = 'update_{}'.format(job.project.scm_type)
-        branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
-        if not job.project.scm_type:
-            pass  # manual projects are not synced, user has responsibility for that
-        elif not os.path.exists(project_path):
-            logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
-            sync_needs.append(source_update_tag)
-        elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
-            try:
-                git_repo = git.Repo(project_path)
-
-                if job_revision == git_repo.head.commit.hexsha:
-                    logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
-                else:
-                    sync_needs.append(source_update_tag)
-            except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
-                logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
-                sync_needs.append(source_update_tag)
-        else:
-            logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
-            sync_needs.append(source_update_tag)
-
-        has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
-        # Galaxy requirements are not supported for manual projects
-        if job.project.scm_type and ((not has_cache) or branch_override):
-            sync_needs.extend(['install_roles', 'install_collections'])
-
-        if sync_needs:
-            pu_ig = job.instance_group
-            pu_en = Instance.objects.me().hostname
-
-            sync_metafields = dict(
-                launch_type="sync",
-                job_type='run',
-                job_tags=','.join(sync_needs),
-                status='running',
-                instance_group=pu_ig,
-                execution_node=pu_en,
-                controller_node=pu_en,
-                celery_task_id=job.celery_task_id,
-            )
-            if branch_override:
-                sync_metafields['scm_branch'] = job.scm_branch
-                sync_metafields['scm_clean'] = True  # to accomidate force pushes
-            if 'update_' not in sync_metafields['job_tags']:
-                sync_metafields['scm_revision'] = job_revision
-            local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
-            local_project_sync.log_lifecycle("controller_node_chosen")
-            local_project_sync.log_lifecycle("execution_node_chosen")
-            create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
-            # save the associated job before calling run() so that a
-            # cancel() call on the job can cancel the project update
-            job = self.update_model(job.pk, project_update=local_project_sync)
-
-            project_update_task = local_project_sync._get_task_class()
-            try:
-                # the job private_data_dir is passed so sync can download roles and collections there
-                sync_task = project_update_task(job_private_data_dir=private_data_dir)
-                sync_task.run(local_project_sync.id)
-                local_project_sync.refresh_from_db()
-                job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
-            except Exception:
-                local_project_sync.refresh_from_db()
-                if local_project_sync.status != 'canceled':
-                    job = self.update_model(
-                        job.pk,
-                        status='failed',
-                        job_explanation=(
-                            'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
-                            % ('project_update', local_project_sync.name, local_project_sync.id)
-                        ),
-                    )
-                raise
-            job.refresh_from_db()
-            if job.cancel_flag:
-                return
-        else:
-            # Case where a local sync is not needed, meaning that local tree is
-            # up-to-date with project, job is running project current version
-            if job_revision:
-                job = self.update_model(job.pk, scm_revision=job_revision)
-            # Project update does not copy the folder, so copy here
-            RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
-
         if job.inventory.kind == 'smart':
             # cache smart inventory memberships so that the host_filter query is not
             # ran inside of the event saving code
             update_smart_memberships_for_inventory(job.inventory)
 
+    def build_project_dir(self, job, private_data_dir):
+        self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
+
     def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
         super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
         if not private_data_dir:
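
The net effect of this hunk is that RunJob no longer inlines the sync logic: BaseTask.run() invokes a build_project_dir() hook, and each task class supplies its own strategy. A schematic sketch of that hook pattern (class names and bodies simplified, not the real classes):

```python
import os

class BaseTaskSketch:
    def build_project_dir(self, instance, private_data_dir):
        # default hook: provide an empty runner project directory
        os.makedirs(os.path.join(private_data_dir, 'project'), exist_ok=True)

    def run(self, instance, private_data_dir):
        # pre_run_hook() ... then the hook below, then playbook preparation
        self.build_project_dir(instance, private_data_dir)

class SourceControlledTaskSketch(BaseTaskSketch):
    def sync_and_copy(self, project, private_data_dir):
        pass  # spawn a project update or copy the cached checkout

    def build_project_dir(self, instance, private_data_dir):
        # override: populate the project dir from source control instead
        self.sync_and_copy(instance.project, private_data_dir)
```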
@@ -986,7 +1100,6 @@ class RunProjectUpdate(BaseTask):
 
     def __init__(self, *args, job_private_data_dir=None, **kwargs):
         super(RunProjectUpdate, self).__init__(*args, **kwargs)
-        self.original_branch = None
         self.job_private_data_dir = job_private_data_dir
 
     def build_private_data(self, project_update, private_data_dir):
@@ -1156,6 +1269,10 @@ class RunProjectUpdate(BaseTask):
             # for raw archive, prevent error moving files between volumes
             extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
 
+        if project_update.project.signature_validation_credential is not None:
+            pubkey = project_update.project.signature_validation_credential.get_input('gpg_public_key')
+            extra_vars['gpg_pubkey'] = pubkey
+
         self._write_extra_vars_file(private_data_dir, extra_vars)
 
     def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
@@ -1173,74 +1290,13 @@ class RunProjectUpdate(BaseTask):
         d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
         return d
 
-    def release_lock(self, instance):
-        try:
-            fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
-        except IOError as e:
-            logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
-            os.close(self.lock_fd)
-            raise
-
-        os.close(self.lock_fd)
-        self.lock_fd = None
-
-    '''
-    Note: We don't support blocking=False
-    '''
-
-    def acquire_lock(self, instance, blocking=True):
-        lock_path = instance.get_lock_file()
-        if lock_path is None:
-            # If from migration or someone blanked local_path for any other reason, recoverable by save
-            instance.save()
-            lock_path = instance.get_lock_file()
-            if lock_path is None:
-                raise RuntimeError(u'Invalid lock file path')
-
-        try:
-            self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
-        except OSError as e:
-            logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
-            raise
-
-        start_time = time.time()
-        while True:
-            try:
-                instance.refresh_from_db(fields=['cancel_flag'])
-                if instance.cancel_flag:
-                    logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
-                    return
-                fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-                break
-            except IOError as e:
-                if e.errno not in (errno.EAGAIN, errno.EACCES):
-                    os.close(self.lock_fd)
-                    logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
-                    raise
-                else:
-                    time.sleep(1.0)
-        waiting_time = time.time() - start_time
-
-        if waiting_time > 1.0:
-            logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
-
     def pre_run_hook(self, instance, private_data_dir):
         super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
         # re-create root project folder if a natural disaster has destroyed it
-        if not os.path.exists(settings.PROJECTS_ROOT):
-            os.mkdir(settings.PROJECTS_ROOT)
         project_path = instance.project.get_project_path(check_if_exists=False)
 
-        self.acquire_lock(instance)
-        self.original_branch = None
-        if instance.scm_type == 'git' and instance.branch_override:
-            if os.path.exists(project_path):
-                git_repo = git.Repo(project_path)
-                if git_repo.head.is_detached:
-                    self.original_branch = git_repo.head.commit
-                else:
-                    self.original_branch = git_repo.active_branch
-
+        if instance.launch_type != 'sync':
+            self.acquire_lock(instance.project, instance.id)
         if not os.path.exists(project_path):
             os.makedirs(project_path)  # used as container mount
@@ -1251,11 +1307,12 @@ class RunProjectUpdate(BaseTask):
                 shutil.rmtree(stage_path)
             os.makedirs(stage_path)  # presence of empty cache indicates lack of roles or collections
 
+    def build_project_dir(self, instance, private_data_dir):
         # the project update playbook is not in a git repo, but uses a vendoring directory
         # to be consistent with the ansible-runner model,
         # that is moved into the runner project folder here
         awx_playbooks = self.get_path_to('../../', 'playbooks')
-        copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
+        shutil.copytree(awx_playbooks, os.path.join(private_data_dir, 'project'))
 
     @staticmethod
     def clear_project_cache(cache_dir, keep_value):
@@ -1272,50 +1329,18 @@ class RunProjectUpdate(BaseTask):
                 logger.warning(f"Could not remove cache directory {old_path}")
 
     @staticmethod
-    def make_local_copy(p, job_private_data_dir, scm_revision=None):
+    def make_local_copy(project, job_private_data_dir):
         """Copy project content (roles and collections) to a job private_data_dir
 
-        :param object p: Either a project or a project update
+        :param object project: Either a project or a project update
         :param str job_private_data_dir: The root of the target ansible-runner folder
-        :param str scm_revision: For branch_override cases, the git revision to copy
         """
-        project_path = p.get_project_path(check_if_exists=False)
+        project_path = project.get_project_path(check_if_exists=False)
         destination_folder = os.path.join(job_private_data_dir, 'project')
-        if not scm_revision:
-            scm_revision = p.scm_revision
-
-        if p.scm_type == 'git':
-            git_repo = git.Repo(project_path)
-            if not os.path.exists(destination_folder):
-                os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
-            tmp_branch_name = 'awx_internal/{}'.format(uuid4())
-            # always clone based on specific job revision
-            if not p.scm_revision:
-                raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
-            source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
-            # git clone must take file:// syntax for source repo or else options like depth will be ignored
-            source_as_uri = Path(project_path).as_uri()
-            git.Repo.clone_from(
-                source_as_uri,
-                destination_folder,
-                branch=source_branch,
-                depth=1,
-                single_branch=True,  # shallow, do not copy full history
-            )
-            # submodules copied in loop because shallow copies from local HEADs are ideal
-            # and no git clone submodule options are compatible with minimum requirements
-            for submodule in git_repo.submodules:
-                subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
-                subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
-                subrepo_uri = Path(subrepo_path).as_uri()
-                git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
-            # force option is necessary because remote refs are not counted, although no information is lost
-            git_repo.delete_head(tmp_branch_name, force=True)
-        else:
-            copy_tree(project_path, destination_folder, preserve_symlinks=1)
+        shutil.copytree(project_path, destination_folder, ignore=shutil.ignore_patterns('.git'), symlinks=True)
 
         # copy over the roles and collection cache to job folder
-        cache_path = os.path.join(p.get_cache_path(), p.cache_id)
+        cache_path = os.path.join(project.get_cache_path(), project.cache_id)
         subfolders = []
         if settings.AWX_COLLECTIONS_ENABLED:
             subfolders.append('requirements_collections')
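
The copy_tree -> shutil.copytree swap above also drops the temporary-branch shallow-clone dance in favor of a straight tree copy that excludes .git and preserves symlinks. For reference, the stdlib call now doing the work (paths below are invented for illustration); note that distutils, which provided copy_tree, is deprecated as of Python 3.10:

```python
import shutil

# copy a project checkout into a job's private data dir, as make_local_copy now does
shutil.copytree(
    '/var/lib/awx/projects/_6__demo_project',   # hypothetical source checkout
    '/tmp/awx_pdd/project',                     # hypothetical destination; must not already exist
    ignore=shutil.ignore_patterns('.git'),      # leave repo metadata behind
    symlinks=True,                              # keep links as links (copy_tree's preserve_symlinks=1)
)
```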
@@ -1325,8 +1350,8 @@ class RunProjectUpdate(BaseTask):
             cache_subpath = os.path.join(cache_path, subfolder)
             if os.path.exists(cache_subpath):
                 dest_subpath = os.path.join(job_private_data_dir, subfolder)
-                copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
-                logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
+                shutil.copytree(cache_subpath, dest_subpath, symlinks=True)
+                logger.debug('{0} {1} prepared {2} from cache'.format(type(project).__name__, project.pk, dest_subpath))
 
     def post_run_hook(self, instance, status):
         super(RunProjectUpdate, self).post_run_hook(instance, status)
@@ -1356,23 +1381,13 @@ class RunProjectUpdate(BaseTask):
         if self.job_private_data_dir:
             if status == 'successful':
                 # copy project folder before resetting to default branch
-                # because some git-tree-specific resources (like submodules) might matter
                 self.make_local_copy(instance, self.job_private_data_dir)
-            if self.original_branch:
-                # for git project syncs, non-default branches can be problems
-                # restore to branch the repo was on before this run
-                try:
-                    self.original_branch.checkout()
-                except Exception:
-                    # this could have failed due to dirty tree, but difficult to predict all cases
-                    logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
         finally:
-            self.release_lock(instance)
+            if instance.launch_type != 'sync':
+                self.release_lock(instance.project)
 
         p = instance.project
-        if instance.job_type == 'check' and status not in (
-            'failed',
-            'canceled',
-        ):
+        if instance.job_type == 'check' and status not in ('failed', 'canceled'):
             if self.runner_callback.playbook_new_revision:
                 p.scm_revision = self.runner_callback.playbook_new_revision
             else:
@@ -1400,7 +1415,7 @@ class RunProjectUpdate(BaseTask):
 
 
 @task(queue=get_local_queuename)
-class RunInventoryUpdate(BaseTask):
+class RunInventoryUpdate(SourceControlMixin, BaseTask):
 
     model = InventoryUpdate
     event_model = InventoryUpdateEvent
@@ -1556,54 +1571,18 @@ class RunInventoryUpdate(BaseTask):
         # All credentials not used by inventory source injector
        return inventory_update.get_extra_credentials()
 
-    def pre_run_hook(self, inventory_update, private_data_dir):
-        super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
+    def build_project_dir(self, inventory_update, private_data_dir):
         source_project = None
         if inventory_update.inventory_source:
             source_project = inventory_update.inventory_source.source_project
-        if inventory_update.source == 'scm' and source_project and source_project.scm_type:  # never ever update manual projects
-
-            # Check if the content cache exists, so that we do not unnecessarily re-download roles
-            sync_needs = ['update_{}'.format(source_project.scm_type)]
-            has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
-            # Galaxy requirements are not supported for manual projects
-            if not has_cache:
-                sync_needs.extend(['install_roles', 'install_collections'])
-
-            local_project_sync = source_project.create_project_update(
-                _eager_fields=dict(
-                    launch_type="sync",
-                    job_type='run',
-                    job_tags=','.join(sync_needs),
-                    status='running',
-                    execution_node=Instance.objects.me().hostname,
-                    controller_node=Instance.objects.me().hostname,
-                    instance_group=inventory_update.instance_group,
-                    celery_task_id=inventory_update.celery_task_id,
-                )
-            )
-            local_project_sync.log_lifecycle("controller_node_chosen")
-            local_project_sync.log_lifecycle("execution_node_chosen")
-            create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
-            # associate the inventory update before calling run() so that a
-            # cancel() call on the inventory update can cancel the project update
-            local_project_sync.scm_inventory_updates.add(inventory_update)
-
-            project_update_task = local_project_sync._get_task_class()
-            try:
-                sync_task = project_update_task(job_private_data_dir=private_data_dir)
-                sync_task.run(local_project_sync.id)
-                local_project_sync.refresh_from_db()
-            except Exception:
-                inventory_update = self.update_model(
-                    inventory_update.pk,
-                    status='failed',
-                    job_explanation=(
-                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
-                        % ('project_update', local_project_sync.name, local_project_sync.id)
-                    ),
-                )
-                raise
+        if inventory_update.source == 'scm':
+            if not source_project:
+                raise RuntimeError('Could not find project to run SCM inventory update from.')
+            self.sync_and_copy(source_project, private_data_dir)
+        else:
+            # If source is not SCM make an empty project directory, content is built inside inventory folder
+            super(RunInventoryUpdate, self).build_project_dir(inventory_update, private_data_dir)
 
     def post_run_hook(self, inventory_update, status):
         super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
@@ -1646,7 +1625,7 @@ class RunInventoryUpdate(BaseTask):
 
         handler = SpecialInventoryHandler(
             self.runner_callback.event_handler,
-            self.runner_callback.cancel_callback,
+            signal_callback,
             verbosity=inventory_update.verbosity,
             job_timeout=self.get_instance_timeout(self.instance),
             start_time=inventory_update.started,
@@ -12,6 +12,7 @@ import yaml
 
 # Django
 from django.conf import settings
+from django.db import connections
 
 # Runner
 import ansible_runner
@@ -25,12 +26,19 @@ from awx.main.utils.common import (
     cleanup_new_process,
 )
 from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
+from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
+from awx.main.models import Instance, InstanceLink, UnifiedJob
+from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch.publish import task
 
 # Receptorctl
 from receptorctl.socket_interface import ReceptorControl
 
+from filelock import FileLock
+
 logger = logging.getLogger('awx.main.tasks.receptor')
 __RECEPTOR_CONF = '/etc/receptor/receptor.conf'
+__RECEPTOR_CONF_LOCKFILE = f'{__RECEPTOR_CONF}.lock'
 RECEPTOR_ACTIVE_STATES = ('Pending', 'Running')
@@ -40,9 +48,22 @@ class ReceptorConnectionType(Enum):
     STREAMTLS = 2
 
 
+def read_receptor_config():
+    # for K8S deployments, getting a lock is necessary as another process
+    # may be re-writing the config at this time
+    if settings.IS_K8S:
+        lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
+        with lock:
+            with open(__RECEPTOR_CONF, 'r') as f:
+                return yaml.safe_load(f)
+    else:
+        with open(__RECEPTOR_CONF, 'r') as f:
+            return yaml.safe_load(f)
+
+
 def get_receptor_sockfile():
-    with open(__RECEPTOR_CONF, 'r') as f:
-        data = yaml.safe_load(f)
+    data = read_receptor_config()
     for section in data:
         for entry_name, entry_data in section.items():
             if entry_name == 'control-service':
@@ -58,8 +79,7 @@ def get_tls_client(use_stream_tls=None):
     if not use_stream_tls:
         return None
 
-    with open(__RECEPTOR_CONF, 'r') as f:
-        data = yaml.safe_load(f)
+    data = read_receptor_config()
    for section in data:
         for entry_name, entry_data in section.items():
             if entry_name == 'tls-client':
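
These hunks land in the receptor glue (the logger name in the import hunk above reads 'awx.main.tasks.receptor') and funnel every config read through read_receptor_config(), so K8S deployments take a file lock against a concurrent rewrite. A self-contained sketch of the locked-read pattern, assuming the filelock package and the paths named in the hunk:

```python
import yaml
from filelock import FileLock  # pip install filelock

CONF = '/etc/receptor/receptor.conf'  # path from the hunk above

def read_config(take_lock=True):
    if take_lock:  # on K8S another process may be rewriting the file right now
        with FileLock(CONF + '.lock'):
            with open(CONF) as f:
                return yaml.safe_load(f)
    with open(CONF) as f:
        return yaml.safe_load(f)
```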
@@ -76,12 +96,25 @@ def get_receptor_ctl():
     return ReceptorControl(receptor_sockfile)
 
 
+def find_node_in_mesh(node_name, receptor_ctl):
+    attempts = 10
+    backoff = 1
+    for attempt in range(attempts):
+        all_nodes = receptor_ctl.simple_command("status").get('Advertisements', None)
+        for node in all_nodes:
+            if node.get('NodeID') == node_name:
+                return node
+        else:
+            logger.warning(f"Instance {node_name} is not in the receptor mesh. {attempts-attempt} attempts left.")
+            time.sleep(backoff)
+            backoff += 1
+    else:
+        raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')
+
+
 def get_conn_type(node_name, receptor_ctl):
-    all_nodes = receptor_ctl.simple_command("status").get('Advertisements', None)
-    for node in all_nodes:
-        if node.get('NodeID') == node_name:
-            return ReceptorConnectionType(node.get('ConnType'))
-    raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')
+    node = find_node_in_mesh(node_name, receptor_ctl)
+    return ReceptorConnectionType(node.get('ConnType'))
 
 
 def administrative_workunit_reaper(work_list=None):
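
find_node_in_mesh leans on Python's for/else: the else arm only fires when the loop exhausts without an early return or break. A generic sketch of the same retry-with-linear-backoff shape (function names are illustrative):

```python
import time

def poll_until_found(lookup, key, attempts=10):
    backoff = 1
    for attempt in range(attempts):
        node = lookup(key)   # e.g. scan the receptor status advertisements
        if node is not None:
            return node
        time.sleep(backoff)  # linear backoff between polls
        backoff += 1
    else:
        # only reached when every attempt came up empty
        raise LookupError(f'{key} never appeared after {attempts} attempts')
```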
@@ -99,16 +132,22 @@ def administrative_workunit_reaper(work_list=None):
 
     for unit_id, work_data in work_list.items():
         extra_data = work_data.get('ExtraData')
-        if (extra_data is None) or (extra_data.get('RemoteWorkType') != 'ansible-runner'):
+        if extra_data is None:
             continue  # if this is not ansible-runner work, we do not want to touch it
-        params = extra_data.get('RemoteParams', {}).get('params')
-        if not params:
-            continue
-        if not (params == '--worker-info' or params.startswith('cleanup')):
-            continue  # if this is not a cleanup or health check, we do not want to touch it
-        if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
-            continue  # do not want to touch active work units
+        if isinstance(extra_data, str):
+            if not work_data.get('StateName', None) or work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
+                continue
+        else:
+            if extra_data.get('RemoteWorkType') != 'ansible-runner':
+                continue
+            params = extra_data.get('RemoteParams', {}).get('params')
+            if not params:
+                continue
+            if not (params == '--worker-info' or params.startswith('cleanup')):
+                continue  # if this is not a cleanup or health check, we do not want to touch it
+            if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
+                continue  # do not want to touch active work units
         logger.info(f'Reaping orphaned work unit {unit_id} with params {params}')
         receptor_ctl.simple_command(f"work release {unit_id}")
@@ -128,8 +167,7 @@ def run_until_complete(node, timing_data=None, **kwargs):
     kwargs.setdefault('payload', '')
 
     transmit_start = time.time()
-    sign_work = False if settings.IS_K8S else True
-    result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=sign_work, **kwargs)
+    result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=True, **kwargs)
 
     unit_id = result['unitid']
     run_start = time.time()
@@ -204,7 +242,7 @@ def worker_info(node_name, work_type='ansible-runner'):
         else:
             error_list.append(details)
 
-    except (ReceptorNodeNotFound, RuntimeError) as exc:
+    except Exception as exc:
         error_list.append(str(exc))
 
     # If we have a connection error, missing keys would be trivial consequence of that
@@ -275,10 +313,6 @@ class AWXReceptorJob:
         except Exception:
             logger.exception(f"Error releasing work unit {self.unit_id}.")
 
-    @property
-    def sign_work(self):
-        return False if settings.IS_K8S else True
-
     def _run_internal(self, receptor_ctl):
         # Create a socketpair. Where the left side will be used for writing our payload
         # (private data dir, kwargs). The right side will be passed to Receptor for
@@ -329,24 +363,32 @@ class AWXReceptorJob:
             shutil.rmtree(artifact_dir)
 
         resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
-        # Both "processor" and "cancel_watcher" are spawned in separate threads.
-        # We wait for the first one to return. If cancel_watcher returns first,
-        # we yank the socket out from underneath the processor, which will cause it
-        # to exit. A reference to the processor_future is passed into the cancel_watcher_future,
-        # Which exits if the job has finished normally. The context manager ensures we do not
-        # leave any threads laying around.
-        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
-            processor_future = executor.submit(self.processor, resultfile)
-            cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
-            futures = [processor_future, cancel_watcher_future]
-            first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
-
-            res = list(first_future.done)[0].result()
-            if res.status == 'canceled':
+        connections.close_all()
+
+        # "processor" and the main thread will be separate threads.
+        # If a cancel happens, the main thread will encounter an exception, in which case
+        # we yank the socket out from underneath the processor, which will cause it to exit.
+        # The ThreadPoolExecutor context manager ensures we do not leave any threads laying around.
+        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
+            processor_future = executor.submit(self.processor, resultfile)
+
+            try:
+                signal_state.raise_exception = True
+                # address race condition where SIGTERM was issued after this dispatcher task started
+                if signal_callback():
+                    raise SignalExit()
+                res = processor_future.result()
+            except SignalExit:
                 receptor_ctl.simple_command(f"work cancel {self.unit_id}")
                 resultsock.shutdown(socket.SHUT_RDWR)
                 resultfile.close()
-            elif res.status == 'error':
+                result = namedtuple('result', ['status', 'rc'])
+                res = result('canceled', 1)
+            finally:
+                signal_state.raise_exception = False
+
+        if res.status == 'error':
             # If ansible-runner ran, but an error occured at runtime, the traceback information
             # is saved via the status_handler passed in to the processor.
             if 'result_traceback' in self.task.runner_callback.extra_update_fields:
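
The rewrite drops the polling cancel_watcher thread entirely: since CPython delivers signals only to the main thread, the handler can raise SignalExit straight through the blocking processor_future.result() call. A minimal standalone sketch of that model (cancel_work is a stand-in for the receptor work-cancel call and socket teardown):

```python
import concurrent.futures
import signal

class SignalExit(Exception):
    pass

def _handler(signum, frame):
    raise SignalExit()  # CPython runs signal handlers in the main thread only

signal.signal(signal.SIGTERM, _handler)

def run(processor, resultfile, cancel_work):
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        fut = pool.submit(processor, resultfile)
        try:
            return fut.result()  # a SIGTERM unwinds this wait via SignalExit
        except SignalExit:
            cancel_work()        # stand-in for `work cancel <unit_id>` + closing the socket
            return 'canceled'
```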
@@ -430,6 +472,10 @@ class AWXReceptorJob:
 
         return receptor_params
 
+    @property
+    def sign_work(self):
+        return True if self.work_type in ('ansible-runner', 'local') else False
+
     @property
     def work_type(self):
         if self.task.instance.is_container_group_task:
@@ -440,18 +486,6 @@ class AWXReceptorJob:
                 return 'local'
         return 'ansible-runner'
 
-    @cleanup_new_process
-    def cancel_watcher(self, processor_future):
-        while True:
-            if processor_future.done():
-                return processor_future.result()
-
-            if self.task.runner_callback.cancel_callback():
-                result = namedtuple('result', ['status', 'rc'])
-                return result('canceled', 1)
-
-            time.sleep(1)
-
     @property
     def pod_definition(self):
         ee = self.task.instance.execution_environment
@@ -570,3 +604,105 @@ class AWXReceptorJob:
         else:
             config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
         return config
+
+
+# TODO: receptor reload expects ordering within config items to be preserved
+# if python dictionary is not preserving order properly, may need to find a
+# solution. yaml.dump does not seem to work well with OrderedDict. below line may help
+# yaml.add_representer(OrderedDict, lambda dumper, data: dumper.represent_mapping('tag:yaml.org,2002:map', data.items()))
+#
+RECEPTOR_CONFIG_STARTER = (
+    {'local-only': None},
+    {'log-level': 'debug'},
+    {'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
+    {'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
+    {'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
+    {'work-signing': {'privatekey': '/etc/receptor/signing/work-private-key.pem', 'tokenexpiration': '1m'}},
+    {
+        'work-kubernetes': {
+            'worktype': 'kubernetes-runtime-auth',
+            'authmethod': 'runtime',
+            'allowruntimeauth': True,
+            'allowruntimepod': True,
+            'allowruntimeparams': True,
+        }
+    },
+    {
+        'work-kubernetes': {
+            'worktype': 'kubernetes-incluster-auth',
+            'authmethod': 'incluster',
+            'allowruntimeauth': True,
+            'allowruntimepod': True,
+            'allowruntimeparams': True,
+        }
+    },
+    {
+        'tls-client': {
+            'name': 'tlsclient',
+            'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
+            'cert': '/etc/receptor/tls/receptor.crt',
+            'key': '/etc/receptor/tls/receptor.key',
+        }
+    },
+)
+
+
+@task()
+def write_receptor_config():
+    lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
+    with lock:
+        receptor_config = list(RECEPTOR_CONFIG_STARTER)
+
+        this_inst = Instance.objects.me()
+        instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)
+        existing_peers = {link.target_id for link in InstanceLink.objects.filter(source=this_inst)}
+        new_links = []
+        for instance in instances:
+            peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
+            receptor_config.append(peer)
+            if instance.id not in existing_peers:
+                new_links.append(InstanceLink(source=this_inst, target=instance, link_state=InstanceLink.States.ADDING))
+
+        InstanceLink.objects.bulk_create(new_links)
+
+        with open(__RECEPTOR_CONF, 'w') as file:
+            yaml.dump(receptor_config, file, default_flow_style=False)
+
+    # This needs to be outside of the lock because this function itself will acquire the lock.
+    receptor_ctl = get_receptor_ctl()
+
+    attempts = 10
+    for backoff in range(1, attempts + 1):
+        try:
+            receptor_ctl.simple_command("reload")
+            break
+        except ValueError:
+            logger.warning(f"Unable to reload Receptor configuration. {attempts-backoff} attempts left.")
+            time.sleep(backoff)
+    else:
+        raise RuntimeError("Receptor reload failed")
+
+    links = InstanceLink.objects.filter(source=this_inst, target__in=instances, link_state=InstanceLink.States.ADDING)
+    links.update(link_state=InstanceLink.States.ESTABLISHED)
+
+
+@task(queue=get_local_queuename)
+def remove_deprovisioned_node(hostname):
+    InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
+    InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
+
+    node_jobs = UnifiedJob.objects.filter(
+        execution_node=hostname,
+        status__in=(
+            'running',
+            'waiting',
+        ),
+    )
+    while node_jobs.exists():
+        time.sleep(60)
+
+    # This will as a side effect also delete the InstanceLinks that are tied to it.
+    Instance.objects.filter(hostname=hostname).delete()
+
+    # Update the receptor configs for all of the control-plane.
+    write_receptor_config.apply_async(queue='tower_broadcast_all')
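
write_receptor_config() extends RECEPTOR_CONFIG_STARTER with one tcp-peer entry per execution node, then dumps the document with yaml.dump under the same file lock the readers take. A sketch of just that generation step (hostnames invented for illustration):

```python
import yaml

config = [{'local-only': None}, {'log-level': 'debug'}]  # abbreviated starter
for hostname, port in [('exec1.example.org', 27199), ('exec2.example.org', 27199)]:
    config.append({'tcp-peer': {'address': f'{hostname}:{port}', 'tls': 'tlsclient'}})

print(yaml.dump(config, default_flow_style=False))
```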
@@ -9,12 +9,17 @@ logger = logging.getLogger('awx.main.tasks.signals')
 __all__ = ['with_signal_handling', 'signal_callback']
 
 
+class SignalExit(Exception):
+    pass
+
+
 class SignalState:
     def reset(self):
         self.sigterm_flag = False
         self.is_active = False
         self.original_sigterm = None
         self.original_sigint = None
+        self.raise_exception = False
 
     def __init__(self):
         self.reset()
@@ -22,6 +27,9 @@ class SignalState:
     def set_flag(self, *args):
         """Method to pass into the python signal.signal method to receive signals"""
         self.sigterm_flag = True
+        if self.raise_exception:
+            self.raise_exception = False  # so it is not raised a second time in error handling
+            raise SignalExit()
 
     def connect_signals(self):
         self.original_sigterm = signal.getsignal(signal.SIGTERM)
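
With raise_exception, SignalState now supports two delivery modes: the default flag that pollers check via signal_callback(), and an opt-in raising mode that a blocking caller enables only around its wait. A condensed sketch of the same shape (class name and wiring simplified):

```python
import signal

class SignalExit(Exception):
    pass

class MiniSignalState:
    def __init__(self):
        self.sigterm_flag = False
        self.raise_exception = False

    def set_flag(self, *args):
        self.sigterm_flag = True
        if self.raise_exception:
            self.raise_exception = False  # avoid a second raise during error handling
            raise SignalExit()

state = MiniSignalState()
signal.signal(signal.SIGTERM, state.set_flag)
# pollers check state.sigterm_flag; a blocking caller sets
# state.raise_exception = True before waiting and clears it in a finally block
```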
@@ -10,12 +10,13 @@ from contextlib import redirect_stdout
 import shutil
 import time
 from distutils.version import LooseVersion as Version
+from datetime import datetime
 
 # Django
 from django.conf import settings
 from django.db import transaction, DatabaseError, IntegrityError
 from django.db.models.fields.related import ForeignKey
-from django.utils.timezone import now
+from django.utils.timezone import now, timedelta
 from django.utils.encoding import smart_str
 from django.contrib.auth.models import User
 from django.utils.translation import gettext_lazy as _
@@ -53,13 +54,14 @@ from awx.main.dispatch import get_local_queuename, reaper
 from awx.main.utils.common import (
     ignore_inventory_computed_fields,
     ignore_inventory_group_removal,
-    schedule_task_manager,
+    ScheduleWorkflowManager,
+    ScheduleTaskManager,
 )
 
 from awx.main.utils.external_logging import reconfigure_rsyslog
 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock
-from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper
+from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
 from awx.main.consumers import emit_channel_notification
 from awx.main import analytics
 from awx.conf import settings_registry
@@ -79,6 +81,10 @@ Try upgrading OpenSSH or providing your private key in an different format. \
 def dispatch_startup():
     startup_logger = logging.getLogger('awx.main.tasks')
 
+    # TODO: Enable this on VM installs
+    if settings.IS_K8S:
+        write_receptor_config()
+
     startup_logger.debug("Syncing Schedules")
     for sch in Schedule.objects.all():
         try:
@@ -103,6 +109,8 @@ def dispatch_startup():
     #
     apply_cluster_membership_policies()
     cluster_node_heartbeat()
+    reaper.startup_reaping()
+    reaper.reap_waiting(grace_period=0)
     m = Metrics()
     m.reset_values()
@@ -114,7 +122,11 @@ def inform_cluster_of_shutdown():
     try:
         this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
         this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
-        logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
+        try:
+            reaper.reap_waiting(this_inst, grace_period=0)
+        except Exception:
+            logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
+        logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
     except Exception:
         logger.exception('Encountered problem with normal shutdown signal.')
@@ -341,9 +353,13 @@ def _cleanup_images_and_files(**kwargs):
     logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')
 
     # if we are the first instance alphabetically, then run cleanup on execution nodes
-    checker_instance = Instance.objects.filter(node_type__in=['hybrid', 'control'], enabled=True, capacity__gt=0).order_by('-hostname').first()
+    checker_instance = (
+        Instance.objects.filter(node_type__in=['hybrid', 'control'], node_state=Instance.States.READY, enabled=True, capacity__gt=0)
+        .order_by('-hostname')
+        .first()
+    )
     if checker_instance and this_inst.hostname == checker_instance.hostname:
-        for inst in Instance.objects.filter(node_type='execution', enabled=True, capacity__gt=0):
+        for inst in Instance.objects.filter(node_type='execution', node_state=Instance.States.READY, enabled=True, capacity__gt=0):
             runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs)
             if not runner_cleanup_kwargs:
                 continue
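Note: every control/hybrid node runs this same deterministic query, so exactly one READY instance elects itself to clean up the execution nodes. One nit worth flagging: the in-code comment says "first instance alphabetically", but `order_by('-hostname').first()` selects the reverse-sorted head, i.e. the alphabetically *last* hostname. The election still works because all nodes agree on the ordering:

```python
hostnames = ['awx-control-1', 'awx-control-2', 'awx-control-3']
this_host = 'awx-control-3'

checker = max(hostnames)  # equivalent to order_by('-hostname').first()
# min(hostnames) would match the comment's "first alphabetically"
if this_host == checker:
    print('this node runs cleanup on the execution nodes')
```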
@@ -397,7 +413,12 @@ def execution_node_health_check(node):
         return
 
     if instance.node_type != 'execution':
-        raise RuntimeError(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')
+        logger.warning(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')
+        return
+
+    if instance.node_state not in (Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
+        logger.warning(f"Execution node health check ran against node {instance.hostname} in state {instance.node_state}")
+        return
 
     data = worker_info(node)
 
@@ -432,6 +453,7 @@ def inspect_execution_nodes(instance_list):
 
         nowtime = now()
         workers = mesh_status['Advertisements']
+
         for ad in workers:
             hostname = ad['NodeID']
 
@@ -442,25 +464,23 @@ def inspect_execution_nodes(instance_list):
                 continue
 
             # Control-plane nodes are dealt with via local_health_check instead.
-            if instance.node_type in ('control', 'hybrid'):
+            if instance.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID):
                 continue
 
-            was_lost = instance.is_lost(ref_time=nowtime)
             last_seen = parse_date(ad['Time'])
 
             if instance.last_seen and instance.last_seen >= last_seen:
                 continue
             instance.last_seen = last_seen
             instance.save(update_fields=['last_seen'])
 
             # Only execution nodes should be dealt with by execution_node_health_check
-            if instance.node_type == 'hop':
-                if was_lost and (not instance.is_lost(ref_time=nowtime)):
+            if instance.node_type == Instance.Types.HOP:
+                if instance.node_state in (Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
                     logger.warning(f'Hop node {hostname}, has rejoined the receptor mesh')
                     instance.save_health_data(errors='')
                 continue
 
-            if was_lost:
+            if instance.node_state in (Instance.States.UNAVAILABLE, Instance.States.INSTALLED):
                 # if the instance *was* lost, but has appeared again,
                 # attempt to re-establish the initial capacity and version
                 # check
@@ -475,11 +495,11 @@ def inspect_execution_nodes(instance_list):
                 execution_node_health_check.apply_async([hostname])
 
 
-@task(queue=get_local_queuename)
-def cluster_node_heartbeat():
+@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
+def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
     logger.debug("Cluster node heartbeat task.")
     nowtime = now()
-    instance_list = list(Instance.objects.all())
+    instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED)))
     this_inst = None
     lost_instances = []
 
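Note: `bind_kwargs=['dispatch_time', 'worker_tasks']` tells the dispatcher to inject those two values when it invokes the task; both parameters default to `None` so direct calls keep working. A hypothetical sketch of the injection mechanics (the decorator internals here are an assumption, not the dispatcher's real code):

```python
import functools

def task(queue=None, bind_kwargs=None):
    def decorator(fn):
        @functools.wraps(fn)
        def from_dispatcher(*args, dispatcher_state=None, **kwargs):
            for name in bind_kwargs or []:  # inject only what the task asked for
                kwargs.setdefault(name, (dispatcher_state or {}).get(name))
            return fn(*args, **kwargs)
        return from_dispatcher
    return decorator

@task(bind_kwargs=['dispatch_time', 'worker_tasks'])
def heartbeat(dispatch_time=None, worker_tasks=None):
    return dispatch_time, worker_tasks

print(heartbeat(dispatcher_state={'dispatch_time': '2022-08-01T12:00:00', 'worker_tasks': {'w0': ['uuid-1']}}))
```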
@@ -499,12 +519,23 @@ def cluster_node_heartbeat():
 
     if this_inst:
         startup_event = this_inst.is_lost(ref_time=nowtime)
+        last_last_seen = this_inst.last_seen
         this_inst.local_health_check()
         if startup_event and this_inst.capacity != 0:
-            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
+            logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
             return
+        elif not last_last_seen:
+            logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
+        elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
+            logger.warning(f'Heartbeat skew - interval={(nowtime - last_last_seen).total_seconds():.4f}, expected={settings.CLUSTER_NODE_HEARTBEAT_PERIOD}')
     else:
-        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
+        if settings.AWX_AUTO_DEPROVISION_INSTANCES:
+            (changed, this_inst) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
+            if changed:
+                logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
+            this_inst.local_health_check()
+        else:
+            raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
     # IFF any node has a greater version than we do, then we'll shutdown services
     for other_inst in instance_list:
         if other_inst.node_type in ('execution', 'hop'):
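Note: the new `elif` chain turns the heartbeat into a self-diagnostic — it warns when the measured interval since the previous `last_seen` exceeds `CLUSTER_NODE_HEARTBEAT_PERIOD` by more than 2 seconds. Worked arithmetic under an assumed 60-second period:

```python
from datetime import datetime, timezone

CLUSTER_NODE_HEARTBEAT_PERIOD = 60  # assumed setting value, in seconds

nowtime = datetime(2022, 8, 1, 12, 1, 5, tzinfo=timezone.utc)
last_last_seen = datetime(2022, 8, 1, 12, 0, 0, tzinfo=timezone.utc)

interval = (nowtime - last_last_seen).total_seconds()  # 65.0
if interval > CLUSTER_NODE_HEARTBEAT_PERIOD + 2:
    print(f'Heartbeat skew - interval={interval:.4f}, expected={CLUSTER_NODE_HEARTBEAT_PERIOD}')
```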
@@ -524,15 +555,17 @@ def cluster_node_heartbeat():
 
     for other_inst in lost_instances:
         try:
-            reaper.reap(other_inst)
+            explanation = "Job reaped due to instance shutdown"
+            reaper.reap(other_inst, job_explanation=explanation)
+            reaper.reap_waiting(other_inst, grace_period=0, job_explanation=explanation)
         except Exception:
             logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
         try:
-            if settings.AWX_AUTO_DEPROVISION_INSTANCES:
+            if settings.AWX_AUTO_DEPROVISION_INSTANCES and other_inst.node_type == "control":
                 deprovision_hostname = other_inst.hostname
-                other_inst.delete()
+                other_inst.delete()  # FIXME: what about associated inbound links?
                 logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
-            elif other_inst.capacity != 0 or (not other_inst.errors):
+            elif other_inst.node_state == Instance.States.READY:
                 other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive'))
                 logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
 
@@ -542,6 +575,15 @@ def cluster_node_heartbeat():
             else:
                 logger.exception('Error marking {} as lost'.format(other_inst.hostname))
 
+    # Run local reaper
+    if worker_tasks is not None:
+        active_task_ids = []
+        for task_list in worker_tasks.values():
+            active_task_ids.extend(task_list)
+        reaper.reap(instance=this_inst, excluded_uuids=active_task_ids)
+        if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
+            reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
+
 
 @task(queue=get_local_queuename)
 def awx_receptor_workunit_reaper():
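Note: `worker_tasks` presumably maps each dispatcher worker to the task UUIDs it is currently running, so anything running on this instance but absent from that snapshot is safe to reap. The `max(...) <= 1` guard only allows `reap_waiting` when no worker holds more than one task, i.e. nothing is in flight beyond the heartbeat that took the snapshot. Sketch with the assumed shape:

```python
worker_tasks = {               # assumed shape: {worker_id: [uuids of running tasks]}
    'worker-0': ['3d1f0e'],    # just this heartbeat task itself
    'worker-1': [],
}

active_task_ids = []
for task_list in worker_tasks.values():
    active_task_ids.extend(task_list)

quiet = max(len(task_list) for task_list in worker_tasks.values()) <= 1
print(active_task_ids, quiet)  # waiting jobs are reaped only when quiet is True
```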
@@ -589,7 +631,8 @@ def awx_k8s_reaper():
     for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
         logger.debug("Checking for orphaned k8s pods for {}.".format(group))
         pods = PodManager.list_active_jobs(group)
-        for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
+        time_cutoff = now() - timedelta(seconds=settings.K8S_POD_REAPER_GRACE_PERIOD)
+        for job in UnifiedJob.objects.filter(pk__in=pods.keys(), finished__lte=time_cutoff).exclude(status__in=ACTIVE_STATES):
             logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
             try:
                 pm = PodManager(job)
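Note: the grace period stops the reaper from deleting a pod the moment its job leaves an active state; the job must also have been finished for at least `K8S_POD_REAPER_GRACE_PERIOD` seconds. Quick illustration with an assumed 60-second setting:

```python
from datetime import datetime, timedelta, timezone

K8S_POD_REAPER_GRACE_PERIOD = 60  # assumed value, in seconds

now = datetime.now(timezone.utc)
time_cutoff = now - timedelta(seconds=K8S_POD_REAPER_GRACE_PERIOD)

finished = now - timedelta(seconds=30)
print(finished <= time_cutoff)  # False: finished too recently, pod survives this pass
```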
@@ -657,6 +700,13 @@ def awx_periodic_scheduler():
         state.save()
 
 
+def schedule_manager_success_or_error(instance):
+    if instance.unifiedjob_blocked_jobs.exists():
+        ScheduleTaskManager().schedule()
+    if instance.spawned_by_workflow:
+        ScheduleWorkflowManager().schedule()
+
+
 @task(queue=get_local_queuename)
 def handle_work_success(task_actual):
     try:
@@ -666,8 +716,7 @@ def handle_work_success(task_actual):
         return
     if not instance:
         return
-
-    schedule_task_manager()
+    schedule_manager_success_or_error(instance)
 
 
 @task(queue=get_local_queuename)
@@ -709,8 +758,7 @@ def handle_work_error(task_id, *args, **kwargs):
     # what the job complete message handler does then we may want to send a
     # completion event for each job here.
     if first_instance:
-        schedule_task_manager()
-        pass
+        schedule_manager_success_or_error(first_instance)
 
 
 @task(queue=get_local_queuename)
 
@@ -210,7 +210,7 @@ def mk_workflow_job_template(name, extra_vars='', spec=None, organization=None,
     if extra_vars:
         extra_vars = json.dumps(extra_vars)
 
-    wfjt = WorkflowJobTemplate(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service)
+    wfjt = WorkflowJobTemplate.objects.create(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service)
 
     if spec:
         wfjt.survey_spec = spec
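Note: `WorkflowJobTemplate(...)` only builds an in-memory instance; `objects.create(...)` also issues the INSERT, so the fixture now returns a row that has a primary key. A self-contained imitation of the Django behavior (these classes are stand-ins, not Django code):

```python
class FakeManager:
    def __init__(self, model):
        self.model, self._pk = model, 0

    def create(self, **kwargs):
        obj = self.model(**kwargs)
        self._pk += 1
        obj.pk = self._pk  # create() saves, so a primary key is assigned
        return obj

class MyModel:
    def __init__(self, name):
        self.name = name
        self.pk = None  # plain construction does NOT save

MyModel.objects = FakeManager(MyModel)

print(MyModel(name='x').pk)                 # None
print(MyModel.objects.create(name='x').pk)  # 1
```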
@@ -19,8 +19,7 @@ EXPECTED_VALUES = {
     'awx_hosts_total': 1.0,
     'awx_schedules_total': 1.0,
     'awx_sessions_total': 0.0,
-    'awx_sessions_total': 0.0,
-    'awx_sessions_total': 0.0,
+    'awx_status_total': 0.0,
     'awx_running_jobs_total': 0.0,
     'awx_instance_capacity': 100.0,
     'awx_instance_consumed_capacity': 0.0,
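Note: the real bug here is that a Python dict literal silently keeps only the last of several identical keys, so the three `'awx_sessions_total'` entries collapsed to one and the extra lines asserted nothing; the fix drops the duplicates and adds the `'awx_status_total'` key that was evidently intended. Demonstration:

```python
expected = {
    'awx_sessions_total': 0.0,
    'awx_sessions_total': 1.0,  # silently overwrites the first entry
}
print(expected)       # {'awx_sessions_total': 1.0}
print(len(expected))  # 1
```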
@@ -1,16 +1,9 @@
 import pytest
 
-from unittest import mock
-
 from awx.api.versioning import reverse
 from awx.main.models.activity_stream import ActivityStream
 from awx.main.models.ha import Instance
 
-import redis
-
-# Django
-from django.test.utils import override_settings
-
 
 INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, memory=36000000000, cpu_capacity=6, mem_capacity=42)
 
@@ -50,33 +43,14 @@ def test_enabled_sets_capacity(patch, admin_user):
 def test_auditor_user_health_check(get, post, system_auditor):
     instance = Instance.objects.create(**INSTANCE_KWARGS)
     url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
-    r = get(url=url, user=system_auditor, expect=200)
-    assert r.data['cpu_capacity'] == instance.cpu_capacity
+    get(url=url, user=system_auditor, expect=200)
     post(url=url, user=system_auditor, expect=403)
 
 
 @pytest.mark.django_db
-def test_health_check_throws_error(post, admin_user):
-    instance = Instance.objects.create(node_type='execution', **INSTANCE_KWARGS)
-    url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
-    # we will simulate a receptor error, similar to this one
-    # https://github.com/ansible/receptor/blob/156e6e24a49fbf868734507f9943ac96208ed8f5/receptorctl/receptorctl/socket_interface.py#L204
-    # related to issue https://github.com/ansible/tower/issues/5315
-    with mock.patch('awx.main.tasks.receptor.run_until_complete', side_effect=RuntimeError('Remote error: foobar')):
-        post(url=url, user=admin_user, expect=200)
-    instance.refresh_from_db()
-    assert 'Remote error: foobar' in instance.errors
-    assert instance.capacity == 0
-
-
-@pytest.mark.django_db
-@mock.patch.object(redis.client.Redis, 'ping', lambda self: True)
 def test_health_check_usage(get, post, admin_user):
     instance = Instance.objects.create(**INSTANCE_KWARGS)
     url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
-    r = get(url=url, user=admin_user, expect=200)
-    assert r.data['cpu_capacity'] == instance.cpu_capacity
-    assert r.data['last_health_check'] is None
-    with override_settings(CLUSTER_HOST_ID=instance.hostname):  # force direct call of cluster_node_health_check
-        r = post(url=url, user=admin_user, expect=200)
-    assert r.data['last_health_check'] is not None
+    get(url=url, user=admin_user, expect=200)
+    r = post(url=url, user=admin_user, expect=200)
+    assert r.data['msg'] == f"Health check is running for {instance.hostname}."
@@ -13,17 +13,11 @@ from django.utils import timezone
 # AWX
 from awx.api.versioning import reverse
 from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin
-from awx.main.models import (
-    JobTemplate,
-    User,
-    Job,
-    AdHocCommand,
-    ProjectUpdate,
-)
+from awx.main.models import JobTemplate, User, Job, AdHocCommand, ProjectUpdate, InstanceGroup, Label, Organization
 
 
 @pytest.mark.django_db
-def test_job_relaunch_permission_denied_response(post, get, inventory, project, credential, net_credential, machine_credential):
+def test_job_relaunch_permission_denied_response(post, get, inventory, project, net_credential, machine_credential):
     jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True)
     jt.credentials.add(machine_credential)
     jt_user = User.objects.create(username='jobtemplateuser')
@@ -39,6 +33,22 @@ def test_job_relaunch_permission_denied_response(post, get, inventory, project,
     job.launch_config.credentials.add(net_credential)
     r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
     assert 'launched with prompted fields you do not have access to' in r.data['detail']
+    job.launch_config.credentials.clear()
+
+    # Job has prompted instance group that user cannot see
+    job.launch_config.instance_groups.add(InstanceGroup.objects.create())
+    r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
+    assert 'launched with prompted fields you do not have access to' in r.data['detail']
+    job.launch_config.instance_groups.clear()
+
+    # Job has prompted label that user cannot see
+    job.launch_config.labels.add(Label.objects.create(organization=Organization.objects.create()))
+    r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
+    assert 'launched with prompted fields you do not have access to' in r.data['detail']
+    job.launch_config.labels.clear()
+
+    # without any of those prompts, user can launch
+    r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=201)
 
 
 @pytest.mark.django_db
@@ -4,8 +4,7 @@ import yaml
 import json
 
 from awx.api.serializers import JobLaunchSerializer
-from awx.main.models.credential import Credential
-from awx.main.models.inventory import Inventory, Host
+from awx.main.models import Credential, Inventory, Host, ExecutionEnvironment, Label, InstanceGroup
 from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
 
 from awx.api.versioning import reverse
@@ -15,6 +14,11 @@ from awx.api.versioning import reverse
 def runtime_data(organization, credentialtype_ssh):
     cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'})
     inv_obj = organization.inventories.create(name="runtime-inv")
+    inv_obj.hosts.create(name='foo1')
+    inv_obj.hosts.create(name='foo2')
+    ee_obj = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
+    ig_obj = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
+    labels_obj = Label.objects.create(name='foo', description='bar', organization=organization)
     return dict(
         extra_vars='{"job_launch_var": 4}',
         limit='test-servers',
@@ -25,6 +29,12 @@ def runtime_data(organization, credentialtype_ssh):
         credentials=[cred_obj.pk],
         diff_mode=True,
         verbosity=2,
+        execution_environment=ee_obj.pk,
+        labels=[labels_obj.pk],
+        forks=7,
+        job_slice_count=2,
+        timeout=10,
+        instance_groups=[ig_obj.pk],
     )
 
 
@@ -54,6 +64,12 @@ def job_template_prompts(project, inventory, machine_credential):
         ask_credential_on_launch=on_off,
         ask_diff_mode_on_launch=on_off,
         ask_verbosity_on_launch=on_off,
+        ask_execution_environment_on_launch=on_off,
+        ask_labels_on_launch=on_off,
+        ask_forks_on_launch=on_off,
+        ask_job_slice_count_on_launch=on_off,
+        ask_timeout_on_launch=on_off,
+        ask_instance_groups_on_launch=on_off,
     )
     jt.credentials.add(machine_credential)
     return jt
@@ -77,6 +93,12 @@ def job_template_prompts_null(project):
         ask_credential_on_launch=True,
         ask_diff_mode_on_launch=True,
         ask_verbosity_on_launch=True,
+        ask_execution_environment_on_launch=True,
+        ask_labels_on_launch=True,
+        ask_forks_on_launch=True,
+        ask_job_slice_count_on_launch=True,
+        ask_timeout_on_launch=True,
+        ask_instance_groups_on_launch=True,
     )
 
 
@@ -92,6 +114,12 @@ def data_to_internal(data):
     internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])
     if 'inventory' in data:
         internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
+    if 'execution_environment' in data:
+        internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment'])
+    if 'labels' in data:
+        internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']]
+    if 'instance_groups' in data:
+        internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']]
     return internal
 
 
@@ -124,6 +152,12 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad
     assert 'credentials' in response.data['ignored_fields']
     assert 'job_tags' in response.data['ignored_fields']
     assert 'skip_tags' in response.data['ignored_fields']
+    assert 'execution_environment' in response.data['ignored_fields']
+    assert 'labels' in response.data['ignored_fields']
+    assert 'forks' in response.data['ignored_fields']
+    assert 'job_slice_count' in response.data['ignored_fields']
+    assert 'timeout' in response.data['ignored_fields']
+    assert 'instance_groups' in response.data['ignored_fields']
 
 
 @pytest.mark.django_db
@@ -162,6 +196,34 @@ def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
     mock_job.signal_start.assert_called_once()
 
 
+@pytest.mark.django_db
+@pytest.mark.job_runtime_vars
+def test_slice_timeout_forks_need_int(job_template_prompts, post, admin_user, mocker):
+    job_template = job_template_prompts(True)
+
+    mock_job = mocker.MagicMock(spec=Job, id=968)
+
+    with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
+        with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
+            response = post(
+                reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'timeout': '', 'job_slice_count': '', 'forks': ''}, admin_user, expect=400
+            )
+            assert 'forks' in response.data and response.data['forks'][0] == 'A valid integer is required.'
+            assert 'job_slice_count' in response.data and response.data['job_slice_count'][0] == 'A valid integer is required.'
+            assert 'timeout' in response.data and response.data['timeout'][0] == 'A valid integer is required.'
+
+
+@pytest.mark.django_db
+@pytest.mark.job_runtime_vars
+def test_slice_count_not_supported(job_template_prompts, post, admin_user):
+    job_template = job_template_prompts(True)
+    assert job_template.inventory.hosts.count() == 0
+    job_template.inventory.hosts.create(name='foo')
+
+    response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_slice_count': 8}, admin_user, expect=400)
+    assert response.data['job_slice_count'][0] == 'Job inventory does not have enough hosts for slicing'
+
+
 @pytest.mark.django_db
 @pytest.mark.job_runtime_vars
 def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):
@@ -176,6 +238,10 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null,
     inventory = Inventory.objects.get(pk=runtime_data['inventory'])
     inventory.use_role.members.add(rando)
 
+    # Instance Groups and label can not currently easily be used by rando so we need to remove the instance groups from the runtime data
+    runtime_data.pop('instance_groups')
+    runtime_data.pop('labels')
+
     mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
 
     with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
@@ -243,12 +309,59 @@ def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime
 
 @pytest.mark.django_db
 @pytest.mark.job_runtime_vars
-def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando):
+def test_job_launch_works_without_access_to_ig_if_ig_in_template(job_template_prompts, runtime_data, post, rando, mocker):
+    job_template = job_template_prompts(True)
+    job_template.instance_groups.add(InstanceGroup.objects.get(id=runtime_data['instance_groups'][0]))
+    job_template.instance_groups.add(InstanceGroup.objects.create(name='foo'))
+    job_template.save()
+    job_template.execute_role.members.add(rando)
+
+    # Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added
+    post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(instance_groups=runtime_data['instance_groups']), rando, expect=201)
+
+
+@pytest.mark.django_db
+@pytest.mark.job_runtime_vars
+def test_job_launch_works_without_access_to_label_if_label_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
+    job_template = job_template_prompts(True)
+    job_template.labels.add(Label.objects.get(id=runtime_data['labels'][0]))
+    job_template.labels.add(Label.objects.create(name='baz', description='faz', organization=organization))
+    job_template.save()
+    job_template.execute_role.members.add(rando)
+
+    # Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added
+    post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(labels=runtime_data['labels']), rando, expect=201)
+
+
+@pytest.mark.django_db
+@pytest.mark.job_runtime_vars
+def test_job_launch_works_without_access_to_ee_if_ee_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
+    job_template = job_template_prompts(True)
+    job_template.execute_role.members.add(rando)
+
+    # Make sure we get a 201 instead of a 403 since we are providing an override that is already in the template
+    post(
+        reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(execution_environment=runtime_data['execution_environment']), rando, expect=201
+    )
+
+
+@pytest.mark.parametrize(
+    'item_type',
+    [
+        ('credentials'),
+        ('labels'),
+        ('instance_groups'),
+    ],
)
+@pytest.mark.django_db
+@pytest.mark.job_runtime_vars
+def test_job_launch_fails_without_access(job_template_prompts, runtime_data, post, rando, item_type):
     job_template = job_template_prompts(True)
     job_template.execute_role.members.add(rando)
 
     # Assure that giving a credential without access blocks the launch
-    post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(credentials=runtime_data['credentials']), rando, expect=403)
+    data = {item_type: runtime_data[item_type]}
+    post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), data, rando, expect=403)
 
 
 @pytest.mark.django_db
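Note: the additions above all follow AWX's prompt-on-launch pattern — a field supplied in the launch payload is honored only when the template's matching `ask_*_on_launch` flag is set, and otherwise is reported back under `ignored_fields`. A minimal sketch of the gating idea (flag naming is simplified here; the serializer's real ask-mapping differs for some fields):

```python
def split_launch_fields(template_flags, launch_data):
    accepted, ignored = {}, {}
    for field, value in launch_data.items():
        if template_flags.get(f'ask_{field}_on_launch'):
            accepted[field] = value
        else:
            ignored[field] = value
    return accepted, ignored

flags = {'ask_forks_on_launch': True, 'ask_timeout_on_launch': False}
print(split_launch_fields(flags, {'forks': 7, 'timeout': 10}))
# ({'forks': 7}, {'timeout': 10})
```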
@@ -13,7 +13,10 @@ from awx.main.models.workflow import (
     WorkflowJobTemplateNode,
 )
 from awx.main.models.credential import Credential
-from awx.main.scheduler import TaskManager
+from awx.main.scheduler import TaskManager, WorkflowManager, DependencyManager
 
+# Django
+from django.utils.timezone import now, timedelta
+
 
 @pytest.fixture
@@ -74,6 +77,18 @@ class TestApprovalNodes:
         assert approval_node.unified_job_template.description == 'Approval Node'
         assert approval_node.unified_job_template.timeout == 0
 
+    def test_approval_node_creation_with_timeout(self, post, approval_node, admin_user):
+        assert approval_node.timeout is None
+
+        url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'})
+        post(url, {'name': 'Test', 'description': 'Approval Node', 'timeout': 10}, user=admin_user, expect=201)
+
+        approval_node = WorkflowJobTemplateNode.objects.get(pk=approval_node.pk)
+        approval_node.refresh_from_db()
+        assert approval_node.timeout is None
+        assert isinstance(approval_node.unified_job_template, WorkflowApprovalTemplate)
+        assert approval_node.unified_job_template.timeout == 10
+
     def test_approval_node_creation_failure(self, post, approval_node, admin_user):
         # This test leaves off a required param to assert that user will get a 400.
         url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'})
@@ -137,8 +152,9 @@ class TestApprovalNodes:
         post(url, {'name': 'Approve Test', 'description': '', 'timeout': 0}, user=admin_user, expect=201)
         post(reverse('api:workflow_job_template_launch', kwargs={'pk': wfjt.pk}), user=admin_user, expect=201)
         wf_job = WorkflowJob.objects.first()
+        DependencyManager().schedule()  # TODO: exclude workflows from this and delete line
         TaskManager().schedule()
-        TaskManager().schedule()
+        WorkflowManager().schedule()
         wfj_node = wf_job.workflow_nodes.first()
         approval = wfj_node.job
         assert approval.name == 'Approve Test'
@@ -162,8 +178,9 @@ class TestApprovalNodes:
         post(url, {'name': 'Deny Test', 'description': '', 'timeout': 0}, user=admin_user, expect=201)
         post(reverse('api:workflow_job_template_launch', kwargs={'pk': wfjt.pk}), user=admin_user, expect=201)
         wf_job = WorkflowJob.objects.first()
+        DependencyManager().schedule()  # TODO: exclude workflows from this and delete line
         TaskManager().schedule()
-        TaskManager().schedule()
+        WorkflowManager().schedule()
         wfj_node = wf_job.workflow_nodes.first()
         approval = wfj_node.job
         assert approval.name == 'Deny Test'
@@ -216,6 +233,37 @@ class TestApprovalNodes:
         approval.refresh_from_db()
         assert approval.status == 'failed'
 
+    def test_expires_time_on_creation(self):
+        now_time = now()
+        wa = WorkflowApproval.objects.create(timeout=34)
+        # this is fudged, so we assert that the expires time is in reasonable range
+        assert timedelta(seconds=33) < (wa.expires - now_time) < timedelta(seconds=35)
+
+    @pytest.mark.parametrize('with_update_fields', [True, False])
+    def test_expires_time_update(self, with_update_fields):
+        wa = WorkflowApproval.objects.create()
+        assert wa.timeout == 0
+        assert wa.expires is None
+        wa.timeout = 1234
+        if with_update_fields:
+            wa.save(update_fields=['timeout'])
+        else:
+            wa.save()
+        assert wa.created + timedelta(seconds=1234) == wa.expires
+
+    @pytest.mark.parametrize('with_update_fields', [True, False])
+    def test_reset_timeout_and_expires(self, with_update_fields):
+        wa = WorkflowApproval.objects.create()
+        wa.timeout = 1234
+        wa.save()
+        assert wa.expires
+        wa.timeout = 0
+        if with_update_fields:
+            wa.save(update_fields=['timeout'])
+        else:
+            wa.save()
+        assert wa.expires is None
+
 
 @pytest.mark.django_db
 class TestExclusiveRelationshipEnforcement:
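Note: the three new tests pin down that `WorkflowApproval.expires` is derived from `created + timeout` on every save that touches `timeout` — including saves restricted with `update_fields` — and is cleared again when `timeout` is 0. A sketch of a `save()` override that would satisfy them (hypothetical; not the model's actual code):

```python
from datetime import datetime, timedelta, timezone

class ApprovalSketch:
    def __init__(self, timeout=0):
        self.created = datetime.now(timezone.utc)
        self.timeout, self.expires = timeout, None
        self.save()

    def save(self, update_fields=None):
        # Recompute expires whenever timeout participates in the save.
        if update_fields is None or 'timeout' in update_fields:
            self.expires = self.created + timedelta(seconds=self.timeout) if self.timeout else None

wa = ApprovalSketch()
wa.timeout = 1234
wa.save(update_fields=['timeout'])
assert wa.expires == wa.created + timedelta(seconds=1234)
wa.timeout = 0
wa.save()
assert wa.expires is None
```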
@@ -706,7 +706,7 @@ def jt_linked(organization, project, inventory, machine_credential, credential,
 
 @pytest.fixture
 def workflow_job_template(organization):
-    wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization)
+    wjt = WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization)
     wjt.save()
 
     return wjt
awx/main/tests/functional/models/test_base.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+from unittest import mock
+import pytest
+
+from crum import impersonate
+
+from awx.main.models import Host
+
+
+@pytest.mark.django_db
+def test_modified_by_not_changed(inventory):
+    with impersonate(None):
+        host = Host.objects.create(name='foo', inventory=inventory)
+    assert host.modified_by == None
+    host.variables = {'foo': 'bar'}
+    with mock.patch('django.db.models.Model.save') as save_mock:
+        host.save(update_fields=['variables'])
+        save_mock.assert_called_once_with(update_fields=['variables'])
+
+
+@pytest.mark.django_db
+def test_modified_by_changed(inventory, alice):
+    with impersonate(None):
+        host = Host.objects.create(name='foo', inventory=inventory)
+    assert host.modified_by == None
+    with impersonate(alice):
+        host.variables = {'foo': 'bar'}
+        with mock.patch('django.db.models.Model.save') as save_mock:
+            host.save(update_fields=['variables'])
+            save_mock.assert_called_once_with(update_fields=['variables', 'modified_by'])
+        assert host.modified_by == alice
+
+
+@pytest.mark.django_db
+def test_created_by(inventory, alice):
+    with impersonate(alice):
+        host = Host.objects.create(name='foo', inventory=inventory)
+    assert host.created_by == alice
+    with impersonate(None):
+        host = Host.objects.create(name='bar', inventory=inventory)
+    assert host.created_by == None
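Note: the new file leans on `crum.impersonate` to control the acting user and asserts that the base model's `save()` appends `modified_by` to `update_fields` only when the acting user actually differs, so targeted saves stay targeted otherwise. A sketch of that conditional (the real logic lives in AWX's base model class):

```python
def fields_for_save(instance_modified_by, acting_user, update_fields):
    """Return the update_fields list the underlying save() should receive."""
    if update_fields is not None and acting_user is not None and instance_modified_by != acting_user:
        return list(update_fields) + ['modified_by']
    return update_fields

print(fields_for_save(None, None, ['variables']))     # ['variables']
print(fields_for_save(None, 'alice', ['variables']))  # ['variables', 'modified_by']
```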
@@ -64,3 +64,26 @@ class TestSlicingModels:
         inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
         [inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)]
         assert job_template.get_effective_slice_ct({'inventory': inventory2})
+
+    def test_effective_slice_count_prompt(self, job_template, inventory, organization):
+        job_template.inventory = inventory
+        # Add our prompt fields to the JT to allow overrides
+        job_template.ask_job_slice_count_on_launch = True
+        job_template.ask_inventory_on_launch = True
+        # Set a default value of the slice count to something low
+        job_template.job_slice_count = 2
+        # Create an inventory with 4 nodes
+        inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
+        [inventory2.hosts.create(name='foo{}'.format(i)) for i in range(4)]
+        # The inventory slice count will be the min of the number of nodes (4) or the job slice (2)
+        assert job_template.get_effective_slice_ct({'inventory': inventory2}) == 2
+        # Now we are going to pass in an override (like the prompt would) and as long as that is < host count we expect that back
+        assert job_template.get_effective_slice_ct({'inventory': inventory2, 'job_slice_count': 3}) == 3
+
+    def test_slice_count_prompt_limited_by_inventory(self, job_template, inventory, organization):
+        assert inventory.hosts.count() == 0
+        job_template.inventory = inventory
+        inventory.hosts.create(name='foo')
+
+        unified_job = job_template.create_unified_job(job_slice_count=2)
+        assert isinstance(unified_job, Job)
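Note: per the comments in the new test, the effective slice count is the requested slice count (template default, or the prompted override) capped by the number of hosts in the effective inventory, and a count that collapses to 1 yields a plain `Job` rather than a sliced workflow. The arithmetic:

```python
def effective_slice_ct(host_count, default_slices, prompted_slices=None):
    requested = prompted_slices if prompted_slices is not None else default_slices
    return min(requested, host_count)

print(effective_slice_ct(host_count=4, default_slices=2))                      # 2
print(effective_slice_ct(host_count=4, default_slices=2, prompted_slices=3))  # 3
print(effective_slice_ct(host_count=1, default_slices=2))                     # 1 -> plain Job
```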
@@ -1,7 +1,8 @@
 import pytest
 
 # AWX
-from awx.main.models import JobTemplate, JobLaunchConfig
+from awx.main.models.jobs import JobTemplate, LaunchTimeConfigBase
+from awx.main.models.execution_environments import ExecutionEnvironment
 
 
 @pytest.fixture
@@ -11,18 +12,6 @@ def full_jt(inventory, project, machine_credential):
     return jt
 
 
-@pytest.fixture
-def config_factory(full_jt):
-    def return_config(data):
-        job = full_jt.create_unified_job(**data)
-        try:
-            return job.launch_config
-        except JobLaunchConfig.DoesNotExist:
-            return None
-
-    return return_config
-
-
 @pytest.mark.django_db
 class TestConfigCreation:
     """
@@ -40,28 +29,73 @@ class TestConfigCreation:
         assert config.limit == 'foobar'
         assert config.char_prompts == {'limit': 'foobar'}
 
-    def test_added_credential(self, full_jt, credential):
-        job = full_jt.create_unified_job(credentials=[credential])
+    def test_added_related(self, full_jt, credential, default_instance_group, label):
+        job = full_jt.create_unified_job(credentials=[credential], instance_groups=[default_instance_group], labels=[label])
         config = job.launch_config
         assert set(config.credentials.all()) == set([credential])
+        assert set(config.labels.all()) == set([label])
+        assert set(config.instance_groups.all()) == set([default_instance_group])
 
     def test_survey_passwords_ignored(self, inventory_source):
         iu = inventory_source.create_unified_job(survey_passwords={'foo': '$encrypted$'})
         assert iu.launch_config.prompts_dict() == {}
+
+
+@pytest.fixture
+def full_prompts_dict(inventory, credential, label, default_instance_group):
+    ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
+    r = {
+        'limit': 'foobar',
+        'inventory': inventory,
+        'credentials': [credential],
+        'execution_environment': ee,
+        'labels': [label],
+        'instance_groups': [default_instance_group],
+        'verbosity': 3,
+        'scm_branch': 'non_dev',
+        'diff_mode': True,
+        'skip_tags': 'foobar',
+        'job_tags': 'untagged',
+        'forks': 26,
+        'job_slice_count': 2,
+        'timeout': 200,
+        'extra_vars': {'prompted_key': 'prompted_val'},
+        'job_type': 'check',
+    }
+    assert set(JobTemplate.get_ask_mapping().keys()) - set(r.keys()) == set()  # make fixture comprehensive
+    return r
 
 
 @pytest.mark.django_db
-class TestConfigReversibility:
+def test_config_reversibility(full_jt, full_prompts_dict):
     """
     Checks that a blob of saved prompts will be re-created in the
     prompts_dict for launching new jobs
     """
+    config = full_jt.create_unified_job(**full_prompts_dict).launch_config
+    assert config.prompts_dict() == full_prompts_dict
 
-    def test_char_field_only(self, config_factory):
-        config = config_factory({'limit': 'foobar'})
-        assert config.prompts_dict() == {'limit': 'foobar'}
-
-    def test_related_objects(self, config_factory, inventory, credential):
-        prompts = {'limit': 'foobar', 'inventory': inventory, 'credentials': set([credential])}
-        config = config_factory(prompts)
-        assert config.prompts_dict() == prompts
+
+@pytest.mark.django_db
+class TestLaunchConfigModels:
+    def get_concrete_subclasses(self, cls):
+        r = []
+        for c in cls.__subclasses__():
+            if c._meta.abstract:
+                r.extend(self.get_concrete_subclasses(c))
+            else:
+                r.append(c)
+        return r
+
+    def test_non_job_config_complete(self):
+        """This performs model validation which replaces code that used run on import."""
+        for field_name in JobTemplate.get_ask_mapping().keys():
+            if field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
+                assert not hasattr(LaunchTimeConfigBase, field_name)
+            else:
+                assert hasattr(LaunchTimeConfigBase, field_name)
+
+    def test_subclass_fields_complete(self):
+        for cls in self.get_concrete_subclasses(LaunchTimeConfigBase):
+            for field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
+                assert hasattr(cls, field_name)
@@ -252,12 +252,14 @@ class TestTaskImpact:
     def test_limit_task_impact(self, job_host_limit, run_computed_fields_right_away):
         job = job_host_limit(5, 2)
         job.inventory.update_computed_fields()
+        job.task_impact = job._get_task_impact()
         assert job.inventory.total_hosts == 5
         assert job.task_impact == 2 + 1  # forks becomes constraint
 
     def test_host_task_impact(self, job_host_limit, run_computed_fields_right_away):
         job = job_host_limit(3, 5)
         job.inventory.update_computed_fields()
+        job.task_impact = job._get_task_impact()
         assert job.task_impact == 3 + 1  # hosts becomes constraint
 
     def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_away):
@@ -270,9 +272,13 @@ class TestTaskImpact:
         # Even distribution - all jobs run on 1 host
         assert [len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts']) for i in range(3)] == [1, 1, 1]
         jobs[0].inventory.update_computed_fields()
+        for j in jobs:
+            j.task_impact = j._get_task_impact()
         assert [job.task_impact for job in jobs] == [2, 2, 2]  # plus one base task impact
         # Uneven distribution - first job takes the extra host
         jobs[0].inventory.hosts.create(name='remainder_foo')
         assert [len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts']) for i in range(3)] == [2, 1, 1]
         jobs[0].inventory.update_computed_fields()
+        # recalculate task_impact
+        jobs[0].task_impact = jobs[0]._get_task_impact()
         assert [job.task_impact for job in jobs] == [3, 2, 2]
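Note: the tests now assign `job.task_impact = job._get_task_impact()` explicitly, which suggests `task_impact` became a stored value recalculated on demand rather than a live property. The asserted numbers follow min(forks, hosts) + 1:

```python
def task_impact(host_count, forks):
    return min(host_count, forks) + 1  # +1 is the base impact per running job

print(task_impact(host_count=5, forks=2))  # 3: forks becomes the constraint
print(task_impact(host_count=3, forks=5))  # 4: hosts become the constraint
```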
@@ -12,6 +12,9 @@ from awx.main.models.workflow import (
 )
 from awx.main.models.jobs import JobTemplate, Job
 from awx.main.models.projects import ProjectUpdate
+from awx.main.models.credential import Credential, CredentialType
+from awx.main.models.label import Label
+from awx.main.models.ha import InstanceGroup
 from awx.main.scheduler.dag_workflow import WorkflowDAG
 from awx.api.versioning import reverse
 from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList
@@ -229,6 +232,65 @@ class TestWorkflowJob:
         assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43}
         assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43}
 
+    def test_combine_prompts_WFJT_to_node(self, project, inventory, organization):
+        """
+        Test that complex prompts like variables, credentials, labels, etc
+        are properly combined from the workflow-level with the node-level
+        """
+        jt = JobTemplate.objects.create(
+            project=project,
+            inventory=inventory,
+            ask_variables_on_launch=True,
+            ask_credential_on_launch=True,
+            ask_instance_groups_on_launch=True,
+            ask_labels_on_launch=True,
+            ask_limit_on_launch=True,
+        )
+        wj = WorkflowJob.objects.create(name='test-wf-job', extra_vars='{}')
+
+        common_ig = InstanceGroup.objects.create(name='common')
+        common_ct = CredentialType.objects.create(name='common')
+
+        node = WorkflowJobNode.objects.create(workflow_job=wj, unified_job_template=jt, extra_vars={'node_key': 'node_val'})
+        node.limit = 'node_limit'
+        node.save()
+        node_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='node'))
+        node_cred_conflicting = Credential.objects.create(credential_type=common_ct)
+        node.credentials.add(node_cred_unique, node_cred_conflicting)
+        node_labels = [Label.objects.create(name='node1', organization=organization), Label.objects.create(name='node2', organization=organization)]
+        node.labels.add(*node_labels)
+        node_igs = [common_ig, InstanceGroup.objects.create(name='node')]
+        for ig in node_igs:
+            node.instance_groups.add(ig)
+
+        # assertions for where node has prompts but workflow job does not
+        data = node.get_job_kwargs()
+        assert data['extra_vars'] == {'node_key': 'node_val'}
+        assert set(data['credentials']) == set([node_cred_conflicting, node_cred_unique])
+        assert data['instance_groups'] == node_igs
+        assert set(data['labels']) == set(node_labels)
+        assert data['limit'] == 'node_limit'
+
+        # add prompts to the WorkflowJob
+        wj.limit = 'wj_limit'
+        wj.extra_vars = {'wj_key': 'wj_val'}
+        wj.save()
+        wj_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='wj'))
+        wj_cred_conflicting = Credential.objects.create(credential_type=common_ct)
+        wj.credentials.add(wj_cred_unique, wj_cred_conflicting)
+        wj.labels.add(Label.objects.create(name='wj1', organization=organization), Label.objects.create(name='wj2', organization=organization))
+        wj_igs = [InstanceGroup.objects.create(name='wj'), common_ig]
+        for ig in wj_igs:
+            wj.instance_groups.add(ig)
+
+        # assertions for behavior where node and workflow jobs have prompts
+        data = node.get_job_kwargs()
+        assert data['extra_vars'] == {'node_key': 'node_val', 'wj_key': 'wj_val'}
+        assert set(data['credentials']) == set([wj_cred_unique, wj_cred_conflicting, node_cred_unique])
+        assert data['instance_groups'] == wj_igs
+        assert set(data['labels']) == set(node_labels)  # as exception, WFJT labels not applied
+        assert data['limit'] == 'wj_limit'
+
 
 @pytest.mark.django_db
 class TestWorkflowJobTemplate:
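Note: the combine-prompts test above fixes the precedence when a workflow job and one of its nodes both carry prompts: workflow-level `limit` and `instance_groups` win outright, `extra_vars` merge with workflow values overriding node values on conflicts, credentials merge by credential type with the workflow's entry winning a conflict, and labels are the documented exception — they stay node-level. Two of the merge rules in miniature:

```python
def combine_limit(node_limit, wj_limit):
    return wj_limit if wj_limit is not None else node_limit

def combine_extra_vars(node_vars, wj_vars):
    merged = dict(node_vars)
    merged.update(wj_vars)  # workflow job values win on key conflicts
    return merged

print(combine_limit('node_limit', 'wj_limit'))                             # 'wj_limit'
print(combine_extra_vars({'node_key': 'node_val'}, {'wj_key': 'wj_val'}))  # both keys kept
```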
@@ -287,12 +349,25 @@ class TestWorkflowJobTemplatePrompts:
     @pytest.fixture
     def wfjt_prompts(self):
         return WorkflowJobTemplate.objects.create(
-            ask_inventory_on_launch=True, ask_variables_on_launch=True, ask_limit_on_launch=True, ask_scm_branch_on_launch=True
+            ask_variables_on_launch=True,
+            ask_inventory_on_launch=True,
+            ask_tags_on_launch=True,
+            ask_labels_on_launch=True,
+            ask_limit_on_launch=True,
+            ask_scm_branch_on_launch=True,
+            ask_skip_tags_on_launch=True,
         )

     @pytest.fixture
     def prompts_data(self, inventory):
-        return dict(inventory=inventory, extra_vars={'foo': 'bar'}, limit='webservers', scm_branch='release-3.3')
+        return dict(
+            inventory=inventory,
+            extra_vars={'foo': 'bar'},
+            limit='webservers',
+            scm_branch='release-3.3',
+            job_tags='foo',
+            skip_tags='bar',
+        )

     def test_apply_workflow_job_prompts(self, workflow_job_template, wfjt_prompts, prompts_data, inventory):
         # null or empty fields used
@@ -300,6 +375,9 @@ class TestWorkflowJobTemplatePrompts:
         assert workflow_job.limit is None
         assert workflow_job.inventory is None
         assert workflow_job.scm_branch is None
+        assert workflow_job.job_tags is None
+        assert workflow_job.skip_tags is None
+        assert len(workflow_job.labels.all()) == 0

         # fields from prompts used
         workflow_job = workflow_job_template.create_unified_job(**prompts_data)
@@ -307,15 +385,21 @@ class TestWorkflowJobTemplatePrompts:
         assert workflow_job.limit == 'webservers'
         assert workflow_job.inventory == inventory
         assert workflow_job.scm_branch == 'release-3.3'
+        assert workflow_job.job_tags == 'foo'
+        assert workflow_job.skip_tags == 'bar'

         # non-null fields from WFJT used
         workflow_job_template.inventory = inventory
         workflow_job_template.limit = 'fooo'
         workflow_job_template.scm_branch = 'bar'
+        workflow_job_template.job_tags = 'baz'
+        workflow_job_template.skip_tags = 'dinosaur'
         workflow_job = workflow_job_template.create_unified_job()
         assert workflow_job.limit == 'fooo'
         assert workflow_job.inventory == inventory
         assert workflow_job.scm_branch == 'bar'
+        assert workflow_job.job_tags == 'baz'
+        assert workflow_job.skip_tags == 'dinosaur'

     @pytest.mark.django_db
     def test_process_workflow_job_prompts(self, inventory, workflow_job_template, wfjt_prompts, prompts_data):
@@ -340,12 +424,19 @@ class TestWorkflowJobTemplatePrompts:
                 ask_limit_on_launch=True,
                 scm_branch='bar',
                 ask_scm_branch_on_launch=True,
+                job_tags='foo',
+                skip_tags='bar',
             ),
             user=org_admin,
             expect=201,
         )
         wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
-        assert wfjt.char_prompts == {'limit': 'foooo', 'scm_branch': 'bar'}
+        assert wfjt.char_prompts == {
+            'limit': 'foooo',
+            'scm_branch': 'bar',
+            'job_tags': 'foo',
+            'skip_tags': 'bar',
+        }
         assert wfjt.ask_scm_branch_on_launch is True
         assert wfjt.ask_limit_on_launch is True

@@ -355,6 +446,67 @@ class TestWorkflowJobTemplatePrompts:
         assert r.data['limit'] == 'prompt_limit'
         assert r.data['scm_branch'] == 'prompt_branch'

+    @pytest.mark.django_db
+    def test_set_all_ask_for_prompts_false_from_post(self, post, organization, inventory, org_admin):
+        '''
+        Tests the default values of the ask_*_on_launch fields on a WFJT created via POST
+        '''
+        r = post(
+            url=reverse('api:workflow_job_template_list'),
+            data=dict(
+                name='workflow that tests ask_for prompts',
+                organization=organization.id,
+                inventory=inventory.id,
+                job_tags='',
+                skip_tags='',
+            ),
+            user=org_admin,
+            expect=201,
+        )
+        wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
+
+        assert wfjt.ask_inventory_on_launch is False
+        assert wfjt.ask_labels_on_launch is False
+        assert wfjt.ask_limit_on_launch is False
+        assert wfjt.ask_scm_branch_on_launch is False
+        assert wfjt.ask_skip_tags_on_launch is False
+        assert wfjt.ask_tags_on_launch is False
+        assert wfjt.ask_variables_on_launch is False
+
+    @pytest.mark.django_db
+    def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
+        '''
+        Tests that every ask_*_on_launch field on a WFJT can be set to True via POST
+        '''
+        r = post(
+            url=reverse('api:workflow_job_template_list'),
+            data=dict(
+                name='workflow that tests ask_for prompts',
+                organization=organization.id,
+                inventory=inventory.id,
+                job_tags='',
+                skip_tags='',
+                ask_inventory_on_launch=True,
+                ask_labels_on_launch=True,
+                ask_limit_on_launch=True,
+                ask_scm_branch_on_launch=True,
+                ask_skip_tags_on_launch=True,
+                ask_tags_on_launch=True,
+                ask_variables_on_launch=True,
+            ),
+            user=org_admin,
+            expect=201,
+        )
+        wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
+
+        assert wfjt.ask_inventory_on_launch is True
+        assert wfjt.ask_labels_on_launch is True
+        assert wfjt.ask_limit_on_launch is True
+        assert wfjt.ask_scm_branch_on_launch is True
+        assert wfjt.ask_skip_tags_on_launch is True
+        assert wfjt.ask_tags_on_launch is True
+        assert wfjt.ask_variables_on_launch is True


 @pytest.mark.django_db
 def test_workflow_ancestors(organization):
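For context, a minimal sketch of the launch-time gating these ask_* tests rely on (assumed shape only; the real checks live in the WFJT launch serializer): a prompted value is honored only when the matching ask_*_on_launch flag is True on the template.

    def accepted_prompts(wfjt, prompts):
        allowed = {
            'inventory': wfjt.ask_inventory_on_launch,
            'extra_vars': wfjt.ask_variables_on_launch,
            'limit': wfjt.ask_limit_on_launch,
            'scm_branch': wfjt.ask_scm_branch_on_launch,
            'job_tags': wfjt.ask_tags_on_launch,
            'skip_tags': wfjt.ask_skip_tags_on_launch,
            'labels': wfjt.ask_labels_on_launch,
        }
        # drop any prompt whose flag is off
        return {k: v for k, v in prompts.items() if allowed.get(k)}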
awx/main/tests/functional/task_management/__init__.py (new normal file, +6)
@@ -0,0 +1,6 @@
+def create_job(jt, dependencies_processed=True):
+    job = jt.create_unified_job()
+    job.status = "pending"
+    job.dependencies_processed = dependencies_processed
+    job.save()
+    return job
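Typical use of this helper in the reworked scheduler tests below (job_template_factory is the fixture those tests already use):

    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
    j_ready = create_job(objects.job_template)  # pending and ready for TaskManager
    j_needs_deps = create_job(objects.job_template, dependencies_processed=False)  # DependencyManager must run first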
@@ -1,9 +1,10 @@
 import pytest
 from unittest import mock
 from datetime import timedelta
-from awx.main.scheduler import TaskManager
+from awx.main.scheduler import TaskManager, DependencyManager
-from awx.main.models import InstanceGroup, WorkflowJob
+from awx.main.models import InstanceGroup
 from awx.main.tasks.system import apply_cluster_membership_policies
+from . import create_job


 @pytest.mark.django_db
@@ -12,16 +13,12 @@ def test_multi_group_basic_job_launch(instance_factory, controlplane_instance_gr
     i2 = instance_factory("i2")
     ig1 = instance_group_factory("ig1", instances=[i1])
     ig2 = instance_group_factory("ig2", instances=[i2])
-    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
+    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
     objects1.job_template.instance_groups.add(ig1)
-    j1 = objects1.jobs['job_should_start']
-    j1.status = 'pending'
-    j1.save()
-    objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_still_start"])
+    j1 = create_job(objects1.job_template)
+    objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2')
     objects2.job_template.instance_groups.add(ig2)
-    j2 = objects2.jobs['job_should_still_start']
-    j2.status = 'pending'
-    j2.save()
+    j2 = create_job(objects2.job_template)
     with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
         mock_task_impact.return_value = 500
         with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
@@ -35,23 +32,26 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
     i2 = instance_factory("i2")
     ig1 = instance_group_factory("ig1", instances=[i1])
     ig2 = instance_group_factory("ig2", instances=[i2])
-    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
+    objects1 = job_template_factory(
+        'jt1',
+        organization='org1',
+        project='proj1',
+        inventory='inv1',
+        credential='cred1',
+    )
     objects1.job_template.instance_groups.add(ig1)
+    j1 = create_job(objects1.job_template, dependencies_processed=False)
     p = objects1.project
     p.scm_update_on_launch = True
     p.scm_update_cache_timeout = 0
     p.scm_type = "git"
     p.scm_url = "http://github.com/ansible/ansible.git"
     p.save()
-    j1 = objects1.jobs['job_should_start']
-    j1.status = 'pending'
-    j1.save()
-    objects2 = job_template_factory('jt2', organization=objects1.organization, project=p, inventory='inv2', credential='cred2', jobs=["job_should_still_start"])
+    objects2 = job_template_factory('jt2', organization=objects1.organization, project=p, inventory='inv2', credential='cred2')
     objects2.job_template.instance_groups.add(ig2)
-    j2 = objects2.jobs['job_should_still_start']
-    j2.status = 'pending'
-    j2.save()
+    j2 = create_job(objects2.job_template, dependencies_processed=False)
     with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
+        DependencyManager().schedule()
         TaskManager().schedule()
         pu = p.project_updates.first()
         TaskManager.start_task.assert_called_once_with(pu, controlplane_instance_group, [j1, j2], controlplane_instance_group.instances.all()[0])
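The two schedule() calls above encode the new contract: dependency processing now happens in its own pass, and because both pending jobs share one project, a single project update is expected to serve them both. A sketch of the intended sequence (assumed behavior, mirrored from the assertion):

    DependencyManager().schedule()  # spawns one shared project update, flips dependencies_processed
    TaskManager().schedule()        # starts the update first; j1 and j2 wait behind it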
@@ -59,6 +59,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
     pu.status = "successful"
     pu.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
+        DependencyManager().schedule()
         TaskManager().schedule()

     TaskManager.start_task.assert_any_call(j1, ig1, [], i1)
@@ -69,7 +70,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta
 @pytest.mark.django_db
 def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlplane_instance_group, mocker):
     wfjt = workflow_job_template_factory('anicedayforawalk').workflow_job_template
-    wfj = WorkflowJob.objects.create(workflow_job_template=wfjt)
+    wfj = wfjt.create_unified_job()
     wfj.status = "pending"
     wfj.save()
     with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
@@ -85,39 +86,50 @@ def test_overcapacity_blocking_other_groups_unaffected(instance_factory, control
     i1.capacity = 1020
     i1.save()
     i2 = instance_factory("i2")
+    i2.capacity = 1020
+    i2.save()
     ig1 = instance_group_factory("ig1", instances=[i1])
     ig2 = instance_group_factory("ig2", instances=[i2])
-    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
+    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
     objects1.job_template.instance_groups.add(ig1)
-    j1 = objects1.jobs['job_should_start']
-    j1.status = 'pending'
-    j1.save()
-    objects2 = job_template_factory(
-        'jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_start", "job_should_also_start"]
-    )
+    j1 = create_job(objects1.job_template)
+    objects2 = job_template_factory('jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2')
     objects2.job_template.instance_groups.add(ig1)
-    j1_1 = objects2.jobs['job_should_also_start']
-    j1_1.status = 'pending'
-    j1_1.save()
-    objects3 = job_template_factory('jt3', organization='org2', project='proj3', inventory='inv3', credential='cred3', jobs=["job_should_still_start"])
+    j1_1 = create_job(objects2.job_template)
+    objects3 = job_template_factory('jt3', organization='org2', project='proj3', inventory='inv3', credential='cred3')
     objects3.job_template.instance_groups.add(ig2)
-    j2 = objects3.jobs['job_should_still_start']
-    j2.status = 'pending'
-    j2.save()
-    objects4 = job_template_factory(
-        'jt4', organization=objects3.organization, project='proj4', inventory='inv4', credential='cred4', jobs=["job_should_not_start"]
-    )
+    j2 = create_job(objects3.job_template)
+    objects4 = job_template_factory('jt4', organization=objects3.organization, project='proj4', inventory='inv4', credential='cred4')
     objects4.job_template.instance_groups.add(ig2)
-    j2_1 = objects4.jobs['job_should_not_start']
-    j2_1.status = 'pending'
-    j2_1.save()
-    tm = TaskManager()
+    j2_1 = create_job(objects4.job_template)
     with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
         mock_task_impact.return_value = 500
-        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
-            tm.schedule()
-            mock_job.assert_has_calls([mock.call(j1, ig1, [], i1), mock.call(j1_1, ig1, [], i1), mock.call(j2, ig2, [], i2)])
-            assert mock_job.call_count == 3
+        TaskManager().schedule()
+        # all jobs should be able to run, plenty of capacity across both instances
+        for j in [j1, j1_1, j2, j2_1]:
+            j.refresh_from_db()
+            assert j.status == "waiting"
+
+        # reset to pending
+        for j in [j1, j1_1, j2, j2_1]:
+            j.status = "pending"
+            j.save()
+
+        # shrink i2 so it can only fit one job
+        i2.capacity = 510
+        i2.save()
+
+        TaskManager().schedule()
+
+        for j in [j1, j1_1, j2]:
+            j.refresh_from_db()
+            assert j.status == "waiting"
+
+        j2_1.refresh_from_db()
+        # could not run because i2 is full
+        assert j2_1.status == "pending"


 @pytest.mark.django_db
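The capacity numbers in that test are chosen so the arithmetic works out exactly; a quick check of the intent:

    task_impact = 500
    assert 1020 // task_impact == 2  # at capacity 1020, an instance fits both of its pending jobs
    assert 510 // task_impact == 1   # after shrinking i2 to 510 only one job fits, so j2_1 stays pending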
@@ -126,19 +138,13 @@ def test_failover_group_run(instance_factory, controlplane_instance_group, mocke
     i2 = instance_factory("i2")
     ig1 = instance_group_factory("ig1", instances=[i1])
     ig2 = instance_group_factory("ig2", instances=[i2])
-    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
+    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
     objects1.job_template.instance_groups.add(ig1)
-    j1 = objects1.jobs['job_should_start']
-    j1.status = 'pending'
-    j1.save()
-    objects2 = job_template_factory(
-        'jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_start", "job_should_also_start"]
-    )
+    j1 = create_job(objects1.job_template)
+    objects2 = job_template_factory('jt2', organization=objects1.organization, project='proj2', inventory='inv2', credential='cred2')
     objects2.job_template.instance_groups.add(ig1)
     objects2.job_template.instance_groups.add(ig2)
-    j1_1 = objects2.jobs['job_should_also_start']
-    j1_1.status = 'pending'
-    j1_1.save()
+    j1_1 = create_job(objects2.job_template)
     tm = TaskManager()
     with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
         mock_task_impact.return_value = 500
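The file that follows drives the same scheduler rework from the task-manager side. A sketch of the split these tests now assume (DependencyManager and WorkflowManager are imported below; each is assumed to expose the same schedule() entry point the tests call, directly or via run_tm):

    def run_full_cycle():
        DependencyManager().schedule()  # create project/inventory updates, set dependencies_processed
        TaskManager().schedule()        # submit unblocked pending tasks to instances
        WorkflowManager().schedule()    # advance workflow job nodes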
@@ -3,21 +3,19 @@ from unittest import mock
 import json
 from datetime import timedelta

-from awx.main.scheduler import TaskManager
-from awx.main.scheduler.dependency_graph import DependencyGraph
+from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
 from awx.main.utils import encrypt_field
 from awx.main.models import WorkflowJobTemplate, JobTemplate, Job
 from awx.main.models.ha import Instance
+from . import create_job
 from django.conf import settings


 @pytest.mark.django_db
 def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_group, job_template_factory, mocker):
     instance = controlplane_instance_group.instances.all()[0]
-    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
-    j = objects.jobs["job_should_start"]
-    j.status = 'pending'
-    j.save()
+    j = create_job(objects.job_template)
     with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
         TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
@@ -32,10 +30,8 @@ class TestJobLifeCycle:
         expect_commit - list of expected on_commit calls
         If any of these are None, then the assertion is not made.
         """
-        if expect_schedule and len(expect_schedule) > 1:
-            raise RuntimeError('Task manager should reschedule itself one time, at most.')
         with mock.patch('awx.main.models.unified_jobs.UnifiedJob.websocket_emit_status') as mock_channel:
-            with mock.patch('awx.main.utils.common._schedule_task_manager') as tm_sch:
+            with mock.patch('awx.main.utils.common.ScheduleManager._schedule') as tm_sch:
                 # Job are ultimately submitted in on_commit hook, but this will not
                 # actually run, because it waits until outer transaction, which is the test
                 # itself in this case
@@ -56,22 +52,21 @@ class TestJobLifeCycle:
         wj = wfjt.create_unified_job()
         assert wj.workflow_nodes.count() == 2
         wj.signal_start()
-        tm = TaskManager()

         # Transitions workflow job to running
         # needs to re-schedule so it spawns jobs next round
-        self.run_tm(tm, [mock.call('running')], [mock.call()])
+        self.run_tm(TaskManager(), [mock.call('running')])

         # Spawns jobs
         # needs re-schedule to submit jobs next round
-        self.run_tm(tm, [mock.call('pending'), mock.call('pending')], [mock.call()])
+        self.run_tm(WorkflowManager(), [mock.call('pending'), mock.call('pending')])

         assert jt.jobs.count() == 2  # task manager spawned jobs

         # Submits jobs
         # intermission - jobs will run and reschedule TM when finished
-        self.run_tm(tm, [mock.call('waiting'), mock.call('waiting')], [])
+        self.run_tm(DependencyManager())  # flip dependencies_processed to True
+        self.run_tm(TaskManager())
         # I am the job runner
         for job in jt.jobs.all():
             job.status = 'successful'
@@ -79,7 +74,7 @@ class TestJobLifeCycle:

         # Finishes workflow
         # no further action is necessary, so rescheduling should not happen
-        self.run_tm(tm, [mock.call('successful')], [])
+        self.run_tm(WorkflowManager(), [mock.call('successful')])

     def test_task_manager_workflow_workflow_rescheduling(self, controlplane_instance_group):
         wfjts = [WorkflowJobTemplate.objects.create(name='foo')]
@@ -90,16 +85,13 @@ class TestJobLifeCycle:

         wj = wfjts[0].create_unified_job()
         wj.signal_start()
-        tm = TaskManager()

-        while wfjts[0].status != 'successful':
-            wfjts[1].refresh_from_db()
-            if wfjts[1].status == 'successful':
-                # final run, no more work to do
-                self.run_tm(tm, expect_schedule=[])
-            else:
-                self.run_tm(tm, expect_schedule=[mock.call()])
+        attempts = 10
+        while wfjts[0].status != 'successful' and attempts > 0:
+            self.run_tm(TaskManager())
+            self.run_tm(WorkflowManager())
             wfjts[0].refresh_from_db()
+            attempts -= 1

     def test_control_and_execution_instance(self, project, system_job_template, job_template, inventory_source, control_instance, execution_instance):
         assert Instance.objects.count() == 2
@@ -113,6 +105,7 @@ class TestJobLifeCycle:
         for uj in all_ujs:
             uj.signal_start()

+        DependencyManager().schedule()
         tm = TaskManager()
         self.run_tm(tm)

@@ -135,6 +128,7 @@ class TestJobLifeCycle:
         for uj in all_ujs:
             uj.signal_start()

+        DependencyManager().schedule()
         # There is only enough control capacity to run one of the jobs so one should end up in pending and the other in waiting
         tm = TaskManager()
         self.run_tm(tm)
@@ -157,6 +151,7 @@ class TestJobLifeCycle:
         for uj in all_ujs:
             uj.signal_start()

+        DependencyManager().schedule()
         # There is only enough control capacity to run one of the jobs so one should end up in pending and the other in waiting
         tm = TaskManager()
         self.run_tm(tm)
@@ -197,63 +192,49 @@ class TestJobLifeCycle:


 @pytest.mark.django_db
-def test_single_jt_multi_job_launch_blocks_last(controlplane_instance_group, job_template_factory, mocker):
-    instance = controlplane_instance_group.instances.all()[0]
-    objects = job_template_factory(
-        'jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start", "job_should_not_start"]
-    )
-    j1 = objects.jobs["job_should_start"]
-    j1.status = 'pending'
+def test_single_jt_multi_job_launch_blocks_last(job_template_factory):
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
+    j1 = create_job(objects.job_template)
+    j2 = create_job(objects.job_template)
+
+    TaskManager().schedule()
+    j1.refresh_from_db()
+    j2.refresh_from_db()
+    assert j1.status == "waiting"
+    assert j2.status == "pending"
+
+    # mimic running j1 to unblock j2
+    j1.status = "successful"
     j1.save()
-    j2 = objects.jobs["job_should_not_start"]
-    j2.status = 'pending'
-    j2.save()
-    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j1, controlplane_instance_group, [], instance)
-    j1.status = "successful"
-    j1.save()
-    with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
-        TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j2, controlplane_instance_group, [], instance)
+    TaskManager().schedule()
+    j2.refresh_from_db()
+    assert j2.status == "waiting"


 @pytest.mark.django_db
-def test_single_jt_multi_job_launch_allow_simul_allowed(controlplane_instance_group, job_template_factory, mocker):
-    instance = controlplane_instance_group.instances.all()[0]
-    objects = job_template_factory(
-        'jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start", "job_should_not_start"]
-    )
+def test_single_jt_multi_job_launch_allow_simul_allowed(job_template_factory):
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
     jt = objects.job_template
+    jt.allow_simultaneous = True
     jt.save()
-    j1 = objects.jobs["job_should_start"]
-    j1.allow_simultaneous = True
-    j1.status = 'pending'
-    j1.save()
-    j2 = objects.jobs["job_should_not_start"]
-    j2.allow_simultaneous = True
-    j2.status = 'pending'
-    j2.save()
-    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        TaskManager().schedule()
-        TaskManager.start_task.assert_has_calls(
-            [mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
-        )
+    j1 = create_job(objects.job_template)
+    j2 = create_job(objects.job_template)
+    TaskManager().schedule()
+    j1.refresh_from_db()
+    j2.refresh_from_db()
+    assert j1.status == "waiting"
+    assert j2.status == "waiting"


 @pytest.mark.django_db
 def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocker):
     instance = hybrid_instance
     controlplane_instance_group = instance.rampart_groups.first()
-    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
-    objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_not_start"])
-    j1 = objects1.jobs["job_should_start"]
-    j1.status = 'pending'
-    j1.save()
-    j2 = objects2.jobs["job_should_not_start"]
-    j2.status = 'pending'
-    j2.save()
+    objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1')
+    objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2')
+    j1 = create_job(objects1.job_template)
+    j2 = create_job(objects2.job_template)
     tm = TaskManager()
     with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
         mock_task_impact.return_value = 505
@@ -269,11 +250,9 @@ def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocke

 @pytest.mark.django_db
 def test_single_job_dependencies_project_launch(controlplane_instance_group, job_template_factory, mocker):
-    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
     instance = controlplane_instance_group.instances.all()[0]
-    j = objects.jobs["job_should_start"]
-    j.status = 'pending'
-    j.save()
+    j = create_job(objects.job_template, dependencies_processed=False)
     p = objects.project
     p.scm_update_on_launch = True
     p.scm_update_cache_timeout = 0
@@ -281,12 +260,13 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
     p.scm_url = "http://github.com/ansible/ansible.git"
     p.save(skip_update=True)
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        tm = TaskManager()
-        with mock.patch.object(TaskManager, "create_project_update", wraps=tm.create_project_update) as mock_pu:
-            tm.schedule()
+        dm = DependencyManager()
+        with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
+            dm.schedule()
             mock_pu.assert_called_once_with(j)
             pu = [x for x in p.project_updates.all()]
             assert len(pu) == 1
+            TaskManager().schedule()
             TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, [j], instance)
             pu[0].status = "successful"
             pu[0].save()
@@ -297,11 +277,9 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job

 @pytest.mark.django_db
 def test_single_job_dependencies_inventory_update_launch(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
-    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
     instance = controlplane_instance_group.instances.all()[0]
-    j = objects.jobs["job_should_start"]
-    j.status = 'pending'
-    j.save()
+    j = create_job(objects.job_template, dependencies_processed=False)
     i = objects.inventory
     ii = inventory_source_factory("ec2")
     ii.source = "ec2"
@@ -310,12 +288,13 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
     ii.save()
     i.inventory_sources.add(ii)
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        tm = TaskManager()
-        with mock.patch.object(TaskManager, "create_inventory_update", wraps=tm.create_inventory_update) as mock_iu:
-            tm.schedule()
+        dm = DependencyManager()
+        with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
+            dm.schedule()
             mock_iu.assert_called_once_with(j, ii)
             iu = [x for x in ii.inventory_updates.all()]
             assert len(iu) == 1
+            TaskManager().schedule()
             TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, [j], instance)
             iu[0].status = "successful"
             iu[0].save()
@@ -334,19 +313,17 @@ def test_inventory_update_launches_project_update(controlplane_instance_group, s
     iu.status = "pending"
     iu.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        tm = TaskManager()
-        with mock.patch.object(TaskManager, "create_project_update", wraps=tm.create_project_update) as mock_pu:
-            tm.schedule()
+        dm = DependencyManager()
+        with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
+            dm.schedule()
             mock_pu.assert_called_with(iu, project_id=project.id)


 @pytest.mark.django_db
 def test_job_dependency_with_already_updated(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
-    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
     instance = controlplane_instance_group.instances.all()[0]
-    j = objects.jobs["job_should_start"]
-    j.status = 'pending'
-    j.save()
+    j = create_job(objects.job_template, dependencies_processed=False)
     i = objects.inventory
     ii = inventory_source_factory("ec2")
     ii.source = "ec2"
@@ -359,9 +336,9 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
     j.start_args = encrypt_field(j, field_name="start_args")
     j.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        tm = TaskManager()
-        with mock.patch.object(TaskManager, "create_inventory_update", wraps=tm.create_inventory_update) as mock_iu:
-            tm.schedule()
+        dm = DependencyManager()
+        with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
+            dm.schedule()
             mock_iu.assert_not_called()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
@@ -371,13 +348,11 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
 @pytest.mark.django_db
 def test_shared_dependencies_launch(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
     instance = controlplane_instance_group.instances.all()[0]
-    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["first_job", "second_job"])
-    j1 = objects.jobs["first_job"]
-    j1.status = 'pending'
-    j1.save()
-    j2 = objects.jobs["second_job"]
-    j2.status = 'pending'
-    j2.save()
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
+    objects.job_template.allow_simultaneous = True
+    objects.job_template.save()
+    j1 = create_job(objects.job_template, dependencies_processed=False)
+    j2 = create_job(objects.job_template, dependencies_processed=False)
     p = objects.project
     p.scm_update_on_launch = True
     p.scm_update_cache_timeout = 300
@@ -392,8 +367,8 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
     ii.update_cache_timeout = 300
     ii.save()
     i.inventory_sources.add(ii)

     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
+        DependencyManager().schedule()
         TaskManager().schedule()
         pu = p.project_updates.first()
         iu = ii.inventory_updates.first()
@@ -408,12 +383,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
     iu.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j1, controlplane_instance_group, [], instance)
-    j1.status = "successful"
-    j1.save()
-    with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j2, controlplane_instance_group, [], instance)
+        TaskManager.start_task.assert_has_calls(
+            [mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
+        )
     pu = [x for x in p.project_updates.all()]
     iu = [x for x in ii.inventory_updates.all()]
     assert len(pu) == 1
@@ -422,30 +394,27 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa

 @pytest.mark.django_db
 def test_job_not_blocking_project_update(controlplane_instance_group, job_template_factory):
-    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job"])
-    job = objects.jobs["job"]
+    instance = controlplane_instance_group.instances.all()[0]
+    objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred')
+    job = objects.job_template.create_unified_job()
     job.instance_group = controlplane_instance_group
+    job.dependencies_processed = True
     job.status = "running"
     job.save()

     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        task_manager = TaskManager()
-        task_manager._schedule()
-
         proj = objects.project
         project_update = proj.create_project_update()
         project_update.instance_group = controlplane_instance_group
         project_update.status = "pending"
         project_update.save()
-        assert not task_manager.job_blocked_by(project_update)
-
-        dependency_graph = DependencyGraph()
-        dependency_graph.add_job(job)
-        assert not dependency_graph.task_blocked_by(project_update)
+        TaskManager().schedule()
+        TaskManager.start_task.assert_called_once_with(project_update, controlplane_instance_group, [], instance)


 @pytest.mark.django_db
 def test_job_not_blocking_inventory_update(controlplane_instance_group, job_template_factory, inventory_source_factory):
+    instance = controlplane_instance_group.instances.all()[0]
     objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job"])
     job = objects.jobs["job"]
     job.instance_group = controlplane_instance_group
@@ -453,9 +422,6 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp
     job.save()

     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
-        task_manager = TaskManager()
-        task_manager._schedule()
-
         inv = objects.inventory
         inv_source = inventory_source_factory("ec2")
         inv_source.source = "ec2"
@@ -465,11 +431,9 @@ def test_job_not_blocking_inventory_update(controlplane_instance_group, job_temp
         inventory_update.status = "pending"
         inventory_update.save()

-        assert not task_manager.job_blocked_by(inventory_update)
-
-        dependency_graph = DependencyGraph()
-        dependency_graph.add_job(job)
-        assert not dependency_graph.task_blocked_by(inventory_update)
+        DependencyManager().schedule()
+        TaskManager().schedule()
+        TaskManager.start_task.assert_called_once_with(inventory_update, controlplane_instance_group, [], instance)


 @pytest.mark.django_db
@@ -484,7 +448,7 @@ def test_generate_dependencies_only_once(job_template_factory):
     # job starts with dependencies_processed as False
     assert not job.dependencies_processed
     # run one cycle of ._schedule() to generate dependencies
-    TaskManager()._schedule()
+    DependencyManager().schedule()

     # make sure dependencies_processed is now True
     job = Job.objects.filter(name="job_gen_dep")[0]
@@ -492,7 +456,7 @@ def test_generate_dependencies_only_once(job_template_factory):

     # Run ._schedule() again, but make sure .generate_dependencies() is not
     # called with job in the argument list
-    tm = TaskManager()
-    tm.generate_dependencies = mock.MagicMock(return_value=[])
-    tm._schedule()
-    tm.generate_dependencies.assert_has_calls([mock.call([]), mock.call([])])
+    dm = DependencyManager()
+    dm.generate_dependencies = mock.MagicMock(return_value=[])
+    dm.schedule()
+    dm.generate_dependencies.assert_not_called()
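A minimal sketch of the invariant test_generate_dependencies_only_once pins down (assumed logic, not the manager's actual query): dependency generation only considers tasks whose flag is still unset.

    def tasks_needing_dependencies(pending_tasks):
        # tasks already processed on a prior cycle are skipped entirely
        return [t for t in pending_tasks if not t.dependencies_processed]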
@@ -6,12 +6,20 @@ from awx.main.utils import decrypt_field
 from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate
 from awx.main.models.jobs import JobTemplate
 from awx.main.tasks.system import deep_copy_model_obj
+from awx.main.models import Label, ExecutionEnvironment, InstanceGroup


 @pytest.mark.django_db
-def test_job_template_copy(post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin):
+def test_job_template_copy(
+    post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin, organization
+):
+    label = Label.objects.create(name="foobar", organization=organization)
+    ig = InstanceGroup.objects.create(name="bazbar", organization=organization)
     job_template_with_survey_passwords.project = project
     job_template_with_survey_passwords.inventory = inventory
+    job_template_with_survey_passwords.labels.add(label)
+    job_template_with_survey_passwords.instance_groups.add(ig)
+    job_template_with_survey_passwords.prevent_instance_group_fallback = True
     job_template_with_survey_passwords.save()
     job_template_with_survey_passwords.credentials.add(credential)
     job_template_with_survey_passwords.credentials.add(machine_credential)
@@ -54,6 +62,11 @@ def test_job_template_copy(post, get, project, inventory, machine_credential, va
     assert vault_credential in jt_copy.credentials.all()
     assert machine_credential in jt_copy.credentials.all()
     assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec
+    assert jt_copy.labels.count() != 0
+    assert jt_copy.labels.get(pk=label.pk) == label
+    assert jt_copy.instance_groups.count() != 0
+    assert jt_copy.instance_groups.get(pk=ig.pk) == ig
+    assert jt_copy.prevent_instance_group_fallback is True


 @pytest.mark.django_db
@@ -84,6 +97,8 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
     host = group_1_1.hosts.create(name='host', inventory=inventory)
     group_2_1.hosts.add(host)
     inventory.admin_role.members.add(alice)
+    inventory.prevent_instance_group_fallback = True
+    inventory.save()
     assert get(reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200).data['can_copy'] is False
     inventory.organization.admin_role.members.add(alice)
     assert get(reverse('api:inventory_copy', kwargs={'pk': inventory.pk}), alice, expect=200).data['can_copy'] is True
@@ -99,6 +114,7 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
     assert inventory_copy.organization == organization
     assert inventory_copy.created_by == alice
     assert inventory_copy.name == 'new inv name'
+    assert inventory_copy.prevent_instance_group_fallback is True
     assert set(group_1_1_copy.parents.all()) == set()
     assert set(group_2_1_copy.parents.all()) == set([group_1_1_copy])
     assert set(group_2_2_copy.parents.all()) == set([group_1_1_copy, group_2_1_copy])
@@ -109,8 +125,22 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization

 @pytest.mark.django_db
 def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization):
+    '''
+    Tests the FIELDS_TO_PRESERVE_AT_COPY attribute on WFJTs
+    '''
     workflow_job_template.organization = organization
+
+    label = Label.objects.create(name="foobar", organization=organization)
+    workflow_job_template.labels.add(label)
+
+    ee = ExecutionEnvironment.objects.create(name="barfoo", organization=organization)
+    workflow_job_template.execution_environment = ee
+
+    ig = InstanceGroup.objects.create(name="bazbar", organization=organization)
+    workflow_job_template.instance_groups.add(ig)
+
     workflow_job_template.save()

     jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)]
     nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)]
     nodes[0].success_nodes.add(nodes[1])
@@ -124,9 +154,16 @@ def test_workflow_job_template_copy(workflow_job_template, post, get, admin, org
     wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id)
     args, kwargs = deep_copy_mock.call_args
     deep_copy_model_obj(*args, **kwargs)
 
     assert wfjt_copy.organization == organization
     assert wfjt_copy.created_by == admin
     assert wfjt_copy.name == 'new wfjt name'
+    assert wfjt_copy.labels.count() != 0
+    assert wfjt_copy.labels.get(pk=label.pk) == label
+    assert wfjt_copy.execution_environment == ee
+    assert wfjt_copy.instance_groups.count() != 0
+    assert wfjt_copy.instance_groups.get(pk=ig.pk) == ig
+
     copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()]
     copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1]))
     for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]):
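The five new assertions cover one plain field (`execution_environment`) and two many-to-many relations (`labels`, `instance_groups`). A hedged sketch of why the m2m relations need explicit re-linking at copy time, using plain Python sets in place of Django managers:

```python
# Illustrative only: plain attributes stand in for Django FK/m2m managers.
class WFJT:
    def __init__(self, name):
        self.name = name
        self.execution_environment = None  # FK-style field: copies by assignment
        self.labels = set()                # m2m-style relations: the links live on
        self.instance_groups = set()       # the relation, not on the object itself

def copy_wfjt(original, new_name):
    clone = WFJT(new_name)
    clone.execution_environment = original.execution_environment
    # m2m links must be re-created against the clone explicitly, which is
    # why a FIELDS_TO_PRESERVE_AT_COPY-style list has to name them.
    clone.labels |= original.labels
    clone.instance_groups |= original.instance_groups
    return clone
```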
@@ -74,34 +74,37 @@ GLqbpJyX2r3p/Rmo6mLY71SqpA==
 
 @pytest.mark.django_db
 def test_default_cred_types():
-    assert sorted(CredentialType.defaults.keys()) == [
-        'aim',
-        'aws',
-        'azure_kv',
-        'azure_rm',
-        'centrify_vault_kv',
-        'conjur',
-        'controller',
-        'galaxy_api_token',
-        'gce',
-        'github_token',
-        'gitlab_token',
-        'hashivault_kv',
-        'hashivault_ssh',
-        'insights',
-        'kubernetes_bearer_token',
-        'net',
-        'openstack',
-        'registry',
-        'rhv',
-        'satellite6',
-        'scm',
-        'ssh',
-        'thycotic_dsv',
-        'thycotic_tss',
-        'vault',
-        'vmware',
-    ]
+    assert sorted(CredentialType.defaults.keys()) == sorted(
+        [
+            'aim',
+            'aws',
+            'azure_kv',
+            'azure_rm',
+            'centrify_vault_kv',
+            'conjur',
+            'controller',
+            'galaxy_api_token',
+            'gce',
+            'github_token',
+            'gitlab_token',
+            'gpg_public_key',
+            'hashivault_kv',
+            'hashivault_ssh',
+            'insights',
+            'kubernetes_bearer_token',
+            'net',
+            'openstack',
+            'registry',
+            'rhv',
+            'satellite6',
+            'scm',
+            'ssh',
+            'thycotic_dsv',
+            'thycotic_tss',
+            'vault',
+            'vmware',
+        ]
+    )
 
     for type_ in CredentialType.defaults.values():
         assert type_().managed is True
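Sorting both sides of the comparison is the substantive change here: the expected list no longer has to stay alphabetized by hand when a new type such as 'gpg_public_key' is added. The idiom in isolation:

```python
# Both sides sorted: equality now checks membership (with multiplicity),
# not the order in which the entries happen to be written down.
expected = ['vault', 'gpg_public_key', 'ssh']        # any order is fine
actual = {'gpg_public_key': 0, 'ssh': 1, 'vault': 2}.keys()
assert sorted(actual) == sorted(expected)
```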
@@ -199,9 +199,7 @@ class TestAutoScaling:
         assert len(self.pool) == 10
 
         # cleanup should scale down to 8 workers
-        with mock.patch('awx.main.dispatch.reaper.reap') as reap:
-            self.pool.cleanup()
-            reap.assert_called()
+        self.pool.cleanup()
         assert len(self.pool) == 2
 
     def test_max_scale_up(self):
@@ -246,12 +244,10 @@ class TestAutoScaling:
         assert not self.pool.should_grow
         alive_pid = self.pool.workers[1].pid
         self.pool.workers[0].process.terminate()
-        time.sleep(1)  # wait a moment for sigterm
+        time.sleep(2)  # wait a moment for sigterm
 
         # clean up and the dead worker
-        with mock.patch('awx.main.dispatch.reaper.reap') as reap:
-            self.pool.cleanup()
-            reap.assert_called()
+        self.pool.cleanup()
         assert len(self.pool) == 1
         assert self.pool.workers[0].pid == alive_pid
 
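Doubling the sleep gives SIGTERM more time to take effect, at the cost of a slower (and still timing-dependent) test. A common alternative is to poll with a deadline; a minimal sketch, assuming the worker's `process` behaves like `multiprocessing.Process`:

```python
import time

def wait_for_exit(process, timeout=5.0, interval=0.1):
    """Return True once the process dies, False if the deadline passes."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if not process.is_alive():  # multiprocessing.Process API
            return True
        time.sleep(interval)
    return False
```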
@@ -353,7 +349,7 @@ class TestJobReaper(object):
             ('waiting', '', '', None, False),  # waiting, not assigned to the instance
             ('waiting', 'awx', '', None, False),  # waiting, was edited less than a minute ago
             ('waiting', '', 'awx', None, False),  # waiting, was edited less than a minute ago
-            ('waiting', 'awx', '', yesterday, True),  # waiting, assigned to the execution_node, stale
+            ('waiting', 'awx', '', yesterday, False),  # waiting, managed by another node, ignore
             ('waiting', '', 'awx', yesterday, True),  # waiting, assigned to the controller_node, stale
         ],
     )
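The flipped expectation encodes an ownership rule: a stale waiting job is failed only by the node named as its controller_node; a node that merely appears as its execution_node leaves it alone ("managed by another node"). A hedged sketch of that rule (the real logic in awx.main.dispatch.reaper is more involved):

```python
def should_reap_waiting(job, this_hostname, is_stale):
    # Only the controlling node may fail a stale waiting job.
    return job['status'] == 'waiting' and job['controller_node'] == this_hostname and is_stale

# The changed parametrize row: execution_node matches, controller_node does not.
job = {'status': 'waiting', 'execution_node': 'awx', 'controller_node': ''}
assert should_reap_waiting(job, 'awx', is_stale=True) is False
```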
@@ -372,6 +368,7 @@ class TestJobReaper(object):
         # (because .save() overwrites it to _now_)
         Job.objects.filter(id=j.id).update(modified=modified)
         reaper.reap(i)
+        reaper.reap_waiting(i)
         job = Job.objects.first()
         if fail:
             assert job.status == 'failed'
@@ -391,6 +391,8 @@ class TestInstanceGroupOrdering:
         assert ad_hoc.preferred_instance_groups == [ig_org]
         inventory.instance_groups.add(ig_inv)
         assert ad_hoc.preferred_instance_groups == [ig_inv, ig_org]
+        inventory.prevent_instance_group_fallback = True
+        assert ad_hoc.preferred_instance_groups == [ig_inv]
 
     def test_inventory_update_instance_groups(self, instance_group_factory, inventory_source, default_instance_group):
         iu = InventoryUpdate.objects.create(inventory_source=inventory_source, source=inventory_source.source)
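The two added assertions pin down what the flag does to the ordering: normally groups accumulate template, then inventory, then organization, and `prevent_instance_group_fallback` on the inventory stops the chain before the organization level. A simplified model of that precedence (inferred from the assertions alone, not AWX's actual property):

```python
def preferred_instance_groups(template_igs, inventory_igs, org_igs, prevent_fallback=False):
    # Simplified: the real property also handles defaults and deduplication.
    if prevent_fallback:
        return template_igs + inventory_igs
    return template_igs + inventory_igs + org_igs

assert preferred_instance_groups([], ['ig_inv'], ['ig_org']) == ['ig_inv', 'ig_org']
assert preferred_instance_groups([], ['ig_inv'], ['ig_org'], prevent_fallback=True) == ['ig_inv']
```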
@@ -404,6 +406,8 @@ class TestInstanceGroupOrdering:
         inventory_source.instance_groups.add(ig_tmp)
         # API does not allow setting IGs on inventory source, so ignore those
         assert iu.preferred_instance_groups == [ig_inv, ig_org]
+        inventory_source.inventory.prevent_instance_group_fallback = True
+        assert iu.preferred_instance_groups == [ig_inv]
 
     def test_job_instance_groups(self, instance_group_factory, inventory, project, default_instance_group):
         jt = JobTemplate.objects.create(inventory=inventory, project=project)
@@ -417,3 +421,31 @@ class TestInstanceGroupOrdering:
         assert job.preferred_instance_groups == [ig_inv, ig_org]
         job.job_template.instance_groups.add(ig_tmp)
         assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org]
+
+    def test_job_instance_groups_cache_default(self, instance_group_factory, inventory, project, default_instance_group):
+        jt = JobTemplate.objects.create(inventory=inventory, project=project)
+        job = jt.create_unified_job()
+        print(job.preferred_instance_groups_cache)
+        print(default_instance_group)
+        assert job.preferred_instance_groups_cache == [default_instance_group.id]
+
+    def test_job_instance_groups_cache_default_additional_items(self, instance_group_factory, inventory, project, default_instance_group):
+        ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
+        ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
+        ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
+        project.organization.instance_groups.add(ig_org)
+        inventory.instance_groups.add(ig_inv)
+        jt = JobTemplate.objects.create(inventory=inventory, project=project)
+        jt.instance_groups.add(ig_tmp)
+        job = jt.create_unified_job()
+        assert job.preferred_instance_groups_cache == [ig_tmp.id, ig_inv.id, ig_org.id]
+
+    def test_job_instance_groups_cache_prompt(self, instance_group_factory, inventory, project, default_instance_group):
+        ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
+        ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
+        ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
+        project.organization.instance_groups.add(ig_org)
+        inventory.instance_groups.add(ig_inv)
+        jt = JobTemplate.objects.create(inventory=inventory, project=project)
+        job = jt.create_unified_job(instance_groups=[ig_tmp])
+        assert job.preferred_instance_groups_cache == [ig_tmp.id]
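Taken together, the three new tests describe `preferred_instance_groups_cache` as a list of instance-group IDs frozen at job creation: launch-time (prompted) groups win outright, otherwise template/inventory/organization groups in order, otherwise the default group. A hedged summary of that resolution (inferred from the assertions, not AWX's code):

```python
def resolve_instance_group_cache(prompted, template, inventory, org, default_id):
    if prompted:                            # ..._cache_prompt: prompt wins outright
        return list(prompted)
    resolved = template + inventory + org   # ..._cache_default_additional_items
    return resolved or [default_id]         # ..._cache_default: fall back to default

assert resolve_instance_group_cache([], [], [], [], 42) == [42]
assert resolve_instance_group_cache([7], [], [1], [2], 42) == [7]
```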