mirror of https://github.com/ansible/awx.git
synced 2026-02-05 03:24:50 -03:30
Compare commits
453 Commits
799968460d, 3f08e26881, 9af2c92795, dabae456d9, c40785b6eb, e2e80313ac, 14a99a7b9e, 50e8c299c6,
326d12382f, 1de9dddd21, 87b1f0d0de, f085afd92f, 604cbc1737, 60b6faff19, b26c1c16b9, c2bf9d94be,
ea09adbbf3, 9d0de57fae, da733538c4, 6db7cea148, 3993aa9524, 6f9d4d89cd, 443bdc1234, 9cd43d044e,
f8e680867b, 96a5540083, 750e1bd80a, a12f161be5, 04568ea830, 3be0b527d6, afc0732a32, 9703fb06fc,
54cbf13219, 6774a12c67, 94e53d988b, 22d47ea8c4, 73bba00cc6, 6ed429ada2, d2c2d459c4, c8b906ffb7,
264f1d6638, 16c7908adc, c9d05d7d4a, ec7e4488dc, 72f440acf5, 21bf698c81, 489ee30e54, 2abab0772f,
0bca0fabaa, 93ac3fea43, c72b71a43a, 9e8c40598c, 4ded4afb7d, 801c45da6d, 278b356a18, a718e01dbf,
8e6cdde861, 62b0c2b647, 1cd30ceb31, 15c7a3f85b, d977aff8cf, e3b44c3950, ba035efc91, 76cfd7784a,
3e6875ce1d, 1ab7aa0fc4, 5950e0bfcb, ac540d3d3f, 848ddc5f3e, 30d1d63813, 9781a9094f, ab3de5898d,
7ff8a3764b, 32d6d746b3, ecf9a0827d, a9a7fac308, 54b5884943, 1fb38137dc, 2d6192db75, 9ecceb4a1e,
6b25fcaa80, c5c83a4240, 5e0eb5ab97, 2de5ffc8d9, 3b2fe39a0a, 285ff080d0, 627bde9e9e, ef7d5e6004,
598c8a1c4d, b3c20ee0ae, cd8d382038, b678d61318, 43c8231f7d, db401e0daa, 675d4c5f2b, fdbf3ed279,
5660f9ac59, 546e63aa4c, ddbd143793, 35ba321546, 2fe7fe30f8, 8d4d1d594b, c86fafbd7e, 709c439afc,
4cdc88e4bb, 7c550a76a5, cfabbcaaf6, 7ae6286152, fd9c28c960, fa9ee96f7f, 334c33ca07, 85cc67fb4e,
af9eb7c374, 44968cc01e, af69b25eaa, eb33b95083, aa9124e072, c086fad945, 0fef88c358, 56f8f8d3f4,
5bced09fc5, b4e9ff7ce0, 208cbabb31, 2fb5cfd55d, 582036ba45, e06f9f5438, 461876da93, 4f1c662691,
9abd4e05d0, faba64890e, add54bfd0b, 16d39bb72b, e63ce9ed08, 60831cae88, 97cf46eaa9, 381e75b913,
7bd516a16c, 3dd01cde89, 495394084d, 2609ee5ed0, da930ce276, 987924cbda, 8fac1c18c8, eb64fde885,
b1e9537499, 9d636cad29, 696c0b0055, 6e030fd62f, bb14a95076, 9664aed1f2, 6dda5f477e, 72cd73ca71,
02e18cf919, 82671680e3, bff49f2a5f, 59d582ce83, a4a3ba65d7, 11f4b64229, b76029fac3, 3d45f31536,
ade00c70e5, 82dca5336d, 8c33d0ecbd, dea5fd1a9d, 6a131f70f0, d33a0d5dde, 11cc7e37e1, 7e6cb7ecc9,
807c58dc36, 1517f2d910, b0c59ee330, 1ff52bab56, 7a9fca7f77, dea53a0dba, db999b82ed, c92468062d,
4de0f09c85, 9c9c1b4d3b, 5ffe91f069, 63867518ee, 53ff99e391, c035c12c0a, 6e39a02e99, 956638e564,
37907ad348, 386aa898ec, f1c5da7026, fc2a5224ef, ce5aefd3d8, b2124dffb5, 25eaace4be, bb8efbcc82,
e0bd5ad041, 69ec49d0e9, 8126f734e3, f2aaa6778c, 4fd5b01a83, 1747a844fc, afc210a70d, f63003f982,
e89037dd77, ab6e650e9c, 2ed246cb61, 4449555abe, f340f491dc, c8f1e714e1, ddc428532f, 3414cae677,
9d6972c6ce, 0566a0f1d6, de0561dcc2, a9f4f53f92, 5fdfd4114a, b195f9da44, 1205d71f4b, 3f762a6476,
4aa403c122, a13070a8da, b63b171653, 7219f8fed8, b6a5f834d6, 99b9d53bbb, edca19a697, c13d721062,
d2f316c484, 70e832d4db, 21895bd09b, 411ef5f9e8, f6282b9a09, e10030b73d, cdf14158b4, f310e672b0,
675d0d28d2, 4c2fd056ef, a259e48377, 095c586172, c9c198b54b, 2a11bb4f3b, 35bac50962, 366d2c1d97,
9a930cbd95, 03277513a9, 1b0fca8026, c9cf5b78c5, d6679a1e9b, b721a4b361, 88bbd43314, fb1c97cdc1,
f5ae8a0a4c, 1994eaa406, 510b40a776, f37b070965, 41385261f3, 19b4849345, 76283bd299, 2e4cda74c8,
5512b71e16, 97b60c43b7, 35b62f8526, a15a3f005c, 776c4a988a, c419969253, ba324c73ce, 4a5dc78331,
55dc9dfb54, 23a8191bb5, c665caaf35, 099efb883d, 44237426df, eeefd19ad3, 47ae6e7a5a, 03ed6e9755,
8d4e7f0a82, 7fdf491c05, ef1563283e, a206d79851, 42c9c0a06b, f0ede01017, d67007f777, 83d81e3788,
e789e16289, 61c9683aa6, ee9d1356b2, f92a49fda9, 3dc6a055ac, 229f0d97f9, 7cc530f950, 2ef840ce12,
a372d8d1d5, aad150cf1d, be13a11dd5, 59c6f35b0b, 37e45c5e7c, 39370f1eab, aec7ac6ebd, f6e63d0917,
0ae67edaba, 481f6435ee, d0c5c3d3cf, 9f8250bd47, 3a3fffb2dd, 4cfa4eaf8e, abb1125a2c, a2acbe9fe6,
cab8c690d2, 0d1f8a06ce, d42fe921db, db7fb81855, d3c695b853, 010c3ab0b8, 58cdbca5cf, 8275082896,
d79da1ef9f, a9636426b8, 329caad681, ecb84e090c, 8e9fc14b0e, 0f77ca605d, 231fcc8178, 2839091b22,
47e67481b3, 55059b015f, eb6c58682d, 26055de772, ebb4581595, d1fecc11c9, 056247a34a, 7010015e8a,
62d50d27be, 1e5231d68b, e04efad3c0, e54db3ce50, 77076dbd67, 6f20a798ab, 0d3a22bbc3, f34c96ecf5,
206c85778e, d6b4b9f973, 3065e29deb, 481047bed8, f72292cce2, 7b35902d33, 1660900914, a7be25ce8b,
54b5ba08b8, 0fb8d48074, b5fac4157d, 9e61949f9f, 6c5640798f, 03222197a3, 12f417d0a3, c77aaece1d,
25140c9072, 3a636c29ab, a11d5ccd37, f6e7937f74, e447b667e5, 24c635e9bc, 2ad4dcd741, f5cd9e0799,
e7064868b4, 65cbbf15c9, a325509e1e, 69ae731898, 3452dee1b0, 64b337e3c6, 5df9655fe3, f3669f3be6,
61eb99c46d, f74a14e34f, 517f1d7991, 25e69885d0, 60a357eda1, d74679a5f9, 73a865073d, 4ff8c28fe4,
4ab2539c8a, 459eb3903e, 611a537b55, 3a74cc5a74, f1520e1a70, 727b4668c2, 1287e001d8, c9b53cf975,
64811d0b6b, 74af187568, a28c023cf1, cdf7fd64b2, 84ffa4a5b7, 326a43de11, 07f193d8d6, f79a57c3e2,
f8319fcd02, 815ef4c9c9, d1800aa6d0, dda940344e, 1fffeb430c, 7d0bbd0a4c, 15fd22681d, 6a2826b91c,
112111c7f9, ed8498f43f, 77a5bb9069, 37f86803f7, 160858b051, 68f44c01ea, bef8d7426f, c758f079cd,
7e404b7c19, 4b7faea552, 4ddd391033, e52416fd47, f67a2d2f46, fcdda8d7a7, 1f0b936e82, b70793db5c,
0f044f6c21, 4c205dfde9, d58d460119, 24a6edef9e, a5485096ac, 60a5ccf70b, d93a7c2997, af5f8e8a4a,
1596c855ff, f45dd7a748, a036363e85, 4aceea41fd, 7bbfcbaefd, 18eaa9bb92, 6826d5444b, 622ec69216,
d38c109d49, a31b2d0259, b13c076881, c429a55382, 20c4b21c39, d3289dc688, 685c0b844e, 57c9b14198,
d0a13cb12a, 71c72f74a1, ad24fe7017, e5578a8ef3, 3a40d5e243, 8e34898b4e, 0b0d049071, 9e74ac24fa,
cbe612baa5, 899d36b2c9, 530977d6b3, aa682fa2c9, e3893b1887
@@ -1,2 +1,3 @@
awx/ui/node_modules
Dockerfile
.git
.github/ISSUE_TEMPLATE.md (2 changes, vendored)

@@ -16,7 +16,7 @@ https://www.ansible.com/security
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
- API
- UI
- Installer
- Collection

##### SUMMARY
<!-- Briefly describe the problem. -->
.github/ISSUE_TEMPLATE/bug_report.yml (17 changes, vendored)

@@ -1,26 +1,24 @@
---
name: Bug Report
description: Create a report to help us improve
labels:
  - bug
body:
  - type: markdown
    attributes:
      value: |
        Issues are for **concrete, actionable bugs and feature requests** only. For debugging help or technical support, please use:
        - The #ansible-awx channel on irc.libera.chat
        - https://groups.google.com/forum/#!forum/awx-project
        - The awx project mailing list, https://groups.google.com/forum/#!forum/awx-project

  - type: checkboxes
    id: terms
    attributes:
      label: Please confirm the following
      options:
        - label: I agree to follow this project's [code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
        - label: I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
          required: true
        - label: I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
          required: true
        - label: I understand that AWX is open source software provided for free and that I am not entitled to status updates or other assurances.
        - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
          required: true

  - type: textarea

@@ -39,6 +37,15 @@ body:
    validations:
      required: true

  - type: checkboxes
    id: components
    attributes:
      label: Select the relevant components
      options:
        - label: UI
        - label: API
        - label: Docs

  - type: dropdown
    id: awx-install-method
    attributes:
.github/PULL_REQUEST_TEMPLATE.md (1 change, vendored)

@@ -25,6 +25,7 @@ the change does.
<!--- Name of the module/plugin/module/task -->
- API
- UI
- Collection

##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
.github/issue_labeler.yml (new file, 12 lines, vendored)

@@ -0,0 +1,12 @@
needs_triage:
  - '.*'
"type:bug":
  - "Please confirm the following"
"type:enhancement":
  - "Feature Idea"
"component:ui":
  - "\\[X\\] UI"
"component:api":
  - "\\[X\\] API"
"component:docs":
  - "\\[X\\] Docs"
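As a rough illustration (the issue body below is hypothetical), an issue rendered from the form template above would contain lines such as:

```text
Please confirm the following
- [X] UI
```

Here the `\\[X\\] UI` pattern would apply `component:ui`, "Please confirm the following" would apply `type:bug`, and the catch-all `.*` pattern labels every new issue `needs_triage`.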
.github/pr_labeler.yml (new file, 14 lines, vendored)

@@ -0,0 +1,14 @@
"component:api":
  - any: ['awx/**/*', '!awx/ui/*']

"component:ui":
  - any: ['awx/ui/**/*']

"component:docs":
  - any: ['docs/**/*']

"component:cli":
  - any: ['awxkit/**/*']

"component:collection":
  - any: ['awx_collection/**/*']
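A hedged sketch of how these globs resolve, using hypothetical file paths:

```yaml
# awx/api/serializers.py  -> component:api  (matches 'awx/**/*', not excluded by '!awx/ui/*')
# awx/ui/src/App.js       -> component:ui   (matches 'awx/ui/**/*')
# docs/debugging.md       -> component:docs (matches 'docs/**/*')
```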
.github/workflows/ci.yml (203 changes, vendored)

@@ -4,64 +4,48 @@ env:
  BRANCH: ${{ github.base_ref || 'devel' }}
on:
  pull_request:
  push:
    branches: [devel]
jobs:
  api-test:
  common_tests:
    name: ${{ matrix.tests.name }}
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    strategy:
      fail-fast: false
      matrix:
        tests:
          - name: api-test
            command: /start_tests.sh
            label: Run API Tests
          - name: api-lint
            command: /var/lib/awx/venv/awx/bin/tox -e linters
            label: Run API Linters
          - name: api-swagger
            command: /start_tests.sh swagger
            label: Generate API Reference
          - name: awx-collection
            command: /start_tests.sh test_collection_all
            label: Run Collection Tests
          - name: api-schema
            label: Check API Schema
            command: /start_tests.sh detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{ github.event.pull_request.base.ref }}
          - name: ui-lint
            label: Run UI Linters
            command: make ui-lint
          - name: ui-test
            label: Run UI Tests
            command: make ui-test
    steps:
      - uses: actions/checkout@v2

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Pre-pull image to warm build cache
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}

      - name: Build image
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build

      - name: Run API Tests
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} /start_tests.sh
  api-lint:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Pre-pull image to warm build cache
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}

      - name: Build image
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build

      - name: Run API Linters
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} /var/lib/awx/venv/awx/bin/tox -e linters
  api-swagger:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2
      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.py_version }}

      - name: Log in to registry
        run: |
@@ -71,107 +55,50 @@ jobs:
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :

      - name: Build image
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build

      - name: Generate API Reference
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} /start_tests.sh swagger
  awx-collection:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Pre-pull image to warm build cache
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}

      - name: Build image
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build

      - name: Run Collection Tests
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} /start_tests.sh test_collection_all
  api-schema:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Pre-pull image to warm build cache
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}

      - name: Build image
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build

      - name: Check API Schema
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} /start_tests.sh detect-schema-change
  ui-lint:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Pre-pull image to warm build cache
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}

      - name: Build image
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build

      - name: Run UI Linters
      - name: ${{ matrix.tests.label }}
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} make ui-lint
  ui-test:
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} ${{ matrix.tests.command }}

  awx-operator:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2
      - name: Checkout awx
        uses: actions/checkout@v2
        with:
          path: awx

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
      - name: Checkout awx-operator
        uses: actions/checkout@v2
        with:
          repository: ansible/awx-operator
          path: awx-operator

      - name: Pre-pull image to warm build cache
      - name: Install playbook dependencies
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }}
          python3 -m pip install docker

      - name: Build image
      - name: Build AWX image
        working-directory: awx
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build
          ansible-playbook -v tools/ansible/build.yml \
            -e headless=yes \
            -e awx_image=awx \
            -e awx_image_tag=ci \
            -e ansible_python_interpreter=$(which python3)

      - name: Run UI Tests
      - name: Run test deployment with awx-operator
        working-directory: awx-operator
        run: |
          docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
            --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} make ui-test
          python3 -m pip install -r molecule/requirements.txt
          ansible-galaxy collection install -r molecule/requirements.yml
          sudo rm -f $(which kustomize)
          make kustomize
          KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind
        env:
          AWX_TEST_IMAGE: awx
          AWX_TEST_VERSION: ci
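Taken together, the two hunks above collapse seven near-identical jobs into a single `common_tests` job whose matrix supplies the per-suite name, label, and command. A minimal sketch of the resulting shape (abridged to two matrix entries, with the image reference shortened for illustration):

```yaml
jobs:
  common_tests:
    name: ${{ matrix.tests.name }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false          # let the other suites finish even if one fails
      matrix:
        tests:
          - { name: api-test, label: Run API Tests, command: /start_tests.sh }
          - { name: ui-lint,  label: Run UI Linters, command: make ui-lint }
    steps:
      - uses: actions/checkout@v2
      - name: ${{ matrix.tests.label }}
        # the same containerized step runs whichever command the matrix entry supplies
        run: docker run --rm awx_devel ${{ matrix.tests.command }}
```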
.github/workflows/devel_image.yml (8 changes, vendored)

@@ -13,6 +13,14 @@ jobs:
    steps:
      - uses: actions/checkout@v2

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.py_version }}

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
.github/workflows/e2e_test.yml (10 changes, vendored)

@@ -18,6 +18,14 @@ jobs:
    steps:
      - uses: actions/checkout@v2

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.py_version }}

      - name: Install system deps
        run: sudo apt-get install -y gettext

@@ -85,7 +93,7 @@
          -e CYPRESS_baseUrl="https://$AWX_IP:8043" \
          -e CYPRESS_AWX_E2E_USERNAME=admin \
          -e CYPRESS_AWX_E2E_PASSWORD='password' \
          -e COMMAND="npm run cypress-gha" \
          -e COMMAND="npm run cypress-concurrently-gha" \
          -v /dev/shm:/dev/shm \
          -v $PWD:/e2e \
          -w /e2e \
.github/workflows/label_issue.yml (new file, 22 lines, vendored)

@@ -0,0 +1,22 @@
name: Label Issue

on:
  issues:
    types:
      - opened
      - reopened
      - edited

jobs:
  triage:
    runs-on: ubuntu-latest
    name: Label Issue

    steps:
      - name: Label Issue
        uses: github/issue-labeler@v2.4.1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          not-before: 2021-12-07T07:00:00Z
          configuration-path: .github/issue_labeler.yml
          enable-versioned-regex: 0
.github/workflows/label_pr.yml (new file, 20 lines, vendored)

@@ -0,0 +1,20 @@
name: Label PR

on:
  pull_request_target:
    types:
      - opened
      - reopened
      - synchronize

jobs:
  triage:
    runs-on: ubuntu-latest
    name: Label PR

    steps:
      - name: Label PR
        uses: actions/labeler@v3
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          configuration-path: .github/pr_labeler.yml
.github/workflows/promote.yml (new file, 26 lines, vendored)

@@ -0,0 +1,26 @@
---
name: Promote Release
on:
  release:
    types: [published]

jobs:
  promote:
    runs-on: ubuntu-latest
    steps:
      - name: Log in to GHCR
        run: |
          echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Log in to Quay
        run: |
          echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin

      - name: Re-tag and promote awx image
        run: |
          docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }}
          docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
          docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
          docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
          docker push quay.io/${{ github.repository }}:latest
.github/workflows/release.yml (deleted, 56 lines, vendored)

@@ -1,56 +0,0 @@
name: Release AWX
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version'
        required: true
        default: ''
      confirm:
        description: 'Are you sure? Set this to yes.'
        required: true
        default: 'no'

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: "Verify inputs"
        run: |
          set -e

          if [[ ${{ github.event.inputs.confirm }} != "yes" ]]; then
            >&2 echo "Confirm must be 'yes'"
            exit 1
          fi

          if [[ ${{ github.event.inputs.version }} == "" ]]; then
            >&2 echo "Set version to continue."
            exit 1
          fi

          exit 0

      - name: Generate changelog
        uses: shanemcd/simple-changelog-generator@v1
        id: changelog
        with:
          repo: "${{ github.repository }}"

      - name: Write changelog to file
        run: |
          cat << 'EOF' > /tmp/changelog
          ${{ steps.changelog.outputs.changelog }}
          EOF

      - name: Release AWX
        run: |
          ansible-playbook -v tools/ansible/release.yml \
            -e changelog_path=/tmp/changelog \
            -e version=${{ github.event.inputs.version }} \
            -e github_token=${{ secrets.GITHUB_TOKEN }} \
            -e repo=${{ github.repository }}
.github/workflows/stage.yml (new file, 131 lines, vendored)

@@ -0,0 +1,131 @@
---
name: Stage Release
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'AWX version.'
        required: true
        default: ''
      operator_version:
        description: 'Operator version. Leave blank to skip staging awx-operator.'
        default: ''
      confirm:
        description: 'Are you sure? Set this to yes.'
        required: true
        default: 'no'

jobs:
  stage:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: write
    steps:
      - name: Verify inputs
        run: |
          set -e

          if [[ ${{ github.event.inputs.confirm }} != "yes" ]]; then
            >&2 echo "Confirm must be 'yes'"
            exit 1
          fi

          if [[ ${{ github.event.inputs.version }} == "" ]]; then
            >&2 echo "Set version to continue."
            exit 1
          fi

          exit 0

      - name: Checkout awx
        uses: actions/checkout@v2
        with:
          path: awx

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.py_version }}

      - name: Checkout awx-logos
        uses: actions/checkout@v2
        with:
          repository: ansible/awx-logos
          path: awx-logos

      - name: Checkout awx-operator
        uses: actions/checkout@v2
        with:
          repository: ${{ github.repository_owner }}/awx-operator
          path: awx-operator

      - name: Install playbook dependencies
        run: |
          python3 -m pip install docker

      - name: Build and stage AWX
        working-directory: awx
        run: |
          ansible-playbook -v tools/ansible/build.yml \
            -e registry=ghcr.io \
            -e registry_username=${{ github.actor }} \
            -e registry_password=${{ secrets.GITHUB_TOKEN }} \
            -e awx_image=${{ github.repository }} \
            -e awx_version=${{ github.event.inputs.version }} \
            -e ansible_python_interpreter=$(which python3) \
            -e push=yes \
            -e awx_official=yes

      - name: Build and stage awx-operator
        working-directory: awx-operator
        run: |
          BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }}" \
          IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
          VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push

      - name: Run test deployment with awx-operator
        working-directory: awx-operator
        run: |
          python3 -m pip install -r molecule/requirements.txt
          ansible-galaxy collection install -r molecule/requirements.yml
          sudo rm -f $(which kustomize)
          make kustomize
          KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule test -s kind
        env:
          AWX_TEST_IMAGE: ${{ github.repository }}
          AWX_TEST_VERSION: ${{ github.event.inputs.version }}

      - name: Generate changelog
        uses: shanemcd/simple-changelog-generator@v1
        id: changelog
        with:
          repo: "${{ github.repository }}"

      - name: Write changelog to file
        run: |
          cat << 'EOF' > /tmp/awx-changelog
          ${{ steps.changelog.outputs.changelog }}
          EOF

      - name: Create draft release for AWX
        working-directory: awx
        run: |
          ansible-playbook -v tools/ansible/stage.yml \
            -e changelog_path=/tmp/awx-changelog \
            -e repo=${{ github.repository }} \
            -e awx_image=ghcr.io/${{ github.repository }} \
            -e version=${{ github.event.inputs.version }} \
            -e github_token=${{ secrets.GITHUB_TOKEN }}

      - name: Create draft release for awx-operator
        if: ${{ github.event.inputs.operator_version != '' }}
        working-directory: awx
        run: |
          ansible-playbook tools/ansible/stage.yml \
            -e version=${{ github.event.inputs.operator_version }} \
            -e repo=${{ github.repository_owner }}/awx-operator \
            -e github_token=${{ secrets.AWX_OPERATOR_RELEASE_TOKEN }}
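Because this workflow triggers on `workflow_dispatch`, it can be started from the GitHub CLI; a hedged example (the version numbers are hypothetical):

```sh
gh workflow run stage.yml \
  -f version=19.5.0 \
  -f operator_version=0.15.0 \
  -f confirm=yes
```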
.github/workflows/upload_schema.yml (13 changes, vendored)

@@ -4,6 +4,7 @@ on:
  push:
    branches:
      - devel
      - release_4.1
jobs:
  push:
    runs-on: ubuntu-latest

@@ -13,13 +14,21 @@
    steps:
      - uses: actions/checkout@v2

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.py_version }}

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Pre-pull image to warm build cache
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/}
          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :

      - name: Build image
        run: |

@@ -38,6 +47,6 @@ jobs:
        run: |
          ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
          ansible localhost -c local -m aws_s3 \
            -a 'src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=schema.json mode=put permission=public-read'
            -a "src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=put permission=public-read"
.gitignore (2 changes, vendored)

@@ -42,6 +42,7 @@ tools/docker-compose/_build
tools/docker-compose/_sources
tools/docker-compose/overrides/
tools/docker-compose-minikube/_sources
tools/docker-compose/keycloak.awx.realm.json

# Tower setup playbook testing
setup/test/roles/postgresql

@@ -58,6 +59,7 @@ __pycache__
/dist
/*.egg-info
*.py[c,o]
/.eggs

# JavaScript
/Gruntfile.js
@@ -6,8 +6,11 @@ ignore: |
  # vault files
  awx/main/tests/data/ansible_utils/playbooks/valid/vault.yml
  awx/ui/test/e2e/tests/smoke-vars.yml
  awx/ui/node_modules
  tools/docker-compose/_sources

extends: default

rules:
  line-length: disable
  truthy: disable
@@ -110,7 +110,7 @@ For feature work, take a look at the current [Enhancements](https://github.com/a

If it has someone assigned to it then that person is the person responsible for working the enhancement. If you feel like you could contribute then reach out to that person.

Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](https://github.com/ansible/awx/blob/devel/docs/debugging.md).
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](./docs/debugging/).

**NOTE**
Makefile (98 changes)

@@ -1,29 +1,32 @@
PYTHON ?= python3.8
PYTHON_VERSION = $(shell $(PYTHON) -c "from distutils.sysconfig import get_python_version; print(get_python_version())")
PYTHON ?= python3.9
OFFICIAL ?= no
NODE ?= node
NPM_BIN ?= npm
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION := $(shell cat VERSION)
VERSION := $(shell $(PYTHON) setup.py --version)
COLLECTION_VERSION := $(shell $(PYTHON) setup.py --version | cut -d . -f 1-3)

# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
COMPOSE_HOST ?= $(shell hostname)
MAIN_NODE_TYPE ?= hybrid
# If set to true docker-compose will also start a keycloak instance
KEYCLOAK ?= false

VENV_BASE ?= /var/lib/awx/venv

DEV_DOCKER_TAG_BASE ?= quay.io/awx
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)

RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel

# Python packages to install only from source (not from binary wheels)
# Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==19.3.1 setuptools==41.6.0 wheel==0.36.2
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==58.2.0 wheel==0.36.2

NAME ?= awx

@@ -40,7 +43,7 @@ I18N_FLAG_FILE = .i18n_built
	receiver test test_unit test_coverage coverage_html \
	dev_build release_build sdist \
	ui-release ui-devel \
	VERSION docker-compose-sources \
	VERSION PYTHON_VERSION docker-compose-sources \
	.git/hooks/pre-commit

clean-tmp:

@@ -142,24 +145,6 @@ version_file:
	fi; \
	$(PYTHON) -c "import awx; print(awx.__version__)" > /var/lib/awx/.awx_version; \

# Do any one-time init tasks.
comma := ,
init:
	if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/awx/bin/activate; \
	fi; \
	$(MANAGEMENT_COMMAND) provision_instance --hostname=$(COMPOSE_HOST) --node_type=$(MAIN_NODE_TYPE); \
	$(MANAGEMENT_COMMAND) register_queue --queuename=controlplane --instance_percent=100;\
	$(MANAGEMENT_COMMAND) register_queue --queuename=default --instance_percent=100;
	if [ ! -f /etc/receptor/certs/awx.key ]; then \
		rm -f /etc/receptor/certs/*; \
		receptor --cert-init commonname="AWX Test CA" bits=2048 outcert=/etc/receptor/certs/ca.crt outkey=/etc/receptor/certs/ca.key; \
		for node in $(RECEPTOR_MUTUAL_TLS); do \
			receptor --cert-makereq bits=2048 commonname="$$node test cert" dnsname=$$node nodeid=$$node outreq=/etc/receptor/certs/$$node.csr outkey=/etc/receptor/certs/$$node.key; \
			receptor --cert-signreq req=/etc/receptor/certs/$$node.csr cacert=/etc/receptor/certs/ca.crt cakey=/etc/receptor/certs/ca.key outcert=/etc/receptor/certs/$$node.crt verify=yes; \
		done; \
	fi

# Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate

@@ -280,17 +265,16 @@ api-lint:

awx-link:
	[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
	cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link
	cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link

TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests

PYTEST_ARGS ?= -n auto
# Run all API unit tests.
test:
	if [ "$(VENV_BASE)" ]; then \
		. $(VENV_BASE)/awx/bin/activate; \
	fi; \
	PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
	cmp VERSION awxkit/VERSION || "VERSION and awxkit/VERSION *must* match"
	PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PYTEST_ARGS) $(TEST_DIRS)
	cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
	awx-manage check_migrations --dry-run --check -n 'missing_migration_file'

@@ -322,12 +306,16 @@ symlink_collection:
	ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)

build_collection:
	ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION) -e '{"awx_template_version":false}'
	ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml \
		-e collection_package=$(COLLECTION_PACKAGE) \
		-e collection_namespace=$(COLLECTION_NAMESPACE) \
		-e collection_version=$(COLLECTION_VERSION) \
		-e '{"awx_template_version":false}'
	ansible-galaxy collection build awx_collection_build --force --output-path=awx_collection_build

install_collection: build_collection
	rm -rf $(COLLECTION_INSTALL)
	ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(VERSION).tar.gz
	ansible-galaxy collection install awx_collection_build/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(COLLECTION_VERSION).tar.gz

test_collection_sanity: install_collection
	cd $(COLLECTION_INSTALL) && ansible-test sanity

@@ -378,9 +366,9 @@ clean-ui:
	rm -rf $(UI_BUILD_FLAG_FILE)

awx/ui/node_modules:
	NODE_OPTIONS=--max-old-space-size=4096 $(NPM_BIN) --prefix awx/ui --loglevel warn ci
	NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn ci

$(UI_BUILD_FLAG_FILE):
$(UI_BUILD_FLAG_FILE): awx/ui/node_modules
	$(PYTHON) tools/scripts/compilemessages.py
	$(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
	$(NPM_BIN) --prefix awx/ui --loglevel warn run build

@@ -392,7 +380,9 @@ $(UI_BUILD_FLAG_FILE):
	cp -r awx/ui/build/static/media/* awx/public/static/media
	touch $@

ui-release: awx/ui/node_modules $(UI_BUILD_FLAG_FILE)

ui-release: $(UI_BUILD_FLAG_FILE)

ui-devel: awx/ui/node_modules
	@$(MAKE) -B $(UI_BUILD_FLAG_FILE)

@@ -410,7 +400,7 @@ ui-lint:

ui-test:
	$(NPM_BIN) --prefix awx/ui install
	$(NPM_BIN) run --prefix awx/ui test -- --coverage --maxWorkers=4 --watchAll=false
	$(NPM_BIN) run --prefix awx/ui test


# Build a pip-installable package into dist/ with a timestamped version number.

@@ -421,10 +411,17 @@ dev_build:
release_build:
	$(PYTHON) setup.py release_build

dist/$(SDIST_TAR_FILE): ui-release VERSION
HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
endif
	$(PYTHON) setup.py $(SDIST_COMMAND)
	ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz

sdist: dist/$(SDIST_TAR_FILE)
	echo $(HEADLESS)
	@echo "#############################################"
	@echo "Artifacts:"
	@echo dist/$(SDIST_TAR_FILE)

@@ -450,19 +447,21 @@ docker-compose-sources: .git/hooks/pre-commit
	ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
		-e awx_image=$(DEV_DOCKER_TAG_BASE)/awx_devel \
		-e awx_image_tag=$(COMPOSE_TAG) \
		-e receptor_image=$(RECEPTOR_IMAGE) \
		-e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \
		-e execution_node_count=$(EXECUTION_NODE_COUNT) \
		-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP)
		-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
		-e enable_keycloak=$(KEYCLOAK)


docker-compose: docker-auth awx/projects docker-compose-sources
docker-compose: awx/projects docker-compose-sources
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans

docker-compose-credential-plugins: docker-auth awx/projects docker-compose-sources
docker-compose-credential-plugins: awx/projects docker-compose-sources
	echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans

docker-compose-test: docker-auth awx/projects docker-compose-sources
docker-compose-test: awx/projects docker-compose-sources
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /bin/bash

docker-compose-runtest: awx/projects docker-compose-sources

@@ -471,8 +470,9 @@ docker-compose-runtest: awx/projects docker-compose-sources
docker-compose-build-swagger: awx/projects docker-compose-sources
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger

SCHEMA_DIFF_BASE_BRANCH ?= devel
detect-schema-change: genschema
	curl https://s3.amazonaws.com/awx-public-ci-files/schema.json -o reference-schema.json
	curl https://s3.amazonaws.com/awx-public-ci-files/$(SCHEMA_DIFF_BASE_BRANCH)/schema.json -o reference-schema.json
	# Ignore differences in whitespace with -b
	diff -u -b reference-schema.json schema.json

@@ -487,15 +487,15 @@ docker-compose-container-group-clean:

# Base development image build
docker-compose-build:
	ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True
	ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
	DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
		--build-arg BUILDKIT_INLINE_CACHE=1 \
		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .

docker-clean:
	$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
	if [ $(shell docker images | grep "awx_devel") ]; then \
		docker images | grep "awx_devel" | awk '{print $$3}' | xargs docker rmi --force; \
	if [ "$(shell docker images | grep awx_devel)" ]; then \
		docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
	fi

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean

@@ -504,10 +504,10 @@ docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker-refresh: docker-clean docker-compose

# Docker Development Environment with Elastic Stack Connected
docker-compose-elk: docker-auth awx/projects docker-compose-sources
docker-compose-elk: awx/projects docker-compose-sources
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

docker-compose-cluster-elk: docker-auth awx/projects docker-compose-sources
docker-compose-cluster-elk: awx/projects docker-compose-sources
	docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

prometheus:

@@ -530,14 +530,18 @@ psql-container:
VERSION:
	@echo "awx: $(VERSION)"

PYTHON_VERSION:
	@echo "$(PYTHON)" | sed 's:python::'

Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
	ansible-playbook tools/ansible/dockerfile.yml
	ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)

Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
	ansible-playbook tools/ansible/dockerfile.yml \
		-e dockerfile_name=Dockerfile.kube-dev \
		-e kube_dev=True \
		-e template_dest=_build_kube_dev
		-e template_dest=_build_kube_dev \
		-e receptor_image=$(RECEPTOR_IMAGE)

awx-kube-dev-build: Dockerfile.kube-dev
	docker build -f Dockerfile.kube-dev \
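A few hedged usage examples of the targets and variables this Makefile diff introduces (the outputs described are what the recipes above imply, not captured runs):

```sh
make PYTHON_VERSION        # echoes "3.9" when PYTHON=python3.9 (sed strips the "python" prefix)
HEADLESS=yes make sdist    # skips the UI build prerequisite for dist/$(SDIST_TAR_FILE)
make detect-schema-change SCHEMA_DIFF_BASE_BRANCH=release_4.1   # diff against that branch's schema.json
KEYCLOAK=true make docker-compose   # passes -e enable_keycloak=true through docker-compose-sources
```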
@@ -151,7 +151,7 @@ def manage():
    from django.core.management import execute_from_command_line

    # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
    if not MODE == 'development':
    if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
        if (connection.pg_version // 10000) < 12:
            sys.stderr.write("Postgres version 12 is required\n")
            sys.exit(1)
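The new guard reads `SKIP_PG_VERSION_CHECK` via `os.getenv`, so any non-empty value disables the check; a hedged example:

```sh
# bypass the PostgreSQL >= 12 gate, e.g. against a throwaway database
SKIP_PG_VERSION_CHECK=1 awx-manage check
```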
@@ -44,6 +44,7 @@ from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning
from awx.api.metadata import SublistAttachDetatchMetadata, Metadata
from awx.conf import settings_registry

__all__ = [
    'APIView',

@@ -208,12 +209,27 @@ class APIView(views.APIView):
            return response

        if response.status_code >= 400:
            status_msg = "status %s received by user %s attempting to access %s from %s" % (
                response.status_code,
                request.user,
                request.path,
                request.META.get('REMOTE_ADDR', None),
            )
            msg_data = {
                'status_code': response.status_code,
                'user_name': request.user,
                'url_path': request.path,
                'remote_addr': request.META.get('REMOTE_ADDR', None),
            }

            if type(response.data) is dict:
                msg_data['error'] = response.data.get('error', response.status_text)
            elif type(response.data) is list:
                msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
            else:
                msg_data['error'] = response.status_text

            try:
                status_msg = getattr(settings, 'API_400_ERROR_LOG_FORMAT').format(**msg_data)
            except Exception as e:
                if getattr(settings, 'API_400_ERROR_LOG_FORMAT', None):
                    logger.error("Unable to format API_400_ERROR_LOG_FORMAT setting, defaulting log message: {}".format(e))
                status_msg = settings_registry.get_setting_field('API_400_ERROR_LOG_FORMAT').get_default().format(**msg_data)

            if hasattr(self, '__init_request_error__'):
                response = self.handle_exception(self.__init_request_error__)
            if response.status_code == 401:

@@ -221,6 +237,7 @@ class APIView(views.APIView):
                logger.info(status_msg)
            else:
                logger.warning(status_msg)

        response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
        time_started = getattr(self, 'time_started', None)
        response['X-API-Product-Version'] = get_awx_version()

@@ -817,7 +834,7 @@ class ResourceAccessList(ParentMixin, ListAPIView):


def trigger_delayed_deep_copy(*args, **kwargs):
    from awx.main.tasks import deep_copy_model_obj
    from awx.main.tasks.system import deep_copy_model_obj

    connection.on_commit(lambda: deep_copy_model_obj.delay(*args, **kwargs))
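The placeholders available to the format string are exactly the keys placed in `msg_data` above; a hedged example of overriding the setting (the default format itself lives in the settings registry, not shown in this diff):

```python
# Hypothetical settings override; valid placeholders per msg_data:
# status_code, user_name, url_path, remote_addr, error
API_400_ERROR_LOG_FORMAT = (
    "status {status_code} received by user {user_name} "
    "attempting to access {url_path} from {remote_addr}: {error}"
)
```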
@@ -243,7 +243,7 @@ class IsSystemAdminOrAuditor(permissions.BasePermission):
    """

    def has_permission(self, request, view):
        if not request.user:
        if not (request.user and request.user.is_authenticated):
            return False
        if request.method == 'GET':
            return request.user.is_superuser or request.user.is_system_auditor
@@ -57,6 +57,7 @@ from awx.main.models import (
|
||||
Host,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
InstanceLink,
|
||||
Inventory,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
@@ -378,19 +379,22 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
|
||||
def _get_related(self, obj):
|
||||
return {} if obj is None else self.get_related(obj)
|
||||
|
||||
def _generate_named_url(self, url_path, obj, node):
|
||||
url_units = url_path.split('/')
|
||||
def _generate_friendly_id(self, obj, node):
|
||||
reset_counters()
|
||||
named_url = node.generate_named_url(obj)
|
||||
url_units[4] = named_url
|
||||
return '/'.join(url_units)
|
||||
return node.generate_named_url(obj)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = OrderedDict()
|
||||
view = self.context.get('view', None)
|
||||
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and type(obj) in settings.NAMED_URL_GRAPH:
|
||||
original_url = self.get_url(obj)
|
||||
res['named_url'] = self._generate_named_url(original_url, obj, settings.NAMED_URL_GRAPH[type(obj)])
|
||||
original_path = self.get_url(obj)
|
||||
path_components = original_path.lstrip('/').rstrip('/').split('/')
|
||||
|
||||
friendly_id = self._generate_friendly_id(obj, settings.NAMED_URL_GRAPH[type(obj)])
|
||||
path_components[-1] = friendly_id
|
||||
|
||||
new_path = '/' + '/'.join(path_components) + '/'
|
||||
res['named_url'] = new_path
|
||||
if getattr(obj, 'created_by', None):
|
||||
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
|
||||
if getattr(obj, 'modified_by', None):
|
||||
@@ -861,7 +865,7 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
if 'elapsed' in ret:
|
||||
if obj and obj.pk and obj.started and not obj.finished:
|
||||
td = now() - obj.started
|
||||
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
|
||||
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / (10**6 * 1.0)
|
||||
ret['elapsed'] = float(ret['elapsed'])
|
||||
# Because this string is saved in the db in the source language,
|
||||
# it must be marked for translation after it is pulled from the db, not when set
|
||||
@@ -1639,7 +1643,25 @@ class BaseSerializerWithVariables(BaseSerializer):
|
||||
return vars_validate_or_raise(value)
|
||||
|
||||
|
||||
class InventorySerializer(BaseSerializerWithVariables):
|
||||
class LabelsListMixin(object):
|
||||
def _summary_field_labels(self, obj):
|
||||
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
|
||||
if has_model_field_prefetched(obj, 'labels'):
|
||||
label_ct = len(obj.labels.all())
|
||||
else:
|
||||
if len(label_list) < 10:
|
||||
label_ct = len(label_list)
|
||||
else:
|
||||
label_ct = obj.labels.count()
|
||||
return {'count': label_ct, 'results': label_list}
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
res = super(LabelsListMixin, self).get_summary_fields(obj)
|
||||
res['labels'] = self._summary_field_labels(obj)
|
||||
return res
|
||||
|
||||
|
||||
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
||||
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
|
||||
capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]
|
||||
|
||||
@@ -1680,6 +1702,7 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
object_roles=self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
|
||||
instance_groups=self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
|
||||
copy=self.reverse('api:inventory_copy', kwargs={'pk': obj.pk}),
|
||||
labels=self.reverse('api:inventory_label_list', kwargs={'pk': obj.pk}),
|
||||
)
|
||||
)
|
||||
if obj.organization:
|
||||
@@ -2749,24 +2772,6 @@ class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
|
||||
fields = ('*', '-user', '-team')
|
||||
|
||||
|
||||
class LabelsListMixin(object):
|
||||
def _summary_field_labels(self, obj):
|
||||
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
|
||||
if has_model_field_prefetched(obj, 'labels'):
|
||||
label_ct = len(obj.labels.all())
|
||||
else:
|
||||
if len(label_list) < 10:
|
||||
label_ct = len(label_list)
|
||||
else:
|
||||
label_ct = obj.labels.count()
|
||||
return {'count': label_ct, 'results': label_list}
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
res = super(LabelsListMixin, self).get_summary_fields(obj)
|
||||
res['labels'] = self._summary_field_labels(obj)
|
||||
return res
|
||||
|
||||
|
||||
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
|
||||
class Meta:
|
||||
fields = (
|
||||
@@ -4767,6 +4772,28 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
        return super(ScheduleSerializer, self).validate(attrs)


class InstanceLinkSerializer(BaseSerializer):
    class Meta:
        model = InstanceLink
        fields = ('source', 'target')

    source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
    target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)


class InstanceNodeSerializer(BaseSerializer):
    class Meta:
        model = Instance
        fields = ('id', 'hostname', 'node_type', 'node_state')

    node_state = serializers.SerializerMethodField()

    def get_node_state(self, obj):
        if not obj.enabled:
            return "disabled"
        return "error" if obj.errors else "healthy"


class InstanceSerializer(BaseSerializer):

    consumed_capacity = serializers.SerializerMethodField()
@@ -4810,7 +4837,8 @@ class InstanceSerializer(BaseSerializer):
        res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
        res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
        if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
            res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
            if obj.node_type != 'hop':
                res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
        return res

    def get_consumed_capacity(self, obj):
@@ -5003,6 +5031,7 @@ class ActivityStreamSerializer(BaseSerializer):
            ('credential_type', ('id', 'name', 'description', 'kind', 'managed')),
            ('ad_hoc_command', ('id', 'name', 'status', 'limit')),
            ('workflow_approval', ('id', 'name', 'unified_job_id')),
            ('instance', ('id', 'hostname')),
        ]
        return field_list
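With LabelsListMixin now mixed into InventorySerializer, an inventory's summary_fields gains a capped labels summary. A minimal sketch of the resulting shape, based on _summary_field_labels above (the ids and names here are hypothetical):

# Hypothetical summary_fields fragment for an inventory with two labels;
# 'count' is the total label count, 'results' is capped at the first 10.
summary_fields = {
    'labels': {
        'count': 2,
        'results': [
            {'id': 11, 'name': 'dev'},
            {'id': 12, 'name': 'prod'},
        ],
    },
}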
1  awx/api/templates/api/mesh_visualizer.md  Normal file
@@ -0,0 +1 @@
Make a GET request to this resource to obtain a list of all Receptor Nodes and their links.
@@ -20,6 +20,7 @@ from awx.api.views import (
    InventoryAccessList,
    InventoryObjectRolesList,
    InventoryInstanceGroupsList,
    InventoryLabelList,
    InventoryCopy,
)

@@ -41,6 +42,7 @@ urls = [
    url(r'^(?P<pk>[0-9]+)/access_list/$', InventoryAccessList.as_view(), name='inventory_access_list'),
    url(r'^(?P<pk>[0-9]+)/object_roles/$', InventoryObjectRolesList.as_view(), name='inventory_object_roles_list'),
    url(r'^(?P<pk>[0-9]+)/instance_groups/$', InventoryInstanceGroupsList.as_view(), name='inventory_instance_groups_list'),
    url(r'^(?P<pk>[0-9]+)/labels/$', InventoryLabelList.as_view(), name='inventory_label_list'),
    url(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
]
@@ -28,6 +28,7 @@ from awx.api.views import (
    OAuth2TokenList,
    ApplicationOAuth2TokenList,
    OAuth2ApplicationDetail,
    MeshVisualizer,
)

from awx.api.views.metrics import MetricsView
@@ -95,6 +96,7 @@ v2_urls = [
    url(r'^me/$', UserMeList.as_view(), name='user_me_list'),
    url(r'^dashboard/$', DashboardView.as_view(), name='dashboard_view'),
    url(r'^dashboard/graphs/jobs/$', DashboardJobsGraphView.as_view(), name='dashboard_jobs_graph_view'),
    url(r'^mesh_visualizer/', MeshVisualizer.as_view(), name='mesh_visualizer_view'),
    url(r'^settings/', include('awx.conf.urls')),
    url(r'^instances/', include(instance_urls)),
    url(r'^instance_groups/', include(instance_group_urls)),
@@ -62,7 +62,7 @@ import pytz
from wsgiref.util import FileWrapper

# AWX
from awx.main.tasks import send_notifications, update_inventory_computed_fields
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
from awx.main.access import get_user_queryset, HostAccess
from awx.api.generics import (
    APIView,
@@ -157,8 +157,10 @@ from awx.api.views.inventory import ( # noqa
    InventoryAccessList,
    InventoryObjectRolesList,
    InventoryJobTemplateList,
    InventoryLabelList,
    InventoryCopy,
)
from awx.api.views.mesh_visualizer import MeshVisualizer # noqa
from awx.api.views.root import ( # noqa
    ApiRootView,
    ApiOAuthAuthorizationRootView,
@@ -406,6 +408,8 @@ class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAtta
    def is_valid_relation(self, parent, sub, created=False):
        if parent.node_type == 'control':
            return {'msg': _(f"Cannot change instance group membership of control-only node: {parent.hostname}.")}
        if parent.node_type == 'hop':
            return {'msg': _(f"Cannot change instance group membership of hop node: {parent.hostname}.")}
        return None


@@ -416,6 +420,10 @@ class InstanceHealthCheck(GenericAPIView):
    serializer_class = serializers.InstanceHealthCheckSerializer
    permission_classes = (IsSystemAdminOrAuditor,)

    def get_queryset(self):
        # FIXME: For now, we don't have a good way of checking the health of a hop node.
        return super().get_queryset().exclude(node_type='hop')

    def get(self, request, *args, **kwargs):
        obj = self.get_object()
        data = self.get_serializer(data=request.data).to_representation(obj)
@@ -425,7 +433,7 @@ class InstanceHealthCheck(GenericAPIView):
        obj = self.get_object()

        if obj.node_type == 'execution':
            from awx.main.tasks import execution_node_health_check
            from awx.main.tasks.system import execution_node_health_check

            runner_data = execution_node_health_check(obj.hostname)
            obj.refresh_from_db()
@@ -435,7 +443,7 @@ class InstanceHealthCheck(GenericAPIView):
            if extra_field in runner_data:
                data[extra_field] = runner_data[extra_field]
        else:
            from awx.main.tasks import cluster_node_health_check
            from awx.main.tasks.system import cluster_node_health_check

            if settings.CLUSTER_HOST_ID == obj.hostname:
                cluster_node_health_check(obj.hostname)
@@ -503,6 +511,8 @@ class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetac
    def is_valid_relation(self, parent, sub, created=False):
        if sub.node_type == 'control':
            return {'msg': _(f"Cannot change instance group membership of control-only node: {sub.hostname}.")}
        if sub.node_type == 'hop':
            return {'msg': _(f"Cannot change instance group membership of hop node: {sub.hostname}.")}
        return None

@@ -16,17 +16,21 @@ from rest_framework.response import Response
from rest_framework import status

# AWX
from awx.main.models import (
    ActivityStream,
    Inventory,
    JobTemplate,
    Role,
    User,
    InstanceGroup,
    InventoryUpdateEvent,
    InventoryUpdate,
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate

from awx.main.models.label import Label

from awx.api.generics import (
    ListCreateAPIView,
    RetrieveUpdateDestroyAPIView,
    SubListAPIView,
    SubListAttachDetachAPIView,
    ResourceAccessList,
    CopyAPIView,
    DeleteLastUnattachLabelMixin,
    SubListCreateAttachDetachAPIView,
)
from awx.api.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, SubListAPIView, SubListAttachDetachAPIView, ResourceAccessList, CopyAPIView


from awx.api.serializers import (
    InventorySerializer,
@@ -35,6 +39,7 @@ from awx.api.serializers import (
    InstanceGroupSerializer,
    InventoryUpdateEventSerializer,
    JobTemplateSerializer,
    LabelSerializer,
)
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin

@@ -152,6 +157,30 @@ class InventoryJobTemplateList(SubListAPIView):
        return qs.filter(inventory=parent)


class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView):

    model = Label
    serializer_class = LabelSerializer
    parent_model = Inventory
    relationship = 'labels'

    def post(self, request, *args, **kwargs):
        # If a label already exists in the database, attach it instead of erroring out
        # that it already exists
        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
            if existing.exists():
                existing = existing[0]
                request.data['id'] = existing.id
                del request.data['name']
                del request.data['organization']
        if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() > 100:
            return Response(
                dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
            )
        return super(InventoryLabelList, self).post(request, *args, **kwargs)


class InventoryCopy(CopyAPIView):

    model = Inventory
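The post() override above accepts either an existing label id or a name/organization pair, quietly attaching an existing label rather than rejecting it as a duplicate. A hedged sketch of both request bodies (the endpoint path and values are illustrative, derived from the URL conf earlier in this diff):

# Attach by name/organization: if a Label with this name already exists in the
# organization, the view swaps in its id and attaches it instead of failing.
create_or_attach = {'name': 'monitoring', 'organization': 1}

# Attach an existing label directly by primary key.
attach_existing = {'id': 42}

# Either payload would be POSTed to the inventory's labels/ sub-resource; note
# that more than 100 labels on one inventory returns HTTP 400.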
25  awx/api/views/mesh_visualizer.py  Normal file
@@ -0,0 +1,25 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.

from django.utils.translation import ugettext_lazy as _

from awx.api.generics import APIView, Response
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.api.serializers import InstanceLinkSerializer, InstanceNodeSerializer
from awx.main.models import InstanceLink, Instance


class MeshVisualizer(APIView):

    name = _("Mesh Visualizer")
    permission_classes = (IsSystemAdminOrAuditor,)
    swagger_topic = "System Configuration"

    def get(self, request, format=None):

        data = {
            'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data,
            'links': InstanceLinkSerializer(InstanceLink.objects.all(), many=True).data,
        }

        return Response(data)
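Given the InstanceNodeSerializer and InstanceLinkSerializer shown earlier in this diff, the payload this view returns is a flat node list plus hostname-keyed links. A sketch of a plausible response (hostnames and ids are hypothetical):

# Plausible payload from a GET to the mesh_visualizer endpoint:
data = {
    'nodes': [
        {'id': 1, 'hostname': 'awx-control-1', 'node_type': 'control', 'node_state': 'healthy'},
        {'id': 2, 'hostname': 'awx-exec-1', 'node_type': 'execution', 'node_state': 'disabled'},
    ],
    'links': [
        # 'source' opens the connection out to 'target', per InstanceLink.
        {'source': 'awx-control-1', 'target': 'awx-exec-1'},
    ],
}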
@@ -123,6 +123,7 @@ class ApiVersionRootView(APIView):
        data['workflow_approvals'] = reverse('api:workflow_approval_list', request=request)
        data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)
        data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
        data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
        return Response(data)


@@ -149,13 +150,13 @@ class ApiV2PingView(APIView):
        response = {'ha': is_ha_environment(), 'version': get_awx_version(), 'active_node': settings.CLUSTER_HOST_ID, 'install_uuid': settings.INSTALL_UUID}

        response['instances'] = []
        for instance in Instance.objects.all():
        for instance in Instance.objects.exclude(node_type='hop'):
            response['instances'].append(
                dict(
                    node=instance.hostname,
                    node_type=instance.node_type,
                    uuid=instance.uuid,
                    heartbeat=instance.modified,
                    heartbeat=instance.last_seen,
                    capacity=instance.capacity,
                    version=instance.version,
                )
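After this change, hop nodes drop out of the ping payload entirely and heartbeat reports last_seen rather than the row's modified timestamp. A sketch of one resulting entry (all values hypothetical):

# One element of response['instances'] after this change:
instance_entry = dict(
    node='awx-exec-1',
    node_type='execution',
    uuid='00000000-0000-0000-0000-000000000000',
    heartbeat='2021-11-01T12:00:00Z',  # instance.last_seen, not instance.modified
    capacity=59,
    version='19.5.0',
)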
@@ -13,6 +13,9 @@ from django.utils.translation import ugettext_lazy as _
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField, NullBooleanField  # noqa
from rest_framework.serializers import PrimaryKeyRelatedField  # noqa

# AWX
from awx.main.constants import CONTAINER_VOLUMES_MOUNT_TYPES, MAX_ISOLATED_PATH_COLON_DELIMITER

logger = logging.getLogger('awx.conf.fields')

# Use DRF fields to convert/validate settings:
@@ -109,6 +112,49 @@ class StringListPathField(StringListField):
            self.fail('type_error', input_type=type(paths))


class StringListIsolatedPathField(StringListField):
    # Valid formats
    # '/etc/pki/ca-trust'
    # '/etc/pki/ca-trust:/etc/pki/ca-trust'
    # '/etc/pki/ca-trust:/etc/pki/ca-trust:O'

    default_error_messages = {
        'type_error': _('Expected list of strings but got {input_type} instead.'),
        'path_error': _('{path} is not a valid path choice. You must provide an absolute path.'),
        'mount_error': _('{scontext} is not a valid mount option. Allowed types are {mount_types}'),
        'syntax_error': _('Invalid syntax. A string HOST-DIR[:CONTAINER-DIR[:OPTIONS]] is expected but got {path}.'),
    }

    def to_internal_value(self, paths):
        if isinstance(paths, (list, tuple)):
            for p in paths:
                if not isinstance(p, str):
                    self.fail('type_error', input_type=type(p))
                if not p.startswith('/'):
                    self.fail('path_error', path=p)

                if p.count(':'):
                    if p.count(':') > MAX_ISOLATED_PATH_COLON_DELIMITER:
                        self.fail('syntax_error', path=p)
                    try:
                        src, dest, scontext = p.split(':')
                    except ValueError:
                        scontext = 'z'
                        src, dest = p.split(':')
                    finally:
                        for sp in [src, dest]:
                            if not len(sp):
                                self.fail('syntax_error', path=sp)
                            if not sp.startswith('/'):
                                self.fail('path_error', path=sp)
                        if scontext not in CONTAINER_VOLUMES_MOUNT_TYPES:
                            self.fail('mount_error', scontext=scontext, mount_types=CONTAINER_VOLUMES_MOUNT_TYPES)
            return super(StringListIsolatedPathField, self).to_internal_value(sorted(paths))
        else:
            self.fail('type_error', input_type=type(paths))


class URLField(CharField):
    # these lines set up a custom regex that allow numbers in the
    # top-level domain
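Concretely, the new field accepts only the three colon-delimited forms listed in its comment; a few examples checked against the validator above (a missing OPTIONS segment defaults the mount option to 'z'):

# Values AWX_ISOLATION_SHOW_PATHS would accept with this field:
valid = [
    '/etc/pki/ca-trust',                      # HOST-DIR only
    '/etc/pki/ca-trust:/etc/pki/ca-trust',    # HOST-DIR:CONTAINER-DIR, option defaults to 'z'
    '/etc/pki/ca-trust:/etc/pki/ca-trust:O',  # explicit option, must be 'z' or 'O'
]

# Values it would reject:
invalid = [
    'etc/pki/ca-trust',  # path_error: not an absolute path
    '/a:/b:O:extra',     # syntax_error: more than MAX_ISOLATED_PATH_COLON_DELIMITER colons
    '/a:/b:rw',          # mount_error: 'rw' not in CONTAINER_VOLUMES_MOUNT_TYPES
]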
@@ -26,7 +26,7 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.api.versioning import reverse
from awx.main.utils import camelcase_to_underscore
from awx.main.tasks import handle_setting_changes
from awx.main.tasks.system import handle_setting_changes
from awx.conf.models import Setting
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
from awx.conf import settings_registry
@@ -4856,7 +4856,7 @@ msgid "Exception connecting to PagerDuty: {}"
msgstr ""

#: awx/main/notifications/pagerduty_backend.py:87
#: awx/main/notifications/slack_backend.py:48
#: awx/main/notifications/slack_backend.py:49
#: awx/main/notifications/twilio_backend.py:47
msgid "Exception sending messages: {}"
msgstr ""

File diff suppressed because it is too large
@@ -1,6 +1,3 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
@@ -40,7 +37,7 @@ msgstr "secondes"

#: awx/api/conf.py:29
msgid "Maximum number of simultaneous logged in sessions"
msgstr "Le nombre maximum de sessions actives en simultané"
msgstr "Nombre maximum de sessions actives en simultané"

#: awx/api/conf.py:30
msgid ""
@@ -2255,7 +2252,6 @@ msgid ""
"Maximum number of messages to update the UI live job output with per second. "
"Value of 0 means no limit."
msgstr "Nombre maximal de messages pour mettre à jour la sortie du Job dans l'interface live utilisateur, par seconde. La valeur de 0 signifie qu'il n'y a pas de limite."

#: awx/main/conf.py:380
msgid "Maximum Scheduled Jobs"
msgstr "Nombre max. de tâches planifiées"
@@ -2812,7 +2808,7 @@ msgstr "URL du coffre HashiCorp"
#: awx/main/models/credential/__init__.py:920
#: awx/main/models/credential/__init__.py:939
msgid "Token"
msgstr "Token"
msgstr "Jeton"

#: awx/main/credential_plugins/hashivault.py:25
msgid "The access token used to authenticate to the Vault server"
@@ -3724,7 +3720,7 @@ msgstr "interne : à la non-importation pour l'hôte"

#: awx/main/models/events.py:193
msgid "Play Started"
msgstr "Scène démarrée"
msgstr "Play Démarrage"

#: awx/main/models/events.py:194
msgid "Playbook Complete"
@@ -3981,7 +3977,7 @@ msgid ""
"The host would be marked enabled. If power_state where any value other than "
"powered_on then the host would be disabled when imported. If the key is not "
"found then the host will be enabled"
msgstr "Utilisé uniquement lorsque enabled_var est défini. Valeur lorsque l'hôte est considéré comme activé. Par exemple, si enabled_var=\"status.power_state \" et enabled_value=\"powered_on\" avec les variables de l'hôte:{ \"status\" : { \"power_state\" : \"powered_on\", \"created\" : \"2020-08-04T18:13:04+00:00\", \"healthy\" : true }, \"name\" : \"foobar\", \"ip_address\" : \"192.168.2.1\"}, l'hôte serait marqué comme étant activé. Si power_state contient une valeur autre que power_on, alors l'hôte sera désactivé lors de l'importation. Si la clé n'est pas trouvée, alors l'hôte sera activé"
msgstr "Utilisé uniquement lorsque enabled_var est défini. Valeur lorsque l'hôte est considéré comme activé. Par exemple, si enabled_var=\"status.power_state \" et enabled_value=\"powered_on\" avec les variables de l'hôte:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2020-08-04T18:13:04+00:00\", \"healthy\": true },\"name\" : \"foobar\", \"ip_address\" : \"192.168.2.1\"}, l'hôte serait marqué comme étant activé. Si power_state contient une valeur autre que power_on, alors l'hôte sera désactivé lors de l'importation. Si la clé n'est pas trouvée, alors l'hôte sera activé"

#: awx/main/models/inventory.py:878
msgid "Regex where only matching hosts will be imported."
@@ -4301,7 +4297,7 @@ msgstr "Utilisé pour une vérification plus rigoureuse de l'accès à une appli
#: awx/main/models/oauth.py:74
msgid ""
"Set to Public or Confidential depending on how secure the client device is."
msgstr "Défini sur sur Public ou Confidentiel selon le degré de sécurité du périphérique client."
msgstr "Définir sur sur Public ou Confidentiel selon le degré de sécurité du périphérique client."

#: awx/main/models/oauth.py:76
msgid ""
@@ -4555,7 +4551,7 @@ msgstr "Utilisation"

#: awx/main/models/rbac.py:52
msgid "Approve"
msgstr "Approbation"
msgstr "Approuver"

#: awx/main/models/rbac.py:56
msgid "Can manage all aspects of the system"
@@ -5147,7 +5143,7 @@ msgstr "Au moins %(min_certs)d certificats sont requis, seulement %(cert_count)d
#: awx/main/validators.py:152
#, python-format
msgid "Only one certificate is allowed, %(cert_count)d provided."
msgstr "Un seul certificat est autorisé, %(cert_count)d ont été fournis."
msgstr "Un seul certificat est autorisé, %(cert_count) ont été fournis."

#: awx/main/validators.py:154
#, python-format
@@ -5633,7 +5629,7 @@ msgstr "Nom de l'organisation GitHub"
msgid ""
"The name of your GitHub organization, as used in your organization's URL: "
"https://github.com/<yourorg>/."
msgstr "Nom de votre organisation GitHub, tel qu'utilisé dans l'URL de votre organisation : https://github.com/<votreorg>/."
msgstr "Nom de votre organisation GitHub, tel qu'utilisé dans l'URL de votre organisation : https://github.com/<yourorg>/."

#: awx/sso/conf.py:762
msgid "GitHub Organization OAuth2 Organization Map"
@@ -5653,7 +5649,7 @@ msgid ""
"<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and "
"secret (Client Secret). Provide this URL as the callback URL for your "
"application."
msgstr "Créez une application appartenant à une organisation sur https://github.com/organizations/<votreorg>/settings/applications et obtenez une clé OAuth2 (ID client) et un secret (secret client). Entrez cette URL comme URL de rappel de votre application."
msgstr "Créez une application appartenant à une organisation sur https://github.com/organizations/<yourorg>/settings/applications et obtenez une clé OAuth2 (ID client) et un secret (secret client). Entrez cette URL comme URL de rappel de votre application."

#: awx/sso/conf.py:797 awx/sso/conf.py:809 awx/sso/conf.py:820
#: awx/sso/conf.py:832 awx/sso/conf.py:843 awx/sso/conf.py:855
@@ -5789,7 +5785,7 @@ msgstr "Nom de l'organisation GitHub Enterprise"
msgid ""
"The name of your GitHub Enterprise organization, as used in your "
"organization's URL: https://github.com/<yourorg>/."
msgstr "Nom de votre organisation GitHub Enterprise, tel qu'utilisé dans l'URL de votre organisation : https://github.com/<votreorg>/."
msgstr "Nom de votre organisation GitHub Enterprise, tel qu'utilisé dans l'URL de votre organisation : https://github.com/<yourorg>/."

#: awx/sso/conf.py:1030
msgid "GitHub Enterprise Organization OAuth2 Organization Map"
@@ -1,6 +1,3 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
@@ -596,7 +593,7 @@ msgstr "指定された変数 {} には置き換えるデータベースの値

#: awx/api/serializers.py:3739
msgid "\"$encrypted$ is a reserved keyword, may not be used for {}.\""
msgstr "\"$encrypted$ は予約されたキーワードで、{} には使用できません。\""
msgstr "\"$encrypted は予約されたキーワードで {} には使用できません。\""

#: awx/api/serializers.py:4212
msgid "A project is required to run a job."
@@ -824,7 +821,7 @@ msgstr "ポリシーインスタンスの割合"
msgid ""
"Minimum percentage of all instances that will be automatically assigned to "
"this group when new instances come online."
msgstr "新規インスタンスがオンラインになると、このグループに自動的に最小限割り当てられるインスタンスの割合を選択します。"
msgstr "新規インスタンスがオンラインになると、このグループに自動的に最小限割り当てられるインスタンスの割合"

#: awx/api/serializers.py:4853
msgid "Policy Instance Minimum"
@@ -1253,7 +1250,7 @@ msgstr "デフォルトで指定されている選択項目は、一覧から回
msgid ""
"$encrypted$ is a reserved keyword for password question defaults, survey "
"question {idx} is type {survey_item[type]}."
msgstr "$encrypted$ は、デフォルト設定されているパスワードの質問に予約されたキーワードで、Survey の質問 {idx} は {survey_item[type]} タイプです。"
msgstr "$encrypted$ はパスワードの質問のデフォルトの予約されたキーワードで、Survey の質問 {idx} はタイプ {survey_item[type]} です。"

#: awx/api/views/__init__.py:2567
#, python-brace-format
@@ -3638,7 +3635,7 @@ msgstr "ホスト OK"

#: awx/main/models/events.py:169
msgid "Host Failure"
msgstr "ホストの失敗"
msgstr "ホストの障害"

#: awx/main/models/events.py:170 awx/main/models/events.py:767
msgid "Host Skipped"
@@ -4694,7 +4691,7 @@ msgstr "取り消されました"

#: awx/main/models/unified_jobs.py:84
msgid "Never Updated"
msgstr "更新されていません"
msgstr "未更新"

#: awx/main/models/unified_jobs.py:88
msgid "OK"
@@ -6261,4 +6258,3 @@ msgstr "%s が現在アップグレード中です。"

#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "このページは完了すると更新されます。"

File diff suppressed because it is too large
@@ -1,6 +1,3 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
@@ -596,7 +593,7 @@ msgstr "提供的变量 {} 没有要替换的数据库值。"

#: awx/api/serializers.py:3739
msgid "\"$encrypted$ is a reserved keyword, may not be used for {}.\""
msgstr "\"$encrypted$ 是一个保留的关键字,可能不能用于 {}\""
msgstr "\"$encrypted$ 是一个保留关键字,可能无法用于 {}。\""

#: awx/api/serializers.py:4212
msgid "A project is required to run a job."
@@ -1031,7 +1028,7 @@ msgstr "对于受管执行环境,只能编辑 'pull' 字段。"

#: awx/api/views/__init__.py:805
msgid "Project Schedules"
msgstr "项目调度"
msgstr "项目计划"

#: awx/api/views/__init__.py:816
msgid "Project SCM Inventory Sources"
@@ -1253,14 +1250,14 @@ msgstr "默认的选择必须从列出的选择中回答。"
msgid ""
"$encrypted$ is a reserved keyword for password question defaults, survey "
"question {idx} is type {survey_item[type]}."
msgstr "$encrypted$ 是密码问题默认值的保留关键字,问卷调查问题 {idx} 的类型是 {survey_item[type]}。"
msgstr "$encrypted$ 是密码问题默认值的保留关键字,问卷调查问题 {idx} 是类型 {survey_item[type]}。"

#: awx/api/views/__init__.py:2567
#, python-brace-format
msgid ""
"$encrypted$ is a reserved keyword, may not be used for new default in "
"position {idx}."
msgstr "$encrypted$ 是一个保留关键字,可能无法用于位置 {idx} 中的新默认值。"
msgstr "$encrypted$ 是一个保留关键字,无法用于位置 {idx} 中的新默认值。"

#: awx/api/views/__init__.py:2639
#, python-brace-format
@@ -2865,7 +2862,7 @@ msgstr "Secret 的路径"

#: awx/main/credential_plugins/hashivault.py:78
msgid "Path to Auth"
msgstr "Auth 的路径"
msgstr "到 Auth 的路径"

#: awx/main/credential_plugins/hashivault.py:81
msgid "The path where the Authentication method is mounted e.g, approle"
@@ -3979,7 +3976,7 @@ msgid ""
"The host would be marked enabled. If power_state where any value other than "
"powered_on then the host would be disabled when imported. If the key is not "
"found then the host will be enabled"
msgstr "仅在设置 enabled_var 时使用。 主机被视为启用时的值。 例如: if enabled_var=\"status.power_state\"and enabled_value=\"powered_on\" with host variables:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2020-08-04T18:13:04+00:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\"}The host would be marked enabled. 如果 power_state 在除 powered_on 以外的任何值,则会在导入时禁用主机。如果没有找到密钥,则会启用主机"
msgstr "仅在设置 enabled_var 时使用。 主机被视为启用时的值。 例如:是否 enabled_var=\"status.power_state\"and enabled_value=\"powered_on\" with host variables:{ \"status\": { \"power_state\": \"powered_on\", \"created\": \"2020-08-04T18:13:04+00:00\", \"healthy\": true }, \"name\": \"foobar\", \"ip_address\": \"192.168.2.1\"}。如果 power_state 在除 powered_on 以外的任何值,则会在导入时禁用主机。如果没有找到密钥,则会启用主机"

#: awx/main/models/inventory.py:878
msgid "Regex where only matching hosts will be imported."
@@ -6133,7 +6130,7 @@ msgstr "您的帐户不活跃"
#: awx/sso/validators.py:24 awx/sso/validators.py:51
#, python-format
msgid "DN must include \"%%(user)s\" placeholder for username: %s"
msgstr "DN 必须包含 \"%%\" 占位符用于用户名:%s"
msgstr "DN 必须包含 \"%%(user)s\" 占位符用于用户名:%s"

#: awx/sso/validators.py:31
#, python-format
@@ -6263,4 +6260,3 @@ msgstr "%s 当前正在升级。"

#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "完成后,此页面会刷新。"
@@ -853,7 +853,12 @@ class InventoryAccess(BaseAccess):
    """

    model = Inventory
    prefetch_related = ('created_by', 'modified_by', 'organization')
    prefetch_related = (
        'created_by',
        'modified_by',
        'organization',
        Prefetch('labels', queryset=Label.objects.all().order_by('name')),
    )

    def filtered_queryset(self, allowed=None, ad_hoc=None):
        return self.model.accessible_objects(self.user, 'read_role')
@@ -211,7 +211,7 @@ def projects_by_scm_type(since, **kwargs):
    return counts


@register('instance_info', '1.1', description=_('Cluster topology and capacity'))
@register('instance_info', '1.2', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs):
    info = {}
    instances = models.Instance.objects.values_list('hostname').values(
@@ -337,7 +341,11 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
            {tbl}.parent_uuid,
            {tbl}.event,
            task_action,
            (CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
            -- '-' operator listed here:
            -- https://www.postgresql.org/docs/12/functions-json.html
            -- note that operator is only supported by jsonb objects
            -- https://www.postgresql.org/docs/current/datatype-json.html
            (CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,
            {tbl}.failed,
            {tbl}.changed,
            {tbl}.playbook,
@@ -352,14 +356,14 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
            x.duration AS duration,
            x.res->'warnings' AS warnings,
            x.res->'deprecations' AS deprecations
            FROM {tbl}, json_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "start" text, "end" text)
            FROM {tbl}, jsonb_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "start" text, "end" text)
            WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
        return query

    try:
        return _copy_table(table='events', query=query(f"{tbl}.event_data::json"), path=full_path)
        return _copy_table(table='events', query=query(f"{tbl}.event_data::jsonb"), path=full_path)
    except UntranslatableCharacter:
        return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::json"), path=full_path)
        return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::jsonb"), path=full_path)


@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
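The switch from ::json to ::jsonb matters because the '-' (delete-key) operator only exists for jsonb, which is what lets artifact_data be stripped in-query. A minimal illustration of the same expression, assuming a PostgreSQL connection via psycopg2 (the DSN is hypothetical):

# jsonb '-' strips a top-level key; plain json has no such operator.
import psycopg2

conn = psycopg2.connect(dsn='')  # hypothetical/empty DSN, resolved from env vars
with conn.cursor() as cur:
    cur.execute("""SELECT '{"changed": false, "artifact_data": {"x": 1}}'::jsonb - 'artifact_data'""")
    print(cur.fetchone()[0])  # {'changed': False} -- artifact_data removed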
@@ -90,7 +90,7 @@ def package(target, data, timestamp):
        if isinstance(item, str):
            f.add(item, arcname=f'./{name}')
        else:
            buf = json.dumps(item).encode('utf-8')
            buf = json.dumps(item, cls=DjangoJSONEncoder).encode('utf-8')
            info = tarfile.TarInfo(f'./{name}')
            info.size = len(buf)
            info.mtime = timestamp.timestamp()
@@ -230,7 +230,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
        try:
            last_entry = max(last_entries.get(key) or last_gather, until - timedelta(weeks=4))
            results = (func(since or last_entry, collection_type=collection_type, until=until), func.__awx_analytics_version__)
            json.dumps(results)  # throwaway check to see if the data is json-serializable
            json.dumps(results, cls=DjangoJSONEncoder)  # throwaway check to see if the data is json-serializable
            data[filename] = results
        except Exception:
            logger.exception("Could not generate metric {}".format(filename))
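The encoder swap is what lets datetime (and similar non-primitive) values survive serialization in the analytics payloads; a small sketch of the difference:

# json.dumps raises TypeError on datetime; DjangoJSONEncoder emits ISO 8601.
import json
from datetime import datetime
from django.core.serializers.json import DjangoJSONEncoder

payload = {'collected_at': datetime(2021, 11, 1, 12, 0)}
# json.dumps(payload)                       -> TypeError: not JSON serializable
json.dumps(payload, cls=DjangoJSONEncoder)  # '{"collected_at": "2021-11-01T12:00:00"}'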
@@ -160,6 +160,7 @@ class Metrics:
            IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
            FloatM('callback_receiver_events_insert_db_seconds', 'Time spent saving events to database'),
            IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
            IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
            HistogramM(
                'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
            ),
@@ -72,8 +72,8 @@ register(
        'HTTP headers and meta keys to search to determine remote host '
        'name or IP. Add additional items to this list, such as '
        '"HTTP_X_FORWARDED_FOR", if behind a reverse proxy. '
        'See the "Proxy Support" section of the Adminstrator guide for '
        'more details.'
        'See the "Proxy Support" section of the AAP Installation guide '
        'for more details.'
    ),
    category=_('System'),
    category_slug='system',
@@ -259,10 +259,14 @@ register(

register(
    'AWX_ISOLATION_SHOW_PATHS',
    field_class=fields.StringListField,
    field_class=fields.StringListIsolatedPathField,
    required=False,
    label=_('Paths to expose to isolated jobs'),
    help_text=_('List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line.'),
    help_text=_(
        'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. '
        'Volumes will be mounted from the execution node to the container. '
        'The supported format is HOST-DIR[:CONTAINER-DIR[:OPTIONS]]. '
    ),
    category=_('Jobs'),
    category_slug='jobs',
)
@@ -408,6 +412,21 @@ register(
    unit=_('seconds'),
)

register(
    'DEFAULT_JOB_IDLE_TIMEOUT',
    field_class=fields.IntegerField,
    min_value=0,
    default=0,
    label=_('Default Job Idle Timeout'),
    help_text=_(
        'If no output is detected from ansible in this number of seconds the execution will be terminated. '
        'Use a value of 0 to fall back to the default idle_timeout of 600s.'
    ),
    category=_('Jobs'),
    category_slug='jobs',
    unit=_('seconds'),
)

register(
    'DEFAULT_INVENTORY_UPDATE_TIMEOUT',
    field_class=fields.IntegerField,
@@ -659,6 +678,24 @@ register(
    category=_('Logging'),
    category_slug='logging',
)
register(
    'API_400_ERROR_LOG_FORMAT',
    field_class=fields.CharField,
    default='status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}',
    label=_('Log Format For API 4XX Errors'),
    help_text=_(
        'The format of logged messages when an API 4XX error occurs, '
        'the following variables will be substituted: \n'
        'status_code - The HTTP status code of the error\n'
        'user_name - The user name attempting to use the API\n'
        'url_path - The URL path to the API endpoint called\n'
        'remote_addr - The remote address seen for the user\n'
        'error - The error set by the api endpoint\n'
        'Variables need to be in the format {<variable name>}.'
    ),
    category=_('Logging'),
    category_slug='logging',
)

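With the default format string above, a 4XX log line comes out roughly as follows (the user, path, and address below are hypothetical):

# Default API_400_ERROR_LOG_FORMAT rendered with hypothetical values:
fmt = 'status {status_code} received by user {user_name} attempting to access {url_path} from {remote_addr}'
print(fmt.format(status_code=403, user_name='alice', url_path='/api/v2/jobs/', remote_addr='10.0.0.5'))
# -> status 403 received by user alice attempting to access /api/v2/jobs/ from 10.0.0.5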
register(
@@ -672,7 +709,7 @@ register(
register(
    'AUTOMATION_ANALYTICS_LAST_ENTRIES',
    field_class=fields.CharField,
    label=_('Last gathered entries for expensive collectors for Insights for Ansible Automation Platform.'),
    label=_('Last gathered entries from the data collection service of Insights for Ansible Automation Platform'),
    default='',
    allow_blank=True,
    category=_('System'),
@@ -81,3 +81,14 @@ LOGGER_BLOCKLIST = (
# Reported version for node seen in receptor mesh but for which capacity check
# failed or is in progress
RECEPTOR_PENDING = 'ansible-runner-???'

# Naming pattern for AWX jobs in /tmp folder, like /tmp/awx_42_xiwm
# also update awxkit.api.pages.unified_jobs if changed
JOB_FOLDER_PREFIX = 'awx_%s_'

# :z option tells Podman that two containers share the volume content with r/w
# :O option tells Podman to mount the directory from the host as a temporary storage using the overlay file system.
# see podman-run manpage for further details
# /HOST-DIR:/CONTAINER-DIR:OPTIONS
CONTAINER_VOLUMES_MOUNT_TYPES = ['z', 'O']
MAX_ISOLATED_PATH_COLON_DELIMITER = 2
@@ -22,6 +22,7 @@ import psutil

from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper
from awx.main.utils.common import convert_mem_str_to_bytes

if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -248,7 +249,7 @@ class WorkerPool(object):
        except Exception:
            logger.exception('could not fork')
        else:
            logger.warn('scaling up worker pid:{}'.format(worker.pid))
            logger.debug('scaling up worker pid:{}'.format(worker.pid))
            return idx, worker

    def debug(self, *args, **kwargs):
@@ -319,7 +320,8 @@ class AutoscalePool(WorkerPool):
        if self.max_workers is None:
            settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
            if settings_absmem is not None:
                total_memory_gb = int(settings_absmem)
                # There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
                total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
            else:
                total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up
            # 5 workers per GB of total memory
@@ -387,7 +389,7 @@ class AutoscalePool(WorkerPool):
            # more processes in the pool than we need (> min)
            # send this process a message so it will exit gracefully
            # at the next opportunity
            logger.warn('scaling down worker pid:{}'.format(w.pid))
            logger.debug('scaling down worker pid:{}'.format(w.pid))
            w.quit()
            self.workers.remove(w)
            if w.alive:
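A worked example of the new sizing path, assuming convert_mem_str_to_bytes follows Kubernetes-style memory suffixes (its exact grammar lives in awx.main.utils.common, which is an assumption here):

# SYSTEM_TASK_ABS_MEM = '2Gi' -> 2147483648 bytes, and 2147483648 // 2**30 == 2 GB.
total_memory_gb = 2147483648 // 2**30  # == 2
# Per the "5 workers per GB of total memory" comment above, the pool would then
# cap out around 5 * 2 == 10 dispatcher workers (the exact formula sits just
# below the quoted hunk).
max_workers = 5 * total_memory_gb  # == 10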
@@ -60,7 +60,7 @@ class AWXConsumerBase(object):
        return f'listening on {self.queues}'

    def control(self, body):
        logger.warn(body)
        logger.warn(f'Received control signal:\n{body}')
        control = body.get('control')
        if control in ('status', 'running'):
            reply_queue = body['reply_to']
@@ -137,7 +137,7 @@ class AWXConsumerPG(AWXConsumerBase):
    def run(self, *args, **kwargs):
        super(AWXConsumerPG, self).run(*args, **kwargs)

        logger.warn(f"Running worker {self.name} listening to queues {self.queues}")
        logger.info(f"Running worker {self.name} listening to queues {self.queues}")
        init = False

        while True:
@@ -188,7 +188,7 @@ class BaseWorker(object):
                if 'uuid' in body:
                    uuid = body['uuid']
                    finished.put(uuid)
        logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
        logger.debug('worker exiting gracefully pid:{}'.format(os.getpid()))

    def perform_work(self, body):
        raise NotImplementedError()
@@ -17,7 +17,7 @@ import redis

from awx.main.consumers import emit_channel_notification
from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob, Job
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.tasks.system import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from awx.main.utils.profiling import AWXProfiler
import awx.main.analytics.subsystem_metrics as s_metrics
@@ -116,19 +116,20 @@ class CallbackBrokerWorker(BaseWorker):
    def flush(self, force=False):
        now = tz_now()
        if force or (time.time() - self.last_flush) > settings.JOB_EVENT_BUFFER_SECONDS or any([len(events) >= 1000 for events in self.buff.values()]):
            bulk_events_saved = 0
            singular_events_saved = 0
            metrics_bulk_events_saved = 0
            metrics_singular_events_saved = 0
            metrics_events_batch_save_errors = 0
            metrics_events_broadcast = 0
            for cls, events in self.buff.items():
                logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
                for e in events:
                    if not e.created:
                        e.created = now
                    e.modified = now
                duration_to_save = time.perf_counter()
                metrics_duration_to_save = time.perf_counter()
                try:
                    cls.objects.bulk_create(events)
                    bulk_events_saved += len(events)
                    metrics_bulk_events_saved += len(events)
                except Exception:
                    # if an exception occurs, we should re-attempt to save the
                    # events one-by-one, because something in the list is
@@ -137,22 +138,24 @@ class CallbackBrokerWorker(BaseWorker):
                    for e in events:
                        try:
                            e.save()
                            singular_events_saved += 1
                            metrics_singular_events_saved += 1
                        except Exception:
                            logger.exception('Database Error Saving Job Event')
                duration_to_save = time.perf_counter() - duration_to_save
                metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
                for e in events:
                    if not getattr(e, '_skip_websocket_message', False):
                        metrics_events_broadcast += 1
                        emit_event_detail(e)
            self.buff = {}
            self.last_flush = time.time()
            # only update metrics if we saved events
            if (bulk_events_saved + singular_events_saved) > 0:
            if (metrics_bulk_events_saved + metrics_singular_events_saved) > 0:
                self.subsystem_metrics.inc('callback_receiver_batch_events_errors', metrics_events_batch_save_errors)
                self.subsystem_metrics.inc('callback_receiver_events_insert_db_seconds', duration_to_save)
                self.subsystem_metrics.inc('callback_receiver_events_insert_db', bulk_events_saved + singular_events_saved)
                self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', bulk_events_saved)
                self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(bulk_events_saved + singular_events_saved))
                self.subsystem_metrics.inc('callback_receiver_events_insert_db_seconds', metrics_duration_to_save)
                self.subsystem_metrics.inc('callback_receiver_events_insert_db', metrics_bulk_events_saved + metrics_singular_events_saved)
                self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', metrics_bulk_events_saved)
                self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(metrics_bulk_events_saved + metrics_singular_events_saved))
                self.subsystem_metrics.inc('callback_receiver_events_broadcast', metrics_events_broadcast)
                if self.subsystem_metrics.should_pipe_execute() is True:
                    self.subsystem_metrics.pipe_execute()
@@ -9,7 +9,7 @@ from kubernetes.config import kube_config
from django.conf import settings
from django_guid.middleware import GuidMiddleware

from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown
from awx.main.tasks.system import dispatch_startup, inform_cluster_of_shutdown

from .base import BaseWorker

@@ -30,8 +30,8 @@ class TaskWorker(BaseWorker):
    """
    Transform a dotted notation task into an imported, callable function, e.g.,

    awx.main.tasks.delete_inventory
    awx.main.tasks.RunProjectUpdate
    awx.main.tasks.system.delete_inventory
    awx.main.tasks.jobs.RunProjectUpdate
    """
    if not task.startswith('awx.'):
        raise ValueError('{} is not a valid awx task'.format(task))
@@ -73,15 +73,15 @@ class TaskWorker(BaseWorker):
        'callbacks': [{
            'args': [],
            'kwargs': {}
            'task': u'awx.main.tasks.handle_work_success'
            'task': u'awx.main.tasks.system.handle_work_success'
        }],
        'errbacks': [{
            'args': [],
            'kwargs': {},
            'task': 'awx.main.tasks.handle_work_error'
            'task': 'awx.main.tasks.system.handle_work_error'
        }],
        'kwargs': {},
        'task': u'awx.main.tasks.RunProjectUpdate'
        'task': u'awx.main.tasks.jobs.RunProjectUpdate'
    }
    """
    settings.__clean_on_fork__()
@@ -36,3 +36,7 @@ class PostRunError(Exception):
        self.status = status
        self.tb = tb
        super(PostRunError, self).__init__(msg)


class ReceptorNodeNotFound(RuntimeError):
    pass
@@ -10,6 +10,6 @@ def is_ha_environment():
    otherwise.
    """
    # If there are two or more instances, then we are in an HA environment.
    if Instance.objects.count() > 1:
    if Instance.objects.filter(node_type__in=('control', 'hybrid')).count() > 1:
        return True
    return False
@@ -23,44 +23,54 @@ class Command(BaseCommand):
        with impersonate(superuser):
            with disable_computed_fields():
                if not Organization.objects.exists():
                    o = Organization.objects.create(name='Default')
                o, _ = Organization.objects.get_or_create(name='Default')

                p = Project(
                    name='Demo Project',
                    scm_type='git',
                    scm_url='https://github.com/ansible/ansible-tower-samples',
                    scm_update_on_launch=True,
                    scm_update_cache_timeout=0,
                    organization=o,
                )
                # Avoid calling directly the get_or_create() to bypass project update
                p = Project.objects.filter(name='Demo Project', scm_type='git').first()
                if not p:
                    p = Project(
                        name='Demo Project',
                        scm_type='git',
                        scm_url='https://github.com/ansible/ansible-tower-samples',
                        scm_update_on_launch=True,
                        scm_update_cache_timeout=0,
                    )

                p.organization = o
                p.save(skip_update=True)

                ssh_type = CredentialType.objects.filter(namespace='ssh').first()
                c = Credential.objects.create(
                c, _ = Credential.objects.get_or_create(
                    credential_type=ssh_type, name='Demo Credential', inputs={'username': superuser.username}, created_by=superuser
                )

                c.admin_role.members.add(superuser)

                public_galaxy_credential = Credential(
                public_galaxy_credential, _ = Credential.objects.get_or_create(
                    name='Ansible Galaxy',
                    managed=True,
                    credential_type=CredentialType.objects.get(kind='galaxy'),
                    inputs={'url': 'https://galaxy.ansible.com/'},
                )
                public_galaxy_credential.save()
                o.galaxy_credentials.add(public_galaxy_credential)

                i = Inventory.objects.create(name='Demo Inventory', organization=o, created_by=superuser)
                i, _ = Inventory.objects.get_or_create(name='Demo Inventory', organization=o, created_by=superuser)

                Host.objects.create(
                Host.objects.get_or_create(
                    name='localhost',
                    inventory=i,
                    variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
                    created_by=superuser,
                )

                jt = JobTemplate.objects.create(name='Demo Job Template', playbook='hello_world.yml', project=p, inventory=i)
                jt = JobTemplate.objects.filter(name='Demo Job Template').first()
                if jt:
                    jt.project = p
                    jt.inventory = i
                    jt.playbook = 'hello_world.yml'
                    jt.save()
                else:
                    jt, _ = JobTemplate.objects.get_or_create(name='Demo Job Template', playbook='hello_world.yml', project=p, inventory=i)
                jt.credentials.add(c)

                print('Default organization added.')
@@ -76,7 +76,24 @@ class AnsibleInventoryLoader(object):
        bargs.extend(['-v', '{0}:{0}:Z'.format(self.source)])
        for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
            bargs.extend(['-e', '{0}={1}'.format(key, value)])
        bargs.extend([get_default_execution_environment().image])
        ee = get_default_execution_environment()

        if settings.IS_K8S:
            logger.warn('This command is not able to run on kubernetes-based deployment. This action should be done using the API.')
            sys.exit(1)

        if ee.credential:
            process = subprocess.run(['podman', 'image', 'exists', ee.image], capture_output=True)
            if process.returncode != 0:
                logger.warn(
                    f'The default execution environment (id={ee.id}, name={ee.name}, image={ee.image}) is not available on this node. '
                    'The image needs to be available locally before using this command, due to registry authentication. '
                    'To pull this image, either run a job on this node or manually pull the image.'
                )
                sys.exit(1)

        bargs.extend([ee.image])

        bargs.extend(['ansible-inventory', '-i', self.source])
        bargs.extend(['--playbook-dir', functioning_dir(self.source)])
        if self.verbosity:
@@ -111,9 +128,7 @@ class AnsibleInventoryLoader(object):

    def load(self):
        base_args = self.get_base_args()

        logger.info('Reading Ansible inventory source: %s', self.source)

        return self.command_to_json(base_args)


@@ -138,7 +153,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='v',
            help='host variable used to ' 'set/clear enabled flag when host is online/offline, may ' 'be specified as "foo.bar" to traverse nested dicts.',
            help='host variable used to set/clear enabled flag when host is online/offline, may be specified as "foo.bar" to traverse nested dicts.',
        )
        parser.add_argument(
            '--enabled-value',
@@ -146,7 +161,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='v',
            help='value of host variable ' 'specified by --enabled-var that indicates host is ' 'enabled/online.',
            help='value of host variable specified by --enabled-var that indicates host is enabled/online.',
        )
        parser.add_argument(
            '--group-filter',
@@ -154,7 +169,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='regex',
            help='regular expression ' 'to filter group name(s); only matches are imported.',
            help='regular expression to filter group name(s); only matches are imported.',
        )
        parser.add_argument(
            '--host-filter',
@@ -162,14 +177,14 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='regex',
            help='regular expression ' 'to filter host name(s); only matches are imported.',
            help='regular expression to filter host name(s); only matches are imported.',
        )
        parser.add_argument(
            '--exclude-empty-groups',
            dest='exclude_empty_groups',
            action='store_true',
            default=False,
            help='when set, ' 'exclude all groups that have no child groups, hosts, or ' 'variables.',
            help='when set, exclude all groups that have no child groups, hosts, or variables.',
        )
        parser.add_argument(
            '--instance-id-var',
@@ -177,7 +192,7 @@ class Command(BaseCommand):
            type=str,
            default=None,
            metavar='v',
            help='host variable that ' 'specifies the unique, immutable instance ID, may be ' 'specified as "foo.bar" to traverse nested dicts.',
            help='host variable that specifies the unique, immutable instance ID, may be specified as "foo.bar" to traverse nested dicts.',
        )

    def set_logging_level(self, verbosity):
@@ -1017,4 +1032,4 @@ class Command(BaseCommand):
        if settings.SQL_DEBUG:
            queries_this_import = connection.queries[queries_before:]
            sqltime = sum(float(x['time']) for x in queries_this_import)
            logger.warning('Inventory import required %d queries ' 'taking %0.3fs', len(queries_this_import), sqltime)
            logger.warning('Inventory import required %d queries taking %0.3fs', len(queries_this_import), sqltime)
@@ -11,13 +11,16 @@ class Ungrouped(object):
    policy_instance_percentage = None
    policy_instance_minimum = None

    def __init__(self):
        self.qs = Instance.objects.filter(rampart_groups__isnull=True)

    @property
    def instances(self):
        return Instance.objects.filter(rampart_groups__isnull=True)
        return self.qs

    @property
    def capacity(self):
        return sum(x.capacity for x in self.instances)
        return sum(x.capacity for x in self.instances.all())


class Command(BaseCommand):
@@ -29,26 +32,29 @@ class Command(BaseCommand):

        groups = list(InstanceGroup.objects.all())
        ungrouped = Ungrouped()
        if len(ungrouped.instances):
        if len(ungrouped.instances.all()):
            groups.append(ungrouped)

        for instance_group in groups:
            fmt = '[{0.name} capacity={0.capacity}'
            if instance_group.policy_instance_percentage:
                fmt += ' policy={0.policy_instance_percentage}%'
            if instance_group.policy_instance_minimum:
                fmt += ' policy>={0.policy_instance_minimum}'
            print((fmt + ']').format(instance_group))
            for x in instance_group.instances.all():
        for ig in groups:
            policy = ''
            if ig.policy_instance_percentage:
                policy = f' policy={ig.policy_instance_percentage}%'
            if ig.policy_instance_minimum:
                policy = f' policy>={ig.policy_instance_minimum}'
            print(f'[{ig.name} capacity={ig.capacity}{policy}]')

            for x in ig.instances.all():
                color = '\033[92m'
                if x.capacity == 0:
                if x.capacity == 0 and x.node_type != 'hop':
                    color = '\033[91m'
                if x.enabled is False:
                if not x.enabled:
                    color = '\033[90m[DISABLED] '
                if no_color:
                    color = ''
                fmt = '\t' + color + '{0.hostname} capacity={0.capacity} version={1}'
                if x.capacity:
                    fmt += ' heartbeat="{0.modified:%Y-%m-%d %H:%M:%S}"'
                print((fmt + '\033[0m').format(x, x.version or '?'))
            print('')

                capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
                version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
                heartbeat = f' heartbeat="{x.modified:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
                print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')

            print()
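With this rewrite, hop nodes omit capacity and version but still report a heartbeat, while every instance now shows its node_type. A sketch of plausible output, tracing the f-strings above with hypothetical hostnames:

# Hypothetical `awx-manage list_instances` output after this change:
#
# [controlplane capacity=59 policy=100%]
#     awx-control-1 capacity=59 node_type=control version=19.5.0 heartbeat="2021-11-01 12:00:00"
#
# [default capacity=0]
#     awx-hop-1 node_type=hop heartbeat="2021-11-01 12:00:00"
#     awx-exec-1 capacity=0 node_type=execution version=19.5.0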
@@ -1,6 +1,6 @@
from django.core.management.base import BaseCommand

from awx.main.tasks import profile_sql
from awx.main.tasks.system import profile_sql


class Command(BaseCommand):
@@ -13,19 +13,19 @@ class Command(BaseCommand):
    Register this instance with the database for HA tracking.
    """

    help = 'Add instance to the database. ' 'Specify `--hostname` to use this command.'
    help = "Add instance to the database. Specify `--hostname` to use this command."

    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')
        parser.add_argument('--node_type', type=str, default="hybrid", choices=["control", "execution", "hybrid"], help='Instance Node type')
        parser.add_argument('--uuid', type=str, help='Instance UUID')
        parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
        parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
        parser.add_argument('--uuid', type=str, help="Instance UUID")

    def _register_hostname(self, hostname, node_type, uuid):
        if not hostname:
            return
        (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
        if changed:
            print('Successfully registered instance {}'.format(hostname))
            print("Successfully registered instance {}".format(hostname))
        else:
            print("Instance already registered {}".format(instance.hostname))
        self.changed = changed
@@ -37,4 +37,4 @@ class Command(BaseCommand):
        self.changed = False
        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
        if self.changed:
            print('(changed: True)')
            print("(changed: True)")
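A usage sketch for the extended command; the hostname is hypothetical, and 'hop' is now an accepted --node_type alongside control, execution, and hybrid:

# Register a hop node (hostname hypothetical):
#   $ awx-manage provision_instance --hostname awx-hop-1 --node_type hop
#   Successfully registered instance awx-hop-1
#   (changed: True)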
87  awx/main/management/commands/register_peers.py  Normal file
@@ -0,0 +1,87 @@
|
||||
import warnings
|
||||
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction
|
||||
|
||||
from awx.main.models import Instance, InstanceLink
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Internal tower command.
|
||||
Register the peers of a receptor node.
|
||||
"""
|
||||
|
||||
help = "Register or remove links between Receptor nodes."
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('source', type=str, help="Receptor node opening the connections.")
|
||||
parser.add_argument('--peers', type=str, nargs='+', required=False, help="Nodes that the source node connects out to.")
|
||||
parser.add_argument('--disconnect', type=str, nargs='+', required=False, help="Nodes that should no longer be connected to by the source node.")
|
||||
parser.add_argument(
|
||||
'--exact',
|
||||
type=str,
|
||||
nargs='*',
|
||||
required=False,
|
||||
help="The exact set of nodes the source node should connect out to. Any existing links registered in the database that do not match will be removed. May be empty.",
|
||||
)
|
||||
|
||||
def handle(self, **options):
|
||||
nodes = Instance.objects.in_bulk(field_name='hostname')
|
||||
if options['source'] not in nodes:
|
||||
raise CommandError(f"Host {options['source']} is not a registered instance.")
|
||||
if not (options['peers'] or options['disconnect'] or options['exact'] is not None):
|
||||
raise CommandError("One of the options --peers, --disconnect, or --exact is required.")
|
||||
if options['exact'] is not None and options['peers']:
|
||||
raise CommandError("The option --peers may not be used with --exact.")
|
||||
if options['exact'] is not None and options['disconnect']:
|
||||
raise CommandError("The option --disconnect may not be used with --exact.")
|
||||
|
||||
# No 1-cycles
|
||||
for collection in ('peers', 'disconnect', 'exact'):
|
||||
if options[collection] is not None and options['source'] in options[collection]:
|
||||
raise CommandError(f"Source node {options['source']} may not also be in --{collection}.")
|
||||
|
||||
# No 2-cycles
|
||||
if options['peers'] or options['exact'] is not None:
|
||||
peers = set(options['peers'] or options['exact'])
|
||||
incoming = set(InstanceLink.objects.filter(target=nodes[options['source']]).values_list('source__hostname', flat=True))
|
||||
if peers & incoming:
|
||||
warnings.warn(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.")
|
||||
|
||||
if options['peers']:
|
||||
missing_peers = set(options['peers']) - set(nodes)
|
||||
if missing_peers:
|
||||
missing = ' '.join(missing_peers)
|
||||
raise CommandError(f"Peers not currently registered as instances: {missing}")
|
||||
|
||||
results = 0
|
||||
for target in options['peers']:
|
||||
_, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
|
||||
if created:
|
||||
results += 1
|
||||
|
||||
print(f"{results} new peer links added to the database.")
|
||||
|
||||
if options['disconnect']:
|
||||
results = 0
|
||||
for target in options['disconnect']:
|
||||
if target not in nodes: # Be permissive, the node might have already been de-registered.
|
||||
continue
|
||||
n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=nodes[target]).delete()
|
||||
results += n
|
||||
|
||||
print(f"{results} peer links removed from the database.")
|
||||
|
||||
if options['exact'] is not None:
|
||||
additions = 0
|
||||
with transaction.atomic():
|
||||
peers = set(options['exact'])
|
||||
links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
|
||||
removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
|
||||
for target in peers - links:
|
||||
_, created = InstanceLink.objects.get_or_create(source=nodes[options['source']], target=nodes[target])
|
||||
if created:
|
||||
additions += 1
|
||||
|
||||
print(f"{additions} peer links added and {removals} deleted from the database.")
|
||||
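The new command above is the database-side half of mesh topology management: it only maintains InstanceLink rows. A sketch of typical invocations, with invented node names:

from django.core.management import call_command

call_command('register_peers', 'execnode1', peers=['hop1'])       # add an outbound link
call_command('register_peers', 'execnode1', disconnect=['hop1'])  # remove a link
call_command('register_peers', 'execnode1', exact=[])             # remove all outbound links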
@@ -17,13 +17,14 @@ class InstanceNotFound(Exception):


 class RegisterQueue:
-    def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None):
+    def __init__(self, queuename, instance_percent, inst_min, hostname_list, is_container_group=None, pod_spec_override=None):
         self.instance_not_found_err = None
         self.queuename = queuename
         self.instance_percent = instance_percent
         self.instance_min = inst_min
         self.hostname_list = hostname_list
         self.is_container_group = is_container_group
+        self.pod_spec_override = pod_spec_override

     def get_create_update_instance_group(self):
         created = False
@@ -36,10 +37,14 @@ class RegisterQueue:
             ig.policy_instance_minimum = self.instance_min
             changed = True

-        if self.is_container_group:
+        if self.is_container_group and (ig.is_container_group != self.is_container_group):
             ig.is_container_group = self.is_container_group
             changed = True

+        if self.pod_spec_override and (ig.pod_spec_override != self.pod_spec_override):
+            ig.pod_spec_override = self.pod_spec_override
+            changed = True
+
         if changed:
             ig.save()

@@ -48,14 +53,14 @@ class RegisterQueue:
     def add_instances_to_group(self, ig):
         changed = False

-        instance_list_unique = set([x.strip() for x in self.hostname_list if x])
+        instance_list_unique = {x for x in (x.strip() for x in self.hostname_list) if x}
        instances = []
         for inst_name in instance_list_unique:
-            instance = Instance.objects.filter(hostname=inst_name)
+            instance = Instance.objects.filter(hostname=inst_name).exclude(node_type='hop')
             if instance.exists():
                 instances.append(instance[0])
             else:
-                raise InstanceNotFound("Instance does not exist: {}".format(inst_name), changed)
+                raise InstanceNotFound("Instance does not exist or cannot run jobs: {}".format(inst_name), changed)

         ig.instances.add(*instances)
@@ -179,15 +179,13 @@ class InstanceManager(models.Manager):
         else:
             registered = self.register(ip_address=pod_ip, uuid=settings.SYSTEM_UUID)
             RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
-            RegisterQueue(settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True).register()
+            RegisterQueue(
+                settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
+            ).register()
             return registered
         else:
             return (False, self.me())

     def active_count(self):
         """Return count of active Tower nodes for licensing."""
         return self.all().count()


 class InstanceGroupManager(models.Manager):
     """A custom manager class for the Instance model.
@@ -245,7 +243,13 @@ class InstanceGroupManager(models.Manager):
         for t in tasks:
             # TODO: dock capacity for isolated job management tasks running in queue
             impact = t.task_impact
-            if t.status == 'waiting' or not t.execution_node:
+            control_groups = []
+            if t.controller_node:
+                control_groups = instance_ig_mapping.get(t.controller_node, [])
+                if not control_groups:
+                    logger.warn(f"No instance group found for {t.controller_node}, capacity consumed may be innaccurate.")
+
+            if t.status == 'waiting' or (not t.execution_node and not t.is_container_group_task):
                 # Subtract capacity from any peer groups that share instances
                 if not t.instance_group:
                     impacted_groups = []
@@ -262,6 +266,12 @@ class InstanceGroupManager(models.Manager):
                     graph[group_name][f'consumed_{capacity_type}_capacity'] += impact
                     if breakdown:
                         graph[group_name]['committed_capacity'] += impact
+                for group_name in control_groups:
+                    if group_name not in graph:
+                        self.zero_out_group(graph, group_name, breakdown)
+                    graph[group_name][f'consumed_control_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
+                    if breakdown:
+                        graph[group_name]['committed_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
             elif t.status == 'running':
                 # Subtract capacity from all groups that contain the instance
                 if t.execution_node not in instance_ig_mapping:
@@ -273,6 +283,7 @@ class InstanceGroupManager(models.Manager):
                     impacted_groups = []
                 else:
                     impacted_groups = instance_ig_mapping[t.execution_node]
+
                 for group_name in impacted_groups:
                     if group_name not in graph:
                         self.zero_out_group(graph, group_name, breakdown)
@@ -281,6 +292,12 @@ class InstanceGroupManager(models.Manager):
                     graph[group_name][f'consumed_{capacity_type}_capacity'] += impact
                     if breakdown:
                         graph[group_name]['running_capacity'] += impact
+                for group_name in control_groups:
+                    if group_name not in graph:
+                        self.zero_out_group(graph, group_name, breakdown)
+                    graph[group_name][f'consumed_control_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
+                    if breakdown:
+                        graph[group_name]['running_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
             else:
                 logger.error('Programming error, %s not in ["running", "waiting"]', t.log_format)
         return graph
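A hedged worked example of the accounting above, with invented numbers:

# Per task in the loop: with t.task_impact = 3 and AWX_CONTROL_NODE_TASK_IMPACT = 5,
# a running job whose execution node is in group 'default' and whose controller
# node is in group 'controlplane' would yield:
#   graph['default']['consumed_execution_capacity']     += 3
#   graph['controlplane']['consumed_control_capacity']  += 5
# so control overhead is now charged separately from execution impact.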
@@ -180,11 +180,7 @@ class URLModificationMiddleware(MiddlewareMixin):
         return '/'.join(url_units)

     def process_request(self, request):
-        if hasattr(request, 'environ') and 'REQUEST_URI' in request.environ:
-            old_path = urllib.parse.urlsplit(request.environ['REQUEST_URI']).path
-            old_path = old_path[request.path.find(request.path_info) :]
-        else:
-            old_path = request.path_info
+        old_path = request.path_info
         new_path = self._convert_named_url(old_path)
         if request.path_info != new_path:
             request.environ['awx.named_url_rewritten'] = request.path
@@ -9,12 +9,6 @@ def remove_iso_instances(apps, schema_editor):
     Instance.objects.filter(rampart_groups__controller__isnull=False).delete()


-def remove_iso_groups(apps, schema_editor):
-    InstanceGroup = apps.get_model('main', 'InstanceGroup')
-    with transaction.atomic():
-        InstanceGroup.objects.filter(controller__isnull=False).delete()
-
-
 class Migration(migrations.Migration):
     atomic = False

@@ -24,7 +18,6 @@ class Migration(migrations.Migration):

     operations = [
         migrations.RunPython(remove_iso_instances),
-        migrations.RunPython(remove_iso_groups),
         migrations.RemoveField(
             model_name='instance',
             name='last_isolated_check',
awx/main/migrations/0156_capture_mesh_topology.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+# Generated by Django 2.2.20 on 2021-12-17 19:26
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0155_improved_health_check'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='instance',
+            name='node_type',
+            field=models.CharField(
+                choices=[
+                    ('control', 'Control plane node'),
+                    ('execution', 'Execution plane node'),
+                    ('hybrid', 'Controller and execution'),
+                    ('hop', 'Message-passing node, no execution capability'),
+                ],
+                default='hybrid',
+                max_length=16,
+            ),
+        ),
+        migrations.CreateModel(
+            name='InstanceLink',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='main.Instance')),
+                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reverse_peers', to='main.Instance')),
+            ],
+            options={
+                'unique_together': {('source', 'target')},
+            },
+        ),
+        migrations.AddField(
+            model_name='instance',
+            name='peers',
+            field=models.ManyToManyField(through='main.InstanceLink', to='main.Instance'),
+        ),
+    ]
awx/main/migrations/0157_inventory_labels.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Generated by Django 2.2.20 on 2022-01-18 16:46
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0156_capture_mesh_topology'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='inventory',
+            name='labels',
+            field=models.ManyToManyField(blank=True, help_text='Labels associated with this inventory.', related_name='inventory_labels', to='main.Label'),
+        ),
+    ]
awx/main/migrations/0158_make_instance_cpu_decimal.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+# Generated by Django 2.2.24 on 2022-02-14 17:37
+
+from decimal import Decimal
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0157_inventory_labels'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='instance',
+            name='cpu',
+            field=models.DecimalField(decimal_places=1, default=Decimal('0'), editable=False, max_digits=4),
+        ),
+    ]
@@ -47,6 +47,7 @@ from awx.main.models.execution_environments import ExecutionEnvironment  # noqa
 from awx.main.models.activity_stream import ActivityStream  # noqa
 from awx.main.models.ha import (  # noqa
     Instance,
+    InstanceLink,
     InstanceGroup,
     TowerScheduleState,
 )
@@ -201,6 +202,8 @@ activity_stream_registrar.connect(Organization)
 activity_stream_registrar.connect(Inventory)
 activity_stream_registrar.connect(Host)
 activity_stream_registrar.connect(Group)
+activity_stream_registrar.connect(Instance)
+activity_stream_registrar.connect(InstanceGroup)
 activity_stream_registrar.connect(InventorySource)
 # activity_stream_registrar.connect(InventoryUpdate)
 activity_stream_registrar.connect(Credential)
@@ -144,7 +144,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):

     @classmethod
     def _get_task_class(cls):
-        from awx.main.tasks import RunAdHocCommand
+        from awx.main.tasks.jobs import RunAdHocCommand

         return RunAdHocCommand

@@ -160,9 +160,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):

     @property
     def notification_templates(self):
-        all_orgs = set()
-        for h in self.hosts.all():
-            all_orgs.add(h.inventory.organization)
+        all_orgs = {h.inventory.organization for h in self.hosts.all()}
         active_templates = dict(error=set(), success=set(), started=set())
         base_notification_templates = NotificationTemplate.objects
         for org in all_orgs:
@@ -388,7 +388,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
             job.get_event_queryset().filter(uuid__in=failed).update(failed=True)

         # send success/failure notifications when we've finished handling the playbook_on_stats event
-        from awx.main.tasks import handle_success_and_failure_notifications  # circular import
+        from awx.main.tasks.system import handle_success_and_failure_notifications  # circular import

         def _send_notifications():
             handle_success_and_failure_notifications.apply_async([job.id])
@@ -541,8 +541,7 @@ class JobEvent(BasePlaybookEvent):
             return
         job = self.job

-        from awx.main.models import Host, JobHostSummary  # circular import
-        from awx.main.models import Host, JobHostSummary, HostMetric
+        from awx.main.models import Host, JobHostSummary, HostMetric  # circular import

         all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
         existing_host_ids = set(h.id for h in all_hosts)
@@ -20,6 +20,7 @@ from awx import __version__ as awx_application_version
 from awx.api.versioning import reverse
 from awx.main.managers import InstanceManager, InstanceGroupManager, UUID_DEFAULT
 from awx.main.fields import JSONField
+from awx.main.constants import JOB_FOLDER_PREFIX
 from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
 from awx.main.models.unified_jobs import UnifiedJob
 from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity
@@ -28,7 +29,7 @@ from awx.main.models.mixins import RelatedJobsMixin
 # ansible-runner
 from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes

-__all__ = ('Instance', 'InstanceGroup', 'TowerScheduleState')
+__all__ = ('Instance', 'InstanceGroup', 'InstanceLink', 'TowerScheduleState')

 logger = logging.getLogger('awx.main.models.ha')
@@ -53,6 +54,14 @@ class HasPolicyEditsMixin(HasEditsMixin):
         return self._values_have_edits(new_values)


+class InstanceLink(BaseModel):
+    source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+')
+    target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers')
+
+    class Meta:
+        unique_together = ('source', 'target')
+
+
 class Instance(HasPolicyEditsMixin, BaseModel):
     """A model representing an AWX instance running against this database."""
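A sketch of how the new directed-link model can be queried from the ORM. Hostnames are invented for the example:

from awx.main.models import Instance, InstanceLink

src = Instance.objects.get(hostname='execnode1')
tgt = Instance.objects.get(hostname='hop1')
InstanceLink.objects.get_or_create(source=src, target=tgt)  # one directed edge
outbound = src.peers.values_list('hostname', flat=True)     # nodes src links to
inbound = InstanceLink.objects.filter(target=src).values_list('source__hostname', flat=True)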
@@ -73,8 +82,10 @@ class Instance(HasPolicyEditsMixin, BaseModel):
     modified = models.DateTimeField(auto_now=True)
     # Fields defined in health check or heartbeat
     version = models.CharField(max_length=120, blank=True)
-    cpu = models.IntegerField(
-        default=0,
+    cpu = models.DecimalField(
+        default=Decimal(0.0),
+        max_digits=4,
+        decimal_places=1,
         editable=False,
     )
     memory = models.BigIntegerField(
@@ -115,9 +126,16 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         default=0,
         editable=False,
     )
-    NODE_TYPE_CHOICES = [("control", "Control plane node"), ("execution", "Execution plane node"), ("hybrid", "Controller and execution")]
+    NODE_TYPE_CHOICES = [
+        ("control", "Control plane node"),
+        ("execution", "Execution plane node"),
+        ("hybrid", "Controller and execution"),
+        ("hop", "Message-passing node, no execution capability"),
+    ]
     node_type = models.CharField(default='hybrid', choices=NODE_TYPE_CHOICES, max_length=16)

+    peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'))
+
     class Meta:
         app_label = 'main'
         ordering = ("hostname",)
@@ -129,7 +147,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):

     @property
     def consumed_capacity(self):
-        return sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')))
+        capacity_consumed = 0
+        if self.node_type in ('hybrid', 'execution'):
+            capacity_consumed += sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')))
+        if self.node_type in ('hybrid', 'control'):
+            capacity_consumed += sum(
+                settings.AWX_CONTROL_NODE_TASK_IMPACT for x in UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting'))
+            )
+        return capacity_consumed

     @property
     def remaining_capacity(self):
@@ -155,13 +180,31 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             Instance.objects.filter(enabled=True, capacity__gt=0).filter(node_type__in=['control', 'hybrid']).values_list('hostname', flat=True)
         )

+    def get_cleanup_task_kwargs(self, **kwargs):
+        """
+        Produce options to use for the command: ansible-runner worker cleanup
+        returns a dict that is passed to the python interface for the runner method corresponding to that command
+        any kwargs will override that key=value combination in the returned dict
+        """
+        vargs = dict()
+        if settings.AWX_CLEANUP_PATHS:
+            vargs['file_pattern'] = '/tmp/{}*'.format(JOB_FOLDER_PREFIX % '*')
+        vargs.update(kwargs)
+        if 'exclude_strings' not in vargs and vargs.get('file_pattern'):
+            active_pks = list(UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')).values_list('pk', flat=True))
+            if active_pks:
+                vargs['exclude_strings'] = [JOB_FOLDER_PREFIX % job_id for job_id in active_pks]
+        if 'remove_images' in vargs or 'image_prune' in vargs:
+            vargs.setdefault('process_isolation_executable', 'podman')
+        return vargs
+
     def is_lost(self, ref_time=None):
         if self.last_seen is None:
             return True
         if ref_time is None:
             ref_time = now()
         grace_period = settings.CLUSTER_NODE_HEARTBEAT_PERIOD * 2
-        if self.node_type == 'execution':
+        if self.node_type in ('execution', 'hop'):
             grace_period += settings.RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD
         return self.last_seen < ref_time - timedelta(seconds=grace_period)
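The grace-period arithmetic in is_lost is simple; a sketch with illustrative numbers (the real values come from settings):

heartbeat = 60                       # e.g. CLUSTER_NODE_HEARTBEAT_PERIOD
advert = 60                          # e.g. RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD
grace_control = heartbeat * 2        # control / hybrid nodes: lost after 120s of silence
grace_mesh = heartbeat * 2 + advert  # execution / hop nodes now get 180s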
@@ -181,7 +224,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):

     def set_capacity_value(self):
         """Sets capacity according to capacity adjustment rule (no save)"""
-        if self.enabled:
+        if self.enabled and self.node_type != 'hop':
             lower_cap = min(self.mem_capacity, self.cpu_capacity)
             higher_cap = max(self.mem_capacity, self.cpu_capacity)
             self.capacity = lower_cap + (higher_cap - lower_cap) * self.capacity_adjustment
@@ -229,7 +272,11 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             self.mark_offline(perform_save=False, errors=errors)
             update_fields.extend(['cpu_capacity', 'mem_capacity', 'capacity', 'errors'])

-        self.save(update_fields=update_fields)
+        # disabling activity stream will avoid extra queries, which is important for heatbeat actions
+        from awx.main.signals import disable_activity_stream
+
+        with disable_activity_stream():
+            self.save(update_fields=update_fields)

     def local_health_check(self):
         """Only call this method on the instance that this record represents"""
@@ -286,7 +333,7 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):

     @property
     def capacity(self):
-        return sum([inst.capacity for inst in self.instances.all()])
+        return sum(inst.capacity for inst in self.instances.all())

     @property
     def jobs_running(self):
@@ -307,15 +354,21 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
         app_label = 'main'

     @staticmethod
-    def fit_task_to_most_remaining_capacity_instance(task, instances):
+    def fit_task_to_most_remaining_capacity_instance(task, instances, impact=None, capacity_type=None, add_hybrid_control_cost=False):
+        impact = impact if impact else task.task_impact
+        capacity_type = capacity_type if capacity_type else task.capacity_type
         instance_most_capacity = None
+        most_remaining_capacity = -1
         for i in instances:
-            if i.node_type not in (task.capacity_type, 'hybrid'):
+            if i.node_type not in (capacity_type, 'hybrid'):
                 continue
-            if i.remaining_capacity >= task.task_impact and (
-                instance_most_capacity is None or i.remaining_capacity > instance_most_capacity.remaining_capacity
-            ):
+            would_be_remaining = i.remaining_capacity - impact
+            # hybrid nodes _always_ control their own tasks
+            if add_hybrid_control_cost and i.node_type == 'hybrid':
+                would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
+            if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
                 instance_most_capacity = i
+                most_remaining_capacity = would_be_remaining
         return instance_most_capacity

     @staticmethod
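A minimal sketch of the selection rule above, using stand-in objects instead of real Instance rows (all numbers invented):

from types import SimpleNamespace

nodes = [
    SimpleNamespace(hostname='hybrid1', node_type='hybrid', remaining_capacity=20),
    SimpleNamespace(hostname='exec1', node_type='execution', remaining_capacity=18),
]
# With impact=10, AWX_CONTROL_NODE_TASK_IMPACT=5 and add_hybrid_control_cost=True:
#   hybrid1 would be left with 20 - 10 - 5 = 5 (it must also control the task)
#   exec1   would be left with 18 - 10     = 8
# so exec1 is picked despite the lower raw remaining capacity.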
@@ -342,7 +395,7 @@ class TowerScheduleState(SingletonModel):


 def schedule_policy_task():
-    from awx.main.tasks import apply_cluster_membership_policies
+    from awx.main.tasks.system import apply_cluster_membership_policies

     connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())

@@ -170,6 +170,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         editable=False,
         help_text=_('Flag indicating the inventory is being deleted.'),
     )
+    labels = models.ManyToManyField(
+        "Label",
+        blank=True,
+        related_name='inventory_labels',
+        help_text=_('Labels associated with this inventory.'),
+    )

     def get_absolute_url(self, request=None):
         return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
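With the field above, inventories can be labeled like templates; a short ORM sketch (objects fetched arbitrarily for illustration):

from awx.main.models import Inventory, Label

inv = Inventory.objects.first()
label = Label.objects.first()
if inv and label:
    inv.labels.add(label)                   # attach a label to an inventory
    Inventory.objects.filter(labels=label)  # inventories carrying this label
    label.inventory_labels.all()            # reverse lookup via related_name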
@@ -366,7 +372,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):

     @transaction.atomic
     def schedule_deletion(self, user_id=None):
-        from awx.main.tasks import delete_inventory
+        from awx.main.tasks.system import delete_inventory
         from awx.main.signals import activity_stream_delete

         if self.pending_deletion is True:
@@ -382,7 +388,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
         if self.kind == 'smart' and settings.AWX_REBUILD_SMART_MEMBERSHIP:

             def on_commit():
-                from awx.main.tasks import update_host_smart_inventory_memberships
+                from awx.main.tasks.system import update_host_smart_inventory_memberships

                 update_host_smart_inventory_memberships.delay()

@@ -551,7 +557,7 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
         if settings.AWX_REBUILD_SMART_MEMBERSHIP:

             def on_commit():
-                from awx.main.tasks import update_host_smart_inventory_memberships
+                from awx.main.tasks.system import update_host_smart_inventory_memberships

                 update_host_smart_inventory_memberships.delay()

@@ -631,7 +637,7 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
     @transaction.atomic
     def delete_recursive(self):
         from awx.main.utils import ignore_inventory_computed_fields
-        from awx.main.tasks import update_inventory_computed_fields
+        from awx.main.tasks.system import update_inventory_computed_fields
         from awx.main.signals import disable_activity_stream, activity_stream_delete

         def mark_actual():
@@ -1219,7 +1225,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,

     @classmethod
     def _get_task_class(cls):
-        from awx.main.tasks import RunInventoryUpdate
+        from awx.main.tasks.jobs import RunInventoryUpdate

         return RunInventoryUpdate
@@ -583,7 +583,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

     @classmethod
     def _get_task_class(cls):
-        from awx.main.tasks import RunJob
+        from awx.main.tasks.jobs import RunJob

         return RunJob

@@ -1213,7 +1213,7 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

     @classmethod
     def _get_task_class(cls):
-        from awx.main.tasks import RunSystemJob
+        from awx.main.tasks.jobs import RunSystemJob

         return RunSystemJob
@@ -9,6 +9,7 @@ from django.utils.translation import ugettext_lazy as _
 from awx.api.versioning import reverse
 from awx.main.models.base import CommonModelNameNotUnique
 from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
+from awx.main.models.inventory import Inventory

 __all__ = ('Label',)

@@ -35,15 +36,14 @@ class Label(CommonModelNameNotUnique):

     @staticmethod
     def get_orphaned_labels():
-        return Label.objects.filter(organization=None, unifiedjobtemplate_labels__isnull=True)
+        return Label.objects.filter(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)

     def is_detached(self):
-        return bool(Label.objects.filter(id=self.id, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True).count())
+        return Label.objects.filter(id=self.id, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True).exists()

     def is_candidate_for_detach(self):

         c1 = UnifiedJob.objects.filter(labels__in=[self.id]).count()
         c2 = UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count()
-        if (c1 + c2 - 1) == 0:
-            return True
-        else:
-            return False
+        c3 = Inventory.objects.filter(labels__in=[self.id]).count()
+        return (c1 + c2 + c3 - 1) == 0
@@ -508,7 +508,7 @@ class JobNotificationMixin(object):
         return (msg, body)

     def send_notification_templates(self, status):
-        from awx.main.tasks import send_notifications  # avoid circular import
+        from awx.main.tasks.system import send_notifications  # avoid circular import

         if status not in ['running', 'succeeded', 'failed']:
             raise ValueError(_("status must be either running, succeeded or failed"))
@@ -118,7 +118,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
         from awx.main.models import Credential

         public_galaxy_credential = Credential.objects.filter(managed=True, name='Ansible Galaxy').first()
-        if public_galaxy_credential not in self.galaxy_credentials.all():
+        if public_galaxy_credential is not None and public_galaxy_credential not in self.galaxy_credentials.all():
             self.galaxy_credentials.add(public_galaxy_credential)
@@ -471,7 +471,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
         r = super(Project, self).delete(*args, **kwargs)
         for path_to_delete in paths_to_delete:
             if self.scm_type and path_to_delete:  # non-manual, concrete path
-                from awx.main.tasks import delete_project_files
+                from awx.main.tasks.system import delete_project_files

                 delete_project_files.delay(path_to_delete)
         return r
@@ -532,7 +532,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage

     @classmethod
     def _get_task_class(cls):
-        from awx.main.tasks import RunProjectUpdate
+        from awx.main.tasks.jobs import RunProjectUpdate

         return RunProjectUpdate
@@ -613,26 +613,6 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
     def get_notification_friendly_name(self):
         return "Project Update"

-    @property
-    def preferred_instance_groups(self):
-        '''
-        Project updates should pretty much always run on the control plane
-        however, we are not yet saying no to custom groupings within the control plane
-        Thus, we return custom groups and then unconditionally add the control plane
-        '''
-        if self.organization is not None:
-            organization_groups = [x for x in self.organization.instance_groups.all()]
-        else:
-            organization_groups = []
-        template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
-        selected_groups = template_groups + organization_groups
-
-        controlplane_ig = self.control_plane_instance_group
-        if controlplane_ig and controlplane_ig[0] and controlplane_ig[0] not in selected_groups:
-            selected_groups += controlplane_ig
-
-        return selected_groups
-
     def save(self, *args, **kwargs):
         added_update_fields = []
         if not self.job_tags:
@@ -1046,7 +1046,7 @@ class UnifiedJob(
         fd = tempfile.NamedTemporaryFile(
             mode='w', prefix='{}-{}-'.format(self.model_to_str(), self.pk), suffix='.out', dir=settings.JOBOUTPUT_ROOT, encoding='utf-8'
         )
-        from awx.main.tasks import purge_old_stdout_files  # circular import
+        from awx.main.tasks.system import purge_old_stdout_files  # circular import

         purge_old_stdout_files.apply_async()
@@ -1497,7 +1497,12 @@ class UnifiedJob(
         return False

     def log_lifecycle(self, state, blocked_by=None):
-        extra = {'type': self._meta.model_name, 'task_id': self.id, 'state': state}
+        extra = {
+            'type': self._meta.model_name,
+            'task_id': self.id,
+            'state': state,
+            'work_unit_id': self.work_unit_id,
+        }
         if self.unified_job_template:
             extra["template_name"] = self.unified_job_template.name
         if state == "blocked" and blocked_by:
@@ -1506,6 +1511,11 @@ class UnifiedJob(
             extra["blocked_by"] = blocked_by_msg
         else:
             msg = f"{self._meta.model_name}-{self.id} {state.replace('_', ' ')}"

+        if state == "controller_node_chosen":
+            extra["controller_node"] = self.controller_node or "NOT_SET"
+        elif state == "execution_node_chosen":
+            extra["execution_node"] = self.execution_node or "NOT_SET"
         logger_job_lifecycle.debug(msg, extra=extra)

     @property
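For illustration, the enriched lifecycle log payload might carry values like the following (all invented):

# extra = {
#     'type': 'job', 'task_id': 123, 'state': 'execution_node_chosen',
#     'work_unit_id': 'AbCdEf', 'execution_node': 'execnode1',
# }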
@@ -813,7 +813,7 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
         return True

     def send_approval_notification(self, approval_status):
-        from awx.main.tasks import send_notifications  # avoid circular import
+        from awx.main.tasks.system import send_notifications  # avoid circular import

         if self.workflow_job_template is None:
             return
@@ -9,6 +9,7 @@ from django.utils.encoding import smart_text
 from django.utils.translation import ugettext_lazy as _

 from awx.main.notifications.base import AWXBaseEmailBackend
+from awx.main.utils import get_awx_http_client_headers
 from awx.main.notifications.custom_notification_base import CustomNotificationBase

 logger = logging.getLogger('awx.main.notifications.rocketchat_backend')
@@ -38,7 +39,9 @@ class RocketChatBackend(AWXBaseEmailBackend, CustomNotificationBase):
             if optvalue is not None:
                 payload[optval] = optvalue.strip()

-        r = requests.post("{}".format(m.recipients()[0]), data=json.dumps(payload), verify=(not self.rocketchat_no_verify_ssl))
+        r = requests.post(
+            "{}".format(m.recipients()[0]), data=json.dumps(payload), headers=get_awx_http_client_headers(), verify=(not self.rocketchat_no_verify_ssl)
+        )

         if r.status_code >= 400:
             logger.error(smart_text(_("Error sending notification rocket.chat: {}").format(r.status_code)))
@@ -2,7 +2,8 @@
 # All Rights Reserved.

 import logging
-from slackclient import SlackClient
+from slack_sdk import WebClient
+from slack_sdk.errors import SlackApiError

 from django.utils.encoding import smart_text
 from django.utils.translation import ugettext_lazy as _
@@ -28,23 +29,30 @@ class SlackBackend(AWXBaseEmailBackend, CustomNotificationBase):
         self.color = hex_color

     def send_messages(self, messages):
-        connection = SlackClient(self.token)
+        client = WebClient(self.token)
         sent_messages = 0
         for m in messages:
             try:
                 for r in m.recipients():
                     if r.startswith('#'):
                         r = r[1:]
+                    thread = None
+                    channel = r
+                    if ',' in r:
+                        channel, thread = r.split(',')
                     if self.color:
-                        ret = connection.api_call("chat.postMessage", channel=r, as_user=True, attachments=[{"color": self.color, "text": m.subject}])
+                        response = client.chat_postMessage(
+                            channel=channel, thread_ts=thread, as_user=True, attachments=[{"color": self.color, "text": m.subject}]
+                        )
                     else:
-                        ret = connection.api_call("chat.postMessage", channel=r, as_user=True, text=m.subject)
-                    logger.debug(ret)
-                    if ret['ok']:
+                        response = client.chat_postMessage(channel=channel, thread_ts=thread, as_user=True, text=m.subject)
+                    logger.debug(response)
+                    if response['ok']:
                         sent_messages += 1
                     else:
-                        raise RuntimeError("Slack Notification unable to send {}: {} ({})".format(r, m.subject, ret['error']))
-            except Exception as e:
+                        raise RuntimeError("Slack Notification unable to send {}: {} ({})".format(r, m.subject, response['error']))
+            except SlackApiError as e:
                 logger.error(smart_text(_("Exception sending messages: {}").format(e)))
                 if not self.fail_silently:
                     raise
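Per the parsing above, a recipient may now name a thread as "#channel,<thread_ts>". A sketch of the split (values invented):

r = 'alerts,1650000000.000100'
channel, thread = r.split(',') if ',' in r else (r, None)
# -> channel='alerts', thread='1650000000.000100', passed as thread_ts to chat_postMessage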
@@ -9,29 +9,12 @@ from kubernetes import client, config
 from django.utils.functional import cached_property
 from django.utils.translation import ugettext_lazy as _

-from awx.main.utils.common import parse_yaml_or_json
+from awx.main.utils.common import parse_yaml_or_json, deepmerge
 from awx.main.utils.execution_environments import get_default_pod_spec

 logger = logging.getLogger('awx.main.scheduler')


-def deepmerge(a, b):
-    """
-    Merge dict structures and return the result.
-
-    >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
-    >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
-    >>> import pprint; pprint.pprint(deepmerge(a, b))
-    {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
-    """
-    if isinstance(a, dict) and isinstance(b, dict):
-        return dict([(k, deepmerge(a.get(k), b.get(k))) for k in set(a.keys()).union(b.keys())])
-    elif b is None:
-        return a
-    else:
-        return b
-
-
 class PodManager(object):
     def __init__(self, task=None):
         self.task = task
@@ -183,7 +166,7 @@ class PodManager(object):
         pod_spec_override = {}
         if self.task and self.task.instance_group.pod_spec_override:
             pod_spec_override = parse_yaml_or_json(self.task.instance_group.pod_spec_override)
-        pod_spec = {**default_pod_spec, **pod_spec_override}
+        pod_spec = deepmerge(default_pod_spec, pod_spec_override)

         if self.task:
             pod_spec['metadata'] = deepmerge(
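The switch from {**a, **b} to deepmerge matters because a shallow merge replaces nested dicts wholesale. A sketch, with illustrative keys rather than a full pod spec:

from awx.main.utils.common import deepmerge

default = {'spec': {'serviceAccountName': 'default', 'automountServiceAccountToken': False}}
override = {'spec': {'serviceAccountName': 'custom'}}
merged = deepmerge(default, override)
# -> {'spec': {'serviceAccountName': 'custom', 'automountServiceAccountToken': False}}
# A shallow {**default, **override} would instead drop 'automountServiceAccountToken'.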
@@ -13,7 +13,6 @@ from django.db import transaction, connection
 from django.utils.translation import ugettext_lazy as _, gettext_noop
 from django.utils.timezone import now as tz_now
 from django.conf import settings
-from django.db.models import Q

 # AWX
 from awx.main.dispatch.reaper import reap_job
@@ -69,8 +68,9 @@ class TaskManager:
         """
         Init AFTER we know this instance of the task manager will run because the lock is acquired.
         """
-        instances = Instance.objects.filter(~Q(hostname=None), enabled=True)
+        instances = Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop')
         self.real_instances = {i.hostname: i for i in instances}
+        self.controlplane_ig = None

         instances_partial = [
             SimpleNamespace(
@@ -87,6 +87,8 @@ class TaskManager:
         instances_by_hostname = {i.hostname: i for i in instances_partial}

         for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
+            if rampart_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
+                self.controlplane_ig = rampart_group
             self.graph[rampart_group.name] = dict(
                 graph=DependencyGraph(),
                 execution_capacity=0,
@@ -239,7 +241,7 @@ class TaskManager:
         update_fields = ['status', 'start_args']
         workflow_job.status = new_status
         if reason:
-            logger.info(reason)
+            logger.info(f'Workflow job {workflow_job.id} failed due to reason: {reason}')
         workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
         update_fields.append('job_explanation')
         workflow_job.start_args = ''  # blank field to remove encrypted passwords
@@ -258,7 +260,7 @@ class TaskManager:
         if self.start_task_limit == 0:
             # schedule another run immediately after this task manager
             schedule_task_manager()
-        from awx.main.tasks import handle_work_error, handle_work_success
+        from awx.main.tasks.system import handle_work_error, handle_work_success

         dependent_tasks = dependent_tasks or []
@@ -284,34 +286,13 @@ class TaskManager:
             task.send_notification_templates('running')
             logger.debug('Transitioning %s to running status.', task.log_format)
             schedule_task_manager()
-        elif rampart_group.is_container_group:
-            task.instance_group = rampart_group
-            if task.capacity_type == 'execution':
-                # find one real, non-containerized instance with capacity to
-                # act as the controller for k8s API interaction
-                try:
-                    task.controller_node = Instance.choose_online_control_plane_node()
-                except IndexError:
-                    logger.warning("No control plane nodes available to run containerized job {}".format(task.log_format))
-                    return
-            else:
-                # project updates and system jobs don't *actually* run in pods, so
-                # just pick *any* non-containerized host and use it as the execution node
-                task.execution_node = Instance.choose_online_control_plane_node()
-                logger.debug('Submitting containerized {} to queue {}.'.format(task.log_format, task.execution_node))
+        # at this point we already have control/execution nodes selected for the following cases
         else:
             task.instance_group = rampart_group
-            task.execution_node = instance.hostname
-            if instance.node_type == 'execution':
-                try:
-                    task.controller_node = Instance.choose_online_control_plane_node()
-                except IndexError:
-                    logger.warning("No control plane nodes available to manage {}".format(task.log_format))
-                    return
-            else:
-                # control plane nodes will manage jobs locally for performance and resilience
-                task.controller_node = task.execution_node
-            logger.debug('Submitting job {} to queue {} controlled by {}.'.format(task.log_format, task.execution_node, task.controller_node))
+            execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
+            logger.debug(
+                f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {rampart_group.name}{execution_node_msg}.'
+            )
         with disable_activity_stream():
             task.celery_task_id = str(uuid.uuid4())
             task.save()
@@ -319,6 +300,13 @@ class TaskManager:

         if rampart_group is not None:
             self.consume_capacity(task, rampart_group.name, instance=instance)
+        if task.controller_node:
+            self.consume_capacity(
+                task,
+                settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME,
+                instance=self.real_instances[task.controller_node],
+                impact=settings.AWX_CONTROL_NODE_TASK_IMPACT,
+            )

         def post_commit():
             if task.status != 'failed' and type(task) is not WorkflowJob:
@@ -479,7 +467,7 @@ class TaskManager:
         return created_dependencies

     def process_pending_tasks(self, pending_tasks):
-        running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
+        running_workflow_templates = {wf.unified_job_template_id for wf in self.get_running_workflow_jobs()}
         tasks_to_update_job_explanation = []
         for task in pending_tasks:
             if self.start_task_limit <= 0:
@@ -493,9 +481,10 @@ class TaskManager:
                 task.job_explanation = job_explanation
                 tasks_to_update_job_explanation.append(task)
                 continue
-            preferred_instance_groups = task.preferred_instance_groups
+
             found_acceptable_queue = False
+            preferred_instance_groups = task.preferred_instance_groups
+
             if isinstance(task, WorkflowJob):
                 if task.unified_job_template_id in running_workflow_templates:
                     if not task.allow_simultaneous:
@@ -506,9 +495,36 @@ class TaskManager:
                     self.start_task(task, None, task.get_jobs_fail_chain(), None)
                     continue

+            # Determine if there is control capacity for the task
+            if task.capacity_type == 'control':
+                control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
+            else:
+                control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
+            control_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
+                task, self.graph[settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]['instances'], impact=control_impact, capacity_type='control'
+            )
+            if not control_instance:
+                self.task_needs_capacity(task, tasks_to_update_job_explanation)
+                logger.debug(f"Skipping task {task.log_format} in pending, not enough capacity left on controlplane to control new tasks")
+                continue
+
+            task.controller_node = control_instance.hostname
+
+            # All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
+            if task.capacity_type == 'control':
+                task.execution_node = control_instance.hostname
+                control_instance.remaining_capacity = max(0, control_instance.remaining_capacity - control_impact)
+                control_instance.jobs_running += 1
+                self.graph[settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]['graph'].add_job(task)
+                execution_instance = self.real_instances[control_instance.hostname]
+                self.start_task(task, self.controlplane_ig, task.get_jobs_fail_chain(), execution_instance)
+                found_acceptable_queue = True
+                continue
+
             for rampart_group in preferred_instance_groups:
-                if task.capacity_type == 'execution' and rampart_group.is_container_group:
-                    self.graph[rampart_group.name]['graph'].add_job(task)
+                if rampart_group.is_container_group:
+                    control_instance.jobs_running += 1
+                    self.graph[settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]['graph'].add_job(task)
                     self.start_task(task, rampart_group, task.get_jobs_fail_chain(), None)
                     found_acceptable_queue = True
                     break
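A hedged arithmetic sketch of the control-capacity gate above (numbers invented):

# With AWX_CONTROL_NODE_TASK_IMPACT = 5:
#   a 'control'-type task with task_impact = 2 needs control_impact = 2 + 5 = 7
#   free units on a control/hybrid node, since it both runs and controls there;
#   any other capacity_type only reserves the flat 5-unit control cost.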
@@ -517,28 +533,32 @@ class TaskManager:
                 if settings.IS_K8S and task.capacity_type == 'execution':
                     logger.debug("Skipping group {}, task cannot run on control plane".format(rampart_group.name))
                     continue

-                remaining_capacity = self.get_remaining_capacity(rampart_group.name, capacity_type=task.capacity_type)
-                if task.task_impact > 0 and remaining_capacity <= 0:
-                    logger.debug("Skipping group {}, remaining_capacity {} <= 0".format(rampart_group.name, remaining_capacity))
-                    continue
-
+                # at this point we know the instance group is NOT a container group
+                # because if it was, it would have started the task and broke out of the loop.
                 execution_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
-                    task, self.graph[rampart_group.name]['instances']
+                    task, self.graph[rampart_group.name]['instances'], add_hybrid_control_cost=True
                 ) or InstanceGroup.find_largest_idle_instance(self.graph[rampart_group.name]['instances'], capacity_type=task.capacity_type)

-                if execution_instance or rampart_group.is_container_group:
-                    if not rampart_group.is_container_group:
-                        execution_instance.remaining_capacity = max(0, execution_instance.remaining_capacity - task.task_impact)
-                        execution_instance.jobs_running += 1
-                        logger.debug(
-                            "Starting {} in group {} instance {} (remaining_capacity={})".format(
-                                task.log_format, rampart_group.name, execution_instance.hostname, remaining_capacity
-                            )
-                        )
-                    if execution_instance:
-                        execution_instance = self.real_instances[execution_instance.hostname]
+                if execution_instance:
+                    task.execution_node = execution_instance.hostname
+                    # If our execution instance is a hybrid, prefer to do control tasks there as well.
+                    if execution_instance.node_type == 'hybrid':
+                        control_instance = execution_instance
+                        task.controller_node = execution_instance.hostname
+
+                    control_instance.remaining_capacity = max(0, control_instance.remaining_capacity - settings.AWX_CONTROL_NODE_TASK_IMPACT)
+                    task.log_lifecycle("controller_node_chosen")
+                    if control_instance != execution_instance:
+                        control_instance.jobs_running += 1
+                    execution_instance.remaining_capacity = max(0, execution_instance.remaining_capacity - task.task_impact)
+                    execution_instance.jobs_running += 1
+                    task.log_lifecycle("execution_node_chosen")
+                    logger.debug(
+                        "Starting {} in group {} instance {} (remaining_capacity={})".format(
+                            task.log_format, rampart_group.name, execution_instance.hostname, execution_instance.remaining_capacity
+                        )
+                    )
+                    execution_instance = self.real_instances[execution_instance.hostname]
                     self.graph[rampart_group.name]['graph'].add_job(task)
                     self.start_task(task, rampart_group, task.get_jobs_fail_chain(), execution_instance)
                     found_acceptable_queue = True
@@ -550,18 +570,21 @@ class TaskManager:
                         )
                     )
             if not found_acceptable_queue:
-                task.log_lifecycle("needs_capacity")
-                job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.")
-                if task.job_explanation != job_explanation:
-                    if task.created < (tz_now() - self.time_delta_job_explanation):
-                        # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.
-                        # Therefore we should only update the job_explanation after some time has elapsed to
-                        # prevent excessive task saves.
-                        task.job_explanation = job_explanation
-                        tasks_to_update_job_explanation.append(task)
-                logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
+                self.task_needs_capacity(task, tasks_to_update_job_explanation)
         UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation, ['job_explanation'])

+    def task_needs_capacity(self, task, tasks_to_update_job_explanation):
+        task.log_lifecycle("needs_capacity")
+        job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.")
+        if task.job_explanation != job_explanation:
+            if task.created < (tz_now() - self.time_delta_job_explanation):
+                # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.
+                # Therefore we should only update the job_explanation after some time has elapsed to
+                # prevent excessive task saves.
+                task.job_explanation = job_explanation
+                tasks_to_update_job_explanation.append(task)
+        logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
+
     def timeout_approval_node(self):
         workflow_approvals = WorkflowApproval.objects.filter(status='pending')
         now = tz_now()
@@ -588,7 +611,7 @@ class TaskManager:
         # elsewhere
         for j in UnifiedJob.objects.filter(
             status__in=['pending', 'waiting', 'running'],
-        ).exclude(execution_node__in=Instance.objects.values_list('hostname', flat=True)):
+        ).exclude(execution_node__in=Instance.objects.exclude(node_type='hop').values_list('hostname', flat=True)):
             if j.execution_node and not j.is_container_group_task:
                 logger.error(f'{j.execution_node} is not a registered instance; reaping {j.log_format}')
                 reap_job(j, 'failed')
@@ -596,16 +619,17 @@ class TaskManager:
     def calculate_capacity_consumed(self, tasks):
         self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)

-    def consume_capacity(self, task, instance_group, instance=None):
+    def consume_capacity(self, task, instance_group, instance=None, impact=None):
+        impact = impact if impact else task.task_impact
         logger.debug(
             '{} consumed {} capacity units from {} with prior total of {}'.format(
-                task.log_format, task.task_impact, instance_group, self.graph[instance_group]['consumed_capacity']
+                task.log_format, impact, instance_group, self.graph[instance_group]['consumed_capacity']
             )
         )
-        self.graph[instance_group]['consumed_capacity'] += task.task_impact
+        self.graph[instance_group]['consumed_capacity'] += impact
         for capacity_type in ('control', 'execution'):
             if instance is None or instance.node_type in ('hybrid', capacity_type):
-                self.graph[instance_group][f'consumed_{capacity_type}_capacity'] += task.task_impact
+                self.graph[instance_group][f'consumed_{capacity_type}_capacity'] += impact

     def get_remaining_capacity(self, instance_group, capacity_type='execution'):
         return self.graph[instance_group][f'{capacity_type}_capacity'] - self.graph[instance_group][f'consumed_{capacity_type}_capacity']
@@ -34,7 +34,6 @@ from awx.main.models import (
     ExecutionEnvironment,
     Group,
     Host,
-    InstanceGroup,
     Inventory,
     InventorySource,
     Job,
@@ -58,7 +57,7 @@ from awx.main.models import (
 from awx.main.constants import CENSOR_VALUE
 from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
 from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
-from awx.main.tasks import update_inventory_computed_fields
+from awx.main.tasks.system import update_inventory_computed_fields, handle_removed_image
 from awx.main.fields import (
     is_implicit_parent,
     update_role_parentage_for_instance,
@@ -377,6 +376,7 @@ def model_serializer_mapping():
         models.Inventory: serializers.InventorySerializer,
         models.Host: serializers.HostSerializer,
         models.Group: serializers.GroupSerializer,
+        models.Instance: serializers.InstanceSerializer,
         models.InstanceGroup: serializers.InstanceGroupSerializer,
         models.InventorySource: serializers.InventorySourceSerializer,
         models.Credential: serializers.CredentialSerializer,
@@ -624,10 +624,26 @@ def deny_orphaned_approvals(sender, instance, **kwargs):
             approval.deny()


+def _handle_image_cleanup(removed_image, pk):
+    if (not removed_image) or ExecutionEnvironment.objects.filter(image=removed_image).exclude(pk=pk).exists():
+        return  # if other EE objects reference the tag, then do not purge it
+    handle_removed_image.delay(remove_images=[removed_image])
+
+
 @receiver(pre_delete, sender=ExecutionEnvironment)
 def remove_default_ee(sender, instance, **kwargs):
     if instance.id == getattr(settings.DEFAULT_EXECUTION_ENVIRONMENT, 'id', None):
         settings.DEFAULT_EXECUTION_ENVIRONMENT = None
+    _handle_image_cleanup(instance.image, instance.pk)
+
+
+@receiver(post_save, sender=ExecutionEnvironment)
+def remove_stale_image(sender, instance, created, **kwargs):
+    if created:
+        return
+    removed_image = instance._prior_values_store.get('image')
+    if removed_image and removed_image != instance.image:
+        _handle_image_cleanup(removed_image, instance.pk)


 @receiver(post_save, sender=Session)
@@ -659,9 +675,3 @@ def create_access_token_user_if_missing(sender, **kwargs):
         post_save.disconnect(create_access_token_user_if_missing, sender=OAuth2AccessToken)
         obj.save()
         post_save.connect(create_access_token_user_if_missing, sender=OAuth2AccessToken)
-
-
-# Connect the Instance Group to Activity Stream receivers.
-post_save.connect(activity_stream_create, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_create")
-pre_save.connect(activity_stream_update, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_update")
-pre_delete.connect(activity_stream_delete, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_delete")
awx/main/tasks/__init__.py (new file, empty)

awx/main/tasks/callback.py (new file, 257 lines)
@@ -0,0 +1,257 @@
import json
import time
import logging
from collections import deque
import os
import stat

# Django
from django.utils.timezone import now
from django.conf import settings
from django_guid.middleware import GuidMiddleware

# AWX
from awx.main.redact import UriCleaner
from awx.main.constants import MINIMAL_EVENTS
from awx.main.utils.update_model import update_model
from awx.main.queue import CallbackQueueDispatcher

logger = logging.getLogger('awx.main.tasks.callback')


class RunnerCallback:
    event_data_key = 'job_id'

    def __init__(self, model=None):
        self.parent_workflow_job_id = None
        self.host_map = {}
        self.guid = GuidMiddleware.get_guid()
        self.job_created = None
        self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)
        self.dispatcher = CallbackQueueDispatcher()
        self.safe_env = {}
        self.event_ct = 0
        self.model = model

    def update_model(self, pk, _attempt=0, **updates):
        return update_model(self.model, pk, _attempt=_attempt, **updates)

    def event_handler(self, event_data):
        #
        # ⚠️ D-D-D-DANGER ZONE ⚠️
        # This method is called once for *every event* emitted by Ansible
        # Runner as a playbook runs. That means that changes to the code in
        # this method are _very_ likely to introduce performance regressions.
        #
        # Even if this function is made on average .05s slower, it can have
        # devastating performance implications for playbooks that emit
        # tens or hundreds of thousands of events.
        #
        # Proceed with caution!
        #
        """
        Ansible runner puts a parent_uuid on each event, no matter what the type.
        AWX only saves the parent_uuid if the event is for a Job.
        """
        # cache end_line locally for RunInventoryUpdate tasks
        # which generate job events from two 'streams':
        # ansible-inventory and the awx.main.commands.inventory_import
        # logger

        if event_data.get(self.event_data_key, None):
            if self.event_data_key != 'job_id':
                event_data.pop('parent_uuid', None)
        if self.parent_workflow_job_id:
            event_data['workflow_job_id'] = self.parent_workflow_job_id
        event_data['job_created'] = self.job_created
        if self.host_map:
            host = event_data.get('event_data', {}).get('host', '').strip()
            if host:
                event_data['host_name'] = host
                if host in self.host_map:
                    event_data['host_id'] = self.host_map[host]
            else:
                event_data['host_name'] = ''
                event_data['host_id'] = ''
            if event_data.get('event') == 'playbook_on_stats':
                event_data['host_map'] = self.host_map

        if isinstance(self, RunnerCallbackForProjectUpdate):
            # need a better way to have this check.
            # it's common for Ansible's SCM modules to print
            # error messages on failure that contain the plaintext
            # basic auth credentials (username + password)
            # it's also common for the nested event data itself (['res']['...'])
            # to contain unredacted text on failure
            # this is a _little_ expensive to filter
            # with regex, but project updates don't have many events,
            # so it *should* have a negligible performance impact
            task = event_data.get('event_data', {}).get('task_action')
            try:
                if task in ('git', 'svn'):
                    event_data_json = json.dumps(event_data)
                    event_data_json = UriCleaner.remove_sensitive(event_data_json)
                    event_data = json.loads(event_data_json)
            except json.JSONDecodeError:
                pass

        if 'event_data' in event_data:
            event_data['event_data']['guid'] = self.guid

        # To prevent overwhelming the broadcast queue, skip some websocket messages
        if self.recent_event_timings:
            cpu_time = time.time()
            first_window_time = self.recent_event_timings[0]
            last_window_time = self.recent_event_timings[-1]

            if event_data.get('event') in MINIMAL_EVENTS:
                should_emit = True  # always send some types like playbook_on_stats
            elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
                should_emit = False  # exclude events with no output
            else:
                should_emit = any(
                    [
                        # if the oldest websocket message in the window was sent over 1 second ago
                        cpu_time - first_window_time > 1.0,
                        # if the very last websocket message came in over 1/30 seconds ago
                        self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
                        # if the queue is not yet full
                        len(self.recent_event_timings) != self.recent_event_timings.maxlen,
                    ]
                )

            if should_emit:
                self.recent_event_timings.append(cpu_time)
            else:
                event_data.setdefault('event_data', {})
                event_data['skip_websocket_message'] = True

        elif self.recent_event_timings.maxlen:
            self.recent_event_timings.append(time.time())

        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)
        self.event_ct += 1

        '''
        Handle artifacts
        '''
        if event_data.get('event_data', {}).get('artifact_data', {}):
            self.instance.artifacts = event_data['event_data']['artifact_data']
            self.instance.save(update_fields=['artifacts'])

        return False

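    # Illustrative note (added commentary, not part of this commit): with the deque
    # bounded at maxlen=30 (assuming the default MAX_WEBSOCKET_EVENT_RATE), the
    # `should_emit` checks above approximate a cap of 30 websocket events per second.
    # For example, if the oldest of the 30 recorded timestamps is 2.0s old,
    # `cpu_time - first_window_time > 1.0` holds and the event is emitted; if the
    # newest timestamp is only 0.01s old, `30 * 0.01 = 0.3` is not > 1.0, so that
    # check alone would not emit, and the event is skipped unless the window is
    # not yet full.
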
    def cancel_callback(self):
        """
        Ansible runner callback to tell the job when/if it is canceled
        """
        unified_job_id = self.instance.pk
        self.instance.refresh_from_db()
        if not self.instance:
            logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
            return True
        if self.instance.cancel_flag or self.instance.status == 'canceled':
            cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
            if cancel_wait > 5:
                logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
            return True
        return False

    def finished_callback(self, runner_obj):
        """
        Ansible runner callback triggered on finished run
        """
        event_data = {
            'event': 'EOF',
            'final_counter': self.event_ct,
            'guid': self.guid,
        }
        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)

    def status_handler(self, status_data, runner_config):
        """
        Ansible runner callback triggered on status transition
        """
        if status_data['status'] == 'starting':
            job_env = dict(runner_config.env)
            '''
            Take the safe environment variables and overwrite
            '''
            for k, v in self.safe_env.items():
                if k in job_env:
                    job_env[k] = v
            from awx.main.signals import disable_activity_stream  # Circular import

            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
        elif status_data['status'] == 'failed':
            # For encrypted ssh_key_data, ansible-runner worker will open and write the
            # ssh_key_data to a named pipe. Then, once the podman container starts, ssh-agent will
            # read from this named pipe so that the key can be used in ansible-playbook.
            # Once the podman container exits, the named pipe is deleted.
            # However, if the podman container fails to start in the first place, e.g. the image
            # name is incorrect, then this pipe is not cleaned up. Eventually ansible-runner
            # processor will attempt to write artifacts to the private data dir via unstream_dir, requiring
            # that it open this named pipe. This leads to a hang. Thus, before any artifacts
            # are written by the processor, it's important to remove this ssh_key_data pipe.
            private_data_dir = self.instance.job_env.get('AWX_PRIVATE_DATA_DIR', None)
            if private_data_dir:
                key_data_file = os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'ssh_key_data')
                if os.path.exists(key_data_file) and stat.S_ISFIFO(os.stat(key_data_file).st_mode):
                    os.remove(key_data_file)
        elif status_data['status'] == 'error':
            result_traceback = status_data.get('result_traceback', None)
            if result_traceback:
                from awx.main.signals import disable_activity_stream  # Circular import

                with disable_activity_stream():
                    self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)


class RunnerCallbackForProjectUpdate(RunnerCallback):

    event_data_key = 'project_update_id'

    def __init__(self, *args, **kwargs):
        super(RunnerCallbackForProjectUpdate, self).__init__(*args, **kwargs)
        self.playbook_new_revision = None
        self.host_map = {}

    def event_handler(self, event_data):
        super_return_value = super(RunnerCallbackForProjectUpdate, self).event_handler(event_data)
        returned_data = event_data.get('event_data', {})
        if returned_data.get('task_action', '') == 'set_fact':
            returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
            if 'scm_version' in returned_facts:
                self.playbook_new_revision = returned_facts['scm_version']
        return super_return_value


class RunnerCallbackForInventoryUpdate(RunnerCallback):

    event_data_key = 'inventory_update_id'

    def __init__(self, *args, **kwargs):
        super(RunnerCallbackForInventoryUpdate, self).__init__(*args, **kwargs)
        self.end_line = 0

    def event_handler(self, event_data):
        self.end_line = event_data['end_line']

        return super(RunnerCallbackForInventoryUpdate, self).event_handler(event_data)


class RunnerCallbackForAdHocCommand(RunnerCallback):

    event_data_key = 'ad_hoc_command_id'

    def __init__(self, *args, **kwargs):
        super(RunnerCallbackForAdHocCommand, self).__init__(*args, **kwargs)
        self.host_map = {}


class RunnerCallbackForSystemJob(RunnerCallback):

    event_data_key = 'system_job_id'
File diff suppressed because it is too large
542  awx/main/tasks/receptor.py    Normal file
@@ -0,0 +1,542 @@
# Python
from base64 import b64encode
from collections import namedtuple
import concurrent.futures
from enum import Enum
import logging
import os
import shutil
import socket
import sys
import threading
import time
import yaml

# Django
from django.conf import settings

# Runner
import ansible_runner

# AWX
from awx.main.utils.execution_environments import get_default_pod_spec
from awx.main.exceptions import ReceptorNodeNotFound
from awx.main.utils.common import (
    deepmerge,
    parse_yaml_or_json,
    cleanup_new_process,
)

# Receptorctl
from receptorctl.socket_interface import ReceptorControl

logger = logging.getLogger('awx.main.tasks.receptor')
__RECEPTOR_CONF = '/etc/receptor/receptor.conf'
RECEPTOR_ACTIVE_STATES = ('Pending', 'Running')


class ReceptorConnectionType(Enum):
    DATAGRAM = 0
    STREAM = 1
    STREAMTLS = 2


def get_receptor_sockfile():
    with open(__RECEPTOR_CONF, 'r') as f:
        data = yaml.safe_load(f)
    for section in data:
        for entry_name, entry_data in section.items():
            if entry_name == 'control-service':
                if 'filename' in entry_data:
                    return entry_data['filename']
                else:
                    raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} control-service entry does not have a filename parameter')
    else:
        raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')


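# Illustrative sketch (added commentary, not from this commit): the parser above
# assumes receptor.conf is a YAML list of single-key sections, something like:
#
#   - node:
#       id: awx_1
#   - control-service:
#       service: control
#       filename: /var/run/receptor/receptor.sock
#
# With a file shaped like this, get_receptor_sockfile() would return
# '/var/run/receptor/receptor.sock'; the exact paths and node id are assumptions.

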
def get_tls_client(use_stream_tls=None):
    if not use_stream_tls:
        return None

    with open(__RECEPTOR_CONF, 'r') as f:
        data = yaml.safe_load(f)
    for section in data:
        for entry_name, entry_data in section.items():
            if entry_name == 'tls-client':
                if 'name' in entry_data:
                    return entry_data['name']
    return None


def get_receptor_ctl():
    receptor_sockfile = get_receptor_sockfile()
    try:
        return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(True))
    except RuntimeError:
        return ReceptorControl(receptor_sockfile)


def get_conn_type(node_name, receptor_ctl):
    all_nodes = receptor_ctl.simple_command("status").get('Advertisements', None)
    for node in all_nodes:
        if node.get('NodeID') == node_name:
            return ReceptorConnectionType(node.get('ConnType'))
    raise ReceptorNodeNotFound(f'Instance {node_name} is not in the receptor mesh')


def administrative_workunit_reaper(work_list=None):
    """
    This releases completed work units that were spawned by actions inside of this module;
    specifically, it should catch any completed work unit left by
     - worker_info
     - worker_cleanup
    These should ordinarily be released when the method finishes, but this is a
    cleanup of last resort, in case something went awry
    """
    receptor_ctl = get_receptor_ctl()
    if work_list is None:
        work_list = receptor_ctl.simple_command("work list")

    for unit_id, work_data in work_list.items():
        extra_data = work_data.get('ExtraData')
        if (extra_data is None) or (extra_data.get('RemoteWorkType') != 'ansible-runner'):
            continue  # if this is not ansible-runner work, we do not want to touch it
        params = extra_data.get('RemoteParams', {}).get('params')
        if not params:
            continue
        if not (params == '--worker-info' or params.startswith('cleanup')):
            continue  # if this is not a cleanup or health check, we do not want to touch it
        if work_data.get('StateName') in RECEPTOR_ACTIVE_STATES:
            continue  # do not want to touch active work units
        logger.info(f'Reaping orphaned work unit {unit_id} with params {params}')
        receptor_ctl.simple_command(f"work release {unit_id}")


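# Illustrative sketch (added commentary, not from this commit): a "work list"
# reply is keyed by unit id, and the fields consulted above would look roughly
# like the following (ids and states are hypothetical):
#
#   {
#       "qLL2JFNT": {
#           "StateName": "Succeeded",
#           "ExtraData": {
#               "RemoteWorkType": "ansible-runner",
#               "RemoteParams": {"params": "--worker-info"},
#           },
#       },
#   }
#
# Only completed ansible-runner units whose params are '--worker-info' or start
# with 'cleanup' get released here.

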
class RemoteJobError(RuntimeError):
    pass


def run_until_complete(node, timing_data=None, **kwargs):
    """
    Runs an ansible-runner work_type on remote node, waits until it completes, then returns stdout.
    """
    receptor_ctl = get_receptor_ctl()

    use_stream_tls = getattr(get_conn_type(node, receptor_ctl), 'name', None) == "STREAMTLS"
    kwargs.setdefault('tlsclient', get_tls_client(use_stream_tls))
    kwargs.setdefault('ttl', '20s')
    kwargs.setdefault('payload', '')

    transmit_start = time.time()
    sign_work = False if settings.IS_K8S else True
    result = receptor_ctl.submit_work(worktype='ansible-runner', node=node, signwork=sign_work, **kwargs)

    unit_id = result['unitid']
    run_start = time.time()
    if timing_data:
        timing_data['transmit_timing'] = run_start - transmit_start
    run_timing = 0.0
    stdout = ''

    try:

        resultfile = receptor_ctl.get_work_results(unit_id)

        while run_timing < 20.0:
            status = receptor_ctl.simple_command(f'work status {unit_id}')
            state_name = status.get('StateName')
            if state_name not in RECEPTOR_ACTIVE_STATES:
                break
            run_timing = time.time() - run_start
            time.sleep(0.5)
        else:
            raise RemoteJobError(f'Receptor job timeout on {node} after {run_timing} seconds, state remains in {state_name}')

        if timing_data:
            timing_data['run_timing'] = run_timing

        stdout = resultfile.read()
        stdout = str(stdout, encoding='utf-8')

    finally:

        if settings.RECEPTOR_RELEASE_WORK:
            res = receptor_ctl.simple_command(f"work release {unit_id}")
            if res != {'released': unit_id}:
                logger.warn(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')

        receptor_ctl.close()

    if state_name.lower() == 'failed':
        work_detail = status.get('Detail', '')
        if work_detail:
            raise RemoteJobError(f'Receptor error from {node}, detail:\n{work_detail}')
        else:
            raise RemoteJobError(f'Unknown ansible-runner error on node {node}, stdout:\n{stdout}')

    return stdout


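# Illustrative usage (added commentary, values hypothetical): this is how
# worker_info() below drives run_until_complete() above for a node health check.
#
#   timing = {}
#   out = run_until_complete(node='exec-node-1', timing_data=timing, params={"params": "--worker-info"})
#   # `out` holds the remote ansible-runner stdout; `timing` now carries
#   # 'transmit_timing' and 'run_timing' in seconds.

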
def worker_info(node_name, work_type='ansible-runner'):
    error_list = []
    data = {'errors': error_list, 'transmit_timing': 0.0}

    try:
        stdout = run_until_complete(node=node_name, timing_data=data, params={"params": "--worker-info"})

        yaml_stdout = stdout.strip()
        remote_data = {}
        try:
            remote_data = yaml.safe_load(yaml_stdout)
        except Exception as json_e:
            error_list.append(f'Failed to parse node {node_name} --worker-info output as YAML, error: {json_e}, data:\n{yaml_stdout}')

        if not isinstance(remote_data, dict):
            error_list.append(f'Remote node {node_name} --worker-info output is not a YAML dict, output:{stdout}')
        else:
            error_list.extend(remote_data.pop('errors', []))  # merge both error lists
            data.update(remote_data)

    except RemoteJobError as exc:
        details = exc.args[0]
        if 'unrecognized arguments: --worker-info' in details:
            error_list.append(f'Old version (2.0.1 or earlier) of ansible-runner on node {node_name} without --worker-info')
        else:
            error_list.append(details)

    except (ReceptorNodeNotFound, RuntimeError) as exc:
        error_list.append(str(exc))

    # If we have a connection error, missing keys would be a trivial consequence of that
    if not data['errors']:
        # see tasks.py usage of keys
        missing_keys = set(('runner_version', 'mem_in_bytes', 'cpu_count')) - set(data.keys())
        if missing_keys:
            data['errors'].append('Worker failed to return keys {}'.format(' '.join(missing_keys)))

    return data


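# Illustrative sketch of a healthy worker_info() result (added commentary,
# values hypothetical):
#
#   {
#       'errors': [],
#       'transmit_timing': 0.02,
#       'run_timing': 1.5,
#       'runner_version': '2.1.1',
#       'mem_in_bytes': 8589934592,
#       'cpu_count': 4,
#   }
#
# Parse or mesh failures land in 'errors' instead, and the three required keys
# are only checked when no errors were recorded.

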
def _convert_args_to_cli(vargs):
    """
    For the ansible-runner worker cleanup command,
    converts the dictionary (parsed argparse variables) used for the python interface
    into a list of CLI options, which has to be used on execution nodes.
    """
    args = ['cleanup']
    for option in ('exclude_strings', 'remove_images'):
        if vargs.get(option):
            args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))
    for option in ('file_pattern', 'image_prune', 'process_isolation_executable', 'grace_period'):
        if vargs.get(option) is True:
            args.append('--{}'.format(option.replace('_', '-')))
        elif vargs.get(option) not in (None, ''):
            args.append('--{}={}'.format(option.replace('_', '-'), vargs.get(option)))
    return args


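# Illustrative usage of _convert_args_to_cli above (added commentary, values
# hypothetical):
#
#   _convert_args_to_cli({'exclude_strings': ['awx_423_'], 'file_pattern': True, 'grace_period': 60})
#   # -> ['cleanup', '--exclude-strings=awx_423_', '--file-pattern', '--grace-period=60']
#
# worker_cleanup() below then joins this list into the remote "ansible-runner worker" command line.

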
def worker_cleanup(node_name, vargs, timeout=300.0):
    args = _convert_args_to_cli(vargs)

    remote_command = ' '.join(args)
    logger.debug(f'Running command over receptor mesh on {node_name}: ansible-runner worker {remote_command}')

    stdout = run_until_complete(node=node_name, params={"params": remote_command})

    return stdout


class TransmitterThread(threading.Thread):
    def run(self):
        self.exc = None

        try:
            super().run()
        except Exception:
            self.exc = sys.exc_info()


class AWXReceptorJob:
    def __init__(self, task, runner_params=None):
        self.task = task
        self.runner_params = runner_params
        self.unit_id = None

        if self.task and not self.task.instance.is_container_group_task:
            execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
            self.runner_params.update(execution_environment_params)

        if not settings.IS_K8S and self.work_type == 'local' and 'only_transmit_kwargs' not in self.runner_params:
            self.runner_params['only_transmit_kwargs'] = True

    def run(self):
        # We establish a connection to the Receptor socket
        receptor_ctl = get_receptor_ctl()

        res = None
        try:
            res = self._run_internal(receptor_ctl)
            return res
        finally:
            # Make sure to always release the work unit if we established it
            if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
                try:
                    receptor_ctl.simple_command(f"work release {self.unit_id}")
                except Exception:
                    logger.exception(f"Error releasing work unit {self.unit_id}.")

    @property
    def sign_work(self):
        return False if settings.IS_K8S else True

    def _run_internal(self, receptor_ctl):
        # Create a socketpair, where the left side will be used for writing our payload
        # (private data dir, kwargs). The right side will be passed to Receptor for
        # reading.
        sockin, sockout = socket.socketpair()

        transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
        transmitter_thread.start()

        # submit our work, passing
        # in the right side of our socketpair for reading.
        _kw = {}
        if self.work_type == 'ansible-runner':
            _kw['node'] = self.task.instance.execution_node
            use_stream_tls = get_conn_type(_kw['node'], receptor_ctl).name == "STREAMTLS"
            _kw['tlsclient'] = get_tls_client(use_stream_tls)
        result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params, signwork=self.sign_work, **_kw)
        self.unit_id = result['unitid']
        # Update the job with the work unit in-memory so that the log_lifecycle
        # will print out the work unit that is to be associated with the job in the database
        # via the update_model() call.
        # We want to log the work_unit_id as early as possible. A failure can happen in between
        # when we start the job in receptor and when we associate the job <-> work_unit_id.
        # In that case, there will be work running in receptor and Controller will not know
        # which Job it is associated with.
        # We do not programmatically handle this case. Ideally, we would handle this with a reaper case.
        # The two distinct job lifecycle log events below allow for us to at least detect when this
        # edge case occurs. If the lifecycle event work_unit_id_received occurs without the
        # work_unit_id_assigned event then this case may have occurred.
        self.task.instance.work_unit_id = result['unitid']  # Set work_unit_id in-memory only
        self.task.instance.log_lifecycle("work_unit_id_received")
        self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])
        self.task.instance.log_lifecycle("work_unit_id_assigned")

        sockin.close()
        sockout.close()

        if transmitter_thread.exc:
            raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])

        transmitter_thread.join()

        # Artifacts are an output, but sometimes they are an input as well
        # this is the case with fact cache, where clearing facts deletes a file, and this must be captured
        artifact_dir = os.path.join(self.runner_params['private_data_dir'], 'artifacts')
        if os.path.exists(artifact_dir):
            shutil.rmtree(artifact_dir)

        resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
        # Both "processor" and "cancel_watcher" are spawned in separate threads.
        # We wait for the first one to return. If cancel_watcher returns first,
        # we yank the socket out from underneath the processor, which will cause it
        # to exit. A reference to the processor_future is passed into the cancel_watcher_future,
        # which exits if the job has finished normally. The context manager ensures we do not
        # leave any threads lying around.
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            processor_future = executor.submit(self.processor, resultfile)
            cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
            futures = [processor_future, cancel_watcher_future]
            first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)

            res = list(first_future.done)[0].result()
            if res.status == 'canceled':
                receptor_ctl.simple_command(f"work cancel {self.unit_id}")
                resultsock.shutdown(socket.SHUT_RDWR)
                resultfile.close()
            elif res.status == 'error':
                try:
                    unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
                    detail = unit_status.get('Detail', None)
                    state_name = unit_status.get('StateName', None)
                except Exception:
                    detail = ''
                    state_name = ''
                    logger.exception(f'An error was encountered while getting status for work unit {self.unit_id}')

                if 'exceeded quota' in detail:
                    logger.warn(detail)
                    log_name = self.task.instance.log_format
                    logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
                    self.task.update_model(self.task.instance.pk, status='pending')
                    return
                # If ansible-runner ran, but an error occurred at runtime, the traceback information
                # is saved via the status_handler passed in to the processor.
                if state_name == 'Succeeded':
                    return res

                if not self.task.instance.result_traceback:
                    try:
                        resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
                        lines = resultsock.readlines()
                        receptor_output = b"".join(lines).decode()
                        if receptor_output:
                            self.task.instance.result_traceback = receptor_output
                            self.task.instance.save(update_fields=['result_traceback'])
                        elif detail:
                            self.task.instance.result_traceback = detail
                            self.task.instance.save(update_fields=['result_traceback'])
                        else:
                            logger.warn(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
                    except Exception:
                        raise RuntimeError(detail)

        return res

    # Spawned in a thread so Receptor can start reading before we finish writing, we
    # write our payload to the left side of our socketpair.
    @cleanup_new_process
    def transmit(self, _socket):
        try:
            ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
        finally:
            # Socket must be shutdown here, or the reader will hang forever.
            _socket.shutdown(socket.SHUT_WR)

    @cleanup_new_process
    def processor(self, resultfile):
        return ansible_runner.interface.run(
            streamer='process',
            quiet=True,
            _input=resultfile,
            event_handler=self.task.runner_callback.event_handler,
            finished_callback=self.task.runner_callback.finished_callback,
            status_handler=self.task.runner_callback.status_handler,
            **self.runner_params,
        )

    @property
    def receptor_params(self):
        if self.task.instance.is_container_group_task:
            spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)

            receptor_params = {
                "secret_kube_pod": spec_yaml,
                "pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
            }

            if self.credential:
                kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
                receptor_params["secret_kube_config"] = kubeconfig_yaml
        else:
            private_data_dir = self.runner_params['private_data_dir']
            if self.work_type == 'ansible-runner' and settings.AWX_CLEANUP_PATHS:
                # on execution nodes, we rely on the private data dir being deleted
                cli_params = f"--private-data-dir={private_data_dir} --delete"
            else:
                # on hybrid nodes, we rely on the private data dir NOT being deleted
                cli_params = f"--private-data-dir={private_data_dir}"
            receptor_params = {"params": cli_params}

        return receptor_params

    @property
    def work_type(self):
        if self.task.instance.is_container_group_task:
            if self.credential:
                return 'kubernetes-runtime-auth'
            return 'kubernetes-incluster-auth'
        if self.task.instance.execution_node == settings.CLUSTER_HOST_ID or self.task.instance.execution_node == self.task.instance.controller_node:
            return 'local'
        return 'ansible-runner'

    @cleanup_new_process
    def cancel_watcher(self, processor_future):
        while True:
            if processor_future.done():
                return processor_future.result()

            if self.task.runner_callback.cancel_callback():
                result = namedtuple('result', ['status', 'rc'])
                return result('canceled', 1)

            time.sleep(1)

    @property
    def pod_definition(self):
        ee = self.task.instance.execution_environment

        default_pod_spec = get_default_pod_spec()

        pod_spec_override = {}
        if self.task and self.task.instance.instance_group.pod_spec_override:
            pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
        # According to the deepmerge docstring, the second dictionary will override when
        # they share keys, which is the desired behavior.
        # This allows user to only provide elements they want to override, and for us to still provide any
        # defaults they don't want to change
        pod_spec = deepmerge(default_pod_spec, pod_spec_override)
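        # Assumed deepmerge behavior (added commentary, not from this commit):
        #   deepmerge({'spec': {'automountServiceAccountToken': False}},
        #             {'spec': {'serviceAccountName': 'custom'}})
        #   -> {'spec': {'automountServiceAccountToken': False, 'serviceAccountName': 'custom'}}
        # i.e. nested dicts merge key-by-key and the override wins on conflicts.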

        pod_spec['spec']['containers'][0]['image'] = ee.image
        pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']

        # Enforce EE Pull Policy
        pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
        if self.task and self.task.instance.execution_environment:
            if self.task.instance.execution_environment.pull:
                pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]

        if self.task and self.task.instance.is_container_group_task:
            # If EE credential is passed, create an imagePullSecret
            if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
                # Create pull secret in k8s cluster based on ee cred
                from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

                pm = PodManager(self.task.instance)
                secret_name = pm.create_secret(job=self.task.instance)

                # Inject secret name into podspec
                pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]

        if self.task:
            pod_spec['metadata'] = deepmerge(
                pod_spec.get('metadata', {}),
                dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
            )

        return pod_spec

    @property
    def pod_name(self):
        return f"automation-job-{self.task.instance.id}"

    @property
    def credential(self):
        return self.task.instance.instance_group.credential

    @property
    def namespace(self):
        return self.pod_definition['metadata']['namespace']

    @property
    def kube_config(self):
        host_input = self.credential.get_input('host')
        config = {
            "apiVersion": "v1",
            "kind": "Config",
            "preferences": {},
            "clusters": [{"name": host_input, "cluster": {"server": host_input}}],
            "users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
            "contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
            "current-context": host_input,
        }

        if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
            config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
                self.credential.get_input('ssl_ca_cert').encode()  # encode to bytes
            ).decode()  # decode the base64 data into a str
        else:
            config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
        return config
906  awx/main/tasks/system.py      Normal file
@@ -0,0 +1,906 @@
# Python
from collections import namedtuple
import functools
import importlib
import json
import logging
import os
from io import StringIO
from contextlib import redirect_stdout
import shutil
import time
from distutils.version import LooseVersion as Version

# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist

# Django-CRUM
from crum import impersonate


# Runner
import ansible_runner.cleanup

# dateutil
from dateutil.parser import parse as parse_date

# AWX
from awx import __version__ as awx_application_version
from awx.main.access import access_registry
from awx.main.models import (
    Schedule,
    TowerScheduleState,
    Instance,
    InstanceGroup,
    UnifiedJob,
    Notification,
    Inventory,
    SmartInventoryMembership,
    Job,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
    ignore_inventory_computed_fields,
    ignore_inventory_group_removal,
    schedule_task_manager,
)

from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.main.analytics.subsystem_metrics import Metrics

from rest_framework.exceptions import PermissionDenied

logger = logging.getLogger('awx.main.tasks.system')

OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''


def dispatch_startup():
    startup_logger = logging.getLogger('awx.main.tasks')
    startup_logger.debug("Syncing Schedules")
    for sch in Schedule.objects.all():
        try:
            sch.update_computed_fields()
        except Exception:
            logger.exception("Failed to rebuild schedule {}.".format(sch))

    #
    # When the dispatcher starts, if the instance cannot be found in the database,
    # automatically register it. This is mostly useful for openshift-based
    # deployments where:
    #
    # 2 Instances come online
    # Instance B encounters a network blip, Instance A notices, and
    # deprovisions it
    # Instance B's connectivity is restored, the dispatcher starts, and it
    # re-registers itself
    #
    # In traditional container-less deployments, instances don't get
    # deprovisioned when they miss their heartbeat, so this code is mostly a
    # no-op.
    #
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
    Metrics().clear_values()

    # Update Tower's rsyslog.conf file based on logging settings in the db
    reconfigure_rsyslog()


def inform_cluster_of_shutdown():
    try:
        this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
        this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
        try:
            reaper.reap(this_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
        logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')


@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
    from awx.main.signals import disable_activity_stream

    started_waiting = time.time()
    with advisory_lock('cluster_policy_lock', wait=True):
        lock_time = time.time() - started_waiting
        if lock_time > 1.0:
            to_log = logger.info
        else:
            to_log = logger.debug
        to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
        started_compute = time.time()
        # Hop nodes should never get assigned to an InstanceGroup.
        all_instances = list(Instance.objects.exclude(node_type='hop').order_by('id'))
        all_groups = list(InstanceGroup.objects.prefetch_related('instances'))

        total_instances = len(all_instances)
        actual_groups = []
        actual_instances = []
        Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
        Node = namedtuple('Instance', ['obj', 'groups'])

        # Process policy instance list first, these will represent manually managed memberships
        instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
        for ig in all_groups:
            group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()])  # obtained in prefetch
            for hostname in ig.policy_instance_list:
                if hostname not in instance_hostnames_map:
                    logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
                    continue
                inst = instance_hostnames_map[hostname]
                group_actual.instances.append(inst.id)
                # NOTE: arguable behavior: policy-list-group is not added to
                # instance's group count for consideration in minimum-policy rules
            if group_actual.instances:
                logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))

            actual_groups.append(group_actual)

        # Process Instance minimum policies next, since it represents a concrete lower bound to the
        # number of instances to make available to instance groups
        actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
        logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
            policy_min_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.node_type == exclude_type:
                    continue  # never place execution instances in controlplane group or control instances in other groups
                if len(g.instances) >= g.obj.policy_instance_minimum:
                    break
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via the policy list
                    continue
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_min_added.append(i.obj.id)
            if policy_min_added:
                logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))

        # Finally, process instance policy percentages
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            exclude_type = 'execution' if g.obj.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME else 'control'
            candidate_pool_ct = sum(1 for i in actual_instances if i.obj.node_type != exclude_type)
            if not candidate_pool_ct:
                continue
            policy_per_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.node_type == exclude_type:
                    continue
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via a minimum policy or policy list
                    continue
                if 100 * float(len(g.instances)) / candidate_pool_ct >= g.obj.policy_instance_percentage:
                    break
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_per_added.append(i.obj.id)
            if policy_per_added:
                logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))

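        # Worked example (added commentary): with 4 policy-managed candidates and
        # policy_instance_percentage=50, the loop above stops adding once
        # 100 * 2 / 4 >= 50, i.e. after the group holds two instances.
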
        # Determine if any changes need to be made
        needs_change = False
        for g in actual_groups:
            if set(g.instances) != set(g.prior_instances):
                needs_change = True
                break
        if not needs_change:
            logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
            return

        # On a differential basis, apply instances to groups
        with transaction.atomic():
            with disable_activity_stream():
                for g in actual_groups:
                    if g.obj.is_container_group:
                        logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
                        continue
                    instances_to_add = set(g.instances) - set(g.prior_instances)
                    instances_to_remove = set(g.prior_instances) - set(g.instances)
                    if instances_to_add:
                        logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
                        g.obj.instances.add(*instances_to_add)
                    if instances_to_remove:
                        logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
                        g.obj.instances.remove(*instances_to_remove)
        logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))


@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
    orig_len = len(setting_keys)
    for i in range(orig_len):
        for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
            setting_keys.append(dependent_key)
    cache_keys = set(setting_keys)
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)

    if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
        reconfigure_rsyslog()


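# Illustrative note (added commentary, setting names hypothetical): if
# 'LOG_AGGREGATOR_TYPE' lists 'LOG_AGGREGATOR_HOST' as a dependent setting,
# then handle_setting_changes(['LOG_AGGREGATOR_TYPE']) clears the cache for
# both keys and, because they start with LOG_AGGREGATOR, also reconfigures rsyslog.

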
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
    # TODO: possibly implement some retry logic
    lock_file = project_path + '.lock'
    if os.path.exists(project_path):
        try:
            shutil.rmtree(project_path)
            logger.debug('Success removing project files {}'.format(project_path))
        except Exception:
            logger.exception('Could not remove project directory {}'.format(project_path))
    if os.path.exists(lock_file):
        try:
            os.remove(lock_file)
            logger.debug('Success removing {}'.format(lock_file))
        except Exception:
            logger.exception('Could not remove lock file {}'.format(lock_file))


@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
    if threshold <= 0:
        cache.delete('awx-profile-sql-threshold')
        logger.error('SQL PROFILING DISABLED')
    else:
        cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
        logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))


@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
    if job_id is not None:
        job_actual = UnifiedJob.objects.get(id=job_id)

    notifications = Notification.objects.filter(id__in=notification_list)
    if job_id is not None:
        job_actual.notifications.add(*notifications)

    for notification in notifications:
        update_fields = ['status', 'notifications_sent']
        try:
            sent = notification.notification_template.send(notification.subject, notification.body)
            notification.status = "successful"
            notification.notifications_sent = sent
            if job_id is not None:
                job_actual.log_lifecycle("notifications_sent")
        except Exception as e:
            logger.exception("Send Notification Failed {}".format(e))
            notification.status = "failed"
            notification.error = smart_str(e)
            update_fields.append('error')
        finally:
            try:
                notification.save(update_fields=update_fields)
            except Exception:
                logger.exception('Error saving notification {} result.'.format(notification.id))


@task(queue=get_local_queuename)
def gather_analytics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
    gather_time = now()

    if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
        analytics.gather()


@task(queue=get_local_queuename)
def purge_old_stdout_files():
    nowtime = time.time()
    for f in os.listdir(settings.JOBOUTPUT_ROOT):
        if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
            os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
            logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))


def _cleanup_images_and_files(**kwargs):
    if settings.IS_K8S:
        return
    this_inst = Instance.objects.me()
    runner_cleanup_kwargs = this_inst.get_cleanup_task_kwargs(**kwargs)
    if runner_cleanup_kwargs:
        stdout = ''
        with StringIO() as buffer:
            with redirect_stdout(buffer):
                ansible_runner.cleanup.run_cleanup(runner_cleanup_kwargs)
                stdout = buffer.getvalue()
        if '(changed: True)' in stdout:
            logger.info(f'Performed local cleanup with kwargs {kwargs}, output:\n{stdout}')

    # elect a single instance (the highest hostname, alphabetically) to run cleanup on the execution nodes
    checker_instance = Instance.objects.filter(node_type__in=['hybrid', 'control'], enabled=True, capacity__gt=0).order_by('-hostname').first()
    if checker_instance and this_inst.hostname == checker_instance.hostname:
        for inst in Instance.objects.filter(node_type='execution', enabled=True, capacity__gt=0):
            runner_cleanup_kwargs = inst.get_cleanup_task_kwargs(**kwargs)
            if not runner_cleanup_kwargs:
                continue
            try:
                stdout = worker_cleanup(inst.hostname, runner_cleanup_kwargs)
                if '(changed: True)' in stdout:
                    logger.info(f'Performed cleanup on execution node {inst.hostname} with output:\n{stdout}')
            except RuntimeError:
                logger.exception(f'Error running cleanup on execution node {inst.hostname}')


@task(queue='tower_broadcast_all')
def handle_removed_image(remove_images=None):
    """Special broadcast invocation of this method to handle case of deleted EE"""
    _cleanup_images_and_files(remove_images=remove_images, file_pattern='')


@task(queue=get_local_queuename)
def cleanup_images_and_files():
    _cleanup_images_and_files()


@task(queue=get_local_queuename)
def cluster_node_health_check(node):
"""
|
||||
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
|
||||
"""
|
||||
    if node == '':
        logger.warn('Local health check incorrectly called with blank string')
        return
    elif node != settings.CLUSTER_HOST_ID:
        logger.warn(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
        return
    try:
        this_inst = Instance.objects.me()
    except Instance.DoesNotExist:
        logger.warn(f'Instance record for {node} missing, could not check capacity.')
        return
    this_inst.local_health_check()


@task(queue=get_local_queuename)
def execution_node_health_check(node):
    if node == '':
        logger.warn('Remote health check incorrectly called with blank string')
        return
    try:
        instance = Instance.objects.get(hostname=node)
    except Instance.DoesNotExist:
        logger.warn(f'Instance record for {node} missing, could not check capacity.')
        return

    if instance.node_type != 'execution':
        raise RuntimeError(f'Execution node health check ran against {instance.node_type} node {instance.hostname}')

    data = worker_info(node)

    prior_capacity = instance.capacity

    instance.save_health_data(
        version='ansible-runner-' + data.get('runner_version', '???'),
        cpu=data.get('cpu_count', 0),
        memory=data.get('mem_in_bytes', 0),
        uuid=data.get('uuid'),
        errors='\n'.join(data.get('errors', [])),
    )

    if data['errors']:
        formatted_error = "\n".join(data["errors"])
        if prior_capacity:
            logger.warn(f'Health check marking execution node {node} as lost, errors:\n{formatted_error}')
        else:
            logger.info(f'Failed to find capacity of new or lost execution node {node}, errors:\n{formatted_error}')
    else:
        logger.info('Set capacity of execution node {} to {}, worker info data:\n{}'.format(node, instance.capacity, json.dumps(data, indent=2)))

    return data


def inspect_execution_nodes(instance_list):
    with advisory_lock('inspect_execution_nodes_lock', wait=False):
        node_lookup = {inst.hostname: inst for inst in instance_list}

        ctl = get_receptor_ctl()
        mesh_status = ctl.simple_command('status')

        nowtime = now()
        workers = mesh_status['Advertisements']
        for ad in workers:
            hostname = ad['NodeID']
            changed = False

            if hostname in node_lookup:
                instance = node_lookup[hostname]
            else:
                logger.warn(f"Unrecognized node advertising on mesh: {hostname}")
                continue

            # Control-plane nodes are dealt with via local_health_check instead.
            if instance.node_type in ('control', 'hybrid'):
                continue

            was_lost = instance.is_lost(ref_time=nowtime)
            last_seen = parse_date(ad['Time'])

            if instance.last_seen and instance.last_seen >= last_seen:
                continue
            instance.last_seen = last_seen
            instance.save(update_fields=['last_seen'])

            # Only execution nodes should be dealt with by execution_node_health_check
            if instance.node_type == 'hop':
                continue

            if changed:
                execution_node_health_check.apply_async([hostname])
            elif was_lost:
                # if the instance *was* lost, but has appeared again,
                # attempt to re-establish the initial capacity and version
                # check
                logger.warn(f'Execution node attempting to rejoin as instance {hostname}.')
                execution_node_health_check.apply_async([hostname])
            elif instance.capacity == 0 and instance.enabled:
                # nodes with a proven connection that still need remediation run health checks at a reduced frequency
                if not instance.last_health_check or (nowtime - instance.last_health_check).total_seconds() >= settings.EXECUTION_NODE_REMEDIATION_CHECKS:
                    # Periodically re-run the health check of errored nodes, in case someone fixed it
                    # TODO: perhaps decrease the frequency of these checks
                    logger.debug(f'Restarting health check for execution node {hostname} with known errors.')
                    execution_node_health_check.apply_async([hostname])


@task(queue=get_local_queuename)
def cluster_node_heartbeat():
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
    instance_list = list(Instance.objects.all())
    this_inst = None
    lost_instances = []

    for inst in instance_list:
        if inst.hostname == settings.CLUSTER_HOST_ID:
            this_inst = inst
            break
    else:
        (changed, this_inst) = Instance.objects.get_or_register()
        if changed:
            logger.info("Registered tower control node '{}'".format(this_inst.hostname))

    inspect_execution_nodes(instance_list)

    for inst in list(instance_list):
        if inst == this_inst:
            continue
        if inst.is_lost(ref_time=nowtime):
            lost_instances.append(inst)
            instance_list.remove(inst)

    if this_inst:
        startup_event = this_inst.is_lost(ref_time=nowtime)
        this_inst.local_health_check()
        if startup_event and this_inst.capacity != 0:
            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
            return
    else:
        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
    # If any node has a greater version than we do, then we'll shut down services
for other_inst in instance_list:
|
||||
if other_inst.node_type in ('execution', 'hop'):
|
||||
continue
|
||||
if other_inst.version == "" or other_inst.version.startswith('ansible-runner'):
|
||||
continue
|
||||
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
|
||||
logger.error(
|
||||
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
|
||||
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
|
||||
)
|
||||
)
|
||||
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
|
||||
# The heartbeat task will reset the capacity to the system capacity after upgrade.
|
||||
stop_local_services(communicate=False)
|
||||
raise RuntimeError("Shutting down.")
|
||||
|
||||
for other_inst in lost_instances:
|
||||
try:
|
||||
reaper.reap(other_inst)
|
||||
except Exception:
|
||||
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
|
||||
try:
|
||||
# Capacity could already be 0 because:
|
||||
# * It's a new node and it never had a heartbeat
|
||||
# * It was set to 0 by another tower node running this method
|
||||
# * It was set to 0 by this node, but auto deprovisioning is off
|
||||
#
|
||||
# If auto deprovisioning is on, don't bother setting the capacity to 0
|
||||
# since we will delete the node anyway.
|
||||
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
other_inst.mark_offline(errors=_('Another cluster node has determined this instance to be unresponsive'))
|
||||
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
|
||||
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
deprovision_hostname = other_inst.hostname
|
||||
other_inst.delete()
|
||||
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
|
||||
except DatabaseError as e:
|
||||
if 'did not affect any rows' in str(e):
|
||||
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
|
||||
else:
|
||||
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
|
||||
|
||||
|
||||
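
# Editor's note: a minimal sketch of the version gate above, assuming only that
# Version is a PEP 440-style comparator such as packaging.version.Version; the
# version strings below are hypothetical. Anything after the first '-' (for
# example a git-describe suffix) is stripped before comparing, so
# "21.1.0-3-gdeadbee" compares as 21.1.0.
from packaging.version import Version

other_version = "21.1.0-3-gdeadbee"  # reported by a peer node (illustrative)
local_version = "21.0.0"             # this node's awx_application_version (illustrative)

if Version(other_version.split('-', 1)[0]) > Version(local_version.split('-', 1)[0]):
    print("peer runs a newer release; this node would stop its local services")
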
@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
    """
    When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
    in a specific receptor directory. This directory on disk is a random 8 character string, e.g. qLL2JFNT
    This is also called the work unit ID in receptor, and is used in various receptor commands,
    e.g. "work results qLL2JFNT"
    After an AWX job executes, the receptor work unit directory is cleaned up by
    issuing the work release command. In some cases the release process might fail, or
    if AWX crashes during a job's execution, the work release command is never issued to begin with.
    As such, this periodic task will obtain a list of all receptor work units, and find which ones
    belong to AWX jobs that are in a completed state (status is canceled, error, or succeeded).
    This task will call "work release" on each of these work units to clean up the files on disk.

    Note that when we call "work release" on a work unit that actually represents remote work,
    both the local and remote work units are cleaned up.

    Since we are cleaning up jobs that the controller considers to be inactive, we take the added
    precaution of calling "work cancel" in case the work unit is still active.
    """
    if not settings.RECEPTOR_RELEASE_WORK:
        return
    logger.debug("Checking for unreleased receptor work units")
    receptor_ctl = get_receptor_ctl()
    receptor_work_list = receptor_ctl.simple_command("work list")

    unit_ids = [id for id in receptor_work_list]
    jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
    for job in jobs_with_unreleased_receptor_units:
        logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
        receptor_ctl.simple_command(f"work cancel {job.work_unit_id}")
        receptor_ctl.simple_command(f"work release {job.work_unit_id}")

    administrative_workunit_reaper(receptor_work_list)

@task(queue=get_local_queuename)
def awx_k8s_reaper():
    if not settings.RECEPTOR_RELEASE_WORK:
        return

    from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

    for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
        logger.debug("Checking for orphaned k8s pods for {}.".format(group))
        pods = PodManager.list_active_jobs(group)
        for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
            logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
            try:
                pm = PodManager(job)
                pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
            except Exception:
                logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))

@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
            logger.debug("Not running periodic scheduler, another task holds lock")
            return
        logger.debug("Starting periodic scheduler")

        run_now = now()
        state = TowerScheduleState.get_solo()
        last_run = state.schedule_last_run
        logger.debug("Last scheduler run was: %s", last_run)
        state.schedule_last_run = run_now
        state.save()

        old_schedules = Schedule.objects.enabled().before(last_run)
        for schedule in old_schedules:
            schedule.update_computed_fields()
        schedules = Schedule.objects.enabled().between(last_run, run_now)

        invalid_license = False
        try:
            access_registry[Job](None).check_license(quiet=True)
        except PermissionDenied as e:
            invalid_license = e

        for schedule in schedules:
            template = schedule.unified_job_template
            schedule.update_computed_fields()  # To update next_run timestamp.
            if template.cache_timeout_blocked:
                logger.warning("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
                continue
            try:
                job_kwargs = schedule.get_job_kwargs()
                new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
                logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))

                if invalid_license:
                    new_unified_job.status = 'failed'
                    new_unified_job.job_explanation = str(invalid_license)
                    new_unified_job.save(update_fields=['status', 'job_explanation'])
                    new_unified_job.websocket_emit_status("failed")
                    raise invalid_license
                can_start = new_unified_job.signal_start()
            except Exception:
                logger.exception('Error spawning scheduled job.')
                continue
            if not can_start:
                new_unified_job.status = 'failed'
                new_unified_job.job_explanation = gettext_noop(
                    "Scheduled job could not start because it was not in the right state or required manual credentials"
                )
                new_unified_job.save(update_fields=['status', 'job_explanation'])
                new_unified_job.websocket_emit_status("failed")
            emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
        state.save()

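
# Editor's note: a toy illustration of the scheduling window used above, under
# the assumption that a schedule fires when an occurrence falls inside
# (last_run, run_now]. Because each run persists run_now as the new last_run
# before spawning jobs, an occurrence can land in at most one window and is
# therefore launched at most once.
from datetime import datetime, timedelta, timezone


def due(occurrences, last_run, run_now):
    return [t for t in occurrences if last_run < t <= run_now]


run_now = datetime.now(timezone.utc)
last_run = run_now - timedelta(minutes=1)
occurrences = [run_now - timedelta(minutes=2), run_now - timedelta(seconds=30), run_now + timedelta(minutes=5)]
print(due(occurrences, last_run, run_now))  # only the occurrence 30 seconds ago is due
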
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
        return
    if not instance:
        return

    schedule_task_manager()

@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    logger.warning("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue

            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']

            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")

    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()

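
# Editor's note: what the dependency-failure explanation constructed above looks
# like for a hypothetical failed project update (all values illustrative):
explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % ("project_update", "Example project", 42)
print(explanation)
# Previous Task Failed: {"job_type": "project_update", "job_name": "Example project", "job_id": "42"}
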
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
    uj = UnifiedJob.objects.get(pk=job_id)
    retries = 0
    while retries < 5:
        if uj.finished:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
            return
        else:
            # wait a few seconds to avoid a race where the
            # events are persisted _before_ the UJ.status
            # changes from running -> successful
            retries += 1
            time.sleep(1)
            uj = UnifiedJob.objects.get(pk=job_id)

    logger.warning(f"Failed to even try to send notifications for job '{uj}' because it never reached a finished state.")

@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
    """
    Signal handler and wrapper around inventory.update_computed_fields to
    prevent unnecessary recursive calls.
    """
    i = Inventory.objects.filter(id=inventory_id)
    if not i.exists():
        logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
        return
    i = i[0]
    try:
        i.update_computed_fields()
    except DatabaseError as e:
        if 'did not affect any rows' in str(e):
            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
            return
        raise

def update_smart_memberships_for_inventory(smart_inventory):
    current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
    new = set(smart_inventory.hosts.values_list('id', flat=True))
    additions = new - current
    removals = current - new
    if additions or removals:
        with transaction.atomic():
            if removals:
                SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
            if additions:
                add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
                SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
        logger.debug(
            'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
                smart_inventory.pk, len(additions), len(removals), len(new)
            )
        )
        return True  # changed
    return False

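
# Editor's note: the membership sync above is plain set arithmetic over host
# IDs. A toy run with illustrative IDs:
current = {1, 2, 3}  # host_ids already cached in SmartInventoryMembership
new = {2, 3, 5}      # host_ids the smart inventory's host_filter currently matches
print(sorted(new - current))  # [5] -> memberships to bulk_create
print(sorted(current - new))  # [1] -> memberships to delete
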
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
    smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    changed_inventories = set([])
    for smart_inventory in smart_inventories:
        try:
            changed = update_smart_memberships_for_inventory(smart_inventory)
            if changed:
                changed_inventories.add(smart_inventory)
        except IntegrityError:
            logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in changed_inventories:
        smart_inventory.update_computed_fields()

@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    # Delete inventory as user
    if user_id is None:
        user = None
    else:
        try:
            user = User.objects.get(id=user_id)
        except Exception:
            user = None
    with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
        try:
            i = Inventory.objects.get(id=inventory_id)
            for host in i.hosts.iterator():
                host.job_events_as_primary_host.update(host=None)
            i.delete()
            emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
            logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
        except Inventory.DoesNotExist:
            logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
            return
        except DatabaseError:
            logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
            if retries > 0:
                time.sleep(10)
                delete_inventory(inventory_id, user_id, retries=retries - 1)

def with_path_cleanup(f):
    @functools.wraps(f)
    def _wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        finally:
            for p in self.cleanup_paths:
                try:
                    if os.path.isdir(p):
                        shutil.rmtree(p, ignore_errors=True)
                    elif os.path.exists(p):
                        os.remove(p)
                except OSError:
                    logger.exception("Failed to remove tmp file: {}".format(p))
            self.cleanup_paths = []

    return _wrapped

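
# Editor's note: a minimal usage sketch for the decorator above. The class and
# paths are hypothetical; the only contract assumed is that the instance keeps a
# cleanup_paths list that the decorated method appends to.
import os
import tempfile


class ExampleTask:
    def __init__(self):
        self.cleanup_paths = []

    @with_path_cleanup
    def run(self):
        scratch = tempfile.mkdtemp(prefix='awx-example-')
        self.cleanup_paths.append(scratch)
        return scratch  # the decorator's finally block removes scratch after we return


path = ExampleTask().run()
print(os.path.exists(path))  # False: the scratch directory was reaped on the way out
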
def _reconstruct_relationships(copy_mapping):
    for old_obj, new_obj in copy_mapping.items():
        model = type(old_obj)
        for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
            field = model._meta.get_field(field_name)
            if isinstance(field, ForeignKey):
                if getattr(new_obj, field_name, None):
                    continue
                related_obj = getattr(old_obj, field_name)
                related_obj = copy_mapping.get(related_obj, related_obj)
                setattr(new_obj, field_name, related_obj)
            elif field.many_to_many:
                for related_obj in getattr(old_obj, field_name).all():
                    logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
                    getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
        new_obj.save()

@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
    sub_obj_list = cache.get(uuid)
    if sub_obj_list is None:
        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
        return

    logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    model = getattr(importlib.import_module(model_module), model_name, None)
    if model is None:
        return
    try:
        obj = model.objects.get(pk=obj_pk)
        new_obj = model.objects.get(pk=new_obj_pk)
        creater = User.objects.get(pk=user_pk)
    except ObjectDoesNotExist:
        logger.warning("Object or user no longer exists.")
        return
    with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
        copy_mapping = {}
        for sub_obj_setup in sub_obj_list:
            sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
            if sub_model is None:
                continue
            try:
                sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
            except ObjectDoesNotExist:
                continue
            copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
        _reconstruct_relationships(copy_mapping)
        if permission_check_func:
            permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
            permission_check_func(creater, copy_mapping.values())
    if isinstance(new_obj, Inventory):
        update_inventory_computed_fields.delay(new_obj.id)

@@ -15,6 +15,7 @@ from awx.main.tests.factories import (
 )

 from django.core.cache import cache
+from django.conf import settings


 def pytest_addoption(parser):

@@ -80,13 +81,44 @@ def instance_group_factory():


 @pytest.fixture
-def default_instance_group(instance_factory, instance_group_factory):
-    return create_instance_group("default", instances=[create_instance("hostA")])
+def controlplane_instance_group(instance_factory, instance_group_factory):
+    """There always has to be a controlplane instancegroup and at least one instance in it"""
+    return create_instance_group(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, create_instance('hybrid-1', node_type='hybrid', capacity=500))


 @pytest.fixture
-def controlplane_instance_group(instance_factory, instance_group_factory):
-    return create_instance_group("controlplane", instances=[create_instance("hostA")])
+def default_instance_group(instance_factory, instance_group_factory):
+    return create_instance_group("default", instances=[create_instance("hostA", node_type='execution')])
+
+
+@pytest.fixture
+def control_instance():
+    '''Control instance in the controlplane automatic IG'''
+    inst = create_instance('control-1', node_type='control', capacity=500)
+    return inst
+
+
+@pytest.fixture
+def control_instance_low_capacity():
+    '''Control instance in the controlplane automatic IG that has low capacity'''
+    inst = create_instance('control-1', node_type='control', capacity=5)
+    return inst
+
+
+@pytest.fixture
+def execution_instance():
+    '''Execution node in the automatic default IG'''
+    ig = create_instance_group('default')
+    inst = create_instance('receptor-1', node_type='execution', capacity=500)
+    ig.instances.add(inst)
+    return inst
+
+
+@pytest.fixture
+def hybrid_instance():
+    '''Hybrid node in the default controlplane IG'''
+    inst = create_instance('hybrid-1', node_type='hybrid', capacity=500)
+    return inst


 @pytest.fixture

@@ -28,12 +28,15 @@ from awx.main.models import (
 #


-def mk_instance(persisted=True, hostname='instance.example.org'):
+def mk_instance(persisted=True, hostname='instance.example.org', node_type='hybrid', capacity=100):
     if not persisted:
         raise RuntimeError('creating an Instance requires persisted=True')
     from django.conf import settings

-    return Instance.objects.get_or_create(uuid=settings.SYSTEM_UUID, hostname=hostname)[0]
+    instance = Instance.objects.get_or_create(uuid=settings.SYSTEM_UUID, hostname=hostname, node_type=node_type, capacity=capacity)[0]
+    if node_type in ('control', 'hybrid'):
+        mk_instance_group(name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, instance=instance)
+    return instance


 def mk_instance_group(name='default', instance=None, minimum=0, percentage=0):

@@ -52,7 +55,9 @@ def mk_organization(name, description=None, persisted=True):
     description = description or '{}-description'.format(name)
     org = Organization(name=name, description=description)
     if persisted:
-        mk_instance(persisted)
+        instances = Instance.objects.all()
+        if not instances:
+            mk_instance(persisted)
         org.save()
     return org

@@ -132,8 +132,8 @@ def generate_teams(organization, persisted, **kwargs):
     return teams


-def create_instance(name, instance_groups=None):
-    return mk_instance(hostname=name)
+def create_instance(name, instance_groups=None, node_type='hybrid', capacity=200):
+    return mk_instance(hostname=name, node_type=node_type, capacity=capacity)


 def create_instance_group(name, instances=None, minimum=0, percentage=0):

@@ -3,6 +3,7 @@ import pytest
 from unittest import mock

 from awx.api.versioning import reverse
+from awx.main.models.activity_stream import ActivityStream
 from awx.main.models.ha import Instance

 import redis

@@ -17,6 +18,7 @@ INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, memory=36000000000, cpu_c
 @pytest.mark.django_db
 def test_disabled_zeros_capacity(patch, admin_user):
     instance = Instance.objects.create(**INSTANCE_KWARGS)
+    assert ActivityStream.objects.filter(instance=instance).count() == 1

     url = reverse('api:instance_detail', kwargs={'pk': instance.pk})

@@ -25,12 +27,14 @@ def test_disabled_zeros_capacity(patch, admin_user):

     instance.refresh_from_db()
     assert instance.capacity == 0
+    assert ActivityStream.objects.filter(instance=instance).count() == 2


 @pytest.mark.django_db
 def test_enabled_sets_capacity(patch, admin_user):
     instance = Instance.objects.create(enabled=False, capacity=0, **INSTANCE_KWARGS)
     assert instance.capacity == 0
+    assert ActivityStream.objects.filter(instance=instance).count() == 1

     url = reverse('api:instance_detail', kwargs={'pk': instance.pk})

@@ -39,6 +43,7 @@ def test_enabled_sets_capacity(patch, admin_user):

     instance.refresh_from_db()
     assert instance.capacity > 0
+    assert ActivityStream.objects.filter(instance=instance).count() == 2


 @pytest.mark.django_db

@@ -50,6 +55,20 @@ def test_auditor_user_health_check(get, post, system_auditor):
     post(url=url, user=system_auditor, expect=403)


+@pytest.mark.django_db
+def test_health_check_throws_error(post, admin_user):
+    instance = Instance.objects.create(node_type='execution', **INSTANCE_KWARGS)
+    url = reverse('api:instance_health_check', kwargs={'pk': instance.pk})
+    # we will simulate a receptor error, similar to this one
+    # https://github.com/ansible/receptor/blob/156e6e24a49fbf868734507f9943ac96208ed8f5/receptorctl/receptorctl/socket_interface.py#L204
+    # related to issue https://github.com/ansible/tower/issues/5315
+    with mock.patch('awx.main.tasks.receptor.run_until_complete', side_effect=RuntimeError('Remote error: foobar')):
+        post(url=url, user=admin_user, expect=200)
+    instance.refresh_from_db()
+    assert 'Remote error: foobar' in instance.errors
+    assert instance.capacity == 0
+
+
 @pytest.mark.django_db
 @mock.patch.object(redis.client.Redis, 'ping', lambda self: True)
 def test_health_check_usage(get, post, admin_user):

@@ -4,6 +4,7 @@ import pytest

 from awx.api.versioning import reverse
 from awx.main.models import (
+    ActivityStream,
     Instance,
     InstanceGroup,
     ProjectUpdate,

@@ -213,9 +214,23 @@ def test_containerized_group_default_fields(instance_group, kube_credential):
 def test_instance_attach_to_instance_group(post, instance_group, node_type_instance, admin, node_type):
     instance = node_type_instance(hostname=node_type, node_type=node_type)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_group_instance_list', kwargs={'pk': instance_group.pk})
     post(url, {'associate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400)

+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 2  # the second is an update of the instance group policy
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'associate'
+        assert new_activity.object1 == 'instance_group'
+        assert new_activity.object2 == 'instance'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity
+

 @pytest.mark.django_db
 @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'execution'])

@@ -223,18 +238,46 @@ def test_instance_unattach_from_instance_group(post, instance_group, node_type_i
     instance = node_type_instance(hostname=node_type, node_type=node_type)
     instance_group.instances.add(instance)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_group_instance_list', kwargs={'pk': instance_group.pk})
     post(url, {'disassociate': True, 'id': instance.id}, admin, expect=204 if node_type != 'control' else 400)

+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 1
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'disassociate'
+        assert new_activity.object1 == 'instance_group'
+        assert new_activity.object2 == 'instance'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity
+

 @pytest.mark.django_db
 @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'execution'])
 def test_instance_group_attach_to_instance(post, instance_group, node_type_instance, admin, node_type):
     instance = node_type_instance(hostname=node_type, node_type=node_type)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_instance_groups_list', kwargs={'pk': instance.pk})
     post(url, {'associate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400)

+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 2  # the second is an update of the instance group policy
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'associate'
+        assert new_activity.object1 == 'instance'
+        assert new_activity.object2 == 'instance_group'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity
+

 @pytest.mark.django_db
 @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'execution'])

@@ -242,5 +285,19 @@ def test_instance_group_unattach_from_instance(post, instance_group, node_type_i
     instance = node_type_instance(hostname=node_type, node_type=node_type)
     instance_group.instances.add(instance)

+    count = ActivityStream.objects.count()
+
     url = reverse(f'api:instance_instance_groups_list', kwargs={'pk': instance.pk})
     post(url, {'disassociate': True, 'id': instance_group.id}, admin, expect=204 if node_type != 'control' else 400)
+
+    new_activity = ActivityStream.objects.all()[count:]
+    if node_type != 'control':
+        assert len(new_activity) == 1
+        new_activity = new_activity[0]
+        assert new_activity.operation == 'disassociate'
+        assert new_activity.object1 == 'instance'
+        assert new_activity.object2 == 'instance_group'
+        assert new_activity.instance.first() == instance
+        assert new_activity.instance_group.first() == instance_group
+    else:
+        assert not new_activity

@@ -127,7 +127,7 @@ class TestApprovalNodes:
         ]

     @pytest.mark.django_db
-    def test_approval_node_approve(self, post, admin_user, job_template):
+    def test_approval_node_approve(self, post, admin_user, job_template, controlplane_instance_group):
         # This test ensures that a user (with permissions to do so) can APPROVE
         # workflow approvals. Also asserts that trying to APPROVE approvals
         # that have already been dealt with will throw an error.

@@ -152,7 +152,7 @@ class TestApprovalNodes:
         post(reverse('api:workflow_approval_approve', kwargs={'pk': approval.pk}), user=admin_user, expect=400)

     @pytest.mark.django_db
-    def test_approval_node_deny(self, post, admin_user, job_template):
+    def test_approval_node_deny(self, post, admin_user, job_template, controlplane_instance_group):
         # This test ensures that a user (with permissions to do so) can DENY
         # workflow approvals. Also asserts that trying to DENY approvals
         # that have already been dealt with will throw an error.

awx/main/tests/functional/commands/test_register_queue.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+from io import StringIO
+from contextlib import redirect_stdout
+
+import pytest
+
+from awx.main.management.commands.register_queue import RegisterQueue
+from awx.main.models.ha import InstanceGroup
+
+
+@pytest.mark.django_db
+def test_openshift_idempotence():
+    def perform_register():
+        with StringIO() as buffer:
+            with redirect_stdout(buffer):
+                RegisterQueue('default', 100, 0, [], is_container_group=True).register()
+            return buffer.getvalue()
+
+    assert '(changed: True)' in perform_register()
+    assert '(changed: True)' not in perform_register()
+    assert '(changed: True)' not in perform_register()
+
+    ig = InstanceGroup.objects.get(name='default')
+    assert ig.policy_instance_percentage == 100
+    assert ig.policy_instance_minimum == 0
+    assert ig.policy_instance_list == []
+    assert ig.is_container_group is True

@@ -170,7 +170,7 @@ def test_activity_stream_actor(admin_user):


 @pytest.mark.django_db
-def test_annon_user_action():
+def test_anon_user_action():
     with mock.patch('awx.main.signals.get_current_user') as u_mock:
         u_mock.return_value = AnonymousUser()
         inv = Inventory.objects.create(name='ainventory')

@@ -0,0 +1,46 @@
+import pytest
+
+from awx.main.models.execution_environments import ExecutionEnvironment
+
+
+@pytest.fixture
+def cleanup_patch(mocker):
+    return mocker.patch('awx.main.signals.handle_removed_image')
+
+
+@pytest.mark.django_db
+def test_image_unchanged_no_delete_task(cleanup_patch):
+    """When an irrelevant EE field is changed, we do not run the image cleanup task"""
+    execution_environment = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
+    execution_environment.description = 'foobar'
+    execution_environment.save()
+
+    cleanup_patch.delay.assert_not_called()
+
+
+@pytest.mark.django_db
+def test_image_changed_creates_delete_task(cleanup_patch):
+    execution_environment = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
+    execution_environment.image = 'quay.io/new/image'
+    execution_environment.save()
+
+    cleanup_patch.delay.assert_called_once_with(remove_images=['quay.io/foo/bar'])
+
+
+@pytest.mark.django_db
+def test_image_still_in_use(cleanup_patch):
+    """When an image is still in use by another EE, we do not clean it up"""
+    ExecutionEnvironment.objects.create(name='unrelated-ee', image='quay.io/foo/bar')
+    execution_environment = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
+    execution_environment.image = 'quay.io/new/image'
+    execution_environment.save()
+
+    cleanup_patch.delay.assert_not_called()
+
+
+@pytest.mark.django_db
+def test_image_deletion_creates_delete_task(cleanup_patch):
+    execution_environment = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
+    execution_environment.delete()
+
+    cleanup_patch.delay.assert_called_once_with(remove_images=['quay.io/foo/bar'])

@@ -308,7 +308,7 @@ def test_beginning_of_time(job_template):
     'rrule, tz',
     [
         ['DTSTART:20300112T210000Z RRULE:FREQ=DAILY;INTERVAL=1', 'UTC'],
-        ['DTSTART;TZID=America/New_York:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1', 'America/New_York'],
+        ['DTSTART;TZID=US/Eastern:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1', 'US/Eastern'],
     ],
 )
 def test_timezone_property(job_template, rrule, tz):

@@ -5,7 +5,7 @@ from collections import namedtuple
 from unittest import mock  # noqa
 import pytest

-from awx.main.tasks import AWXReceptorJob
+from awx.main.tasks.receptor import AWXReceptorJob
 from awx.main.utils import (
     create_temporary_fifo,
 )

@@ -3,11 +3,11 @@ from unittest import mock
 from datetime import timedelta
 from awx.main.scheduler import TaskManager
 from awx.main.models import InstanceGroup, WorkflowJob
-from awx.main.tasks import apply_cluster_membership_policies
+from awx.main.tasks.system import apply_cluster_membership_policies


 @pytest.mark.django_db
-def test_multi_group_basic_job_launch(instance_factory, default_instance_group, mocker, instance_group_factory, job_template_factory):
+def test_multi_group_basic_job_launch(instance_factory, controlplane_instance_group, mocker, instance_group_factory, job_template_factory):
     i1 = instance_factory("i1")
     i2 = instance_factory("i2")
     ig1 = instance_group_factory("ig1", instances=[i1])

@@ -67,7 +67,7 @@ def test_multi_group_with_shared_dependency(instance_factory, controlplane_insta


 @pytest.mark.django_db
-def test_workflow_job_no_instancegroup(workflow_job_template_factory, default_instance_group, mocker):
+def test_workflow_job_no_instancegroup(workflow_job_template_factory, controlplane_instance_group, mocker):
     wfjt = workflow_job_template_factory('anicedayforawalk').workflow_job_template
     wfj = WorkflowJob.objects.create(workflow_job_template=wfjt)
     wfj.status = "pending"

@@ -79,9 +79,10 @@ def test_workflow_job_no_instancegroup(workflow_job_template_factory, default_in


 @pytest.mark.django_db
-def test_overcapacity_blocking_other_groups_unaffected(instance_factory, default_instance_group, mocker, instance_group_factory, job_template_factory):
+def test_overcapacity_blocking_other_groups_unaffected(instance_factory, controlplane_instance_group, mocker, instance_group_factory, job_template_factory):
     i1 = instance_factory("i1")
-    i1.capacity = 1000
+    # need to account a little extra for controller node capacity impact
+    i1.capacity = 1020
     i1.save()
     i2 = instance_factory("i2")
     ig1 = instance_group_factory("ig1", instances=[i1])

@@ -120,7 +121,7 @@ def test_overcapacity_blocking_other_groups_unaffected(instance_factory, default


 @pytest.mark.django_db
-def test_failover_group_run(instance_factory, default_instance_group, mocker, instance_group_factory, job_template_factory):
+def test_failover_group_run(instance_factory, controlplane_instance_group, mocker, instance_group_factory, job_template_factory):
     i1 = instance_factory("i1")
     i2 = instance_factory("i2")
     ig1 = instance_group_factory("ig1", instances=[i1])

@@ -7,19 +7,20 @@ from awx.main.scheduler import TaskManager
 from awx.main.scheduler.dependency_graph import DependencyGraph
 from awx.main.utils import encrypt_field
 from awx.main.models import WorkflowJobTemplate, JobTemplate, Job
-from awx.main.models.ha import Instance, InstanceGroup
+from awx.main.models.ha import Instance
+from django.conf import settings


 @pytest.mark.django_db
-def test_single_job_scheduler_launch(default_instance_group, job_template_factory, mocker):
-    instance = default_instance_group.instances.all()[0]
+def test_single_job_scheduler_launch(hybrid_instance, controlplane_instance_group, job_template_factory, mocker):
+    instance = controlplane_instance_group.instances.all()[0]
     objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
     j = objects.jobs["job_should_start"]
     j.status = 'pending'
     j.save()
     with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j, default_instance_group, [], instance)
+        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)


 @pytest.mark.django_db

@@ -47,7 +48,7 @@ class TestJobLifeCycle:
         if expect_commit is not None:
             assert mock_commit.mock_calls == expect_commit

-    def test_task_manager_workflow_rescheduling(self, job_template_factory, inventory, project, default_instance_group):
+    def test_task_manager_workflow_rescheduling(self, job_template_factory, inventory, project, controlplane_instance_group):
         jt = JobTemplate.objects.create(allow_simultaneous=True, inventory=inventory, project=project, playbook='helloworld.yml')
         wfjt = WorkflowJobTemplate.objects.create(name='foo')
         for i in range(2):

@@ -80,7 +81,7 @@ class TestJobLifeCycle:
         # no further action is necessary, so rescheduling should not happen
         self.run_tm(tm, [mock.call('successful')], [])

-    def test_task_manager_workflow_workflow_rescheduling(self):
+    def test_task_manager_workflow_workflow_rescheduling(self, controlplane_instance_group):
         wfjts = [WorkflowJobTemplate.objects.create(name='foo')]
         for i in range(5):
             wfjt = WorkflowJobTemplate.objects.create(name='foo{}'.format(i))

@@ -100,22 +101,6 @@ class TestJobLifeCycle:
         self.run_tm(tm, expect_schedule=[mock.call()])
         wfjts[0].refresh_from_db()

-    @pytest.fixture
-    def control_instance(self):
-        '''Control instance in the controlplane automatic IG'''
-        ig = InstanceGroup.objects.create(name='controlplane')
-        inst = Instance.objects.create(hostname='control-1', node_type='control', capacity=500)
-        ig.instances.add(inst)
-        return inst
-
-    @pytest.fixture
-    def execution_instance(self):
-        '''Execution node in the automatic default IG'''
-        ig = InstanceGroup.objects.create(name='default')
-        inst = Instance.objects.create(hostname='receptor-1', node_type='execution', capacity=500)
-        ig.instances.add(inst)
-        return inst
-
     def test_control_and_execution_instance(self, project, system_job_template, job_template, inventory_source, control_instance, execution_instance):
         assert Instance.objects.count() == 2

@@ -142,10 +127,78 @@ class TestJobLifeCycle:
         assert uj.capacity_type == 'execution'
         assert [uj.execution_node, uj.controller_node] == [execution_instance.hostname, control_instance.hostname], uj

+    @pytest.mark.django_db
+    def test_job_fails_to_launch_when_no_control_capacity(self, job_template, control_instance_low_capacity, execution_instance):
+        enough_capacity = job_template.create_unified_job()
+        insufficient_capacity = job_template.create_unified_job()
+        all_ujs = [enough_capacity, insufficient_capacity]
+        for uj in all_ujs:
+            uj.signal_start()
+
+        # There is only enough control capacity to run one of the jobs so one should end up in pending and the other in waiting
+        tm = TaskManager()
+        self.run_tm(tm)
+
+        for uj in all_ujs:
+            uj.refresh_from_db()
+        assert enough_capacity.status == 'waiting'
+        assert insufficient_capacity.status == 'pending'
+        assert [enough_capacity.execution_node, enough_capacity.controller_node] == [
+            execution_instance.hostname,
+            control_instance_low_capacity.hostname,
+        ], enough_capacity
+
+    @pytest.mark.django_db
+    def test_hybrid_capacity(self, job_template, hybrid_instance):
+        enough_capacity = job_template.create_unified_job()
+        insufficient_capacity = job_template.create_unified_job()
+        expected_task_impact = enough_capacity.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
+        all_ujs = [enough_capacity, insufficient_capacity]
+        for uj in all_ujs:
+            uj.signal_start()
+
+        # There is only enough control capacity to run one of the jobs so one should end up in pending and the other in waiting
+        tm = TaskManager()
+        self.run_tm(tm)
+
+        for uj in all_ujs:
+            uj.refresh_from_db()
+        assert enough_capacity.status == 'waiting'
+        assert insufficient_capacity.status == 'pending'
+        assert [enough_capacity.execution_node, enough_capacity.controller_node] == [
+            hybrid_instance.hostname,
+            hybrid_instance.hostname,
+        ], enough_capacity
+        assert expected_task_impact == hybrid_instance.consumed_capacity
+
+    @pytest.mark.django_db
+    def test_project_update_capacity(self, project, hybrid_instance, instance_group_factory, controlplane_instance_group):
+        pu = project.create_unified_job()
+        instance_group_factory(name='second_ig', instances=[hybrid_instance])
+        expected_task_impact = pu.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
+        pu.signal_start()
+
+        tm = TaskManager()
+        self.run_tm(tm)
+
+        pu.refresh_from_db()
+        assert pu.status == 'waiting'
+        assert [pu.execution_node, pu.controller_node] == [
+            hybrid_instance.hostname,
+            hybrid_instance.hostname,
+        ], pu
+        assert expected_task_impact == hybrid_instance.consumed_capacity
+        # The hybrid node is in both instance groups, but the project update should
+        # always get assigned to the controlplane
+        assert pu.instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME
+        pu.status = 'successful'
+        pu.save()
+        assert hybrid_instance.consumed_capacity == 0
+

 @pytest.mark.django_db
-def test_single_jt_multi_job_launch_blocks_last(default_instance_group, job_template_factory, mocker):
-    instance = default_instance_group.instances.all()[0]
+def test_single_jt_multi_job_launch_blocks_last(controlplane_instance_group, job_template_factory, mocker):
+    instance = controlplane_instance_group.instances.all()[0]
     objects = job_template_factory(
         'jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start", "job_should_not_start"]
     )

@@ -157,17 +210,17 @@ def test_single_jt_multi_job_launch_blocks_last(default_instance_group, job_temp
     j2.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j1, default_instance_group, [], instance)
+        TaskManager.start_task.assert_called_once_with(j1, controlplane_instance_group, [], instance)
     j1.status = "successful"
     j1.save()
     with mocker.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j2, default_instance_group, [], instance)
+        TaskManager.start_task.assert_called_once_with(j2, controlplane_instance_group, [], instance)


 @pytest.mark.django_db
-def test_single_jt_multi_job_launch_allow_simul_allowed(default_instance_group, job_template_factory, mocker):
-    instance = default_instance_group.instances.all()[0]
+def test_single_jt_multi_job_launch_allow_simul_allowed(controlplane_instance_group, job_template_factory, mocker):
+    instance = controlplane_instance_group.instances.all()[0]
     objects = job_template_factory(
         'jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start", "job_should_not_start"]
     )

@@ -184,12 +237,15 @@ def test_single_jt_multi_job_launch_allow_simul_allowed(default_instance_group,
     j2.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_has_calls([mock.call(j1, default_instance_group, [], instance), mock.call(j2, default_instance_group, [], instance)])
+        TaskManager.start_task.assert_has_calls(
+            [mock.call(j1, controlplane_instance_group, [], instance), mock.call(j2, controlplane_instance_group, [], instance)]
+        )


 @pytest.mark.django_db
-def test_multi_jt_capacity_blocking(default_instance_group, job_template_factory, mocker):
-    instance = default_instance_group.instances.all()[0]
+def test_multi_jt_capacity_blocking(hybrid_instance, job_template_factory, mocker):
+    instance = hybrid_instance
+    controlplane_instance_group = instance.rampart_groups.first()
     objects1 = job_template_factory('jt1', organization='org1', project='proj1', inventory='inv1', credential='cred1', jobs=["job_should_start"])
     objects2 = job_template_factory('jt2', organization='org2', project='proj2', inventory='inv2', credential='cred2', jobs=["job_should_not_start"])
     j1 = objects1.jobs["job_should_start"]

@@ -200,15 +256,15 @@ def test_multi_jt_capacity_blocking(default_instance_group, job_template_factory
     j2.save()
     tm = TaskManager()
     with mock.patch('awx.main.models.Job.task_impact', new_callable=mock.PropertyMock) as mock_task_impact:
-        mock_task_impact.return_value = 500
+        mock_task_impact.return_value = 505
         with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
             tm.schedule()
-            mock_job.assert_called_once_with(j1, default_instance_group, [], instance)
+            mock_job.assert_called_once_with(j1, controlplane_instance_group, [], instance)
             j1.status = "successful"
            j1.save()
        with mock.patch.object(TaskManager, "start_task", wraps=tm.start_task) as mock_job:
            tm.schedule()
-            mock_job.assert_called_once_with(j2, default_instance_group, [], instance)
+            mock_job.assert_called_once_with(j2, controlplane_instance_group, [], instance)


 @pytest.mark.django_db

@@ -240,9 +296,9 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job


 @pytest.mark.django_db
-def test_single_job_dependencies_inventory_update_launch(default_instance_group, job_template_factory, mocker, inventory_source_factory):
+def test_single_job_dependencies_inventory_update_launch(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
     objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
-    instance = default_instance_group.instances.all()[0]
+    instance = controlplane_instance_group.instances.all()[0]
     j = objects.jobs["job_should_start"]
     j.status = 'pending'
     j.save()

@@ -260,18 +316,18 @@ def test_single_job_dependencies_inventory_update_launch(default_instance_group,
         mock_iu.assert_called_once_with(j, ii)
     iu = [x for x in ii.inventory_updates.all()]
     assert len(iu) == 1
-    TaskManager.start_task.assert_called_once_with(iu[0], default_instance_group, [j], instance)
+    TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, [j], instance)
     iu[0].status = "successful"
     iu[0].save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j, default_instance_group, [], instance)
+        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)


 @pytest.mark.django_db
-def test_job_dependency_with_already_updated(default_instance_group, job_template_factory, mocker, inventory_source_factory):
+def test_job_dependency_with_already_updated(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
     objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
-    instance = default_instance_group.instances.all()[0]
+    instance = controlplane_instance_group.instances.all()[0]
     j = objects.jobs["job_should_start"]
     j.status = 'pending'
     j.save()

@@ -293,7 +349,7 @@ def test_job_dependency_with_already_updated(default_instance_group, job_templat
         mock_iu.assert_not_called()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_called_once_with(j, default_instance_group, [], instance)
+        TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)


 @pytest.mark.django_db

@@ -349,10 +405,10 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa


 @pytest.mark.django_db
-def test_job_not_blocking_project_update(default_instance_group, job_template_factory):
+def test_job_not_blocking_project_update(controlplane_instance_group, job_template_factory):
     objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job"])
     job = objects.jobs["job"]
-    job.instance_group = default_instance_group
+    job.instance_group = controlplane_instance_group
     job.status = "running"
     job.save()

@@ -362,7 +418,7 @@ def test_job_not_blocking_project_update(default_instance_group, job_template_fa

     proj = objects.project
     project_update = proj.create_project_update()
-    project_update.instance_group = default_instance_group
+    project_update.instance_group = controlplane_instance_group
     project_update.status = "pending"
     project_update.save()
     assert not task_manager.job_blocked_by(project_update)

@@ -373,10 +429,10 @@ def test_job_not_blocking_project_update(default_instance_group, job_template_fa


 @pytest.mark.django_db
-def test_job_not_blocking_inventory_update(default_instance_group, job_template_factory, inventory_source_factory):
+def test_job_not_blocking_inventory_update(controlplane_instance_group, job_template_factory, inventory_source_factory):
     objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job"])
     job = objects.jobs["job"]
-    job.instance_group = default_instance_group
+    job.instance_group = controlplane_instance_group
     job.status = "running"
     job.save()

@@ -389,7 +445,7 @@ def test_job_not_blocking_inventory_update(default_instance_group, job_template_
     inv_source.source = "ec2"
     inv.inventory_sources.add(inv_source)
     inventory_update = inv_source.create_inventory_update()
-    inventory_update.instance_group = default_instance_group
+    inventory_update.instance_group = controlplane_instance_group
     inventory_update.status = "pending"
     inventory_update.save()

awx/main/tests/functional/test_api_generics.py (new file, 31 lines)
@@ -0,0 +1,31 @@
+import pytest
+
+from django.test.utils import override_settings
+from awx.api.versioning import reverse
+
+
+@pytest.mark.django_db
+def test_change_400_error_log(caplog, post, admin_user):
+    with override_settings(API_400_ERROR_LOG_FORMAT='Test'):
+        post(url=reverse('api:setting_logging_test'), data={}, user=admin_user, expect=409)
+    assert 'Test' in caplog.text
+
+
+@pytest.mark.django_db
+def test_bad_400_error_log(caplog, post, admin_user):
+    with override_settings(API_400_ERROR_LOG_FORMAT="Not good {junk}"):
+        post(url=reverse('api:setting_logging_test'), data={}, user=admin_user, expect=409)
+    assert "Unable to format API_400_ERROR_LOG_FORMAT setting, defaulting log message: 'junk'" in caplog.text
+    assert 'status 409 received by user admin attempting to access /api/v2/settings/logging/test/ from 127.0.0.1' in caplog.text
+
+
+@pytest.mark.django_db
+def test_custom_400_error_log(caplog, post, admin_user):
+    with override_settings(API_400_ERROR_LOG_FORMAT="{status_code} {error}"):
+        post(url=reverse('api:setting_logging_test'), data={}, user=admin_user, expect=409)
+    assert '409 Logging not enabled' in caplog.text
+
+
+# The above tests the generation function with a dict/object.
+# The tower-qa test tests.api.inventories.test_inventory_update.TestInventoryUpdate.test_update_all_inventory_sources_with_nonfunctional_sources tests the function with a list.
+# Someday it would be nice to test the else condition (not a dict/list), but we need to find an API test which will do this. For now this was added just as a catch-all.
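
These tests pin down the fallback behavior of the 400-error log formatting: a format string referencing an unknown placeholder raises KeyError, and the message falls back to a default. A rough sketch of that behavior in plain Python (the helper name and signature are hypothetical; the real logic lives in awx.api.generics):

def format_400_log(fmt, status_code, user, url_path, remote_addr, error):
    # Default message matching what test_bad_400_error_log asserts on.
    default_msg = f"status {status_code} received by user {user} attempting to access {url_path} from {remote_addr}"
    try:
        return fmt.format(status_code=status_code, user=user, url_path=url_path, remote_addr=remote_addr, error=error)
    except KeyError as missing:
        # An unknown placeholder like {junk} lands here.
        return f"Unable to format API_400_ERROR_LOG_FORMAT setting, defaulting log message: {missing}\n" + default_msg

print(format_400_log("{status_code} {error}", 409, "admin", "/api/v2/settings/logging/test/", "127.0.0.1", "Logging not enabled"))
print(format_400_log("Not good {junk}", 409, "admin", "/api/v2/settings/logging/test/", "127.0.0.1", "Logging not enabled"))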