Mirror of https://github.com/ansible/awx.git (synced 2026-02-07 04:28:23 -03:30)
Compare commits (490 commits)
.github/BOTMETA.yml (17 changed lines, vendored)
@@ -1,17 +0,0 @@
---
files:
  awx/ui/:
    labels: component:ui
    maintainers: $team_ui
  awx/api/:
    labels: component:api
    maintainers: $team_api
  awx/main/:
    labels: component:api
    maintainers: $team_api
  installer/:
    labels: component:installer

macros:
  team_api: wwitzel3 matburt chrismeyersfsu cchurch AlanCoding ryanpetrello rooftopcellist
  team_ui: jlmitch5 jaredevantabor mabashian marshmalien benthomasson jakemcdermott
.github/CODEOWNERS (1 changed line, vendored)
@@ -1 +0,0 @@
workflows/e2e_test.yml @tiagodread @shanemcd @jakemcdermott
.github/ISSUE_TEMPLATE.md (26 changed lines, vendored)
@@ -6,17 +6,37 @@ practices regarding responsible disclosure, see
https://www.ansible.com/security
-->

<!--

PLEASE DO NOT USE A BLANK TEMPLATE IN THE AWX REPO.
This is a legacy template used for internal testing ONLY.

Any issues opened with this template will be automatically closed.

Instead use the bug or feature request.

-->

##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
- Feature Idea
- Documentation
- Breaking Change
- New or Enhanced Feature
- Bug, Docs Fix or other nominal change

##### COMPONENT NAME
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
- API
- UI
- Collection
- Docs
- CLI
- Other

##### SUMMARY
<!-- Briefly describe the problem. -->
.github/ISSUE_TEMPLATE/bug_report.yml (17 changed lines, vendored)
@@ -1,13 +1,12 @@
|
||||
---
|
||||
name: Bug Report
|
||||
description: Create a report to help us improve
|
||||
description: "🐞 Create a report to help us improve"
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Issues are for **concrete, actionable bugs and feature requests** only. For debugging help or technical support, please use:
|
||||
- The #ansible-awx channel on irc.libera.chat
|
||||
- The awx project mailing list, https://groups.google.com/forum/#!forum/awx-project
|
||||
Bug Report issues are for **concrete, actionable bugs** only.
|
||||
For debugging help or technical support, please see the [Get Involved section of our README](https://github.com/ansible/awx#get-involved)
|
||||
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
@@ -24,7 +23,7 @@ body:
|
||||
- type: textarea
|
||||
id: summary
|
||||
attributes:
|
||||
label: Summary
|
||||
label: Bug Summary
|
||||
description: Briefly describe the problem.
|
||||
validations:
|
||||
required: false
|
||||
@@ -45,6 +44,9 @@ body:
|
||||
- label: UI
|
||||
- label: API
|
||||
- label: Docs
|
||||
- label: Collection
|
||||
- label: CLI
|
||||
- label: Other
|
||||
|
||||
- type: dropdown
|
||||
id: awx-install-method
|
||||
@@ -57,9 +59,8 @@ body:
|
||||
- minikube
|
||||
- openshift
|
||||
- minishift
|
||||
- docker on linux
|
||||
- docker for mac
|
||||
- boot2docker
|
||||
- docker development environment
|
||||
- N/A
|
||||
validations:
|
||||
required: true
|
||||
|
||||
|
||||
.github/ISSUE_TEMPLATE/config.yml (new file, 12 lines, vendored)
@@ -0,0 +1,12 @@
---
blank_issues_enabled: true
contact_links:
  - name: For debugging help or technical support
    url: https://github.com/ansible/awx#get-involved
    about: For general debugging or technical support please see the Get Involved section of our readme.
  - name: 📝 Ansible Code of Conduct
    url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
    about: AWX uses the Ansible Code of Conduct; ❤ Be nice to other members of the community. ☮ Behave.
  - name: 💼 For Enterprise
    url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser
    about: Red Hat offers support for the Ansible Automation Platform
.github/ISSUE_TEMPLATE/feature_request.md (17 changed lines, vendored)
@@ -1,17 +0,0 @@
---
name: "✨ Feature request"
about: Suggest an idea for this project

---
<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

- http://web.libera.chat/?channels=#ansible-awx
- https://groups.google.com/forum/#!forum/awx-project

We have to limit this because of limited volunteer time to respond to issues! -->

##### ISSUE TYPE
- Feature Idea

##### SUMMARY
<!-- Briefly describe the problem or desired enhancement. -->
.github/ISSUE_TEMPLATE/feature_request.yml (new file, 42 lines, vendored)
@@ -0,0 +1,42 @@
|
||||
---
|
||||
name: ✨ Feature request
|
||||
description: Suggest an idea for this project
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Feature Request issues are for **feature requests** only.
|
||||
For debugging help or technical support, please see the [Get Involved section of our README](https://github.com/ansible/awx#get-involved)
|
||||
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: Please confirm the following
|
||||
options:
|
||||
- label: I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
|
||||
required: true
|
||||
- label: I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
|
||||
required: true
|
||||
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: summary
|
||||
attributes:
|
||||
label: Feature Summary
|
||||
description: Briefly describe the desired enhancement.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: checkboxes
|
||||
id: components
|
||||
attributes:
|
||||
label: Select the relevant components
|
||||
options:
|
||||
- label: UI
|
||||
- label: API
|
||||
- label: Docs
|
||||
- label: Collection
|
||||
- label: CLI
|
||||
- label: Other
|
||||
|
||||
@@ -1,9 +0,0 @@
---
name: "\U0001F525 Security bug report"
about: How to report security vulnerabilities

---

For all security related bugs, email security@ansible.com instead of using this issue tracker and you will receive a prompt response.

For more information on the Ansible community's practices regarding responsible disclosure, see https://www.ansible.com/security
.github/LABEL_MAP.md (9 changed lines, vendored)
@@ -1,9 +0,0 @@
Bug Report: type:bug
Bugfix Pull Request: type:bug
Feature Request: type:enhancement
Feature Pull Request: type:enhancement
UI: component:ui
API: component:api
Installer: component:installer
Docs Pull Request: component:docs
Documentation: component:docs
.github/PULL_REQUEST_TEMPLATE.md (17 changed lines, vendored)
@@ -1,11 +1,3 @@
|
||||
<!--- changelog-entry
|
||||
# Fill in 'msg' below to have an entry automatically added to the next release changelog.
|
||||
# Leaving 'msg' blank will not generate a changelog entry for this PR.
|
||||
# Please ensure this is a simple (and readable) one-line string.
|
||||
---
|
||||
msg: ""
|
||||
-->
|
||||
|
||||
##### SUMMARY
|
||||
<!--- Describe the change, including rationale and design decisions -->
|
||||
|
||||
@@ -17,15 +9,18 @@ the change does.
|
||||
|
||||
##### ISSUE TYPE
|
||||
<!--- Pick one below and delete the rest: -->
|
||||
- Feature Pull Request
|
||||
- Bugfix Pull Request
|
||||
- Docs Pull Request
|
||||
- Breaking Change
|
||||
- New or Enhanced Feature
|
||||
- Bug, Docs Fix or other nominal change
|
||||
|
||||
##### COMPONENT NAME
|
||||
<!--- Name of the module/plugin/module/task -->
|
||||
- API
|
||||
- UI
|
||||
- Collection
|
||||
- CLI
|
||||
- Docs
|
||||
- Other
|
||||
|
||||
##### AWX VERSION
|
||||
<!--- Paste verbatim output from `make VERSION` between quotes below -->
|
||||
|
||||
.github/dependabot.yml (new file, 19 lines, vendored)
@@ -0,0 +1,19 @@
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/awx/ui"
    schedule:
      interval: "monthly"
    open-pull-requests-limit: 5
    allow:
      - dependency-type: "production"
    reviewers:
      - "AlexSCorey"
      - "keithjgrant"
      - "kialam"
      - "mabashian"
      - "marshmalien"
    labels:
      - "component:ui"
      - "dependencies"
    target-branch: "devel"
.github/issue_labeler.yml (8 changed lines, vendored)
@@ -1,12 +1,16 @@
needs_triage:
  - '.*'
"type:bug":
  - "Please confirm the following"
  - "Bug Summary"
"type:enhancement":
  - "Feature Idea"
  - "Feature Summary"
"component:ui":
  - "\\[X\\] UI"
"component:api":
  - "\\[X\\] API"
"component:docs":
  - "\\[X\\] Docs"
"component:awx_collection":
  - "\\[X\\] Collection"
"component:cli":
  - "\\[X\\] awxkit"
.github/pr_labeler.yml (17 changed lines, vendored)
@@ -1,14 +1,19 @@
|
||||
"component:api":
|
||||
- any: ['awx/**/*', '!awx/ui/*']
|
||||
- any: ["awx/**/*", "!awx/ui/**"]
|
||||
|
||||
"component:ui":
|
||||
- any: ['awx/ui/**/*']
|
||||
- any: ["awx/ui/**/*"]
|
||||
|
||||
"component:docs":
|
||||
- any: ['docs/**/*']
|
||||
- any: ["docs/**/*"]
|
||||
|
||||
"component:cli":
|
||||
- any: ['awxkit/**/*']
|
||||
- any: ["awxkit/**/*"]
|
||||
|
||||
"component:collection":
|
||||
- any: ['awx_collection/**/*']
|
||||
"component:awx_collection":
|
||||
- any: ["awx_collection/**/*"]
|
||||
|
||||
"dependencies":
|
||||
- any: ["awx/ui/package.json"]
|
||||
- any: ["awx/requirements/*.txt"]
|
||||
- any: ["awx/requirements/requirements.in"]
|
||||
|
||||
.github/triage_replies.md (new file, 114 lines, vendored)
@@ -0,0 +1,114 @@
|
||||
## General
|
||||
- For the roundup of all the different mailing lists available from AWX, Ansible, and beyond visit: https://docs.ansible.com/ansible/latest/community/communication.html
|
||||
- Hello, we think your question is answered in our FAQ. Does this: https://www.ansible.com/products/awx-project/faq cover your question?
|
||||
- You can find the latest documentation here: https://docs.ansible.com/automation-controller/latest/html/userguide/index.html
|
||||
|
||||
|
||||
|
||||
## PRs/Issues
|
||||
|
||||
### Visit our mailing list
|
||||
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information on ways to connect with us.
|
||||
|
||||
### Denied Submission
|
||||
|
||||
- Hi! \
|
||||
\
|
||||
Thanks very much for your submission to AWX. It means a lot to us that you have taken time to contribute. \
|
||||
\
|
||||
At this time we do not want to merge this PR. Our reasons for this are: \
|
||||
\
|
||||
(A) INSERT ITEM HERE \
|
||||
\
|
||||
Please know that we are always up for discussion but this project is very active. Because of this, we're unlikely to see comments made on closed PRs, and we lock them after some time. If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: \
|
||||
\
|
||||
https://github.com/ansible/awx/#get-involved \
|
||||
\
|
||||
In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it is not always necessary. \
|
||||
\
|
||||
Thank you once again for this and your interest in AWX!
|
||||
|
||||
|
||||
### No Progress Issue
|
||||
- Hi! \
|
||||
\
|
||||
Thank you very much for this issue. It means a lot to us that you have taken time to contribute by opening this report. \
|
||||
\
|
||||
On this issue, there were comments added but it has been some time since then without response. At this time we are closing this issue. If you get time to address the comments we can reopen the issue if you can contact us by using any of the communication methods listed in the page below: \
|
||||
\
|
||||
https://github.com/ansible/awx/#get-involved \
|
||||
\
|
||||
Thank you once again for this and your interest in AWX!
|
||||
|
||||
|
||||
### No Progress PR
|
||||
- Hi! \
|
||||
\
|
||||
Thank you very much for your submission to AWX. It means a lot to us that you have taken time to contribute. \
|
||||
\
|
||||
On this PR, changes were requested but it has been some time since then. We think this PR has merit but without the requested changes we are unable to merge it. At this time we are closing your PR. If you get time to address the changes you are welcome to open another PR or we can reopen this PR upon request if you contact us by using any of the communication methods listed in the page below: \
|
||||
\
|
||||
https://github.com/ansible/awx/#get-involved \
|
||||
\
|
||||
Thank you once again for this and your interest in AWX!
|
||||
|
||||
|
||||
|
||||
|
||||
## Common
|
||||
|
||||
### Give us more info
|
||||
- Hello, we'd love to help, but we need a little more information about the problem you're having. Screenshots, log outputs, or any reproducers would be very helpful.
|
||||
|
||||
### Code of Conduct
|
||||
- Hello. Please keep in mind that Ansible adheres to a Code of Conduct in its community spaces. The spirit of the code of conduct is to be kind, and this is your friendly reminder to be so. Please see the full code of conduct here if you have questions: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html
|
||||
|
||||
### EE Contents / Community General
|
||||
- Hello. The awx-ee contains the collections and dependencies needed for supported AWX features to function. Anything beyond that (like the community.general package) will require you to build your own EE. For information on how to do that, see https://ansible-builder.readthedocs.io/en/stable/ \
|
||||
\
|
||||
The Ansible Community is looking at building an EE that corresponds to all of the collections inside the ansible package. That may help you if and when it happens; see https://github.com/ansible-community/community-topics/issues/31 for details.
|
||||
|
||||
|
||||
|
||||
## Mailing List Triage
|
||||
|
||||
### Create an issue
|
||||
- Hello, thanks for reaching out on list. We think this merits an issue on our Github, https://github.com/ansible/awx/issues. If you could open an issue up on Github it will get tagged and integrated into our planning and workflow. All future work will be tracked there. Issues should include as much information as possible, including screenshots, log outputs, or any reproducers.
|
||||
|
||||
### Create a Pull Request
|
||||
- Hello, we think your idea is good! Please consider contributing a PR for this following our contributing guidelines: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
|
||||
|
||||
### Receptor
|
||||
- You can find the receptor docs here: https://receptor.readthedocs.io/en/latest/
|
||||
- Hello, your issue seems related to receptor. Could you please open an issue in the receptor repository? https://github.com/ansible/receptor. Thanks!
|
||||
|
||||
### Ansible Engine not AWX
|
||||
- Hello, your question seems to be about Ansible development, not about AWX. Try asking on the Ansible-devel specific mailing list: https://groups.google.com/g/ansible-devel
|
||||
- Hello, your question seems to be about using Ansible, not about AWX. https://groups.google.com/g/ansible-project is the best place to visit for user questions about Ansible. Thanks!
|
||||
|
||||
### Ansible Galaxy not AWX
|
||||
- Hey there. That sounds like an FAQ question. Did this: https://www.ansible.com/products/awx-project/faq cover your question?
|
||||
|
||||
### Contributing Guidelines
|
||||
- AWX: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
|
||||
- AWX-Operator: https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md
|
||||
|
||||
### Oracle AWX
|
||||
We'd be happy to help if you can reproduce this with AWX since we do not have Oracle's Linux Automation Manager. If you need help with this specific version of Oracle's Linux Automation Manager, you will need to contact Oracle for support.
|
||||
|
||||
### AWX Release
|
||||
Subject: Announcing AWX Xa.Ya.za and AWX-Operator Xb.Yb.zb
|
||||
|
||||
- Hi all, \
|
||||
\
|
||||
We're happy to announce that the next release of AWX, version <b>`Xa.Ya.za`</b> is now available! \
|
||||
In addition AWX Operator version <b>`Xb.Yb.zb`</b> has also been released! \
|
||||
\
|
||||
Please see the releases pages for more details: \
|
||||
AWX: https://github.com/ansible/awx/releases/tag/Xa.Ya.za \
|
||||
Operator: https://github.com/ansible/awx-operator/releases/tag/Xb.Yb.zb \
|
||||
\
|
||||
The AWX team.
|
||||
|
||||
## Try latest version
|
||||
- Hello, this issue pertains to an older version of AWX. Try upgrading to the latest version and let us know if that resolves your issue.
|
||||
.github/workflows/ci.yml (9 changed lines, vendored)
@@ -111,6 +111,15 @@ jobs:
          repository: ansible/awx-operator
          path: awx-operator

      - name: Get python version from Makefile
        working-directory: awx
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.py_version }}

      - name: Install playbook dependencies
        run: |
          python3 -m pip install docker
.github/workflows/pr_body_check.yml (new file, 45 lines, vendored)
@@ -0,0 +1,45 @@
|
||||
---
|
||||
name: PR Check
|
||||
env:
|
||||
BRANCH: ${{ github.base_ref || 'devel' }}
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, edited, reopened, synchronize]
|
||||
jobs:
|
||||
pr-check:
|
||||
name: Scan PR description for semantic versioning keywords
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
steps:
|
||||
- name: Write PR body to a file
|
||||
run: |
|
||||
cat >> pr.body << __SOME_RANDOM_PR_EOF__
|
||||
${{ github.event.pull_request.body }}
|
||||
__SOME_RANDOM_PR_EOF__
|
||||
|
||||
- name: Display the received body for troubleshooting
|
||||
run: cat pr.body
|
||||
|
||||
# We want to write these out individually just in case the options were joined on a single line
|
||||
- name: Check for each of the lines
|
||||
run: |
|
||||
grep "Bug, Docs Fix or other nominal change" pr.body > Z
|
||||
grep "New or Enhanced Feature" pr.body > Y
|
||||
grep "Breaking Change" pr.body > X
|
||||
exit 0
|
||||
# We exit 0 and set the shell to prevent the returns from the greps from failing this step
|
||||
# See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
|
||||
shell: bash {0}
|
||||
|
||||
- name: Check for exactly one item
|
||||
run: |
|
||||
if [ $(cat X Y Z | wc -l) != 1 ] ; then
|
||||
echo "The PR body must contain exactly one of [ 'Bug, Docs Fix or other nominal change', 'New or Enhanced Feature', 'Breaking Change' ]"
|
||||
echo "We counted $(cat X Y Z | wc -l)"
|
||||
echo "See the default PR body for examples"
|
||||
exit 255;
|
||||
else
|
||||
exit 0;
|
||||
fi
|
||||
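The check above amounts to counting how many of the three release-note keywords appear in the PR description and requiring exactly one. A rough local equivalent, sketched in Python and assuming the PR description has been saved to a file named pr.body, could look like this:

```python
# Minimal sketch of the same keyword check run locally; pr.body is an assumed local file
# containing the pull request description.
KEYWORDS = [
    "Bug, Docs Fix or other nominal change",
    "New or Enhanced Feature",
    "Breaking Change",
]

with open("pr.body", encoding="utf-8") as f:
    body = f.read()

# Count keyword occurrences, mirroring the one-grep-per-pattern approach in the workflow.
hits = sum(body.count(keyword) for keyword in KEYWORDS)

if hits != 1:
    raise SystemExit(f"PR body must contain exactly one of {KEYWORDS}; found {hits}")
```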
.github/workflows/promote.yml (6 changed lines, vendored)
@@ -21,7 +21,7 @@ jobs:

      - name: Install dependencies
        run: |
          python${{ env.py_version }} -m pip install wheel twine
          python${{ env.py_version }} -m pip install wheel twine setuptools-scm

      - name: Set official collection namespace
        run: echo collection_namespace=awx >> $GITHUB_ENV
@@ -33,7 +33,7 @@ jobs:

      - name: Build collection and publish to galaxy
        run: |
          COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
          COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
          ansible-galaxy collection publish \
            --token=${{ secrets.GALAXY_TOKEN }} \
            awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz
@@ -70,4 +70,4 @@ jobs:
          docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
          docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
          docker push quay.io/${{ github.repository }}:latest
.github/workflows/stage.yml (13 changed lines, vendored)
@@ -100,23 +100,10 @@ jobs:
|
||||
AWX_TEST_IMAGE: ${{ github.repository }}
|
||||
AWX_TEST_VERSION: ${{ github.event.inputs.version }}
|
||||
|
||||
- name: Generate changelog
|
||||
uses: shanemcd/simple-changelog-generator@v1
|
||||
id: changelog
|
||||
with:
|
||||
repo: "${{ github.repository }}"
|
||||
|
||||
- name: Write changelog to file
|
||||
run: |
|
||||
cat << 'EOF' > /tmp/awx-changelog
|
||||
${{ steps.changelog.outputs.changelog }}
|
||||
EOF
|
||||
|
||||
- name: Create draft release for AWX
|
||||
working-directory: awx
|
||||
run: |
|
||||
ansible-playbook -v tools/ansible/stage.yml \
|
||||
-e changelog_path=/tmp/awx-changelog \
|
||||
-e repo=${{ github.repository }} \
|
||||
-e awx_image=ghcr.io/${{ github.repository }} \
|
||||
-e version=${{ github.event.inputs.version }} \
|
||||
|
||||
.gitignore (4 changed lines, vendored)
@@ -38,7 +38,6 @@ awx/ui/build
awx/ui/.env.local
awx/ui/instrumented
rsyslog.pid
tools/prometheus/data
tools/docker-compose/ansible/awx_dump.sql
tools/docker-compose/Dockerfile
tools/docker-compose/_build
@@ -154,6 +153,9 @@ use_dev_supervisor.txt
/sanity/
/awx_collection_build/

# Setup for metrics gathering
tools/prometheus/prometheus.yml

.idea/*
*.unison.tmp
*.#
@@ -19,16 +19,17 @@ Have questions about this document or anything not covered here? Come chat with
|
||||
- [Purging containers and images](#purging-containers-and-images)
|
||||
- [Pre commit hooks](#pre-commit-hooks)
|
||||
- [What should I work on?](#what-should-i-work-on)
|
||||
- [Translations](#translations)
|
||||
- [Submitting Pull Requests](#submitting-pull-requests)
|
||||
- [PR Checks run by Zuul](#pr-checks-run-by-zuul)
|
||||
- [Reporting Issues](#reporting-issues)
|
||||
- [Getting Help](#getting-help)
|
||||
|
||||
## Things to know prior to submitting code
|
||||
|
||||
- All code submissions are done through pull requests against the `devel` branch.
|
||||
- You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
|
||||
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
|
||||
- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt
|
||||
- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see [git push docs](https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt).
|
||||
- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.libera.chat, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
|
||||
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
@@ -42,8 +43,7 @@ The AWX development environment workflow and toolchain uses Docker and the docke
|
||||
|
||||
Prior to starting the development services, you'll need `docker` and `docker-compose`. On Linux, you can generally find these in your distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases.
|
||||
|
||||
For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows)
|
||||
respectively.
|
||||
For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows) respectively.
|
||||
|
||||
For Linux platforms, refer to the following from Docker:
|
||||
|
||||
@@ -79,17 +79,13 @@ See the [README.md](./tools/docker-compose/README.md) for docs on how to build t
|
||||
|
||||
### Building API Documentation
|
||||
|
||||
AWX includes support for building [Swagger/OpenAPI
|
||||
documentation](https://swagger.io). To build the documentation locally, run:
|
||||
AWX includes support for building [Swagger/OpenAPI documentation](https://swagger.io). To build the documentation locally, run:
|
||||
|
||||
```bash
|
||||
(container)/awx_devel$ make swagger
|
||||
```
|
||||
|
||||
This will write a file named `swagger.json` that contains the API specification
|
||||
in OpenAPI format. A variety of online tools are available for translating
|
||||
this data into more consumable formats (such as HTML). http://editor.swagger.io
|
||||
is an example of one such service.
|
||||
This will write a file named `swagger.json` that contains the API specification in OpenAPI format. A variety of online tools are available for translating this data into more consumable formats (such as HTML). http://editor.swagger.io is an example of one such service.
|
||||
|
||||
### Accessing the AWX web interface
|
||||
|
||||
@@ -115,20 +111,30 @@ While you can use environment variables to skip the pre-commit hooks GitHub will
|
||||
|
||||
## What should I work on?
|
||||
|
||||
We have a ["good first issue" label](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) we put on some issues that might be a good starting point for new contributors.
|
||||
|
||||
Fixing bugs and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
|
||||
|
||||
For feature work, take a look at the current [Enhancements](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3Atype%3Aenhancement).
|
||||
|
||||
If it has someone assigned to it then that person is the person responsible for working the enhancement. If you feel like you could contribute then reach out to that person.
|
||||
|
||||
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](./docs/debugging/).
|
||||
**NOTES**
|
||||
|
||||
> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whoever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
|
||||
|
||||
**NOTE**
|
||||
|
||||
> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
|
||||
**NOTE**
|
||||
|
||||
> If you're planning to develop features or fixes for the UI, please review the [UI Developer doc](./awx/ui/README.md).
|
||||
|
||||
### Translations
|
||||
|
||||
At this time we do not accept PRs for adding additional language translations as we have an automated process for generating our translations. This is because translations require constant care as new strings are added and changed in the code base. Because of this, the .po files are overwritten during every translation release cycle. We also can't support a lot of translations on AWX as it's an open source project and each language adds time and cost to maintain. If you would like to see AWX translated into a new language please create an issue and ask others you know to upvote the issue. Our translation team will review the needs of the community and see what they can do around supporting additional languages.
|
||||
|
||||
If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.
|
||||
|
||||
|
||||
## Submitting Pull Requests
|
||||
|
||||
Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) against the `devel` branch.
|
||||
@@ -152,28 +158,14 @@ We like to keep our commit history clean, and will require resubmission of pull
|
||||
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
|
||||
|
||||
## PR Checks run by Zuul
|
||||
|
||||
Zuul jobs for awx are defined in the [zuul-jobs](https://github.com/ansible/zuul-jobs) repo.
|
||||
|
||||
Zuul runs the following checks that must pass:
|
||||
|
||||
1. `tox-awx-api-lint`
|
||||
2. `tox-awx-ui-lint`
|
||||
3. `tox-awx-api`
|
||||
4. `tox-awx-ui`
|
||||
5. `tox-awx-swagger`
|
||||
|
||||
Zuul runs the following checks that are non-voting (can not pass but serve to inform PR reviewers):
|
||||
|
||||
1. `tox-awx-detect-schema-change`
|
||||
This check generates the schema and diffs it against a reference copy of the `devel` version of the schema.
|
||||
Reviewers should inspect the `job-output.txt.gz` related to the check if there is a failure (grep for `diff -u -b` to find the beginning of the diff).
|
||||
If the schema change is expected and makes sense in relation to the changes made by the PR, then you are good to go!
|
||||
If not, the schema changes should be fixed, but this decision must be enforced by reviewers.
|
||||
When your PR is initially submitted the checks will not be run until a maintainer allows them to be. Once a maintainer has done a quick review of your work, the PR will have the linter and unit tests run against it via GitHub Actions, and the status reported in the PR.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
|
||||
We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issues, we ask that you please view our [Issues guide](./ISSUES.md).
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you require additional assistance, please reach out to us at `#ansible-awx` on irc.libera.chat, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
|
||||
For extra information on debugging tools, see [Debugging](./docs/debugging/).
|
||||
|
||||
Makefile (46 changed lines)
@@ -5,8 +5,8 @@ NPM_BIN ?= npm
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
VERSION := $(shell $(PYTHON) setup.py --version)
|
||||
COLLECTION_VERSION := $(shell $(PYTHON) setup.py --version | cut -d . -f 1-3)
|
||||
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
|
||||
COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
|
||||
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
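The VERSION variables above now shell out to tools/scripts/scm_version.py instead of setup.py --version. That script's contents are not shown in this diff; presumably it simply prints the setuptools-scm derived version so the Makefile can capture it with $(shell ...), along the lines of this hypothetical sketch:

```python
# Hypothetical sketch of tools/scripts/scm_version.py; the real file is not part of this diff.
from setuptools_scm import get_version

if __name__ == '__main__':
    # root points at the repository checkout, two directories up from tools/scripts/.
    print(get_version(root='../..', relative_to=__file__))
```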
@@ -15,6 +15,12 @@ MAIN_NODE_TYPE ?= hybrid
|
||||
KEYCLOAK ?= false
|
||||
# If set to true docker-compose will also start an ldap instance
|
||||
LDAP ?= false
|
||||
# If set to true docker-compose will also start a splunk instance
|
||||
SPLUNK ?= false
|
||||
# If set to true docker-compose will also start a prometheus instance
|
||||
PROMETHEUS ?= false
|
||||
# If set to true docker-compose will also start a grafana instance
|
||||
GRAFANA ?= false
|
||||
|
||||
VENV_BASE ?= /var/lib/awx/venv
|
||||
|
||||
@@ -43,7 +49,7 @@ I18N_FLAG_FILE = .i18n_built
|
||||
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
|
||||
develop refresh adduser migrate dbchange \
|
||||
receiver test test_unit test_coverage coverage_html \
|
||||
dev_build release_build sdist \
|
||||
sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION PYTHON_VERSION docker-compose-sources \
|
||||
.git/hooks/pre-commit
|
||||
@@ -177,7 +183,7 @@ collectstatic:
|
||||
fi; \
|
||||
mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
|
||||
|
||||
UWSGI_DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:awx-dispatcher tower-processes:awx-receiver
|
||||
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
|
||||
|
||||
uwsgi: collectstatic
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -192,12 +198,13 @@ uwsgi: collectstatic
|
||||
--processes=5 \
|
||||
--harakiri=120 --master \
|
||||
--no-orphans \
|
||||
--py-autoreload 1 \
|
||||
--max-requests=1000 \
|
||||
--stats /tmp/stats.socket \
|
||||
--lazy-apps \
|
||||
--logformat "%(addr) %(method) %(uri) - %(proto) %(status)" \
|
||||
--hook-accepting1="exec: $(UWSGI_DEV_RELOAD_COMMAND)"
|
||||
--logformat "%(addr) %(method) %(uri) - %(proto) %(status)"
|
||||
|
||||
awx-autoreload:
|
||||
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
|
||||
|
||||
daphne:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -266,7 +273,7 @@ api-lint:
|
||||
yamllint -s .
|
||||
|
||||
awx-link:
|
||||
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
|
||||
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
|
||||
cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
@@ -285,6 +292,7 @@ COLLECTION_TEST_TARGET ?=
|
||||
COLLECTION_PACKAGE ?= awx
|
||||
COLLECTION_NAMESPACE ?= awx
|
||||
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
|
||||
COLLECTION_TEMPLATE_VERSION ?= false
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
@@ -312,7 +320,7 @@ awx_collection_build: $(shell find awx_collection -type f)
|
||||
-e collection_package=$(COLLECTION_PACKAGE) \
|
||||
-e collection_namespace=$(COLLECTION_NAMESPACE) \
|
||||
-e collection_version=$(COLLECTION_VERSION) \
|
||||
-e '{"awx_template_version":false}'
|
||||
-e '{"awx_template_version": $(COLLECTION_TEMPLATE_VERSION)}'
|
||||
ansible-galaxy collection build awx_collection_build --force --output-path=awx_collection_build
|
||||
|
||||
build_collection: awx_collection_build
|
||||
@@ -416,21 +424,13 @@ ui-test-general:
|
||||
$(NPM_BIN) run --prefix awx/ui pretest
|
||||
$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand
|
||||
|
||||
# Build a pip-installable package into dist/ with a timestamped version number.
|
||||
dev_build:
|
||||
$(PYTHON) setup.py dev_build
|
||||
|
||||
# Build a pip-installable package into dist/ with the release version number.
|
||||
release_build:
|
||||
$(PYTHON) setup.py release_build
|
||||
|
||||
HEADLESS ?= no
|
||||
ifeq ($(HEADLESS), yes)
|
||||
dist/$(SDIST_TAR_FILE):
|
||||
else
|
||||
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
|
||||
endif
|
||||
$(PYTHON) setup.py $(SDIST_COMMAND)
|
||||
$(PYTHON) -m build -s
|
||||
ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
|
||||
|
||||
sdist: dist/$(SDIST_TAR_FILE)
|
||||
@@ -465,7 +465,10 @@ docker-compose-sources: .git/hooks/pre-commit
|
||||
-e execution_node_count=$(EXECUTION_NODE_COUNT) \
|
||||
-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
|
||||
-e enable_keycloak=$(KEYCLOAK) \
|
||||
-e enable_ldap=$(LDAP)
|
||||
-e enable_ldap=$(LDAP) \
|
||||
-e enable_splunk=$(SPLUNK) \
|
||||
-e enable_prometheus=$(PROMETHEUS) \
|
||||
-e enable_grafana=$(GRAFANA)
|
||||
|
||||
|
||||
docker-compose: awx/projects docker-compose-sources
|
||||
@@ -513,7 +516,7 @@ docker-clean:
|
||||
fi
|
||||
|
||||
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
|
||||
docker volume rm tools_awx_db
|
||||
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
|
||||
|
||||
docker-refresh: docker-clean docker-compose
|
||||
|
||||
@@ -524,9 +527,6 @@ docker-compose-elk: awx/projects docker-compose-sources
|
||||
docker-compose-cluster-elk: awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
prometheus:
|
||||
docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml
|
||||
|
||||
docker-compose-container-group:
|
||||
MINIKUBE_CONTAINER_GROUP=true make docker-compose
|
||||
|
||||
|
||||
@@ -6,9 +6,40 @@ import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from pkg_resources import get_distribution
|
||||
|
||||
__version__ = get_distribution('awx').version
|
||||
def get_version():
|
||||
version_from_file = get_version_from_file()
|
||||
if version_from_file:
|
||||
return version_from_file
|
||||
else:
|
||||
from setuptools_scm import get_version
|
||||
|
||||
version = get_version(root='..', relative_to=__file__)
|
||||
return version
|
||||
|
||||
|
||||
def get_version_from_file():
|
||||
vf = version_file()
|
||||
if vf:
|
||||
with open(vf, 'r') as file:
|
||||
return file.read().strip()
|
||||
|
||||
|
||||
def version_file():
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
version_file = os.path.join(current_dir, '..', 'VERSION')
|
||||
|
||||
if os.path.exists(version_file):
|
||||
return version_file
|
||||
|
||||
|
||||
try:
|
||||
import pkg_resources
|
||||
|
||||
__version__ = pkg_resources.get_distribution('awx').version
|
||||
except pkg_resources.DistributionNotFound:
|
||||
__version__ = get_version()
|
||||
|
||||
__all__ = ['__version__']
|
||||
|
||||
|
||||
@@ -21,7 +52,6 @@ try:
|
||||
except ImportError: # pragma: no cover
|
||||
MODE = 'production'
|
||||
|
||||
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
@@ -78,9 +108,10 @@ def oauth2_getattribute(self, attr):
|
||||
# Custom method to override
|
||||
# oauth2_provider.settings.OAuth2ProviderSettings.__getattribute__
|
||||
from django.conf import settings
|
||||
from oauth2_provider.settings import DEFAULTS
|
||||
|
||||
val = None
|
||||
if 'migrate' not in sys.argv:
|
||||
if (isinstance(attr, str)) and (attr in DEFAULTS) and (not attr.startswith('_')):
|
||||
# certain Django OAuth Toolkit migrations actually reference
|
||||
# setting lookups for references to model classes (e.g.,
|
||||
# oauth2_settings.REFRESH_TOKEN_MODEL)
|
||||
|
||||
@@ -232,6 +232,9 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
re.compile(value)
|
||||
except re.error as e:
|
||||
raise ValueError(e.args[0])
|
||||
elif new_lookup.endswith('__iexact'):
|
||||
if not isinstance(field, (CharField, TextField)):
|
||||
raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
|
||||
elif new_lookup.endswith('__search'):
|
||||
related_model = getattr(field, 'related_model', None)
|
||||
if not related_model:
|
||||
@@ -258,8 +261,8 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
search_filters = {}
|
||||
needs_distinct = False
|
||||
# Can only have two values: 'AND', 'OR'
|
||||
# If 'AND' is used, an iterm must satisfy all condition to show up in the results.
|
||||
# If 'OR' is used, an item just need to satisfy one condition to appear in results.
|
||||
# If 'AND' is used, an item must satisfy all conditions to show up in the results.
|
||||
# If 'OR' is used, an item just needs to satisfy one condition to appear in results.
|
||||
search_filter_relation = 'OR'
|
||||
for key, values in request.query_params.lists():
|
||||
if key in self.RESERVED_NAMES:
|
||||
@@ -398,11 +401,11 @@ class OrderByBackend(BaseFilterBackend):
|
||||
order_by = value.split(',')
|
||||
else:
|
||||
order_by = (value,)
|
||||
if order_by is None:
|
||||
order_by = self.get_default_ordering(view)
|
||||
default_order_by = self.get_default_ordering(view)
|
||||
# glue the order by and default order by together so that the default is the backup option
|
||||
order_by = list(order_by or []) + list(default_order_by or [])
|
||||
if order_by:
|
||||
order_by = self._validate_ordering_fields(queryset.model, order_by)
|
||||
|
||||
# Special handling of the type field for ordering. In this
|
||||
# case, we're not sorting exactly on the type field, but
|
||||
# given the limited number of views with multiple types,
|
||||
|
||||
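The OrderByBackend hunk above changes the default ordering from a fallback used only when no ordering was requested into a backup sort key that is always appended after the requested ordering. A small illustration of that one expression, with made-up field names:

```python
# Simplified illustration of the ordering fallback in the hunk above; field names are made up.
order_by = ('-created',)      # ordering parsed from the request; may be empty
default_order_by = ['id']     # the view's default ordering

# Same expression as in the diff: requested ordering first, default appended as the backup.
order_by = list(order_by or []) + list(default_order_by or [])

print(order_by)  # ['-created', 'id']: 'id' only breaks ties between equal 'created' values
```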
@@ -638,6 +638,11 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
|
||||
# attaching/detaching them from the parent.
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
"Override in subclasses to do efficient validation of attaching"
|
||||
return None
|
||||
|
||||
def is_valid_removal(self, parent, sub):
|
||||
"Same as is_valid_relation but called on disassociation"
|
||||
return None
|
||||
|
||||
def get_description_context(self):
|
||||
@@ -722,6 +727,11 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
|
||||
if not request.user.can_access(self.parent_model, 'unattach', parent, sub, self.relationship, request.data):
|
||||
raise PermissionDenied()
|
||||
|
||||
# Verify that removing the relationship is valid.
|
||||
unattach_errors = self.is_valid_removal(parent, sub)
|
||||
if unattach_errors is not None:
|
||||
return Response(unattach_errors, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
if parent_key:
|
||||
sub.delete()
|
||||
else:
|
||||
|
||||
@@ -29,7 +29,6 @@ from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.encoding import force_str
|
||||
from django.utils.text import capfirst
|
||||
from django.utils.timezone import now
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import ValidationError, PermissionDenied
|
||||
@@ -113,6 +112,7 @@ from awx.main.utils import (
|
||||
)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.named_url_graph import reset_counters
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
|
||||
from awx.main.validators import vars_validate_or_raise
|
||||
@@ -1606,7 +1606,6 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
|
||||
|
||||
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
|
||||
|
||||
host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
|
||||
playbook_counts = serializers.SerializerMethodField(help_text=_('A count of all plays and tasks for the job run.'))
|
||||
|
||||
class Meta:
|
||||
@@ -1621,14 +1620,6 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
|
||||
|
||||
return data
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except ProjectUpdateEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
|
||||
class Meta:
|
||||
@@ -2081,7 +2072,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
|
||||
class Meta:
|
||||
model = InventorySource
|
||||
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout', 'source_project', 'update_on_project_update') + (
|
||||
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout', 'source_project') + (
|
||||
'last_update_failed',
|
||||
'last_updated',
|
||||
) # Backwards compatibility.
|
||||
@@ -2144,11 +2135,6 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
|
||||
return value
|
||||
|
||||
def validate_update_on_project_update(self, value):
|
||||
if value and self.instance and self.instance.schedules.exists():
|
||||
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
|
||||
return value
|
||||
|
||||
def validate_inventory(self, value):
|
||||
if value and value.kind == 'smart':
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
|
||||
@@ -2199,7 +2185,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
|
||||
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
|
||||
else:
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'update_on_project_update']))
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path']))
|
||||
if redundant_scm_fields:
|
||||
raise serializers.ValidationError({"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))})
|
||||
|
||||
@@ -2244,7 +2230,6 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
|
||||
'source_project_update',
|
||||
'custom_virtualenv',
|
||||
'instance_group',
|
||||
'-controller_node',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -2319,7 +2304,6 @@ class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
|
||||
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
|
||||
class Meta:
|
||||
model = InventoryUpdate
|
||||
fields = ('*', '-controller_node') # field removal undone by UJ serializer
|
||||
|
||||
|
||||
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
|
||||
@@ -2672,6 +2656,13 @@ class CredentialSerializer(BaseSerializer):
|
||||
|
||||
return credential_type
|
||||
|
||||
def validate_inputs(self, inputs):
|
||||
if self.instance and self.instance.credential_type.kind == "vault":
|
||||
if 'vault_id' in inputs and inputs['vault_id'] != self.instance.inputs['vault_id']:
|
||||
raise ValidationError(_('Vault IDs cannot be changed once they have been created.'))
|
||||
|
||||
return inputs
|
||||
|
||||
|
||||
class CredentialSerializerCreate(CredentialSerializer):
|
||||
|
||||
@@ -3106,7 +3097,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
|
||||
class JobDetailSerializer(JobSerializer):
|
||||
|
||||
host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
|
||||
playbook_counts = serializers.SerializerMethodField(help_text=_('A count of all plays and tasks for the job run.'))
|
||||
custom_virtualenv = serializers.ReadOnlyField()
|
||||
|
||||
@@ -3122,14 +3112,6 @@ class JobDetailSerializer(JobSerializer):
|
||||
|
||||
return data
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.get_event_queryset().only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except JobEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
class JobCancelSerializer(BaseSerializer):
|
||||
|
||||
@@ -3318,21 +3300,10 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
|
||||
|
||||
|
||||
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
|
||||
|
||||
host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
|
||||
|
||||
class Meta:
|
||||
model = AdHocCommand
|
||||
fields = ('*', 'host_status_counts')
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except AdHocCommandEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
|
||||
|
||||
@@ -4501,7 +4472,10 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
body = messages[event].get('body', {})
|
||||
if body:
|
||||
try:
|
||||
potential_body = json.loads(body)
|
||||
rendered_body = (
|
||||
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
|
||||
)
|
||||
potential_body = json.loads(rendered_body)
|
||||
if not isinstance(potential_body, dict):
|
||||
error_list.append(
|
||||
_("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
|
||||
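For context on the hunk above: the webhook body is now rendered through a sandboxed Jinja2 environment before being parsed, so templated values no longer trip the JSON check. A minimal sketch of that flow, with a hypothetical body and context (not AWX code):

```python
# Minimal sketch; body and context are hypothetical stand-ins for a
# notification template and the job context it is rendered against.
import json

from jinja2 import sandbox

body = '{"id": {{ job.id }}, "status": "{{ job.status }}"}'
context = {"job": {"id": 42, "status": "successful"}}

# json.loads(body) would fail here because of the Jinja placeholders;
# rendering first yields '{"id": 42, "status": "successful"}'.
rendered = sandbox.ImmutableSandboxedEnvironment().from_string(body).render(context)
assert isinstance(json.loads(rendered), dict)
```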
@@ -4644,69 +4618,74 @@ class SchedulePreviewSerializer(BaseSerializer):
|
||||
|
||||
# We reject rrules if:
|
||||
# - DTSTART is not included
|
||||
# - INTERVAL is not included
|
||||
# - SECONDLY is used
|
||||
# - TZID is used
|
||||
# - BYDAY prefixed with a number (MO is good but not 20MO)
|
||||
# - BYYEARDAY
|
||||
# - BYWEEKNO
|
||||
# - Multiple DTSTART or RRULE elements
|
||||
# - Can't contain both COUNT and UNTIL
|
||||
# - COUNT > 999
|
||||
# - Multiple DTSTART
|
||||
# - No RRULE is included
|
||||
# - EXDATE or RDATE is included
|
||||
# For any rule in the ruleset:
|
||||
# - INTERVAL is not included
|
||||
# - SECONDLY is used
|
||||
# - BYDAY prefixed with a number (MO is good but not 20MO)
|
||||
# - Can't contain both COUNT and UNTIL
|
||||
# - COUNT > 999
|
||||
def validate_rrule(self, value):
|
||||
rrule_value = value
|
||||
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
|
||||
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
|
||||
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
|
||||
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
|
||||
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
|
||||
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
|
||||
match_multiple_rrule = re.findall(r".*?(RULE\:[^\s]*)", rrule_value)
|
||||
errors = []
|
||||
if not len(match_multiple_dtstart):
|
||||
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
|
||||
errors.append(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
|
||||
if len(match_native_dtstart):
|
||||
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
|
||||
errors.append(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
|
||||
if len(match_multiple_dtstart) > 1:
|
||||
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
|
||||
if not len(match_multiple_rrule):
|
||||
raise serializers.ValidationError(_('RRULE required in rrule.'))
|
||||
if len(match_multiple_rrule) > 1:
|
||||
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
|
||||
if 'interval' not in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
|
||||
if 'secondly' in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('SECONDLY is not supported.'))
|
||||
if re.match(multi_by_month_day, rrule_value):
|
||||
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
|
||||
if re.match(multi_by_month, rrule_value):
|
||||
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
|
||||
if re.match(by_day_with_numeric_prefix, rrule_value):
|
||||
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
|
||||
if 'byyearday' in rrule_value.lower():
|
||||
raise serializers.ValidationError(_("BYYEARDAY not supported."))
|
||||
if 'byweekno' in rrule_value.lower():
|
||||
raise serializers.ValidationError(_("BYWEEKNO not supported."))
|
||||
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
|
||||
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
|
||||
if match_count:
|
||||
count_val = match_count.groups()[0].strip().split("=")
|
||||
if int(count_val[1]) > 999:
|
||||
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
|
||||
errors.append(_('Multiple DTSTART is not supported.'))
|
||||
if "rrule:" not in rrule_value.lower():
|
||||
errors.append(_('One or more rule required in rrule.'))
|
||||
if "exdate:" in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('EXDATE not allowed in rrule.'))
|
||||
if "rdate:" in rrule_value.lower():
|
||||
raise serializers.ValidationError(_('RDATE not allowed in rrule.'))
|
||||
for a_rule in match_multiple_rrule:
|
||||
if 'interval' not in a_rule.lower():
|
||||
errors.append("{0}: {1}".format(_('INTERVAL required in rrule'), a_rule))
|
||||
elif 'secondly' in a_rule.lower():
|
||||
errors.append("{0}: {1}".format(_('SECONDLY is not supported'), a_rule))
|
||||
if re.match(by_day_with_numeric_prefix, a_rule):
|
||||
errors.append("{0}: {1}".format(_("BYDAY with numeric prefix not supported"), a_rule))
|
||||
if 'COUNT' in a_rule and 'UNTIL' in a_rule:
|
||||
errors.append("{0}: {1}".format(_("RRULE may not contain both COUNT and UNTIL"), a_rule))
|
||||
match_count = re.match(r".*?(COUNT\=[0-9]+)", a_rule)
|
||||
if match_count:
|
||||
count_val = match_count.groups()[0].strip().split("=")
|
||||
if int(count_val[1]) > 999:
|
||||
errors.append("{0}: {1}".format(_("COUNT > 999 is unsupported"), a_rule))
|
||||
|
||||
try:
|
||||
Schedule.rrulestr(rrule_value)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
|
||||
logger.error(traceback.format_exc())
|
||||
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
|
||||
errors.append(_("rrule parsing failed validation: {}").format(e))
|
||||
|
||||
if errors:
|
||||
raise serializers.ValidationError(errors)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
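To make the rules listed in the comment block above concrete, here are a few hypothetical RRULE strings and how this validator would treat them (illustrative only, not part of the diff):

```python
# Should pass the checks above: DTSTART with a timezone, one RRULE,
# INTERVAL present, COUNT <= 999, no UNTIL alongside COUNT.
good = "DTSTART;TZID=America/New_York:20300115T120000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5"

# Each of these collects at least one entry in the errors list:
bad = [
    "RRULE:FREQ=DAILY;INTERVAL=1",                              # missing DTSTART
    "DTSTART:20300115T120000Z RRULE:FREQ=DAILY",                # missing INTERVAL
    "DTSTART:20300115T120000Z RRULE:FREQ=SECONDLY;INTERVAL=1",  # SECONDLY not supported
    "DTSTART:20300115T120000Z "
    "RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5;UNTIL=20310101T000000Z",  # COUNT and UNTIL
]
```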
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
|
||||
timezone = serializers.SerializerMethodField()
|
||||
until = serializers.SerializerMethodField()
|
||||
timezone = serializers.SerializerMethodField(
|
||||
help_text=_(
|
||||
'The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field.'
|
||||
),
|
||||
)
|
||||
until = serializers.SerializerMethodField(
|
||||
help_text=_('The date this schedule will end. This field is computed from the RRULE. If the schedule does not end, an empty string will be returned'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Schedule
|
||||
@@ -4760,13 +4739,6 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
|
||||
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
|
||||
elif type(value) == Project and value.scm_type == '':
|
||||
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
|
||||
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
|
||||
raise serializers.ValidationError(
|
||||
_(
|
||||
'Inventory sources with `update_on_project_update` cannot be scheduled. '
|
||||
'Schedule its source project `{}` instead.'.format(value.source_project.name)
|
||||
)
|
||||
)
|
||||
return value
|
||||
|
||||
def validate(self, attrs):
|
||||
@@ -4873,7 +4845,6 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
|
||||
show_capabilities = ['edit', 'delete']
|
||||
|
||||
committed_capacity = serializers.SerializerMethodField()
|
||||
consumed_capacity = serializers.SerializerMethodField()
|
||||
percent_capacity_remaining = serializers.SerializerMethodField()
|
||||
jobs_running = serializers.IntegerField(
|
||||
@@ -4922,7 +4893,6 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
"created",
|
||||
"modified",
|
||||
"capacity",
|
||||
"committed_capacity",
|
||||
"consumed_capacity",
|
||||
"percent_capacity_remaining",
|
||||
"jobs_running",
|
||||
@@ -5003,30 +4973,29 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
|
||||
return attrs
|
||||
|
||||
def get_capacity_dict(self):
|
||||
def get_ig_mgr(self):
|
||||
# Store capacity values (globally computed) in the context
|
||||
if 'capacity_map' not in self.context:
|
||||
ig_qs = None
|
||||
if 'task_manager_igs' not in self.context:
|
||||
instance_groups_queryset = None
|
||||
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
|
||||
if self.parent: # Is ListView:
|
||||
ig_qs = self.parent.instance
|
||||
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(qs=ig_qs, tasks=jobs_qs, breakdown=True)
|
||||
return self.context['capacity_map']
|
||||
instance_groups_queryset = self.parent.instance
|
||||
|
||||
instances = TaskManagerInstances(jobs_qs)
|
||||
instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)
|
||||
|
||||
self.context['task_manager_igs'] = instance_groups
|
||||
return self.context['task_manager_igs']
|
||||
|
||||
def get_consumed_capacity(self, obj):
|
||||
return self.get_capacity_dict()[obj.name]['running_capacity']
|
||||
|
||||
def get_committed_capacity(self, obj):
|
||||
return self.get_capacity_dict()[obj.name]['committed_capacity']
|
||||
ig_mgr = self.get_ig_mgr()
|
||||
return ig_mgr.get_consumed_capacity(obj.name)
|
||||
|
||||
def get_percent_capacity_remaining(self, obj):
|
||||
if not obj.capacity:
|
||||
return 0.0
|
||||
consumed = self.get_consumed_capacity(obj)
|
||||
if consumed >= obj.capacity:
|
||||
return 0.0
|
||||
else:
|
||||
return float("{0:.2f}".format(((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100))
|
||||
ig_mgr = self.get_ig_mgr()
|
||||
return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))
|
||||
|
||||
def get_instances(self, obj):
|
||||
return obj.instances.count()
|
||||
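A quick numeric check of the percentage formula above, with illustrative capacity numbers:

```python
# Illustrative values only: 20 units of capacity remaining out of 57 total.
capacity, remaining = 57, 20
percent_remaining = float("{0:.2f}".format((float(remaining) / float(capacity)) * 100))
print(percent_remaining)  # 35.09
```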
@@ -5038,8 +5007,7 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
object_association = serializers.SerializerMethodField(help_text=_("When present, shows the field name of the role or relationship that changed."))
|
||||
object_type = serializers.SerializerMethodField(help_text=_("When present, shows the model on which the role or relationship was defined."))
|
||||
|
||||
@cached_property
|
||||
def _local_summarizable_fk_fields(self):
|
||||
def _local_summarizable_fk_fields(self, obj):
|
||||
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
|
||||
# Special requests
|
||||
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
|
||||
@@ -5059,7 +5027,13 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
('workflow_approval', ('id', 'name', 'unified_job_id')),
|
||||
('instance', ('id', 'hostname')),
|
||||
]
|
||||
return field_list
|
||||
# Optimization - do not attempt to summarize all fields, pare down to only relations that exist
|
||||
if not obj:
|
||||
return field_list
|
||||
existing_association_types = [obj.object1, obj.object2]
|
||||
if 'user' in existing_association_types:
|
||||
existing_association_types.append('role')
|
||||
return [entry for entry in field_list if entry[0] in existing_association_types]
|
||||
|
||||
class Meta:
|
||||
model = ActivityStream
|
||||
@@ -5143,7 +5117,7 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
data = {}
|
||||
if obj.actor is not None:
|
||||
data['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
|
||||
for fk, __ in self._local_summarizable_fk_fields:
|
||||
for fk, __ in self._local_summarizable_fk_fields(obj):
|
||||
if not hasattr(obj, fk):
|
||||
continue
|
||||
m2m_list = self._get_related_objects(obj, fk)
|
||||
@@ -5200,7 +5174,7 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
summary_fields = OrderedDict()
|
||||
for fk, related_fields in self._local_summarizable_fk_fields:
|
||||
for fk, related_fields in self._local_summarizable_fk_fields(obj):
|
||||
try:
|
||||
if not hasattr(obj, fk):
|
||||
continue
|
||||
|
||||
102
awx/api/templates/api/job_job_events_children_summary.md
Normal file
@@ -0,0 +1,102 @@
|
||||
# View a summary of children events
|
||||
|
||||
Special view to facilitate processing job output in the UI.
|
||||
In order to collapse events and their children, the UI needs to know how
|
||||
many children exist for a given event.
|
||||
The UI also needs to know the order of the event (0 based index), which
|
||||
usually matches the counter, but not always.
|
||||
This view returns a JSON object where the key is the event counter, and the value
|
||||
includes the number of child (and grandchild) events.
|
||||
Only events with children are included in the output.
|
||||
|
||||
## Example
|
||||
|
||||
e.g. events from a Demo Job Template job, shown as tuples of
(event counter, uuid, parent_uuid)
|
||||
|
||||
```
|
||||
(1, '4598d19e-93b4-4e33-a0ae-b387a7348964', '')
|
||||
(2, 'aae0d189-e3cb-102a-9f00-000000000006', '4598d19e-93b4-4e33-a0ae-b387a7348964')
|
||||
(3, 'aae0d189-e3cb-102a-9f00-00000000000c', 'aae0d189-e3cb-102a-9f00-000000000006')
|
||||
(4, 'f4194f14-e406-4124-8519-0fdb08b18f4b', 'aae0d189-e3cb-102a-9f00-00000000000c')
|
||||
(5, '39f7ad99-dbf3-41e0-93f8-9999db4004f2', 'aae0d189-e3cb-102a-9f00-00000000000c')
|
||||
(6, 'aae0d189-e3cb-102a-9f00-000000000008', 'aae0d189-e3cb-102a-9f00-000000000006')
|
||||
(7, '39a49992-5ca4-4b6c-b178-e56d0b0333da', 'aae0d189-e3cb-102a-9f00-000000000008')
|
||||
(8, '504f3b28-3ea8-4f6f-bd82-60cf8e807cc0', 'aae0d189-e3cb-102a-9f00-000000000008')
|
||||
(9, 'a242be54-ebe6-4021-afab-f2878bff2e9f', '4598d19e-93b4-4e33-a0ae-b387a7348964')
|
||||
```
|
||||
|
||||
output
|
||||
|
||||
```
|
||||
{
|
||||
"1": {
|
||||
"rowNumber": 0,
|
||||
"numChildren": 8
|
||||
},
|
||||
"2": {
|
||||
"rowNumber": 1,
|
||||
"numChildren": 6
|
||||
},
|
||||
"3": {
|
||||
"rowNumber": 2,
|
||||
"numChildren": 2
|
||||
},
|
||||
"6": {
|
||||
"rowNumber": 5,
|
||||
"numChildren": 2
|
||||
},
"meta_event_nested_parent_uuid": {}
}
```
|
||||
|
||||
counter 1 is event 0, and has 8 children
|
||||
counter 2 is event 1, and has 6 children
|
||||
etc.
|
||||
|
||||
The UI also needs to be able to collapse over "meta" events -- events that
|
||||
show up due to verbosity or warnings from the system while the play is running.
|
||||
These events are emitted at level 0, with no parent uuid.
|
||||
|
||||
```
|
||||
playbook_on_start
|
||||
verbose
|
||||
playbook_on_play_start
|
||||
playbook_on_task_start
|
||||
runner_on_start <- level 3
|
||||
verbose <- jump to level 0
|
||||
verbose
|
||||
runner_on_ok <- jump back to level 3
|
||||
playbook_on_task_start
|
||||
runner_on_start
|
||||
runner_on_ok
|
||||
verbose
|
||||
verbose
|
||||
playbook_on_stats
|
||||
```
|
||||
|
||||
These verbose statements that fall in the middle of a series of child events
are problematic for the UI.
To help, this view attempts to place these events into the hierarchy without
the event level jumps.
|
||||
|
||||
```
|
||||
playbook_on_start
|
||||
verbose
|
||||
playbook_on_play_start
|
||||
playbook_on_task_start
|
||||
runner_on_start <- A
|
||||
verbose <- this maps to the uuid of A
|
||||
verbose
|
||||
runner_on_ok
|
||||
playbook_on_task_start <- B
|
||||
runner_on_start
|
||||
runner_on_ok
|
||||
verbose <- this maps to the uuid of B
|
||||
verbose
|
||||
playbook_on_stats
|
||||
```
|
||||
|
||||
The output will include a JSON object where the key is the event counter,
|
||||
and the value is the assigned nested uuid.
|
||||
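To show how a consumer might use the new endpoint described by this template (a sketch with an assumed host and token; the URL pattern itself is added in the jobs urls hunk below):

```python
# Sketch only: the host, job id and token are placeholders.
import requests

resp = requests.get(
    "https://awx.example.com/api/v2/jobs/123/job_events/children_summary/",
    headers={"Authorization": "Bearer <token>"},
)
data = resp.json()
if data["event_processing_finished"] and data["is_tree"]:
    # counters that can be collapsed in the UI, with their descendant counts
    for counter, info in data["children_summary"].items():
        print(counter, info["rowNumber"], info["numChildren"])
    # meta (verbose/debug/...) events mapped onto the uuid they nest under
    nested = data["meta_event_nested_uuid"]
```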
@@ -10,6 +10,7 @@ from awx.api.views import (
|
||||
JobRelaunch,
|
||||
JobCreateSchedule,
|
||||
JobJobHostSummariesList,
|
||||
JobJobEventsChildrenSummary,
|
||||
JobJobEventsList,
|
||||
JobActivityStreamList,
|
||||
JobStdout,
|
||||
@@ -27,6 +28,7 @@ urls = [
|
||||
re_path(r'^(?P<pk>[0-9]+)/create_schedule/$', JobCreateSchedule.as_view(), name='job_create_schedule'),
|
||||
re_path(r'^(?P<pk>[0-9]+)/job_host_summaries/$', JobJobHostSummariesList.as_view(), name='job_job_host_summaries_list'),
|
||||
re_path(r'^(?P<pk>[0-9]+)/job_events/$', JobJobEventsList.as_view(), name='job_job_events_list'),
|
||||
re_path(r'^(?P<pk>[0-9]+)/job_events/children_summary/$', JobJobEventsChildrenSummary.as_view(), name='job_job_events_children_summary'),
|
||||
re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', JobActivityStreamList.as_view(), name='job_activity_stream_list'),
|
||||
re_path(r'^(?P<pk>[0-9]+)/stdout/$', JobStdout.as_view(), name='job_stdout'),
|
||||
re_path(r'^(?P<pk>[0-9]+)/notifications/$', JobNotificationsList.as_view(), name='job_notifications_list'),
|
||||
|
||||
@@ -115,7 +115,6 @@ from awx.api.metadata import RoleMetadata
|
||||
from awx.main.constants import ACTIVE_STATES, SURVEY_TYPE_MAPPING
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.api.views.mixin import (
|
||||
ControlledByScmMixin,
|
||||
InstanceGroupMembershipMixin,
|
||||
OrganizationCountsMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
@@ -365,6 +364,7 @@ class InstanceList(ListAPIView):
|
||||
model = models.Instance
|
||||
serializer_class = serializers.InstanceSerializer
|
||||
search_fields = ('hostname',)
|
||||
ordering = ('id',)
|
||||
|
||||
|
||||
class InstanceDetail(RetrieveUpdateAPIView):
|
||||
@@ -409,7 +409,15 @@ class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAtta
|
||||
if parent.node_type == 'control':
|
||||
return {'msg': _(f"Cannot change instance group membership of control-only node: {parent.hostname}.")}
|
||||
if parent.node_type == 'hop':
|
||||
return {'msg': _(f"Cannot change instance group membership of hop node: {parent.hostname}.")}
|
||||
return {'msg': _(f"Cannot change instance group membership of hop node : {parent.hostname}.")}
|
||||
return None
|
||||
|
||||
def is_valid_removal(self, parent, sub):
|
||||
res = self.is_valid_relation(parent, sub)
|
||||
if res:
|
||||
return res
|
||||
if sub.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME and parent.node_type == 'hybrid':
|
||||
return {'msg': _(f"Cannot disassociate hybrid instance {parent.hostname} from {sub.name}.")}
|
||||
return None
|
||||
|
||||
|
||||
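A sketch of the behaviour this adds, against an assumed host and instance/group ids: attempting to disassociate a hybrid instance from the control plane queue is rejected with the error dictionary returned by is_valid_removal().

```python
# Sketch only: host, ids and token are placeholders; the error text is illustrative.
import requests

resp = requests.post(
    "https://awx.example.com/api/v2/instances/1/instance_groups/",
    headers={"Authorization": "Bearer <token>"},
    json={"id": 2, "disassociate": True},
)
print(resp.status_code)  # 400
print(resp.json())       # e.g. {'msg': 'Cannot disassociate hybrid instance node1 from controlplane.'}
```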
@@ -511,7 +519,15 @@ class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetac
|
||||
if sub.node_type == 'control':
|
||||
return {'msg': _(f"Cannot change instance group membership of control-only node: {sub.hostname}.")}
|
||||
if sub.node_type == 'hop':
|
||||
return {'msg': _(f"Cannot change instance group membership of hop node: {sub.hostname}.")}
|
||||
return {'msg': _(f"Cannot change instance group membership of hop node : {sub.hostname}.")}
|
||||
return None
|
||||
|
||||
def is_valid_removal(self, parent, sub):
|
||||
res = self.is_valid_relation(parent, sub)
|
||||
if res:
|
||||
return res
|
||||
if sub.node_type == 'hybrid' and parent.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
|
||||
return {'msg': _(f"Cannot disassociate hybrid node {sub.hostname} from {parent.name}.")}
|
||||
return None
|
||||
|
||||
|
||||
@@ -520,6 +536,7 @@ class ScheduleList(ListCreateAPIView):
|
||||
name = _("Schedules")
|
||||
model = models.Schedule
|
||||
serializer_class = serializers.ScheduleSerializer
|
||||
ordering = ('id',)
|
||||
|
||||
|
||||
class ScheduleDetail(RetrieveUpdateDestroyAPIView):
|
||||
@@ -560,8 +577,7 @@ class ScheduleZoneInfo(APIView):
|
||||
swagger_topic = 'System Configuration'
|
||||
|
||||
def get(self, request):
|
||||
zones = [{'name': zone} for zone in models.Schedule.get_zoneinfo()]
|
||||
return Response(zones)
|
||||
return Response({'zones': models.Schedule.get_zoneinfo(), 'links': models.Schedule.get_zoneinfo_links()})
|
||||
|
||||
|
||||
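The hunk above changes the zoneinfo payload from a list of name objects to a dictionary with the zone names plus a map of link zones. A sketch of the before/after shape, assuming get_zoneinfo() yields zone name strings as the old list comprehension suggests (values illustrative):

```python
# Old response body: a flat list of zone-name objects.
old_response = [{"name": "America/New_York"}, {"name": "US/Eastern"}]

# New response body: all zone names, plus link zones mapped to their targets.
new_response = {
    "zones": ["America/New_York", "US/Eastern"],
    "links": {"US/Eastern": "America/New_York"},
}
```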
class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
|
||||
@@ -1658,7 +1674,7 @@ class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
return Response(dict(error=_(str(e))), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class HostDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = models.Host
|
||||
@@ -1692,7 +1708,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
|
||||
return qs
|
||||
|
||||
|
||||
class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
|
||||
class HostGroupsList(SubListCreateAttachDetachAPIView):
|
||||
'''the list of groups a host is directly a member of'''
|
||||
|
||||
model = models.Group
|
||||
@@ -1808,7 +1824,7 @@ class EnforceParentRelationshipMixin(object):
|
||||
return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs)
|
||||
|
||||
|
||||
class GroupChildrenList(ControlledByScmMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
|
||||
class GroupChildrenList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
|
||||
|
||||
model = models.Group
|
||||
serializer_class = serializers.GroupSerializer
|
||||
@@ -1854,7 +1870,7 @@ class GroupPotentialChildrenList(SubListAPIView):
|
||||
return qs.exclude(pk__in=except_pks)
|
||||
|
||||
|
||||
class GroupHostsList(HostRelatedSearchMixin, ControlledByScmMixin, SubListCreateAttachDetachAPIView):
|
||||
class GroupHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
|
||||
'''the list of hosts directly below a group'''
|
||||
|
||||
model = models.Host
|
||||
@@ -1918,7 +1934,7 @@ class GroupActivityStreamList(SubListAPIView):
|
||||
return qs.filter(Q(group=parent) | Q(host__in=parent.hosts.all()))
|
||||
|
||||
|
||||
class GroupDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
class GroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = models.Group
|
||||
serializer_class = serializers.GroupSerializer
|
||||
@@ -3826,6 +3842,112 @@ class JobJobEventsList(BaseJobEventsList):
|
||||
return job.get_event_queryset().select_related('host').order_by('start_line')
|
||||
|
||||
|
||||
class JobJobEventsChildrenSummary(APIView):
|
||||
|
||||
renderer_classes = [JSONRenderer]
|
||||
meta_events = ('debug', 'verbose', 'warning', 'error', 'system_warning', 'deprecated')
|
||||
|
||||
def get(self, request, **kwargs):
|
||||
resp = dict(children_summary={}, meta_event_nested_uuid={}, event_processing_finished=False, is_tree=True)
|
||||
job = get_object_or_404(models.Job, pk=kwargs['pk'])
|
||||
if not job.event_processing_finished:
|
||||
return Response(resp)
|
||||
else:
|
||||
resp["event_processing_finished"] = True
|
||||
|
||||
events = list(job.get_event_queryset().values('counter', 'uuid', 'parent_uuid', 'event').order_by('counter'))
|
||||
if len(events) == 0:
|
||||
return Response(resp)
|
||||
|
||||
# key is counter, value is number of total children (including children of children, etc.)
|
||||
map_counter_children_tally = {i['counter']: {"rowNumber": 0, "numChildren": 0} for i in events}
|
||||
# key is uuid, value is counter
|
||||
map_uuid_counter = {i['uuid']: i['counter'] for i in events}
|
||||
# key is uuid, value is parent uuid. Used as a quick lookup
|
||||
map_uuid_puuid = {i['uuid']: i['parent_uuid'] for i in events}
|
||||
# key is counter of meta events (i.e. verbose), value is uuid of the assigned parent
|
||||
map_meta_counter_nested_uuid = {}
|
||||
|
||||
# collapsible tree view in the UI only makes sense for a tree-like
# hierarchy. If ansible is run with a strategy like free or host_pinned, then
|
||||
# events can be out of sequential order, and no longer follow a tree structure
|
||||
# E1
|
||||
# E2
|
||||
# E3
|
||||
# E4 <- parent is E3
|
||||
# E5 <- parent is E1
|
||||
# in the above, there is no clear way to collapse E1, because E5 comes after
|
||||
# E3, which occurs after E1. Thus the tree view should be disabled.
|
||||
|
||||
# mark the last seen uuid at a given level (0-3)
|
||||
# if a parent uuid is not in this list, then we know the events are not tree-like
|
||||
# and return a response with is_tree: False
|
||||
level_current_uuid = [None, None, None, None]
|
||||
|
||||
prev_non_meta_event = events[0]
|
||||
for i, e in enumerate(events):
|
||||
if not e['event'] in JobJobEventsChildrenSummary.meta_events:
|
||||
prev_non_meta_event = e
|
||||
if not e['uuid']:
|
||||
continue
|
||||
|
||||
if not e['event'] in JobJobEventsChildrenSummary.meta_events:
|
||||
level = models.JobEvent.LEVEL_FOR_EVENT[e['event']]
|
||||
level_current_uuid[level] = e['uuid']
|
||||
# if setting level 1, for example, set levels 2 and 3 back to None
|
||||
for u in range(level + 1, len(level_current_uuid)):
|
||||
level_current_uuid[u] = None
|
||||
|
||||
puuid = e['parent_uuid']
|
||||
if puuid and puuid not in level_current_uuid:
|
||||
# improper tree detected, so bail out early
|
||||
resp['is_tree'] = False
|
||||
return Response(resp)
|
||||
|
||||
# if event is verbose (or debug, etc), we need to "assign" it a
|
||||
# parent. This code looks at the event level of the previous
|
||||
# non-verbose event, and the level of the next (by looking ahead)
|
||||
# non-verbose event. The verbose event is assigned the same parent
# uuid as the higher level event.
|
||||
# e.g.
|
||||
# E1
|
||||
# E2
|
||||
# verbose
|
||||
# verbose <- we are on this event currently
|
||||
# E4
|
||||
# We'll compare E2 and E4, and the verbose event
|
||||
# will be assigned the parent uuid of E4 (higher event level)
|
||||
if e['event'] in JobJobEventsChildrenSummary.meta_events:
|
||||
event_level_before = models.JobEvent.LEVEL_FOR_EVENT[prev_non_meta_event['event']]
|
||||
# find next non meta event
|
||||
z = i
|
||||
next_non_meta_event = events[-1]
|
||||
while z < len(events):
|
||||
if events[z]['event'] not in JobJobEventsChildrenSummary.meta_events:
|
||||
next_non_meta_event = events[z]
|
||||
break
|
||||
z += 1
|
||||
event_level_after = models.JobEvent.LEVEL_FOR_EVENT[next_non_meta_event['event']]
|
||||
if event_level_after and event_level_after > event_level_before:
|
||||
puuid = next_non_meta_event['parent_uuid']
|
||||
else:
|
||||
puuid = prev_non_meta_event['parent_uuid']
|
||||
if puuid:
|
||||
map_meta_counter_nested_uuid[e['counter']] = puuid
|
||||
map_counter_children_tally[e['counter']]['rowNumber'] = i
|
||||
if not puuid:
|
||||
continue
|
||||
# now traverse up the parent, grandparent, etc. events and tally those
|
||||
while puuid:
|
||||
map_counter_children_tally[map_uuid_counter[puuid]]['numChildren'] += 1
|
||||
puuid = map_uuid_puuid.get(puuid, None)
|
||||
|
||||
# create new dictionary, dropping events with 0 children
|
||||
resp["children_summary"] = {k: v for k, v in map_counter_children_tally.items() if v['numChildren'] != 0}
|
||||
resp["meta_event_nested_uuid"] = map_meta_counter_nested_uuid
|
||||
return Response(resp)
|
||||
|
||||
|
||||
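To make the parent-walk tally concrete, here is a small standalone rendition using the (counter, uuid, parent_uuid) tuples from the template example, with uuids shortened to single letters; it reproduces the documented result of {1: 8, 2: 6, 3: 2, 6: 2}.

```python
# Standalone illustration of the ancestor tally performed above.
events = [
    (1, "A", ""), (2, "B", "A"), (3, "C", "B"), (4, "D", "C"), (5, "E", "C"),
    (6, "F", "B"), (7, "G", "F"), (8, "H", "F"), (9, "I", "A"),
]
counter_by_uuid = {u: c for c, u, _ in events}
parent_by_uuid = {u: p for _, u, p in events}
tally = {c: 0 for c, _, _ in events}
for _, _, puuid in events:
    # credit every ancestor (parent, grandparent, ...) of this event
    while puuid:
        tally[counter_by_uuid[puuid]] += 1
        puuid = parent_by_uuid.get(puuid, "")
print({c: n for c, n in tally.items() if n})  # {1: 8, 2: 6, 3: 2, 6: 2}
```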
class AdHocCommandList(ListCreateAPIView):
|
||||
|
||||
model = models.AdHocCommand
|
||||
|
||||
@@ -41,7 +41,7 @@ from awx.api.serializers import (
|
||||
JobTemplateSerializer,
|
||||
LabelSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin
|
||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin
|
||||
|
||||
from awx.api.pagination import UnifiedJobEventPagination
|
||||
|
||||
@@ -75,7 +75,7 @@ class InventoryList(ListCreateAPIView):
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
|
||||
class InventoryDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = Inventory
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
@@ -10,13 +10,12 @@ from django.shortcuts import get_object_or_404
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from rest_framework.permissions import SAFE_METHODS
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.utils import get_object_or_400, parse_yaml_or_json
|
||||
from awx.main.utils import get_object_or_400
|
||||
from awx.main.models.ha import Instance, InstanceGroup
|
||||
from awx.main.models.organization import Team
|
||||
from awx.main.models.projects import Project
|
||||
@@ -68,49 +67,27 @@ class InstanceGroupMembershipMixin(object):
|
||||
membership.
|
||||
"""
|
||||
|
||||
def attach_validate(self, request):
|
||||
parent = self.get_parent_object()
|
||||
sub_id, res = super().attach_validate(request)
|
||||
if res: # handle an error
|
||||
return sub_id, res
|
||||
sub = get_object_or_400(self.model, pk=sub_id)
|
||||
attach_errors = self.is_valid_relation(parent, sub)
|
||||
if attach_errors:
|
||||
return sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST)
|
||||
return sub_id, res
|
||||
|
||||
def attach(self, request, *args, **kwargs):
|
||||
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
|
||||
sub_id, res = self.attach_validate(request)
|
||||
if status.is_success(response.status_code):
|
||||
sub_id = request.data.get('id', None)
|
||||
if self.parent_model is Instance:
|
||||
inst_name = self.get_parent_object().hostname
|
||||
else:
|
||||
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
|
||||
with transaction.atomic():
|
||||
ig_qs = InstanceGroup.objects.select_for_update()
|
||||
instance_groups_queryset = InstanceGroup.objects.select_for_update()
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
|
||||
ig_obj = get_object_or_400(instance_groups_queryset, pk=sub_id)
|
||||
else:
|
||||
# similar to get_parent_object, but selected for update
|
||||
parent_filter = {self.lookup_field: self.kwargs.get(self.lookup_field, None)}
|
||||
ig_obj = get_object_or_404(ig_qs, **parent_filter)
|
||||
ig_obj = get_object_or_404(instance_groups_queryset, **parent_filter)
|
||||
if inst_name not in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.append(inst_name)
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
return response
|
||||
|
||||
def unattach_validate(self, request):
|
||||
parent = self.get_parent_object()
|
||||
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
|
||||
if res:
|
||||
return (sub_id, res)
|
||||
sub = get_object_or_400(self.model, pk=sub_id)
|
||||
attach_errors = self.is_valid_relation(parent, sub)
|
||||
if attach_errors:
|
||||
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
|
||||
return (sub_id, res)
|
||||
|
||||
def unattach(self, request, *args, **kwargs):
|
||||
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
|
||||
if status.is_success(response.status_code):
|
||||
@@ -120,13 +97,13 @@ class InstanceGroupMembershipMixin(object):
|
||||
else:
|
||||
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
|
||||
with transaction.atomic():
|
||||
ig_qs = InstanceGroup.objects.select_for_update()
|
||||
instance_groups_queryset = InstanceGroup.objects.select_for_update()
|
||||
if self.parent_model is Instance:
|
||||
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
|
||||
ig_obj = get_object_or_400(instance_groups_queryset, pk=sub_id)
|
||||
else:
|
||||
# similar to get_parent_object, but selected for update
|
||||
parent_filter = {self.lookup_field: self.kwargs.get(self.lookup_field, None)}
|
||||
ig_obj = get_object_or_404(ig_qs, **parent_filter)
|
||||
ig_obj = get_object_or_404(instance_groups_queryset, **parent_filter)
|
||||
if inst_name in ig_obj.policy_instance_list:
|
||||
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
|
||||
ig_obj.save(update_fields=['policy_instance_list'])
|
||||
@@ -208,35 +185,6 @@ class OrganizationCountsMixin(object):
|
||||
return full_context
|
||||
|
||||
|
||||
class ControlledByScmMixin(object):
|
||||
"""
|
||||
Special method to reset SCM inventory commit hash
|
||||
if anything that it manages changes.
|
||||
"""
|
||||
|
||||
def _reset_inv_src_rev(self, obj):
|
||||
if self.request.method in SAFE_METHODS or not obj:
|
||||
return
|
||||
project_following_sources = obj.inventory_sources.filter(update_on_project_update=True, source='scm')
|
||||
if project_following_sources:
|
||||
# Allow inventory changes unrelated to variables
|
||||
if self.model == Inventory and (
|
||||
not self.request or not self.request.data or parse_yaml_or_json(self.request.data.get('variables', '')) == parse_yaml_or_json(obj.variables)
|
||||
):
|
||||
return
|
||||
project_following_sources.update(scm_last_revision='')
|
||||
|
||||
def get_object(self):
|
||||
obj = super(ControlledByScmMixin, self).get_object()
|
||||
self._reset_inv_src_rev(obj)
|
||||
return obj
|
||||
|
||||
def get_parent_object(self):
|
||||
obj = super(ControlledByScmMixin, self).get_parent_object()
|
||||
self._reset_inv_src_rev(obj)
|
||||
return obj
|
||||
|
||||
|
||||
class NoTruncateMixin(object):
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
|
||||
@@ -204,7 +204,7 @@ class GitlabWebhookReceiver(WebhookReceiverBase):
|
||||
return h.hexdigest()
|
||||
|
||||
def get_event_status_api(self):
|
||||
if self.get_event_type() != 'Merge Request Hook':
|
||||
if self.get_event_type() not in self.ref_keys.keys():
|
||||
return
|
||||
project = self.request.data.get('project', {})
|
||||
repo_url = project.get('web_url')
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Python
|
||||
import contextlib
|
||||
import logging
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import os
|
||||
@@ -31,7 +30,7 @@ from awx.conf.models import Setting
|
||||
|
||||
logger = logging.getLogger('awx.conf.settings')
|
||||
|
||||
SETTING_MEMORY_TTL = 5 if 'callback_receiver' in ' '.join(sys.argv) else 0
|
||||
SETTING_MEMORY_TTL = 5
|
||||
|
||||
# Store a special value to indicate when a setting is not set in the database.
|
||||
SETTING_CACHE_NOTSET = '___notset___'
|
||||
@@ -81,17 +80,16 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
yield
|
||||
except DBError as exc:
|
||||
if trans_safe:
|
||||
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
|
||||
level = logger.exception
|
||||
if isinstance(exc, ProgrammingError):
|
||||
if 'relation' in str(exc) and 'does not exist' in str(exc):
|
||||
# this generally means we can't fetch Tower configuration
|
||||
# because the database hasn't actually finished migrating yet;
|
||||
# this is usually a sign that a service in a container (such as ws_broadcast)
|
||||
# has come up *before* the database has finished migrating, and
|
||||
# especially that the conf.settings table doesn't exist yet
|
||||
level = logger.debug
|
||||
level('Database settings are not available, using defaults.')
|
||||
level = logger.exception
|
||||
if isinstance(exc, ProgrammingError):
|
||||
if 'relation' in str(exc) and 'does not exist' in str(exc):
|
||||
# this generally means we can't fetch Tower configuration
|
||||
# because the database hasn't actually finished migrating yet;
|
||||
# this is usually a sign that a service in a container (such as ws_broadcast)
|
||||
# has come up *before* the database has finished migrating, and
|
||||
# especially that the conf.settings table doesn't exist yet
|
||||
level = logger.debug
|
||||
level('Database settings are not available, using defaults.')
|
||||
else:
|
||||
logger.exception('Error modifying something related to database settings.')
|
||||
finally:
|
||||
@@ -235,6 +233,8 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
self.__dict__['_awx_conf_init_readonly'] = False
|
||||
self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
|
||||
self.__dict__['registry'] = registry
|
||||
self.__dict__['_awx_conf_memoizedcache'] = cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL)
|
||||
self.__dict__['_awx_conf_memoizedcache_lock'] = threading.Lock()
|
||||
|
||||
# record the current pid so we compare it post-fork for
|
||||
# processes like the dispatcher and callback receiver
|
||||
@@ -397,12 +397,20 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
def SETTINGS_MODULE(self):
|
||||
return self._get_default('SETTINGS_MODULE')
|
||||
|
||||
@cachetools.cached(cache=cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL))
|
||||
@cachetools.cachedmethod(
|
||||
cache=lambda self: self.__dict__['_awx_conf_memoizedcache'],
|
||||
key=lambda *args, **kwargs: SettingsWrapper.hashkey(*args, **kwargs),
|
||||
lock=lambda self: self.__dict__['_awx_conf_memoizedcache_lock'],
|
||||
)
|
||||
def _get_local_with_cache(self, name):
|
||||
"""Get value while accepting the in-memory cache if key is available"""
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
return self._get_local(name)
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
if name in self.all_supported_settings:
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
value = self._get_local(name)
|
||||
value = self._get_local_with_cache(name)
|
||||
if value is not empty:
|
||||
return value
|
||||
return self._get_default(name)
|
||||
@@ -476,6 +484,23 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
|
||||
return set_locally or set_on_default
|
||||
|
||||
@classmethod
|
||||
def hashkey(cls, *args, **kwargs):
|
||||
"""
|
||||
Usage of @cachetools.cached has changed to @cachetools.cachedmethod
|
||||
The previous cachetools decorator called the hash function and passed in (self, key).
|
||||
The new cachetools decorator calls the hash function with just (key).
|
||||
Ideally, we would continue to pass self; however, the cachetools decorator interface
|
||||
does not allow us to.
|
||||
|
||||
This hashkey function ensures that the generated key still looks like
|
||||
('<SettingsWrapper>', key). The thought is that maybe it is important to namespace
|
||||
our cache to the SettingsWrapper scope in case some other usage of this cache exists.
|
||||
I cannot think of how any other system could or would use our private cache, but
for safety's sake we are ensuring the key schema does not change.
|
||||
"""
|
||||
return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
|
||||
|
||||
|
||||
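For readers unfamiliar with the decorator used above, here is a minimal standalone sketch of the cachedmethod pattern with a per-instance TTLCache, a lock, and a class-namespaced key (the class and values are hypothetical, not AWX code; depending on the installed cachetools version the key callable may also receive self).

```python
import threading

import cachetools


class Example:
    def __init__(self):
        # per-instance cache and lock, handed to the decorator via callables
        self._memo = cachetools.TTLCache(maxsize=2048, ttl=5)
        self._lock = threading.Lock()

    @classmethod
    def hashkey(cls, *args, **kwargs):
        # namespace the cache key with the class name, e.g. ('<Example>', 'AWX_VAR')
        return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)

    @cachetools.cachedmethod(
        cache=lambda self: self._memo,
        key=lambda *args, **kwargs: Example.hashkey(*args, **kwargs),
        lock=lambda self: self._lock,
    )
    def expensive_lookup(self, name):
        print(f"miss: {name}")  # printed only when the value is not cached
        return name.lower()


e = Example()
e.expensive_lookup("AWX_VAR")  # miss: AWX_VAR
e.expensive_lookup("AWX_VAR")  # served from the TTL cache for up to 5 seconds
```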
def __getattr_without_cache__(self, name):
|
||||
# Django 1.10 added an optimization to settings lookup:
|
||||
|
||||
@@ -28,6 +28,9 @@ def handle_setting_change(key, for_delete=False):
|
||||
cache_keys = {Setting.get_cache_key(k) for k in setting_keys}
|
||||
cache.delete_many(cache_keys)
|
||||
|
||||
# if we have changed a setting, we want to avoid mucking with the in-memory cache entirely
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
# Send setting_changed signal with new value for each setting.
|
||||
for setting_key in setting_keys:
|
||||
setting_changed.send(sender=Setting, setting=setting_key, value=getattr(settings, setting_key, None), enter=not bool(for_delete))
|
||||
|
||||
@@ -8,6 +8,8 @@ import codecs
|
||||
from uuid import uuid4
|
||||
import time
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
@@ -299,3 +301,33 @@ def test_readonly_sensitive_cache_data_is_encrypted(settings):
|
||||
cache.set('AWX_ENCRYPTED', 'SECRET!')
|
||||
assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
|
||||
assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR='DEFAULT')
|
||||
def test_in_memory_cache_only_for_registered_settings(settings):
|
||||
"Test that we only make use of the in-memory TTL cache for registered settings"
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
settings.MIDDLEWARE
|
||||
assert len(settings._awx_conf_memoizedcache) == 0 # does not cache MIDDLEWARE
|
||||
settings.registry.register('AWX_VAR', field_class=fields.CharField, category=_('System'), category_slug='system')
|
||||
settings._wrapped.__dict__['all_supported_settings'] = ['AWX_VAR'] # because it is cached_property
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
assert len(settings._awx_conf_memoizedcache) == 1 # caches registered settings
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR='DEFAULT')
|
||||
def test_in_memory_cache_works(settings):
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
settings.registry.register('AWX_VAR', field_class=fields.CharField, category=_('System'), category_slug='system')
|
||||
settings._wrapped.__dict__['all_supported_settings'] = ['AWX_VAR']
|
||||
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('awx.conf.settings.SettingsWrapper._get_local', return_value='DEFAULT') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_called_once_with('AWX_VAR')
|
||||
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
# SOME DESCRIPTIVE TITLE.
|
||||
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
|
||||
# This file is distributed under the same license as the PACKAGE package.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
|
||||
#
|
||||
msgid ""
|
||||
@@ -8,8 +11,7 @@ msgstr ""
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"Language: es \n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Language: \n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
|
||||
@@ -21,9 +23,7 @@ msgstr "Tiempo de inactividad fuerza desconexión"
|
||||
msgid ""
|
||||
"Number of seconds that a user is inactive before they will need to login "
|
||||
"again."
|
||||
msgstr ""
|
||||
"Número de segundos que un usuario es inactivo antes de que ellos vuelvan a "
|
||||
"conectarse de nuevo."
|
||||
msgstr "Número de segundos que un usuario es inactivo antes de que ellos vuelvan a conectarse de nuevo."
|
||||
|
||||
#: awx/api/conf.py:21 awx/api/conf.py:31 awx/api/conf.py:42 awx/api/conf.py:50
|
||||
#: awx/api/conf.py:70 awx/api/conf.py:85 awx/api/conf.py:96 awx/sso/conf.py:105
|
||||
@@ -45,9 +45,7 @@ msgstr "Número máximo de sesiones activas en simultáneo"
|
||||
msgid ""
|
||||
"Maximum number of simultaneous logged in sessions a user may have. To "
|
||||
"disable enter -1."
|
||||
msgstr ""
|
||||
"Número máximo de sesiones activas en simultáneo que un usuario puede tener. "
|
||||
"Para deshabilitar, introduzca -1."
|
||||
msgstr "Número máximo de sesiones activas en simultáneo que un usuario puede tener. Para deshabilitar, introduzca -1."
|
||||
|
||||
#: awx/api/conf.py:37
|
||||
msgid "Disable the built-in authentication system"
|
||||
@@ -58,10 +56,7 @@ msgid ""
|
||||
"Controls whether users are prevented from using the built-in authentication "
|
||||
"system. You probably want to do this if you are using an LDAP or SAML "
|
||||
"integration."
|
||||
msgstr ""
|
||||
"Controla si se impide que los usuarios utilicen el sistema de autenticación "
|
||||
"integrado. Probablemente desee hacer esto si está usando una integración de "
|
||||
"LDAP o SAML."
|
||||
msgstr "Controla si se impide que los usuarios utilicen el sistema de autenticación integrado. Probablemente desee hacer esto si está usando una integración de LDAP o SAML."
|
||||
|
||||
#: awx/api/conf.py:48
|
||||
msgid "Enable HTTP Basic Auth"
|
||||
@@ -83,13 +78,7 @@ msgid ""
|
||||
"authorization codes in the number of seconds, and "
|
||||
"`REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after "
|
||||
"expired access tokens, in the number of seconds."
|
||||
msgstr ""
|
||||
"Diccionario para personalizar los tiempos de espera de OAuth2; los elementos "
|
||||
"disponibles son `ACCESS_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de "
|
||||
"acceso en cantidad de segundos; `AUTHORIZATION_CODE_EXPIRE_SECONDS`, la "
|
||||
"duración de los códigos de autorización en cantidad de segundos; y "
|
||||
"`REFRESH_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de actualización, "
|
||||
"después de los tokens de acceso expirados, en cantidad de segundos."
|
||||
msgstr "Diccionario para personalizar los tiempos de espera de OAuth2; los elementos disponibles son `ACCESS_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de acceso en cantidad de segundos; `AUTHORIZATION_CODE_EXPIRE_SECONDS`, la duración de los códigos de autorización en cantidad de segundos; y `REFRESH_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de actualización, después de los tokens de acceso expirados, en cantidad de segundos."
|
||||
|
||||
#: awx/api/conf.py:78
|
||||
msgid "Allow External Users to Create OAuth2 Tokens"
|
||||
@@ -101,11 +90,7 @@ msgid ""
|
||||
"Radius, and others) are not allowed to create OAuth2 tokens. To change this "
|
||||
"behavior, enable this setting. Existing tokens will not be deleted when this "
|
||||
"setting is toggled off."
|
||||
msgstr ""
|
||||
"Por motivos de seguridad, los usuarios de proveedores de autenticación "
|
||||
"externos (LDAP, SAML, SSO, Radius y otros) no tienen permitido crear tokens "
|
||||
"de OAuth2. Habilite este ajuste para cambiar este comportamiento. Los tokens "
|
||||
"existentes no se eliminarán cuando se desactive este ajuste."
|
||||
msgstr "Por motivos de seguridad, los usuarios de proveedores de autenticación externos (LDAP, SAML, SSO, Radius y otros) no tienen permitido crear tokens de OAuth2. Habilite este ajuste para cambiar este comportamiento. Los tokens existentes no se eliminarán cuando se desactive este ajuste."
|
||||
|
||||
#: awx/api/conf.py:94
|
||||
msgid "Login redirect override URL"
|
||||
@@ -115,10 +100,7 @@ msgstr "URL de invalidación de redireccionamiento de inicio de sesión"
|
||||
msgid ""
|
||||
"URL to which unauthorized users will be redirected to log in. If blank, "
|
||||
"users will be sent to the login page."
|
||||
msgstr ""
|
||||
"URL a la que los usuarios no autorizados serán redirigidos para iniciar "
|
||||
"sesión. Si está en blanco, los usuarios serán enviados a la página de inicio "
|
||||
"de sesión."
|
||||
msgstr "URL a la que los usuarios no autorizados serán redirigidos para iniciar sesión. Si está en blanco, los usuarios serán enviados a la página de inicio de sesión."
|
||||
|
||||
#: awx/api/conf.py:114
|
||||
msgid "There are no remote authentication systems configured."
|
||||
@@ -167,17 +149,13 @@ msgstr "ID{field_name} no válido: {field_id}"
|
||||
msgid ""
|
||||
"Cannot apply role_level filter to this list because its model does not use "
|
||||
"roles for access control."
|
||||
msgstr ""
|
||||
"No se puede aplicar el filtro role_level a esta lista debido a que su modelo "
|
||||
"no usa roles para el control de acceso."
|
||||
msgstr "No se puede aplicar el filtro role_level a esta lista debido a que su modelo no usa roles para el control de acceso."
|
||||
|
||||
#: awx/api/generics.py:179
|
||||
msgid ""
|
||||
"You did not use correct Content-Type in your HTTP request. If you are using "
|
||||
"our REST API, the Content-Type must be application/json"
|
||||
msgstr ""
|
||||
"No utilizó el Tipo de contenido correcto en su solicitud HTTP. Si está "
|
||||
"usando nuestra API REST, el Tipo de contenido debe ser aplicación/json."
|
||||
msgstr "No utilizó el Tipo de contenido correcto en su solicitud HTTP. Si está usando nuestra API REST, el Tipo de contenido debe ser aplicación/json."
|
||||
|
||||
#: awx/api/generics.py:220
|
||||
msgid " To establish a login session, visit"
|
||||
@@ -223,9 +201,7 @@ msgstr "Estructura de datos con URLs de recursos relacionados."
|
||||
msgid ""
|
||||
"Data structure with name/description for related resources. The output for "
|
||||
"some objects may be limited for performance reasons."
|
||||
msgstr ""
|
||||
"Estructura de datos con nombre/descripción de los recursos relacionados. La "
|
||||
"salida de algunos objetos puede estar limitada por motivos de rendimiento."
|
||||
msgstr "Estructura de datos con nombre/descripción de los recursos relacionados. La salida de algunos objetos puede estar limitada por motivos de rendimiento."
|
||||
|
||||
#: awx/api/metadata.py:75
|
||||
msgid "Timestamp when this {} was created."
|
||||
@@ -248,17 +224,14 @@ msgstr "Error de análisis JSON; no es un objeto JSON"
|
||||
msgid ""
|
||||
"JSON parse error - %s\n"
|
||||
"Possible cause: trailing comma."
|
||||
msgstr ""
|
||||
"Error de análisis JSON - %s\n"
|
||||
msgstr "Error de análisis JSON - %s\n"
|
||||
"Posible causa: coma final."
|
||||
|
||||
#: awx/api/serializers.py:205
|
||||
msgid ""
|
||||
"The original object is already named {}, a copy from it cannot have the same "
|
||||
"name."
|
||||
msgstr ""
|
||||
"El objeto original ya tiene el nombre {}, por lo que una copia de este no "
|
||||
"puede tener el mismo nombre."
|
||||
msgstr "El objeto original ya tiene el nombre {}, por lo que una copia de este no puede tener el mismo nombre."
|
||||
|
||||
#: awx/api/serializers.py:334
|
||||
#, python-format
|
||||
@@ -301,9 +274,7 @@ msgstr "Plantilla de trabajo"
|
||||
msgid ""
|
||||
"Indicates whether all of the events generated by this unified job have been "
|
||||
"saved to the database."
|
||||
msgstr ""
|
||||
"Indica si todos los eventos generados por esta tarea unificada se guardaron "
|
||||
"en la base de datos."
|
||||
msgstr "Indica si todos los eventos generados por esta tarea unificada se guardaron en la base de datos."
|
||||
|
||||
#: awx/api/serializers.py:939
|
||||
msgid "Write-only field used to change the password."
|
||||
@@ -324,8 +295,7 @@ msgstr "No se puede cambiar %s en el usuario gestionado por LDAP."
|
||||
|
||||
#: awx/api/serializers.py:1153
|
||||
msgid "Must be a simple space-separated string with allowed scopes {}."
|
||||
msgstr ""
|
||||
"Debe ser una cadena simple separada por espacios con alcances permitidos {}."
|
||||
msgstr "Debe ser una cadena simple separada por espacios con alcances permitidos {}."
|
||||
|
||||
#: awx/api/serializers.py:1238
|
||||
msgid "Authorization Grant Type"
|
||||
@@ -378,9 +348,7 @@ msgstr "SCM track_submodules solo puede usarse con proyectos git."
|
||||
msgid ""
|
||||
"Only Container Registry credentials can be associated with an Execution "
|
||||
"Environment"
|
||||
msgstr ""
|
||||
"Solo las credenciales del registro de contenedores pueden asociarse a un "
|
||||
"entorno de ejecución"
|
||||
msgstr "Solo las credenciales del registro de contenedores pueden asociarse a un entorno de ejecución"
|
||||
|
||||
#: awx/api/serializers.py:1440
|
||||
msgid "Cannot change the organization of an execution environment"
|
||||
@@ -390,9 +358,7 @@ msgstr "No se puede modificar la organización de un entorno de ejecución"
|
||||
msgid ""
|
||||
"One or more job templates depend on branch override behavior for this "
|
||||
"project (ids: {})."
|
||||
msgstr ""
|
||||
"Una o más plantillas de trabajo dependen del comportamiento de invalidación "
|
||||
"de ramas para este proyecto (ids: {})."
|
||||
msgstr "Una o más plantillas de trabajo dependen del comportamiento de invalidación de ramas para este proyecto (ids: {})."
|
||||
|
||||
#: awx/api/serializers.py:1530
|
||||
msgid "Update options must be set to false for manual projects."
|
||||
@@ -406,9 +372,7 @@ msgstr "Colección de playbooks disponibles dentro de este proyecto."
|
||||
msgid ""
|
||||
"Array of inventory files and directories available within this project, not "
|
||||
"comprehensive."
|
||||
msgstr ""
|
||||
"Colección de archivos de inventario y directorios disponibles dentro de este "
|
||||
"proyecto, no global."
|
||||
msgstr "Colección de archivos de inventario y directorios disponibles dentro de este proyecto, no global."
|
||||
|
||||
#: awx/api/serializers.py:1599 awx/api/serializers.py:3098
|
||||
#: awx/api/serializers.py:3311
|
||||
@@ -560,7 +524,7 @@ msgstr "Playbook no encontrado para el proyecto."
|
||||
|
||||
#: awx/api/serializers.py:2842
|
||||
msgid "Must select playbook for project."
|
||||
msgstr "Debe seleccionar un playbook para el proyecto"
|
||||
msgstr "Debe seleccionar un playbook para el proyecto."
|
||||
|
||||
#: awx/api/serializers.py:2844 awx/api/serializers.py:2846
|
||||
msgid "Project does not allow overriding branch."
|
||||
@@ -893,8 +857,7 @@ msgid "Containerized instances may not be managed via the API"
|
||||
msgstr "Las instancias contenedorizadas no pueden ser gestionadas a través de la API."
|
||||
|
||||
#: awx/api/serializers.py:4919 awx/api/serializers.py:4922
|
||||
#, fuzzy, python-format
|
||||
#| msgid "tower instance group name may not be changed."
|
||||
#, python-format
|
||||
msgid "%s instance group name may not be changed."
|
||||
msgstr "El nombre del grupo de instancia %s no puede modificarse."
|
||||
|
||||
@@ -1049,8 +1012,6 @@ msgid ""
|
||||
msgstr "No puede asignar credenciales de acceso a un equipo cuando el campo de organización no está establecido o pertenezca a una organización diferente."
|
||||
|
||||
#: awx/api/views/__init__.py:720
|
||||
#, fuzzy
|
||||
#| msgid "The instance that managed the execution environment."
|
||||
msgid "Only the 'pull' field can be edited for managed execution environments."
|
||||
msgstr "Sólo se puede editar el campo \"pull\" para los entornos de ejecución gestionados."
|
||||
|
||||
@@ -2087,8 +2048,6 @@ msgid "Unique identifier for an installation"
|
||||
msgstr "Identificador único para una instalación"
|
||||
|
||||
#: awx/main/conf.py:183
|
||||
#, fuzzy
|
||||
#| msgid "The Instance group the job was run under"
|
||||
msgid "The instance group where control plane tasks run"
|
||||
msgstr "Grupo de instancias donde se ejecutan las tareas del plano de control"
|
||||
|
||||
@@ -2796,8 +2755,6 @@ msgid "The identifier for the secret e.g., /some/identifier"
|
||||
msgstr "El identificador para el secreto; por ejemplo, /some/identifier"
|
||||
|
||||
#: awx/main/credential_plugins/dsv.py:12
|
||||
#, fuzzy
|
||||
#| msgid "Tenant ID"
|
||||
msgid "Tenant"
|
||||
msgstr "Inquilino"
|
||||
|
||||
@@ -2816,8 +2773,6 @@ msgid ""
|
||||
msgstr "El TLD del inquilino, por ejemplo, \"com\" cuando la URL es https://ex.secretservercloud.com"
|
||||
|
||||
#: awx/main/credential_plugins/dsv.py:34
|
||||
#, fuzzy
|
||||
#| msgid "Secret Name"
|
||||
msgid "Secret Path"
|
||||
msgstr "Ruta secreta"
|
||||
|
||||
@@ -2826,8 +2781,6 @@ msgid "The secret path e.g. /test/secret1"
|
||||
msgstr "La ruta secreta, por ejemplo, /test/secret1"
|
||||
|
||||
#: awx/main/credential_plugins/dsv.py:46
|
||||
#, fuzzy
|
||||
#| msgid "Job Template"
|
||||
msgid "URL template"
|
||||
msgstr "Plantilla URL"
|
||||
|
||||
@@ -2960,8 +2913,6 @@ msgid ""
|
||||
msgstr "Principales válidos (ya sea nombres de usuario o nombres de host) para los que se debería firmar el certificado:"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:10
|
||||
#, fuzzy
|
||||
#| msgid "Auth Server URL"
|
||||
msgid "Secret Server URL"
|
||||
msgstr "URL del servidor secreto"
|
||||
|
||||
@@ -2972,8 +2923,6 @@ msgid ""
|
||||
msgstr "La URL base del servidor secreto, por ejemplo, https://myserver/SecretServer o https://mytenant.secretservercloud.com"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:17
|
||||
#, fuzzy
|
||||
#| msgid "Red Hat customer username"
|
||||
msgid "The (Application) user username"
|
||||
msgstr "El nombre de usuario (de la aplicación)"
|
||||
|
||||
@@ -2995,20 +2944,14 @@ msgid "The corresponding password"
|
||||
msgstr "La contraseña correspondiente"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:31
|
||||
#, fuzzy
|
||||
#| msgid "Secret Key"
|
||||
msgid "Secret ID"
|
||||
msgstr "Identificación secreta"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:32
|
||||
#, fuzzy
|
||||
#| msgid "The name of the secret to look up."
|
||||
msgid "The integer ID of the secret"
|
||||
msgstr "El ID entero del secreto"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:37
|
||||
#, fuzzy
|
||||
#| msgid "Secret Key"
|
||||
msgid "Secret Field"
|
||||
msgstr "Campo secreto"
|
||||
|
||||
@@ -3568,22 +3511,14 @@ msgstr "Ruta de archivo absoluta al archivo CA por usar (opcional)"
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1023
|
||||
#: awx/main/models/credential/__init__.py:1029 awx/main/models/inventory.py:813
|
||||
#, fuzzy
|
||||
#| msgid "Gather data for Insights for Ansible Automation Platform"
|
||||
msgid "Red Hat Ansible Automation Platform"
|
||||
msgstr "Plataforma Red Hat Ansible Automation"
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1031
|
||||
#, fuzzy
|
||||
#| msgid "The Ansible Tower base URL to authenticate with."
|
||||
msgid "Red Hat Ansible Automation Platform base URL to authenticate with."
|
||||
msgstr "URL base de Red Hat Ansible Automation Platform para autenticar."
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1038
|
||||
#, fuzzy
|
||||
#| msgid ""
|
||||
#| "The Ansible Tower user to authenticate as.This should not be set if an "
|
||||
#| "OAuth token is being used."
|
||||
msgid ""
|
||||
"Red Hat Ansible Automation Platform username id to authenticate as.This "
|
||||
"should not be set if an OAuth token is being used."
|
||||
@@ -4488,7 +4423,7 @@ msgstr "El número de segundos desde de que la última actualización del proyec
|
||||
msgid ""
|
||||
"Allow changing the SCM branch or revision in a job template that uses this "
|
||||
"project."
|
||||
msgstr "Permitir el cambio de la rama o revisión del SCM en una plantilla de trabajo que utilice este proyecto."
|
||||
msgstr "Permitir el cambio de la rama o revisión de SCM en una plantilla de trabajo que utilice este proyecto."
|
||||
|
||||
#: awx/main/models/projects.py:294
|
||||
msgid "The last revision fetched by a project update"
|
||||
@@ -4925,7 +4860,7 @@ msgid "Exception connecting to PagerDuty: {}"
|
||||
msgstr "Excepción conectando a PagerDuty: {}"
|
||||
|
||||
#: awx/main/notifications/pagerduty_backend.py:87
|
||||
#: awx/main/notifications/slack_backend.py:48
|
||||
#: awx/main/notifications/slack_backend.py:49
|
||||
#: awx/main/notifications/twilio_backend.py:47
|
||||
msgid "Exception sending messages: {}"
|
||||
msgstr "Excepción enviando mensajes: {}"
|
||||
@@ -5181,7 +5116,7 @@ msgstr "Al menos un certificado es necesario."
|
||||
msgid ""
|
||||
"At least %(min_certs)d certificates are required, only %(cert_count)d "
|
||||
"provided."
|
||||
msgstr "Se requieren al menos %(min_certs)d certificados, sólo %(cert_count)d proporcionados."
|
||||
msgstr "Al menos %(min_certs)d certificados son necesarios, solo se proporcionó %(cert_count)d."
|
||||
|
||||
#: awx/main/validators.py:152
|
||||
#, python-format
|
||||
@@ -5195,8 +5130,7 @@ msgid ""
|
||||
msgstr "No se permiten más de %(max_certs)d certificados, %(cert_count)d proporcionado."
|
||||
|
||||
#: awx/main/validators.py:289
|
||||
#, fuzzy, python-brace-format
|
||||
#| msgid "The container image to be used for execution."
|
||||
#, python-brace-format
|
||||
msgid "The container image name {value} is not valid"
|
||||
msgstr "El nombre de la imagen del contenedor {value} no es válido"
|
||||
|
||||
@@ -5666,17 +5600,17 @@ msgstr "La clave secreta OAuth2 (Clave secreta del cliente) from your GitHub org
|
||||
|
||||
#: awx/sso/conf.py:751
|
||||
msgid "GitHub Organization Name"
|
||||
msgstr "Nombre de la organización de GitHub"
|
||||
msgstr "Nombre para la organización GitHub"
|
||||
|
||||
#: awx/sso/conf.py:752
|
||||
msgid ""
|
||||
"The name of your GitHub organization, as used in your organization's URL: "
|
||||
"https://github.com/<yourorg>/."
|
||||
msgstr "El nombre de su organización de GitHub, como se utiliza en la URL de su organización: https://github.com/<votreorg>/."
|
||||
msgstr "El nombre de su organización de GitHub, como se utiliza en la URL de su organización: https://github.com/<yourorg>/."
|
||||
|
||||
#: awx/sso/conf.py:762
|
||||
msgid "GitHub Organization OAuth2 Organization Map"
|
||||
msgstr "Mapa de organización de OAuth2 de la organización de GitHub"
|
||||
msgstr "Mapa de organización OAuth2 para organizaciones GitHub"
|
||||
|
||||
#: awx/sso/conf.py:774
|
||||
msgid "GitHub Organization OAuth2 Team Map"
|
||||
@@ -5684,7 +5618,7 @@ msgstr "Mapa de equipos OAuth2 para equipos GitHub"
|
||||
|
||||
#: awx/sso/conf.py:790
|
||||
msgid "GitHub Team OAuth2 Callback URL"
|
||||
msgstr "URL de devolución de OAuth2 del equipo de GitHub"
|
||||
msgstr "URL callback OAuth2 para los equipos GitHub"
|
||||
|
||||
#: awx/sso/conf.py:792 awx/sso/conf.py:1060
|
||||
msgid ""
|
||||
@@ -5692,12 +5626,12 @@ msgid ""
|
||||
"<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and "
|
||||
"secret (Client Secret). Provide this URL as the callback URL for your "
|
||||
"application."
|
||||
msgstr "Cree una aplicación propiedad de la organización en https://github.com/organizations/<votreorg>/settings/applications y obtenga una clave de OAuth2 (ID del cliente) y secreta (clave secreta de cliente). Proporcione esta URL como URL de devolución para su aplicación."
|
||||
msgstr "Cree una aplicación propiedad de la organización en https://github.com/organizations/<yourorg>/settings/applications y obtenga una clave de OAuth2 (ID del cliente) y secreta (clave secreta de cliente). Proporcione esta URL como URL de devolución para su aplicación."
|
||||
|
||||
#: awx/sso/conf.py:797 awx/sso/conf.py:809 awx/sso/conf.py:820
|
||||
#: awx/sso/conf.py:832 awx/sso/conf.py:843 awx/sso/conf.py:855
|
||||
msgid "GitHub Team OAuth2"
|
||||
msgstr "OAuth2 de equipo de GitHub"
|
||||
msgstr "OAuth2 para equipos GitHub"
|
||||
|
||||
#: awx/sso/conf.py:807
|
||||
msgid "GitHub Team OAuth2 Key"
|
||||
@@ -5828,7 +5762,7 @@ msgstr "Nombre de la organización de GitHub Enterprise"
|
||||
msgid ""
|
||||
"The name of your GitHub Enterprise organization, as used in your "
|
||||
"organization's URL: https://github.com/<yourorg>/."
|
||||
msgstr "El nombre de su organización de GitHub Enterprise, como se utiliza en la URL de su organización: https://github.com/<votreorg>/."
|
||||
msgstr "El nombre de su organización de GitHub Enterprise, como se utiliza en la URL de su organización: https://github.com/<yourorg>/."
|
||||
|
||||
#: awx/sso/conf.py:1030
|
||||
msgid "GitHub Enterprise Organization OAuth2 Organization Map"
|
||||
@@ -6303,68 +6237,4 @@ msgstr "%s se está actualizando."
|
||||
|
||||
#: awx/ui/urls.py:24
|
||||
msgid "This page will refresh when complete."
|
||||
msgstr "Esta página se actualizará cuando se complete."
|
||||
|
||||
#~ msgid "SSLError while trying to connect to {}"
|
||||
#~ msgstr "SSLError al intentar conectarse a {}"
|
||||
|
||||
#~ msgid "Request to {} timed out."
|
||||
#~ msgstr "El tiempo de solicitud {} caducó."
|
||||
|
||||
#~ msgid "Unknown exception {} while trying to GET {}"
|
||||
#~ msgstr "Excepción desconocida {} al intentar usar GET {}"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Unauthorized access. Please check your Insights Credential username and "
|
||||
#~ "password."
|
||||
#~ msgstr ""
|
||||
#~ "Acceso no autorizado. Verifique su nombre de usuario y contraseña de "
|
||||
#~ "Insights."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Failed to access the Insights API at URL {}. Server responded with {} "
|
||||
#~ "status code and message {}"
|
||||
#~ msgstr ""
|
||||
#~ "No se pudo acceder a la API de Insights en la URL {}. El servidor "
|
||||
#~ "respondió con el código de estado {} y el mensaje {}"
|
||||
|
||||
#~ msgid "Expected JSON response from Insights at URL {} but instead got {}"
|
||||
#~ msgstr ""
|
||||
#~ "Respuesta JSON esperada de Insights en la URL {}; en cambio, se recibió {}"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Could not translate Insights system ID {} into an Insights platform ID."
|
||||
#~ msgstr ""
|
||||
#~ "No se pudo traducir el ID del sistema Insights {} en un ID de plataforma "
|
||||
#~ "de Insights."
|
||||
|
||||
#~ msgid "This host is not recognized as an Insights host."
|
||||
#~ msgstr "Este host no se reconoce como un host de Insights."
|
||||
|
||||
#~ msgid "The Insights Credential for \"{}\" was not found."
|
||||
#~ msgstr "No se encontró la credencial de Insights para \"{}\"."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "The path to the secret stored in the secret backend e.g, /some/secret/"
|
||||
#~ msgstr ""
|
||||
#~ "La ruta al secreto almacenado en el backend de secretos; por ejemplo, /"
|
||||
#~ "some/secret/"
|
||||
|
||||
#~ msgid "Ansible Tower"
|
||||
#~ msgstr "Ansible Tower"
|
||||
|
||||
#~ msgid "Ansible Tower Hostname"
|
||||
#~ msgstr "Nombre de host de Ansible Tower"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Credentials to be used by hosts belonging to this inventory when "
|
||||
#~ "accessing Red Hat Insights API."
|
||||
#~ msgstr ""
|
||||
#~ "Credenciales que utilizarán los hosts que pertenecen a este inventario "
|
||||
#~ "cuando accedan a la API de Red Hat Insights."
|
||||
|
||||
#~ msgid "Assignment not allowed for Smart Inventory"
|
||||
#~ msgstr "Tarea no permitida para el inventario inteligente"
|
||||
|
||||
#~ msgid "Red Hat Insights host unique identifier."
|
||||
#~ msgstr "Identificador único de host de Red Hat Insights."
|
||||
msgstr "Esta página se actualizará cuando se complete."
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
awx/locale/ko/LC_MESSAGES/django.po (6240 lines, normal file): file diff suppressed because it is too large
@@ -1,3 +1,6 @@
|
||||
# SOME DESCRIPTIVE TITLE.
|
||||
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
|
||||
# This file is distributed under the same license as the PACKAGE package.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
|
||||
#
|
||||
msgid ""
|
||||
@@ -8,8 +11,7 @@ msgstr ""
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"Language: nl \n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Language: \n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
|
||||
@@ -21,9 +23,7 @@ msgstr "Niet-actieve tijd voor forceren van afmelding"
|
||||
msgid ""
|
||||
"Number of seconds that a user is inactive before they will need to login "
|
||||
"again."
|
||||
msgstr ""
|
||||
"Maximumaantal seconden dat een gebruiker niet-actief is voordat deze zich "
|
||||
"opnieuw moet aanmelden."
|
||||
msgstr "Maximumaantal seconden dat een gebruiker niet-actief is voordat deze zich opnieuw moet aanmelden."
|
||||
|
||||
#: awx/api/conf.py:21 awx/api/conf.py:31 awx/api/conf.py:42 awx/api/conf.py:50
|
||||
#: awx/api/conf.py:70 awx/api/conf.py:85 awx/api/conf.py:96 awx/sso/conf.py:105
|
||||
@@ -45,9 +45,7 @@ msgstr "Maximumaantal gelijktijdige aangemelde sessies"
|
||||
msgid ""
|
||||
"Maximum number of simultaneous logged in sessions a user may have. To "
|
||||
"disable enter -1."
|
||||
msgstr ""
|
||||
"Maximumaantal gelijktijdige aangemelde sessies dat een gebruiker kan hebben. "
|
||||
"Voer -1 in om dit uit te schakelen."
|
||||
msgstr "Maximumaantal gelijktijdige aangemelde sessies dat een gebruiker kan hebben. Voer -1 in om dit uit te schakelen."
|
||||
|
||||
#: awx/api/conf.py:37
|
||||
msgid "Disable the built-in authentication system"
|
||||
@@ -58,10 +56,7 @@ msgid ""
|
||||
"Controls whether users are prevented from using the built-in authentication "
|
||||
"system. You probably want to do this if you are using an LDAP or SAML "
|
||||
"integration."
|
||||
msgstr ""
|
||||
"Bepaalt of gebruikers het ingebouwde authenticatiesysteem niet mogen "
|
||||
"gebruiken. U wilt dit waarschijnlijk doen als u een LDAP- of SAML-integratie "
|
||||
"gebruikt."
|
||||
msgstr "Bepaalt of gebruikers het ingebouwde authenticatiesysteem niet mogen gebruiken. U wilt dit waarschijnlijk doen als u een LDAP- of SAML-integratie gebruikt."
|
||||
|
||||
#: awx/api/conf.py:48
|
||||
msgid "Enable HTTP Basic Auth"
|
||||
@@ -83,13 +78,7 @@ msgid ""
|
||||
"authorization codes in the number of seconds, and "
|
||||
"`REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after "
|
||||
"expired access tokens, in the number of seconds."
|
||||
msgstr ""
|
||||
"Termenlijst voor het aanpassen van OAuth 2-time-outs. Beschikbare items zijn "
|
||||
"`ACCESS_TOKEN_EXPIRE_SECONDS`, de duur van de toegangstokens in het aantal "
|
||||
"seconden, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, de duur van de "
|
||||
"autorisatiecodes in het aantal seconden. en `REFRESH_TOKEN_EXPIRE_SECONDS`, "
|
||||
"de duur van de verversingstokens na verlopen toegangstokens, in het aantal "
|
||||
"seconden."
|
||||
msgstr "Termenlijst voor het aanpassen van OAuth 2-time-outs. Beschikbare items zijn `ACCESS_TOKEN_EXPIRE_SECONDS`, de duur van de toegangstokens in het aantal seconden, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, de duur van de autorisatiecodes in het aantal seconden. en `REFRESH_TOKEN_EXPIRE_SECONDS`, de duur van de verversingstokens na verlopen toegangstokens, in het aantal seconden."
|
||||
|
||||
#: awx/api/conf.py:78
|
||||
msgid "Allow External Users to Create OAuth2 Tokens"
|
||||
@@ -476,7 +465,7 @@ msgstr "'ask_at_runtime' wordt niet ondersteund voor aangepaste referenties."
|
||||
|
||||
#: awx/api/serializers.py:2547
|
||||
msgid "Credential Type"
|
||||
msgstr "Type toegangsgegevens"
|
||||
msgstr "Soort toegangsgegevens"
|
||||
|
||||
#: awx/api/serializers.py:2614
|
||||
msgid "Modifications not allowed for managed credentials"
|
||||
@@ -868,8 +857,7 @@ msgid "Containerized instances may not be managed via the API"
|
||||
msgstr "Geclusterde instanties worden mogelijk niet beheerd via de API"
|
||||
|
||||
#: awx/api/serializers.py:4919 awx/api/serializers.py:4922
|
||||
#, fuzzy, python-format
|
||||
#| msgid "tower instance group name may not be changed."
|
||||
#, python-format
|
||||
msgid "%s instance group name may not be changed."
|
||||
msgstr "Naam van de %s-instantiegroep mag niet worden gewijzigd."
|
||||
|
||||
@@ -1024,8 +1012,6 @@ msgid ""
|
||||
msgstr "U kunt een team geen referentietoegang verlenen wanneer het veld Organisatie niet is ingesteld of behoort tot een andere organisatie"
|
||||
|
||||
#: awx/api/views/__init__.py:720
|
||||
#, fuzzy
|
||||
#| msgid "The instance that managed the execution environment."
|
||||
msgid "Only the 'pull' field can be edited for managed execution environments."
|
||||
msgstr "Alleen het veld \"pull\" kan worden bewerkt voor beheerde uitvoeringsomgevingen."
|
||||
|
||||
@@ -2062,8 +2048,6 @@ msgid "Unique identifier for an installation"
|
||||
msgstr "Unieke identificatiecode voor installatie"
|
||||
|
||||
#: awx/main/conf.py:183
|
||||
#, fuzzy
|
||||
#| msgid "The Instance group the job was run under"
|
||||
msgid "The instance group where control plane tasks run"
|
||||
msgstr "De instantiegroep waar control plane-taken worden uitgevoerd"
|
||||
|
||||
@@ -2637,7 +2621,7 @@ msgstr "Vault-URL (DNS-naam)"
|
||||
#: awx/main/credential_plugins/dsv.py:23
|
||||
#: awx/main/models/credential/__init__.py:895
|
||||
msgid "Client ID"
|
||||
msgstr "Client-id"
|
||||
msgstr "Klant-ID"
|
||||
|
||||
#: awx/main/credential_plugins/azure_kv.py:29
|
||||
#: awx/main/models/credential/__init__.py:902
|
||||
@@ -2771,8 +2755,6 @@ msgid "The identifier for the secret e.g., /some/identifier"
|
||||
msgstr "De identificatiecode voor het geheim, bijv. /some/identifier"
|
||||
|
||||
#: awx/main/credential_plugins/dsv.py:12
|
||||
#, fuzzy
|
||||
#| msgid "Tenant ID"
|
||||
msgid "Tenant"
|
||||
msgstr "Tenant"
|
||||
|
||||
@@ -2791,8 +2773,6 @@ msgid ""
|
||||
msgstr "Het TLD van de tenant, bv. \"com\" wanneer de URL https://ex.secretservercloud.com is"
|
||||
|
||||
#: awx/main/credential_plugins/dsv.py:34
|
||||
#, fuzzy
|
||||
#| msgid "Secret Name"
|
||||
msgid "Secret Path"
|
||||
msgstr "Geheim pad"
|
||||
|
||||
@@ -2801,8 +2781,6 @@ msgid "The secret path e.g. /test/secret1"
|
||||
msgstr "Het geheime pad, bv. /test/secret1"
|
||||
|
||||
#: awx/main/credential_plugins/dsv.py:46
|
||||
#, fuzzy
|
||||
#| msgid "Job Template"
|
||||
msgid "URL template"
|
||||
msgstr "URL-sjabloon"
|
||||
|
||||
@@ -2935,8 +2913,6 @@ msgid ""
|
||||
msgstr "Geldige principes (gebruikersnamen of hostnamen) waarvoor het certificaat moet worden ondertekend."
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:10
|
||||
#, fuzzy
|
||||
#| msgid "Auth Server URL"
|
||||
msgid "Secret Server URL"
|
||||
msgstr "Geheime-server-URL"
|
||||
|
||||
@@ -2947,8 +2923,6 @@ msgid ""
|
||||
msgstr "De basis-URL van de geheime server, bv. https://myserver/SecretServer of https://mytenant.secretservercloud.com"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:17
|
||||
#, fuzzy
|
||||
#| msgid "Red Hat customer username"
|
||||
msgid "The (Application) user username"
|
||||
msgstr "De gebruikersnaam van de (applicatie) gebruiker"
|
||||
|
||||
@@ -2970,20 +2944,14 @@ msgid "The corresponding password"
|
||||
msgstr "Het bijbehorende wachtwoord"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:31
|
||||
#, fuzzy
|
||||
#| msgid "Secret Key"
|
||||
msgid "Secret ID"
|
||||
msgstr "Geheime id"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:32
|
||||
#, fuzzy
|
||||
#| msgid "The name of the secret to look up."
|
||||
msgid "The integer ID of the secret"
|
||||
msgstr "De id van het geheim als geheel getal"
|
||||
|
||||
#: awx/main/credential_plugins/tss.py:37
|
||||
#, fuzzy
|
||||
#| msgid "Secret Key"
|
||||
msgid "Secret Field"
|
||||
msgstr "Geheim veld"
|
||||
|
||||
@@ -3543,22 +3511,14 @@ msgstr "Absoluut bestandspad naar het CA-bestand om te gebruiken (optioneel)"
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1023
|
||||
#: awx/main/models/credential/__init__.py:1029 awx/main/models/inventory.py:813
|
||||
#, fuzzy
|
||||
#| msgid "Gather data for Insights for Ansible Automation Platform"
|
||||
msgid "Red Hat Ansible Automation Platform"
|
||||
msgstr "Automatiseringsplatform voor Red Hat Ansible"
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1031
|
||||
#, fuzzy
|
||||
#| msgid "The Ansible Tower base URL to authenticate with."
|
||||
msgid "Red Hat Ansible Automation Platform base URL to authenticate with."
|
||||
msgstr "De basis-URL van het automatiseringsplatform voor Red Hat Ansible voor authenticatie."
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1038
|
||||
#, fuzzy
|
||||
#| msgid ""
|
||||
#| "The Ansible Tower user to authenticate as.This should not be set if an "
|
||||
#| "OAuth token is being used."
|
||||
msgid ""
|
||||
"Red Hat Ansible Automation Platform username id to authenticate as.This "
|
||||
"should not be set if an OAuth token is being used."
|
||||
@@ -4463,7 +4423,7 @@ msgstr "Het aantal seconden na uitvoering van de laatste projectupdate waarna ee
|
||||
msgid ""
|
||||
"Allow changing the SCM branch or revision in a job template that uses this "
|
||||
"project."
|
||||
msgstr "Wijzigen van de SCM-vertakking of de revisie toelaten in een taaksjabloon die gebruik maakt van dit project."
|
||||
msgstr "Maak het mogelijk om de SCM-tak of de revisie te wijzigen in een taaksjabloon die gebruik maakt van dit project."
|
||||
|
||||
#: awx/main/models/projects.py:294
|
||||
msgid "The last revision fetched by a project update"
|
||||
@@ -4900,7 +4860,7 @@ msgid "Exception connecting to PagerDuty: {}"
|
||||
msgstr "Uitzondering bij het maken van de verbinding met PagerDuty: {}"
|
||||
|
||||
#: awx/main/notifications/pagerduty_backend.py:87
|
||||
#: awx/main/notifications/slack_backend.py:48
|
||||
#: awx/main/notifications/slack_backend.py:49
|
||||
#: awx/main/notifications/twilio_backend.py:47
|
||||
msgid "Exception sending messages: {}"
|
||||
msgstr "Uitzondering bij het verzenden van berichten: {}"
|
||||
@@ -5170,8 +5130,7 @@ msgid ""
|
||||
msgstr "Niet meer dan %(max_certs)d certificaten zijn toegestaan, %(cert_count)d geleverd."
|
||||
|
||||
#: awx/main/validators.py:289
|
||||
#, fuzzy, python-brace-format
|
||||
#| msgid "The container image to be used for execution."
|
||||
#, python-brace-format
|
||||
msgid "The container image name {value} is not valid"
|
||||
msgstr "De naam van de containerafbeelding {value} is ongeldig"
|
||||
|
||||
@@ -6278,68 +6237,4 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."
|
||||
|
||||
#: awx/ui/urls.py:24
|
||||
msgid "This page will refresh when complete."
|
||||
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
|
||||
|
||||
#~ msgid "SSLError while trying to connect to {}"
|
||||
#~ msgstr "SSLError tijdens poging om verbinding te maken met {}"
|
||||
|
||||
#~ msgid "Request to {} timed out."
|
||||
#~ msgstr "Er is een time-out opgetreden voor de aanvraag naar {}"
|
||||
|
||||
#~ msgid "Unknown exception {} while trying to GET {}"
|
||||
#~ msgstr "Onbekende uitzondering {} tijdens poging tot OPHALEN {}"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Unauthorized access. Please check your Insights Credential username and "
|
||||
#~ "password."
|
||||
#~ msgstr ""
|
||||
#~ "Geen toegang. Controleer uw Insights Credential gebruikersnaam en "
|
||||
#~ "wachtwoord."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Failed to access the Insights API at URL {}. Server responded with {} "
|
||||
#~ "status code and message {}"
|
||||
#~ msgstr ""
|
||||
#~ "Openen van Insights API via URL {} mislukt. Server reageerde met {} "
|
||||
#~ "statuscode en de melding {}"
|
||||
|
||||
#~ msgid "Expected JSON response from Insights at URL {} but instead got {}"
|
||||
#~ msgstr ""
|
||||
#~ "Verwachte JSON-reactie van Insights via URL {}, maar in plaats daarvan {} "
|
||||
#~ "verkregen."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Could not translate Insights system ID {} into an Insights platform ID."
|
||||
#~ msgstr ""
|
||||
#~ "Omzetten van Insights systeem-ID {} naar een Insights platform-ID mislukt."
|
||||
|
||||
#~ msgid "This host is not recognized as an Insights host."
|
||||
#~ msgstr "Deze host wordt niet herkend als een Insights-host."
|
||||
|
||||
#~ msgid "The Insights Credential for \"{}\" was not found."
|
||||
#~ msgstr "De Insights-referentie voor ‘{}‘ is niet gevonden."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "The path to the secret stored in the secret backend e.g, /some/secret/"
|
||||
#~ msgstr ""
|
||||
#~ "De pad naar het geheim dat in de geheime back-end is opgeslagen, bijv. /"
|
||||
#~ "some/secret/"
|
||||
|
||||
#~ msgid "Ansible Tower"
|
||||
#~ msgstr "Ansible Tower"
|
||||
|
||||
#~ msgid "Ansible Tower Hostname"
|
||||
#~ msgstr "Hostnaam Ansible Tower"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Credentials to be used by hosts belonging to this inventory when "
|
||||
#~ "accessing Red Hat Insights API."
|
||||
#~ msgstr ""
|
||||
#~ "Referenties die worden gebruikt door hosts die behoren tot deze "
|
||||
#~ "inventaris bij toegang tot de Red Hat Insights API."
|
||||
|
||||
#~ msgid "Assignment not allowed for Smart Inventory"
|
||||
#~ msgstr "Toewijzing niet toegestaan voor Smart-inventaris"
|
||||
|
||||
#~ msgid "Red Hat Insights host unique identifier."
|
||||
#~ msgstr "Unieke id van Red Hat Insights-host."
|
||||
msgstr "Deze pagina wordt vernieuwd als hij klaar is."
|
||||
|
||||
File diff suppressed because it is too large
@@ -12,8 +12,6 @@ from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from psycopg2.errors import UntranslatableCharacter
|
||||
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
|
||||
from awx.main import models
|
||||
@@ -84,7 +82,7 @@ def _identify_lower(key, since, until, last_gather):
|
||||
return lower, last_entries
|
||||
|
||||
|
||||
@register('config', '1.3', description=_('General platform configuration.'))
|
||||
@register('config', '1.4', description=_('General platform configuration.'))
|
||||
def config(since, **kwargs):
|
||||
license_info = get_license()
|
||||
install_type = 'traditional'
|
||||
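The config collector above is registered under two schema versions with the same @register decorator. As a rough illustration only, the sketch below shows one way such a version-keyed registry can be wired; AWX's real register() also accepts options like format and expensive, which are omitted here.

# Rough sketch of a version-keyed collector registry, assuming an interface
# like the @register decorator used above; not AWX's actual implementation.
COLLECTORS = {}


def register(key, version, description=None, **kwargs):
    def decorate(fn):
        # The same function can be registered under several schema versions,
        # which is why 'config' appears with both '1.3' and '1.4' above.
        COLLECTORS.setdefault(key, {})[version] = {'fn': fn, 'description': description, 'options': kwargs}
        return fn

    return decorate


@register('config', '1.4', description='General platform configuration.')
def config(since, **kwargs):
    return {'install_type': 'traditional'}


entry = COLLECTORS['config']['1.4']
print(entry['description'], entry['fn'](since=None))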
@@ -104,6 +102,22 @@ def config(since, **kwargs):
|
||||
'tower_url_base': settings.TOWER_URL_BASE,
|
||||
'tower_version': get_awx_version(),
|
||||
'license_type': license_info.get('license_type', 'UNLICENSED'),
|
||||
'license_date': license_info.get('license_date'),
|
||||
'subscription_name': license_info.get('subscription_name'),
|
||||
'sku': license_info.get('sku'),
|
||||
'support_level': license_info.get('support_level'),
|
||||
'product_name': license_info.get('product_name'),
|
||||
'valid_key': license_info.get('valid_key'),
|
||||
'satellite': license_info.get('satellite'),
|
||||
'pool_id': license_info.get('pool_id'),
|
||||
'current_instances': license_info.get('current_instances'),
|
||||
'automated_instances': license_info.get('automated_instances'),
|
||||
'automated_since': license_info.get('automated_since'),
|
||||
'trial': license_info.get('trial'),
|
||||
'grace_period_remaining': license_info.get('grace_period_remaining'),
|
||||
'compliant': license_info.get('compliant'),
|
||||
'date_warning': license_info.get('date_warning'),
|
||||
'date_expired': license_info.get('date_expired'),
|
||||
'free_instances': license_info.get('free_instances', 0),
|
||||
'total_licensed_instances': license_info.get('instance_count', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
@@ -115,7 +129,7 @@ def config(since, **kwargs):
|
||||
}
|
||||
|
||||
|
||||
@register('counts', '1.1', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||
@register('counts', '1.2', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||
def counts(since, **kwargs):
|
||||
counts = {}
|
||||
for cls in (
|
||||
@@ -158,6 +172,13 @@ def counts(since, **kwargs):
|
||||
.count()
|
||||
)
|
||||
counts['pending_jobs'] = models.UnifiedJob.objects.exclude(launch_type='sync').filter(status__in=('pending',)).count()
|
||||
if connection.vendor == 'postgresql':
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"select count(*) from pg_stat_activity where datname=\'{connection.settings_dict['NAME']}\'")
|
||||
counts['database_connections'] = cursor.fetchone()[0]
|
||||
else:
|
||||
# We should be using postgresql; if that ever changes, update the value below
|
||||
counts['database_connections'] = 1
|
||||
return counts
|
||||
|
||||
|
||||
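The new database_connections count above queries pg_stat_activity through Django's connection. A standalone sketch of the same query with psycopg2 follows; the connection parameters (dbname, user, host) are placeholders, not values taken from this diff.

import psycopg2


def database_connections(dbname):
    # dbname/user/host are placeholder connection settings.
    conn = psycopg2.connect(dbname=dbname, user='awx', host='localhost')
    try:
        with conn.cursor() as cursor:
            # Parameter binding avoids the manual quote escaping done in the f-string above.
            cursor.execute("SELECT count(*) FROM pg_stat_activity WHERE datname = %s", (dbname,))
            return cursor.fetchone()[0]
    finally:
        conn.close()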
@@ -338,6 +359,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
|
||||
{tbl}.event,
|
||||
task_action,
|
||||
resolved_action,
|
||||
resolved_role,
|
||||
-- '-' operator listed here:
|
||||
-- https://www.postgresql.org/docs/12/functions-json.html
|
||||
-- note that operator is only supported by jsonb objects
|
||||
@@ -357,27 +379,24 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
|
||||
x.duration AS duration,
|
||||
x.res->'warnings' AS warnings,
|
||||
x.res->'deprecations' AS deprecations
|
||||
FROM {tbl}, jsonb_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "resolved_action" text, "start" text, "end" text)
|
||||
FROM {tbl}, jsonb_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "resolved_action" text, "resolved_role" text, "start" text, "end" text)
|
||||
WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
|
||||
return query
|
||||
|
||||
try:
|
||||
return _copy_table(table='events', query=query(f"{tbl}.event_data::jsonb"), path=full_path)
|
||||
except UntranslatableCharacter:
|
||||
return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::jsonb"), path=full_path)
|
||||
return _copy_table(table='events', query=query(fr"replace({tbl}.event_data, '\u', '\u005cu')::jsonb"), path=full_path)
|
||||
|
||||
|
||||
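The fallback query above changes from stripping \u0000 to escaping every literal backslash-u as \u005cu before the ::jsonb cast, so escape sequences stay as plain text instead of decoding to a NUL byte that PostgreSQL jsonb cannot store. The short Python sketch below mirrors that replacement on a sample payload; the sample JSON is illustrative.

import json

raw = '{"stdout": "bad value \\u0000 here"}'  # sample text-column contents
escaped = raw.replace('\\u', '\\u005cu')      # what replace(..., '\u', '\u005cu') does in SQL

decoded = json.loads(escaped)
print(decoded['stdout'])  # -> bad value \u0000 here, kept as literal text rather than a NUL byte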
@register('events_table', '1.4', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
@register('events_table', '1.5', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
def events_table_unpartitioned(since, full_path, until, **kwargs):
|
||||
return _events_table(since, full_path, until, '_unpartitioned_main_jobevent', 'created', **kwargs)
|
||||
|
||||
|
||||
@register('events_table', '1.4', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
@register('events_table', '1.5', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
def events_table_partitioned_modified(since, full_path, until, **kwargs):
|
||||
return _events_table(since, full_path, until, 'main_jobevent', 'modified', project_job_created=True, **kwargs)
|
||||
|
||||
|
||||
@register('unified_jobs_table', '1.3', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
||||
@register('unified_jobs_table', '1.4', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
||||
def unified_jobs_table(since, full_path, until, **kwargs):
|
||||
unified_job_query = '''COPY (SELECT main_unifiedjob.id,
|
||||
main_unifiedjob.polymorphic_ctype_id,
|
||||
@@ -403,7 +422,8 @@ def unified_jobs_table(since, full_path, until, **kwargs):
|
||||
main_unifiedjob.job_explanation,
|
||||
main_unifiedjob.instance_group_id,
|
||||
main_unifiedjob.installed_collections,
|
||||
main_unifiedjob.ansible_version
|
||||
main_unifiedjob.ansible_version,
|
||||
main_job.forks
|
||||
FROM main_unifiedjob
|
||||
JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
|
||||
LEFT JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
|
||||
|
||||
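The unified_jobs_table collector builds a COPY (...) TO STDOUT WITH CSV HEADER statement and hands it to a _copy_table helper that is not shown in this diff. As a generic sketch under that assumption, such a statement can be streamed to a CSV file with psycopg2's copy_expert:

import psycopg2


def copy_query_to_csv(dsn, query, path):
    # query is expected to be a complete "COPY (...) TO STDOUT WITH CSV HEADER" statement.
    with psycopg2.connect(dsn) as conn, conn.cursor() as cursor, open(path, 'w') as out:
        cursor.copy_expert(query, out)
    return path


# copy_query_to_csv('dbname=awx', "COPY (SELECT 1 AS id) TO STDOUT WITH CSV HEADER", '/tmp/unified_jobs.csv')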
@@ -177,7 +177,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
|
||||
if collection_type != 'dry-run':
|
||||
if not settings.INSIGHTS_TRACKING_STATE:
|
||||
logger.log(log_level, "Insights for Ansible Automation Platform not enabled. Use --dry-run to gather locally without sending.")
|
||||
logger.log(log_level, "Automation Analytics not enabled. Use --dry-run to gather locally without sending.")
|
||||
return None
|
||||
|
||||
if not (settings.AUTOMATION_ANALYTICS_URL and settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD):
|
||||
@@ -332,10 +332,10 @@ def ship(path):
|
||||
Ship gathered metrics to the Insights API
|
||||
"""
|
||||
if not path:
|
||||
logger.error('Insights for Ansible Automation Platform TAR not found')
|
||||
logger.error('Automation Analytics TAR not found')
|
||||
return False
|
||||
if not os.path.exists(path):
|
||||
logger.error('Insights for Ansible Automation Platform TAR {} not found'.format(path))
|
||||
logger.error('Automation Analytics TAR {} not found'.format(path))
|
||||
return False
|
||||
if "Error:" in str(path):
|
||||
return False
|
||||
|
||||
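The dry-run branch above gathers data locally without shipping it. One hedged way to exercise it from Python is through Django's management-command machinery; the command name gather_analytics and its --dry-run flag are assumptions about the surrounding tree, not something shown in this diff.

from django.core.management import call_command

# Command name and flag are assumptions; run inside an AWX Django environment.
call_command('gather_analytics', '--dry-run')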
@@ -126,6 +126,8 @@ def metrics():
|
||||
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
|
||||
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
|
||||
|
||||
DATABASE_CONNECTIONS = Gauge('awx_database_connections_total', 'Number of connections to database', registry=REGISTRY)
|
||||
|
||||
license_info = get_license()
|
||||
SYSTEM_INFO.info(
|
||||
{
|
||||
@@ -163,6 +165,8 @@ def metrics():
|
||||
USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
|
||||
USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])
|
||||
|
||||
DATABASE_CONNECTIONS.set(current_counts['database_connections'])
|
||||
|
||||
all_job_data = job_counts(None)
|
||||
statuses = all_job_data.get('status', {})
|
||||
for status, value in statuses.items():
|
||||
|
||||
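The hunk above adds a database-connections gauge to the Prometheus registry. A minimal, self-contained sketch of the same gauge with prometheus_client follows; the hard-coded value stands in for current_counts['database_connections'].

from prometheus_client import CollectorRegistry, Gauge, generate_latest

REGISTRY = CollectorRegistry()
DATABASE_CONNECTIONS = Gauge('awx_database_connections_total', 'Number of connections to database', registry=REGISTRY)

DATABASE_CONNECTIONS.set(42)  # stands in for current_counts['database_connections']
print(generate_latest(REGISTRY).decode())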
@@ -8,7 +8,7 @@ from django.apps import apps
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
root_key = 'awx_metrics'
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
class BaseM:
|
||||
@@ -16,16 +16,22 @@ class BaseM:
|
||||
self.field = field
|
||||
self.help_text = help_text
|
||||
self.current_value = 0
|
||||
self.metric_has_changed = False
|
||||
|
||||
def clear_value(self, conn):
|
||||
def reset_value(self, conn):
|
||||
conn.hset(root_key, self.field, 0)
|
||||
self.current_value = 0
|
||||
|
||||
def inc(self, value):
|
||||
self.current_value += value
|
||||
self.metric_has_changed = True
|
||||
|
||||
def set(self, value):
|
||||
self.current_value = value
|
||||
self.metric_has_changed = True
|
||||
|
||||
def get(self):
|
||||
return self.current_value
|
||||
|
||||
def decode(self, conn):
|
||||
value = conn.hget(root_key, self.field)
|
||||
@@ -34,7 +40,9 @@ class BaseM:
|
||||
def to_prometheus(self, instance_data):
|
||||
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
|
||||
for instance in instance_data:
|
||||
output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
|
||||
if self.field in instance_data[instance]:
|
||||
# on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
|
||||
output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
|
||||
return output_text
|
||||
|
||||
|
||||
@@ -46,8 +54,10 @@ class FloatM(BaseM):
|
||||
return 0.0
|
||||
|
||||
def store_value(self, conn):
|
||||
conn.hincrbyfloat(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
if self.metric_has_changed:
|
||||
conn.hincrbyfloat(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
self.metric_has_changed = False
|
||||
|
||||
|
||||
class IntM(BaseM):
|
||||
@@ -58,8 +68,10 @@ class IntM(BaseM):
|
||||
return 0
|
||||
|
||||
def store_value(self, conn):
|
||||
conn.hincrby(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
if self.metric_has_changed:
|
||||
conn.hincrby(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
self.metric_has_changed = False
|
||||
|
||||
|
||||
class SetIntM(BaseM):
|
||||
@@ -70,10 +82,9 @@ class SetIntM(BaseM):
|
||||
return 0
|
||||
|
||||
def store_value(self, conn):
|
||||
# do not set value if it has not changed since last time this was called
|
||||
if self.current_value is not None:
|
||||
if self.metric_has_changed:
|
||||
conn.hset(root_key, self.field, self.current_value)
|
||||
self.current_value = None
|
||||
self.metric_has_changed = False
|
||||
|
||||
|
||||
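The store_value changes above introduce a dirty flag so Redis is only written when a metric actually changed since the last flush. A minimal sketch of that pattern, with an illustrative field name and an assumed local broker URL:

import redis

root_key = 'awx_metrics'


class SetGauge:
    def __init__(self, field):
        self.field = field
        self.current_value = 0
        self.metric_has_changed = False

    def set(self, value):
        self.current_value = value
        self.metric_has_changed = True

    def store_value(self, conn):
        # Only touch Redis when set() has been called since the last flush.
        if self.metric_has_changed:
            conn.hset(root_key, self.field, self.current_value)
            self.metric_has_changed = False


conn = redis.Redis.from_url('redis://localhost:6379/0')  # assumed broker URL
g = SetGauge('task_manager_tasks_started')
g.store_value(conn)  # no-op: nothing has changed yet
g.set(7)
g.store_value(conn)  # writes 7 into the awx_metrics hash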
class SetFloatM(SetIntM):
|
||||
@@ -94,13 +105,13 @@ class HistogramM(BaseM):
|
||||
self.sum = IntM(field + '_sum', '')
|
||||
super(HistogramM, self).__init__(field, help_text)
|
||||
|
||||
def clear_value(self, conn):
|
||||
def reset_value(self, conn):
|
||||
conn.hset(root_key, self.field, 0)
|
||||
self.inf.clear_value(conn)
|
||||
self.sum.clear_value(conn)
|
||||
self.inf.reset_value(conn)
|
||||
self.sum.reset_value(conn)
|
||||
for b in self.buckets_to_keys.values():
|
||||
b.clear_value(conn)
|
||||
super(HistogramM, self).clear_value(conn)
|
||||
b.reset_value(conn)
|
||||
super(HistogramM, self).reset_value(conn)
|
||||
|
||||
def observe(self, value):
|
||||
for b in self.buckets:
|
||||
@@ -136,7 +147,7 @@ class HistogramM(BaseM):
|
||||
|
||||
|
||||
class Metrics:
|
||||
def __init__(self, auto_pipe_execute=True, instance_name=None):
|
||||
def __init__(self, auto_pipe_execute=False, instance_name=None):
|
||||
self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
|
||||
self.conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.last_pipe_execute = time.time()
|
||||
@@ -152,6 +163,8 @@ class Metrics:
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
if instance_name:
|
||||
self.instance_name = instance_name
|
||||
elif settings.IS_TESTING():
|
||||
self.instance_name = "awx_testing"
|
||||
else:
|
||||
self.instance_name = Instance.objects.me().hostname
|
||||
|
||||
@@ -161,15 +174,29 @@ class Metrics:
|
||||
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
|
||||
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
|
||||
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Time spent saving events to database'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
|
||||
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
|
||||
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
|
||||
HistogramM(
|
||||
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
|
||||
),
|
||||
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
|
||||
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
|
||||
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
|
||||
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
|
||||
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading all tasks from db'),
|
||||
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
|
||||
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
|
||||
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
|
||||
SetFloatM('task_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
|
||||
SetFloatM('task_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow jobs'),
|
||||
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('task_manager_schedule_calls', 'Number of calls to task manager schedule'),
|
||||
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
|
||||
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
|
||||
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
|
||||
]
|
||||
# turn metric list into dictionary with the metric name as a key
|
||||
self.METRICS = {}
|
||||
@@ -179,29 +206,39 @@ class Metrics:
|
||||
# track last time metrics were sent to other nodes
|
||||
self.previous_send_metrics = SetFloatM('send_metrics_time', 'Timestamp of previous send_metrics call')
|
||||
|
||||
def clear_values(self):
|
||||
def reset_values(self):
|
||||
# intended to be called once on app startup to reset all metric
|
||||
# values to 0
|
||||
for m in self.METRICS.values():
|
||||
m.clear_value(self.conn)
|
||||
m.reset_value(self.conn)
|
||||
self.metrics_have_changed = True
|
||||
self.conn.delete(root_key + "_lock")
|
||||
for m in self.conn.scan_iter(root_key + '_instance_*'):
|
||||
self.conn.delete(m)
|
||||
|
||||
def inc(self, field, value):
|
||||
if value != 0:
|
||||
self.METRICS[field].inc(value)
|
||||
self.metrics_have_changed = True
|
||||
if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
|
||||
if self.auto_pipe_execute is True:
|
||||
self.pipe_execute()
|
||||
|
||||
def set(self, field, value):
|
||||
self.METRICS[field].set(value)
|
||||
self.metrics_have_changed = True
|
||||
if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
|
||||
if self.auto_pipe_execute is True:
|
||||
self.pipe_execute()
|
||||
|
||||
def get(self, field):
|
||||
return self.METRICS[field].get()
|
||||
|
||||
def decode(self, field):
|
||||
return self.METRICS[field].decode(self.conn)
|
||||
|
||||
def observe(self, field, value):
|
||||
self.METRICS[field].observe(value)
|
||||
self.metrics_have_changed = True
|
||||
if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
|
||||
if self.auto_pipe_execute is True:
|
||||
self.pipe_execute()
|
||||
|
||||
def serialize_local_metrics(self):
|
||||
@@ -249,8 +286,8 @@ class Metrics:
|
||||
|
||||
def send_metrics(self):
|
||||
# more than one thread could be calling this at the same time, so should
|
||||
# get acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '_lock', thread_local=False)
|
||||
# acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '_lock')
|
||||
if not lock.acquire(blocking=False):
|
||||
return
|
||||
try:
|
||||
|
||||
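send_metrics() above guards its work with a non-blocking Redis lock so only one caller broadcasts at a time. A stripped-down sketch of that acquire/release dance with redis-py follows; the broker URL is an assumption and the payload is a stub.

import redis

conn = redis.Redis.from_url('redis://localhost:6379/0')  # assumed broker URL
lock = conn.lock('awx_metrics_lock')

if lock.acquire(blocking=False):
    try:
        pass  # serialize and broadcast local metrics to other nodes here
    finally:
        lock.release()
else:
    pass  # another caller is already sending; skip this cycle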
@@ -112,7 +112,7 @@ register(
|
||||
encrypted=False,
|
||||
read_only=False,
|
||||
label=_('Red Hat customer username'),
|
||||
help_text=_('This username is used to send data to Insights for Ansible Automation Platform'),
|
||||
help_text=_('This username is used to send data to Automation Analytics'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
@@ -125,7 +125,7 @@ register(
|
||||
encrypted=True,
|
||||
read_only=False,
|
||||
label=_('Red Hat customer password'),
|
||||
help_text=_('This password is used to send data to Insights for Ansible Automation Platform'),
|
||||
help_text=_('This password is used to send data to Automation Analytics'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
@@ -162,8 +162,8 @@ register(
|
||||
default='https://example.com',
|
||||
schemes=('http', 'https'),
|
||||
allow_plain_hostname=True, # Allow hostname only without TLD.
|
||||
label=_('Insights for Ansible Automation Platform upload URL'),
|
||||
help_text=_('This setting is used to configure the upload URL for data collection for Red Hat Insights.'),
label=_('Automation Analytics upload URL'),
help_text=_('This setting is used to configure the upload URL for data collection for Automation Analytics.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
@@ -282,12 +282,25 @@ register(
|
||||
placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
|
||||
)
|
||||
|
||||
register(
|
||||
'GALAXY_TASK_ENV',
|
||||
field_class=fields.KeyValueField,
|
||||
label=_('Environment Variables for Galaxy Commands'),
|
||||
help_text=_(
|
||||
'Additional environment variables set for invocations of ansible-galaxy within project updates. '
|
||||
'Useful if you must use a proxy server for ansible-galaxy but not git.'
|
||||
),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
|
||||
)
|
||||
|
||||
register(
|
||||
'INSIGHTS_TRACKING_STATE',
|
||||
field_class=fields.BooleanField,
|
||||
default=False,
|
||||
label=_('Gather data for Insights for Ansible Automation Platform'),
|
||||
help_text=_('Enables the service to gather data on automation and send it to Red Hat Insights.'),
|
||||
label=_('Gather data for Automation Analytics'),
|
||||
help_text=_('Enables the service to gather data on automation and send it to Automation Analytics.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
@@ -714,7 +727,7 @@ register(
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last gather date for Insights for Ansible Automation Platform.'),
|
||||
label=_('Last gather date for Automation Analytics.'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
@@ -722,7 +735,7 @@ register(
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_ENTRIES',
|
||||
field_class=fields.CharField,
|
||||
label=_('Last gathered entries from the data collection service of Insights for Ansible Automation Platform'),
|
||||
label=_('Last gathered entries from the data collection service of Automation Analytics'),
|
||||
default='',
|
||||
allow_blank=True,
|
||||
category=_('System'),
|
||||
@@ -733,7 +746,7 @@ register(
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
||||
field_class=fields.IntegerField,
|
||||
label=_('Insights for Ansible Automation Platform Gather Interval'),
|
||||
label=_('Automation Analytics Gather Interval'),
|
||||
help_text=_('Interval (in seconds) between data gathering.'),
|
||||
default=14400, # every 4 hours
|
||||
min_value=1800, # every 30 minutes
|
||||
|
||||
@@ -100,3 +100,9 @@ JOB_VARIABLE_PREFIXES = [
|
||||
'awx',
|
||||
'tower',
|
||||
]
|
||||
|
||||
# Note: the \u001b[... are ANSI color codes. We don't currently import any of the python modules which define the codes.
|
||||
# Importing a library just for this message seemed like overkill
|
||||
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
|
||||
'\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
|
||||
)
|
||||
|
||||
@@ -10,6 +10,27 @@ from awx.main.models import Instance, UnifiedJob, WorkflowJob
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def startup_reaping():
|
||||
"""
|
||||
If this particular instance is starting, then we know that any running jobs are invalid
|
||||
so we will reap those jobs as a special action here
|
||||
"""
|
||||
me = Instance.objects.me()
|
||||
jobs = UnifiedJob.objects.filter(status='running', controller_node=me.hostname)
|
||||
job_ids = []
|
||||
for j in jobs:
|
||||
job_ids.append(j.id)
|
||||
j.status = 'failed'
|
||||
j.start_args = ''
|
||||
j.job_explanation += 'Task was marked as running at system startup. The system must not have shut down properly, so the task has been marked as failed.'
|
||||
j.save(update_fields=['status', 'start_args', 'job_explanation'])
|
||||
if hasattr(j, 'send_notification_templates'):
|
||||
j.send_notification_templates('failed')
|
||||
j.websocket_emit_status('failed')
|
||||
if job_ids:
|
||||
logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup')
|
||||
|
||||
|
||||
def reap_job(j, status):
|
||||
if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
|
||||
# just in case, don't reap jobs that aren't running
|
||||
|
||||
@@ -169,8 +169,9 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
|
||||
self.pg_down_time = time.time()
|
||||
self.pg_is_down = True
|
||||
if time.time() - self.pg_down_time > self.pg_max_wait:
|
||||
logger.warning(f"Postgres event consumer has not recovered in {self.pg_max_wait} s, exiting")
|
||||
current_downtime = time.time() - self.pg_down_time
|
||||
if current_downtime > self.pg_max_wait:
|
||||
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
|
||||
raise
|
||||
# Wait for a second before next attempt, but still listen for any shutdown signals
|
||||
for i in range(10):
|
||||
@@ -179,6 +180,10 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
time.sleep(0.1)
|
||||
for conn in db.connections.all():
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in dispatcher main loop')
|
||||
raise
|
||||
|
||||
|
||||
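The consumer loop above now tracks how long Postgres has been unreachable and re-raises once the accumulated downtime exceeds pg_max_wait. The simplified sketch below reconstructs that behaviour; the "record pg_down_time only on the first failure" guard is an assumption, since the lines just above this hunk are not shown.

import logging
import time

logger = logging.getLogger(__name__)


def run_with_bounded_retries(listen_once, pg_max_wait=60.0):
    pg_is_down = False
    pg_down_time = 0.0
    while True:
        try:
            listen_once()
            pg_is_down = False
        except Exception:
            if not pg_is_down:  # assumed guard: record downtime start only once
                logger.exception("Error consuming new events, will retry for %s s", pg_max_wait)
                pg_down_time = time.time()
                pg_is_down = True
            current_downtime = time.time() - pg_down_time
            if current_downtime > pg_max_wait:
                logger.exception("Consumer has not recovered in %s s, exiting", current_downtime)
                raise
            time.sleep(1)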
class BaseWorker(object):
|
||||
|
||||
@@ -4,10 +4,12 @@ import os
|
||||
import signal
|
||||
import time
|
||||
import traceback
|
||||
import datetime
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.db import DatabaseError, OperationalError, connection as django_connection
|
||||
from django.db import DatabaseError, OperationalError, transaction, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django_guid import set_guid
|
||||
|
||||
@@ -16,8 +18,8 @@ import psutil
|
||||
import redis
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob, Job
|
||||
from awx.main.tasks.system import handle_success_and_failure_notifications
|
||||
from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.models.events import emit_event_detail
|
||||
from awx.main.utils.profiling import AWXProfiler
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
@@ -26,6 +28,32 @@ from .base import BaseWorker
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
|
||||
def job_stats_wrapup(job_identifier, event=None):
|
||||
"""Fill in the unified job host_status_counts, fire off notifications if needed"""
|
||||
try:
|
||||
# empty dict (versus default of None) can still indicate that events have been processed
|
||||
# for job types like system jobs, and jobs with no hosts matched
|
||||
host_status_counts = {}
|
||||
if event:
|
||||
host_status_counts = event.get_host_status_counts()
|
||||
|
||||
# Update host_status_counts while holding the row lock
|
||||
with transaction.atomic():
|
||||
uj = UnifiedJob.objects.select_for_update().get(pk=job_identifier)
|
||||
uj.host_status_counts = host_status_counts
|
||||
uj.save(update_fields=['host_status_counts'])
|
||||
|
||||
uj.log_lifecycle("stats_wrapup_finished")
|
||||
|
||||
# If the status was a finished state before this update was made, send notifications
|
||||
# If not, we will send notifications when the status changes
|
||||
if uj.status not in ACTIVE_STATES:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
|
||||
except Exception:
|
||||
logger.exception('Worker failed to save stats or emit notifications: Job {}'.format(job_identifier))
|
||||
|
||||
|
||||
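job_stats_wrapup() above takes a row lock with select_for_update() inside transaction.atomic() before writing host_status_counts, so concurrent callback workers cannot overwrite each other. The essential pattern, reduced to a standalone helper:

from django.db import transaction

from awx.main.models import UnifiedJob  # import path taken from this diff


def save_host_status_counts(job_id, host_status_counts):
    with transaction.atomic():
        # The row lock is held until the transaction commits.
        uj = UnifiedJob.objects.select_for_update().get(pk=job_id)
        uj.host_status_counts = host_status_counts
        uj.save(update_fields=['host_status_counts'])
    return uj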
class CallbackBrokerWorker(BaseWorker):
|
||||
"""
|
||||
A worker implementation that deserializes callback event data and persists
|
||||
@@ -44,7 +72,6 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
|
||||
def __init__(self):
|
||||
self.buff = {}
|
||||
self.pid = os.getpid()
|
||||
self.redis = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
self.queue_pop = 0
|
||||
@@ -53,6 +80,11 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
|
||||
self.redis.delete(key)
|
||||
|
||||
@cached_property
|
||||
def pid(self):
|
||||
"""This needs to be obtained after forking, or else it will give the parent process"""
|
||||
return os.getpid()
|
||||
|
||||
def read(self, queue):
|
||||
try:
|
||||
res = self.redis.blpop(self.queue_name, timeout=1)
|
||||
@@ -120,12 +152,17 @@ class CallbackBrokerWorker(BaseWorker):
metrics_singular_events_saved = 0
metrics_events_batch_save_errors = 0
metrics_events_broadcast = 0
metrics_events_missing_created = 0
metrics_total_job_event_processing_seconds = datetime.timedelta(seconds=0)
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
e.modified = now # this can be set before created because now is set above on line 149
if not e.created:
e.created = now
e.modified = now
metrics_events_missing_created += 1
else: # only calculate the seconds if the created time already has been set
metrics_total_job_event_processing_seconds += e.modified - e.created
metrics_duration_to_save = time.perf_counter()
try:
cls.objects.bulk_create(events)
@@ -146,6 +183,8 @@ class CallbackBrokerWorker(BaseWorker):
if not getattr(e, '_skip_websocket_message', False):
metrics_events_broadcast += 1
emit_event_detail(e)
if getattr(e, '_notification_trigger_event', False):
job_stats_wrapup(getattr(e, e.JOB_REFERENCE), event=e)
self.buff = {}
self.last_flush = time.time()
# only update metrics if we saved events
@@ -156,6 +195,11 @@ class CallbackBrokerWorker(BaseWorker):
self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', metrics_bulk_events_saved)
self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(metrics_bulk_events_saved + metrics_singular_events_saved))
self.subsystem_metrics.inc('callback_receiver_events_broadcast', metrics_events_broadcast)
self.subsystem_metrics.set(
'callback_receiver_event_processing_avg_seconds',
metrics_total_job_event_processing_seconds.total_seconds()
/ (metrics_bulk_events_saved + metrics_singular_events_saved - metrics_events_missing_created),
)
if self.subsystem_metrics.should_pipe_execute() is True:
self.subsystem_metrics.pipe_execute()

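For reference, a minimal sketch of how the new callback_receiver_event_processing_avg_seconds value is derived; the zero-denominator guard is an assumption added for illustration and is not part of the change above:

    import datetime

    def event_processing_avg_seconds(total_processing, bulk_saved, singular_saved, missing_created):
        # events that arrived without a created timestamp are excluded from the average
        denominator = bulk_saved + singular_saved - missing_created
        if denominator <= 0:  # assumed guard; the worker only updates metrics when events were saved
            return 0.0
        return total_processing.total_seconds() / denominator

    # e.g. event_processing_avg_seconds(datetime.timedelta(seconds=12), 100, 5, 5) -> 0.12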
@@ -165,47 +209,32 @@ class CallbackBrokerWorker(BaseWorker):
if flush:
self.last_event = ''
if not flush:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}

job_identifier = 'unknown job'
for key, cls in event_map.items():
if key in body:
job_identifier = body[key]
for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent):
if cls.JOB_REFERENCE in body:
job_identifier = body[cls.JOB_REFERENCE]
break

self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})' # noqa

notification_trigger_event = bool(body.get('event') == cls.WRAPUP_EVENT)

if body.get('event') == 'EOF':
try:
if 'guid' in body:
set_guid(body['guid'])
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
logger.info('Starting EOF event processing for Job {}'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter))
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)

if isinstance(uj, Job):
# *actual playbooks* send their success/failure
# notifications in response to the playbook_on_stats
# event handling code in main.models.events
pass
elif hasattr(uj, 'send_notification_templates'):
handle_success_and_failure_notifications.apply_async([uj.id])
if notification_trigger_event:
job_stats_wrapup(job_identifier)
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
logger.exception('Worker failed to perform EOF tasks: Job {}'.format(job_identifier))
finally:
self.subsystem_metrics.inc('callback_receiver_events_in_memory', -1)
set_guid('')
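A rough, self-contained illustration of the JOB_REFERENCE lookup the hunk above switches to; the stub classes and payload are invented for the example:

    class _StubJobEvent:
        JOB_REFERENCE = 'job_id'

    class _StubProjectUpdateEvent:
        JOB_REFERENCE = 'project_update_id'

    def find_job_identifier(body, event_classes=(_StubJobEvent, _StubProjectUpdateEvent)):
        # each event class declares which payload key points at its parent unified job
        for cls in event_classes:
            if cls.JOB_REFERENCE in body:
                return body[cls.JOB_REFERENCE]
        return 'unknown job'

    # e.g. find_job_identifier({'project_update_id': 12, 'event': 'EOF'}) -> 12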
@@ -215,9 +244,12 @@ class CallbackBrokerWorker(BaseWorker):

event = cls.create_from_data(**body)

if skip_websocket_message:
if skip_websocket_message: # if this event sends websocket messages, fire them off on flush
event._skip_websocket_message = True

if notification_trigger_event: # if this is an Ansible stats event, ensure notifications on flush
event._notification_trigger_event = True

self.buff.setdefault(cls, []).append(event)

retries = 0

@@ -103,7 +103,7 @@ class DeleteMeta:

with connection.cursor() as cursor:
query = "SELECT inhrelid::regclass::text AS child FROM pg_catalog.pg_inherits"
query += f" WHERE inhparent = 'public.{tbl_name}'::regclass"
query += f" WHERE inhparent = '{tbl_name}'::regclass"
query += f" AND TO_TIMESTAMP(LTRIM(inhrelid::regclass::text, '{tbl_name}_'), 'YYYYMMDD_HH24') < '{self.cutoff}'"
query += " ORDER BY inhrelid::regclass::text"

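The TO_TIMESTAMP/LTRIM expression above recovers a partition's hour from its name; a rough Python equivalent (table and partition names here are made up) might look like:

    from datetime import datetime

    def partition_is_older_than(child_name, tbl_name, cutoff):
        # partitions are named '<tbl_name>_YYYYMMDD_HH', matching the SQL format string 'YYYYMMDD_HH24'
        suffix = child_name[len(tbl_name) + 1:]
        return datetime.strptime(suffix, '%Y%m%d_%H') < cutoff

    # e.g. partition_is_older_than('main_jobevent_20220427_02', 'main_jobevent', datetime(2022, 5, 1)) -> True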
@@ -32,8 +32,10 @@ class Command(BaseCommand):
|
||||
name='Demo Project',
|
||||
scm_type='git',
|
||||
scm_url='https://github.com/ansible/ansible-tower-samples',
|
||||
scm_update_on_launch=True,
|
||||
scm_update_cache_timeout=0,
|
||||
status='successful',
|
||||
scm_revision='347e44fea036c94d5f60e544de006453ee5c71ad',
|
||||
playbook_files=['hello_world.yml'],
|
||||
)
|
||||
|
||||
p.organization = o
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
import os
|
||||
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction
|
||||
from django.conf import settings
|
||||
@@ -14,7 +16,12 @@ class Command(BaseCommand):
|
||||
Register this instance with the database for HA tracking.
|
||||
"""
|
||||
|
||||
help = "Add instance to the database. When no options are provided, the hostname of the current system will be used. Override with `--hostname`."
|
||||
help = (
|
||||
"Add instance to the database. "
|
||||
"When no options are provided, values from Django settings will be used to register the current system, "
|
||||
"as well as the default queues if needed (only used or enabled for Kubernetes installs). "
|
||||
"Override with `--hostname`."
|
||||
)
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
|
||||
@@ -25,7 +32,14 @@ class Command(BaseCommand):
|
||||
if not hostname:
|
||||
if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
raise CommandError('Registering with values from settings only intended for use in K8s installs')
|
||||
(changed, instance) = Instance.objects.get_or_register()
|
||||
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
|
||||
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
|
||||
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||
RegisterQueue(
|
||||
settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
|
||||
).register()
|
||||
else:
|
||||
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
|
||||
if changed:
|
||||
|
||||
@@ -53,7 +53,7 @@ class Command(BaseCommand):
|
||||
# (like the node heartbeat)
|
||||
periodic.run_continuously()
|
||||
|
||||
reaper.reap()
|
||||
reaper.startup_reaping()
|
||||
consumer = None
|
||||
|
||||
try:
|
||||
|
||||
@@ -2,16 +2,14 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
import logging
|
||||
import os
|
||||
from django.db import models
|
||||
from django.conf import settings
|
||||
from django.db.models.functions import Lower
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils.common import get_capacity_type
|
||||
from awx.main.constants import RECEPTOR_PENDING
|
||||
|
||||
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager', 'DeferJobCreatedManager', 'UUID_DEFAULT']
|
||||
___all__ = ['HostManager', 'InstanceManager', 'DeferJobCreatedManager', 'UUID_DEFAULT']
|
||||
|
||||
logger = logging.getLogger('awx.main.managers')
|
||||
UUID_DEFAULT = '00000000-0000-0000-0000-000000000000'
|
||||
@@ -163,136 +161,3 @@ class InstanceManager(models.Manager):
|
||||
create_defaults['version'] = RECEPTOR_PENDING
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
|
||||
return (True, instance)
|
||||
|
||||
def get_or_register(self):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
|
||||
pod_ip = os.environ.get('MY_POD_IP')
|
||||
if settings.IS_K8S:
|
||||
registered = self.register(ip_address=pod_ip, node_type='control', uuid=settings.SYSTEM_UUID)
|
||||
else:
|
||||
registered = self.register(ip_address=pod_ip, uuid=settings.SYSTEM_UUID)
|
||||
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||
RegisterQueue(
|
||||
settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
|
||||
).register()
|
||||
return registered
|
||||
else:
|
||||
return (False, self.me())
|
||||
|
||||
|
||||
class InstanceGroupManager(models.Manager):
|
||||
"""A custom manager class for the Instance model.
|
||||
|
||||
Used for global capacity calculations
|
||||
"""
|
||||
|
||||
def capacity_mapping(self, qs=None):
|
||||
"""
|
||||
Another entry-point to Instance manager method by same name
|
||||
"""
|
||||
if qs is None:
|
||||
qs = self.all().prefetch_related('instances')
|
||||
instance_ig_mapping = {}
|
||||
ig_instance_mapping = {}
|
||||
# Create dictionaries that represent basic m2m memberships
|
||||
for group in qs:
|
||||
ig_instance_mapping[group.name] = set(instance.hostname for instance in group.instances.all() if instance.capacity != 0)
|
||||
for inst in group.instances.all():
|
||||
if inst.capacity == 0:
|
||||
continue
|
||||
instance_ig_mapping.setdefault(inst.hostname, set())
|
||||
instance_ig_mapping[inst.hostname].add(group.name)
|
||||
# Get IG capacity overlap mapping
|
||||
ig_ig_mapping = get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping)
|
||||
|
||||
return instance_ig_mapping, ig_ig_mapping
|
||||
|
||||
@staticmethod
|
||||
def zero_out_group(graph, name, breakdown):
|
||||
if name not in graph:
|
||||
graph[name] = {}
|
||||
graph[name]['consumed_capacity'] = 0
|
||||
for capacity_type in ('execution', 'control'):
|
||||
graph[name][f'consumed_{capacity_type}_capacity'] = 0
|
||||
if breakdown:
|
||||
graph[name]['committed_capacity'] = 0
|
||||
graph[name]['running_capacity'] = 0
|
||||
|
||||
def capacity_values(self, qs=None, tasks=None, breakdown=False, graph=None):
|
||||
"""
|
||||
Returns a dictionary of capacity values for all IGs
|
||||
"""
|
||||
if qs is None: # Optionally BYOQS - bring your own queryset
|
||||
qs = self.all().prefetch_related('instances')
|
||||
instance_ig_mapping, ig_ig_mapping = self.capacity_mapping(qs=qs)
|
||||
|
||||
if tasks is None:
|
||||
tasks = self.model.unifiedjob_set.related.related_model.objects.filter(status__in=('running', 'waiting'))
|
||||
|
||||
if graph is None:
|
||||
graph = {group.name: {} for group in qs}
|
||||
for group_name in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
for t in tasks:
|
||||
# TODO: dock capacity for isolated job management tasks running in queue
|
||||
impact = t.task_impact
|
||||
control_groups = []
|
||||
if t.controller_node:
|
||||
control_groups = instance_ig_mapping.get(t.controller_node, [])
|
||||
if not control_groups:
|
||||
logger.warning(f"No instance group found for {t.controller_node}, capacity consumed may be innaccurate.")
|
||||
|
||||
if t.status == 'waiting' or (not t.execution_node and not t.is_container_group_task):
|
||||
# Subtract capacity from any peer groups that share instances
|
||||
if not t.instance_group:
|
||||
impacted_groups = []
|
||||
elif t.instance_group.name not in ig_ig_mapping:
|
||||
# Waiting job in group with 0 capacity has no collateral impact
|
||||
impacted_groups = [t.instance_group.name]
|
||||
else:
|
||||
impacted_groups = ig_ig_mapping[t.instance_group.name]
|
||||
for group_name in impacted_groups:
|
||||
if group_name not in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
graph[group_name]['consumed_capacity'] += impact
|
||||
capacity_type = get_capacity_type(t)
|
||||
graph[group_name][f'consumed_{capacity_type}_capacity'] += impact
|
||||
if breakdown:
|
||||
graph[group_name]['committed_capacity'] += impact
|
||||
for group_name in control_groups:
|
||||
if group_name not in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
graph[group_name][f'consumed_control_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
if breakdown:
|
||||
graph[group_name]['committed_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
elif t.status == 'running':
|
||||
# Subtract capacity from all groups that contain the instance
|
||||
if t.execution_node not in instance_ig_mapping:
|
||||
if not t.is_container_group_task:
|
||||
logger.warning('Detected %s running inside lost instance, ' 'may still be waiting for reaper.', t.log_format)
|
||||
if t.instance_group:
|
||||
impacted_groups = [t.instance_group.name]
|
||||
else:
|
||||
impacted_groups = []
|
||||
else:
|
||||
impacted_groups = instance_ig_mapping[t.execution_node]
|
||||
|
||||
for group_name in impacted_groups:
|
||||
if group_name not in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
graph[group_name]['consumed_capacity'] += impact
|
||||
capacity_type = get_capacity_type(t)
|
||||
graph[group_name][f'consumed_{capacity_type}_capacity'] += impact
|
||||
if breakdown:
|
||||
graph[group_name]['running_capacity'] += impact
|
||||
for group_name in control_groups:
|
||||
if group_name not in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
graph[group_name][f'consumed_control_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
if breakdown:
|
||||
graph[group_name]['running_capacity'] += settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
else:
|
||||
logger.error('Programming error, %s not in ["running", "waiting"]', t.log_format)
|
||||
return graph
|
||||
|
||||
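To make the bookkeeping in capacity_values easier to follow, here is a stripped-down sketch of the same idea using plain dicts; the task tuples and group mapping are invented for the example:

    from collections import defaultdict

    def consume_capacity(tasks, instance_ig_mapping):
        # every instance group containing the task's execution node absorbs the task's impact
        graph = defaultdict(lambda: {'consumed_capacity': 0})
        for status, execution_node, impact in tasks:
            if status != 'running':
                continue
            for group_name in instance_ig_mapping.get(execution_node, ()):
                graph[group_name]['consumed_capacity'] += impact
        return dict(graph)

    # e.g. consume_capacity([('running', 'node1', 20)], {'node1': {'default', 'controlplane'}})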
@@ -26,6 +26,17 @@ logger = logging.getLogger('awx.main.middleware')
perf_logger = logging.getLogger('awx.analytics.performance')


class SettingsCacheMiddleware(MiddlewareMixin):
"""
Clears the in-memory settings cache at the beginning of a request.
We do this so that a script can POST to /api/v2/settings/all/ and then
right away GET /api/v2/settings/all/ and see the updated value.
"""

def process_request(self, request):
settings._awx_conf_memoizedcache.clear()


class TimingMiddleware(threading.local, MiddlewareMixin):

dest = '/var/log/tower/profile'

@@ -0,0 +1,21 @@
|
||||
# Generated by Django 3.2.12 on 2022-03-31 17:28
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0158_make_instance_cpu_decimal'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='update_on_project_update',
|
||||
field=models.BooleanField(
|
||||
default=False,
|
||||
help_text='This field is deprecated and will be removed in a future release. In future release, functionality will be migrated to source project update_on_launch.',
|
||||
),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0160_alter_schedule_rrule.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.12 on 2022-04-18 21:29
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0159_deprecate_inventory_source_UoPU_field'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='schedule',
|
||||
name='rrule',
|
||||
field=models.TextField(help_text='A value representing the schedules iCal recurrence rule.'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0161_unifiedjob_host_status_counts.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.12 on 2022-04-27 02:16
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0160_alter_schedule_rrule'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='host_status_counts',
|
||||
field=models.JSONField(blank=True, default=None, editable=False, help_text='Playbook stats from the Ansible playbook_on_stats event.', null=True),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0162_alter_unifiedjob_dependent_jobs.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.13 on 2022-05-02 21:27
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0161_unifiedjob_host_status_counts'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='dependent_jobs',
|
||||
field=models.ManyToManyField(editable=False, related_name='unifiedjob_blocked_jobs', to='main.UnifiedJob'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0163_convert_job_tags_to_textfield.py (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Generated by Django 3.2.13 on 2022-06-02 18:15
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0162_alter_unifiedjob_dependent_jobs'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='job',
|
||||
name='job_tags',
|
||||
field=models.TextField(blank=True, default=''),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='job_tags',
|
||||
field=models.TextField(blank=True, default=''),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,40 @@
|
||||
# Generated by Django 3.2.13 on 2022-06-21 21:29
|
||||
|
||||
from django.db import migrations
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger("awx")
|
||||
|
||||
|
||||
def forwards(apps, schema_editor):
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
sources = InventorySource.objects.filter(update_on_project_update=True)
|
||||
for src in sources:
|
||||
if src.update_on_launch == False:
|
||||
src.update_on_launch = True
|
||||
src.save(update_fields=['update_on_launch'])
|
||||
logger.info(f"Setting update_on_launch to True for {src}")
|
||||
proj = src.source_project
|
||||
if proj and proj.scm_update_on_launch is False:
|
||||
proj.scm_update_on_launch = True
|
||||
proj.save(update_fields=['scm_update_on_launch'])
|
||||
logger.warning(f"Setting scm_update_on_launch to True for {proj}")
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0163_convert_job_tags_to_textfield'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(forwards, migrations.RunPython.noop),
|
||||
migrations.RemoveField(
|
||||
model_name='inventorysource',
|
||||
name='scm_last_revision',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='inventorysource',
|
||||
name='update_on_project_update',
|
||||
),
|
||||
]
|
||||
@@ -35,6 +35,7 @@ def gce(cred, env, private_data_dir):
|
||||
container_path = to_container_path(path, private_data_dir)
|
||||
env['GCE_CREDENTIALS_FILE_PATH'] = container_path
|
||||
env['GCP_SERVICE_ACCOUNT_FILE'] = container_path
|
||||
env['GOOGLE_APPLICATION_CREDENTIALS'] = container_path
|
||||
|
||||
# Handle env variables for new module types.
|
||||
# This includes gcp_compute inventory plugin and
|
||||
|
||||
@@ -6,7 +6,7 @@ from collections import defaultdict
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import models, DatabaseError, connection
|
||||
from django.db import models, DatabaseError
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc, now
|
||||
@@ -126,6 +126,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
'host_name',
|
||||
'verbosity',
|
||||
]
|
||||
WRAPUP_EVENT = 'playbook_on_stats'
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
@@ -384,14 +385,6 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
job.get_event_queryset().filter(uuid__in=changed).update(changed=True)
|
||||
job.get_event_queryset().filter(uuid__in=failed).update(failed=True)
|
||||
|
||||
# send success/failure notifications when we've finished handling the playbook_on_stats event
|
||||
from awx.main.tasks.system import handle_success_and_failure_notifications # circular import
|
||||
|
||||
def _send_notifications():
|
||||
handle_success_and_failure_notifications.apply_async([job.id])
|
||||
|
||||
connection.on_commit(_send_notifications)
|
||||
|
||||
for field in ('playbook', 'play', 'task', 'role'):
|
||||
value = force_str(event_data.get(field, '')).strip()
|
||||
if value != getattr(self, field):
|
||||
@@ -470,6 +463,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
"""
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'job_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -600,6 +594,7 @@ UnpartitionedJobEvent._meta.db_table = '_unpartitioned_' + JobEvent._meta.db_tab
|
||||
class ProjectUpdateEvent(BasePlaybookEvent):
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'project_update_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -641,6 +636,7 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
"""
|
||||
|
||||
VALID_KEYS = ['event_data', 'created', 'counter', 'uuid', 'stdout', 'start_line', 'end_line', 'verbosity']
|
||||
WRAPUP_EVENT = 'EOF'
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
@@ -736,6 +732,8 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
class AdHocCommandEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id', 'job_created']
|
||||
WRAPUP_EVENT = 'playbook_on_stats' # exception to BaseCommandEvent
|
||||
JOB_REFERENCE = 'ad_hoc_command_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -836,6 +834,7 @@ UnpartitionedAdHocCommandEvent._meta.db_table = '_unpartitioned_' + AdHocCommand
|
||||
class InventoryUpdateEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'inventory_update_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -881,6 +880,7 @@ UnpartitionedInventoryUpdateEvent._meta.db_table = '_unpartitioned_' + Inventory
|
||||
class SystemJobEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'system_job_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
from decimal import Decimal
|
||||
import random
|
||||
import logging
|
||||
import os
|
||||
|
||||
@@ -20,7 +19,7 @@ from solo.models import SingletonModel
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.fields import JSONBlob
|
||||
from awx.main.managers import InstanceManager, InstanceGroupManager, UUID_DEFAULT
|
||||
from awx.main.managers import InstanceManager, UUID_DEFAULT
|
||||
from awx.main.constants import JOB_FOLDER_PREFIX
|
||||
from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
|
||||
from awx.main.models.unified_jobs import UnifiedJob
|
||||
@@ -175,12 +174,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
def jobs_total(self):
|
||||
return UnifiedJob.objects.filter(execution_node=self.hostname).count()
|
||||
|
||||
@staticmethod
|
||||
def choose_online_control_plane_node():
|
||||
return random.choice(
|
||||
Instance.objects.filter(enabled=True, capacity__gt=0).filter(node_type__in=['control', 'hybrid']).values_list('hostname', flat=True)
|
||||
)
|
||||
|
||||
def get_cleanup_task_kwargs(self, **kwargs):
|
||||
"""
|
||||
Produce options to use for the command: ansible-runner worker cleanup
|
||||
@@ -307,8 +300,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
|
||||
"""A model representing a Queue/Group of AWX Instances."""
|
||||
|
||||
objects = InstanceGroupManager()
|
||||
|
||||
name = models.CharField(max_length=250, unique=True)
|
||||
created = models.DateTimeField(auto_now_add=True)
|
||||
modified = models.DateTimeField(auto_now=True)
|
||||
@@ -366,37 +357,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
|
||||
@staticmethod
|
||||
def fit_task_to_most_remaining_capacity_instance(task, instances, impact=None, capacity_type=None, add_hybrid_control_cost=False):
|
||||
impact = impact if impact else task.task_impact
|
||||
capacity_type = capacity_type if capacity_type else task.capacity_type
|
||||
instance_most_capacity = None
|
||||
most_remaining_capacity = -1
|
||||
for i in instances:
|
||||
if i.node_type not in (capacity_type, 'hybrid'):
|
||||
continue
|
||||
would_be_remaining = i.remaining_capacity - impact
|
||||
# hybrid nodes _always_ control their own tasks
|
||||
if add_hybrid_control_cost and i.node_type == 'hybrid':
|
||||
would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
|
||||
instance_most_capacity = i
|
||||
most_remaining_capacity = would_be_remaining
|
||||
return instance_most_capacity
|
||||
|
||||
@staticmethod
|
||||
def find_largest_idle_instance(instances, capacity_type='execution'):
|
||||
largest_instance = None
|
||||
for i in instances:
|
||||
if i.node_type not in (capacity_type, 'hybrid'):
|
||||
continue
|
||||
if i.jobs_running == 0:
|
||||
if largest_instance is None:
|
||||
largest_instance = i
|
||||
elif i.capacity > largest_instance.capacity:
|
||||
largest_instance = i
|
||||
return largest_instance
|
||||
|
||||
def set_default_policy_fields(self):
|
||||
self.policy_instance_list = []
|
||||
self.policy_instance_minimum = 0
|
||||
|
||||
@@ -985,18 +985,11 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
default=None,
|
||||
null=True,
|
||||
)
|
||||
scm_last_revision = models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
update_on_project_update = models.BooleanField(
|
||||
default=False,
|
||||
)
|
||||
|
||||
update_on_launch = models.BooleanField(
|
||||
default=False,
|
||||
)
|
||||
|
||||
update_cache_timeout = models.PositiveIntegerField(
|
||||
default=0,
|
||||
)
|
||||
@@ -1034,14 +1027,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
self.name = 'inventory source (%s)' % replace_text
|
||||
if 'name' not in update_fields:
|
||||
update_fields.append('name')
|
||||
# Reset revision if SCM source has changed parameters
|
||||
if self.source == 'scm' and not is_new_instance:
|
||||
before_is = self.__class__.objects.get(pk=self.pk)
|
||||
if before_is.source_path != self.source_path or before_is.source_project_id != self.source_project_id:
|
||||
# Reset the scm_revision if file changed to force update
|
||||
self.scm_last_revision = ''
|
||||
if 'scm_last_revision' not in update_fields:
|
||||
update_fields.append('scm_last_revision')
|
||||
|
||||
# Do the actual save.
|
||||
super(InventorySource, self).save(*args, **kwargs)
|
||||
@@ -1050,10 +1035,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
if replace_text in self.name:
|
||||
self.name = self.name.replace(replace_text, str(self.pk))
|
||||
super(InventorySource, self).save(update_fields=['name'])
|
||||
if self.source == 'scm' and is_new_instance and self.update_on_project_update:
|
||||
# Schedule a new Project update if one is not already queued
|
||||
if self.source_project and not self.source_project.project_updates.filter(status__in=['new', 'pending', 'waiting']).exists():
|
||||
self.update()
|
||||
if not getattr(_inventory_updates, 'is_updating', False):
|
||||
if self.inventory is not None:
|
||||
self.inventory.update_computed_fields()
|
||||
@@ -1143,25 +1124,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
)
|
||||
return dict(error=list(error_notification_templates), started=list(started_notification_templates), success=list(success_notification_templates))
|
||||
|
||||
def clean_update_on_project_update(self):
|
||||
if (
|
||||
self.update_on_project_update is True
|
||||
and self.source == 'scm'
|
||||
and InventorySource.objects.filter(Q(inventory=self.inventory, update_on_project_update=True, source='scm') & ~Q(id=self.id)).exists()
|
||||
):
|
||||
raise ValidationError(_("More than one SCM-based inventory source with update on project update per-inventory not allowed."))
|
||||
return self.update_on_project_update
|
||||
|
||||
def clean_update_on_launch(self):
|
||||
if self.update_on_project_update is True and self.source == 'scm' and self.update_on_launch is True:
|
||||
raise ValidationError(
|
||||
_(
|
||||
"Cannot update SCM-based inventory source on launch if set to update on project update. "
|
||||
"Instead, configure the corresponding source project to update on launch."
|
||||
)
|
||||
)
|
||||
return self.update_on_launch
|
||||
|
||||
def clean_source_path(self):
|
||||
if self.source != 'scm' and self.source_path:
|
||||
raise ValidationError(_("Cannot set source_path if not SCM type."))
|
||||
@@ -1297,13 +1259,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
return self.global_instance_groups
|
||||
return selected_groups
|
||||
|
||||
def cancel(self, job_explanation=None, is_chain=False):
|
||||
res = super(InventoryUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
|
||||
if res:
|
||||
if self.launch_type != 'scm' and self.source_project_update:
|
||||
self.source_project_update.cancel(job_explanation=job_explanation)
|
||||
return res
|
||||
|
||||
|
||||
class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):
|
||||
class Meta:
|
||||
|
||||
@@ -130,8 +130,7 @@ class JobOptions(BaseModel):
|
||||
)
|
||||
)
|
||||
)
|
||||
job_tags = models.CharField(
|
||||
max_length=1024,
|
||||
job_tags = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
)
|
||||
@@ -744,6 +743,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
return "$hidden due to Ansible no_log flag$"
|
||||
return artifacts
|
||||
|
||||
def get_effective_artifacts(self, **kwargs):
|
||||
"""Return unified job artifacts (from set_stats) to pass downstream in workflows"""
|
||||
if isinstance(self.artifacts, dict):
|
||||
return self.artifacts
|
||||
return {}
|
||||
|
||||
@property
|
||||
def is_container_group_task(self):
|
||||
return bool(self.instance_group and self.instance_group.is_container_group)
|
||||
|
||||
@@ -407,41 +407,54 @@ class TaskManagerUnifiedJobMixin(models.Model):
|
||||
def get_jobs_fail_chain(self):
|
||||
return []
|
||||
|
||||
def dependent_jobs_finished(self):
|
||||
return True
|
||||
|
||||
|
||||
class TaskManagerJobMixin(TaskManagerUnifiedJobMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
return [self.project_update] if self.project_update else []
|
||||
|
||||
def dependent_jobs_finished(self):
|
||||
for j in self.dependent_jobs.all():
|
||||
if j.status in ['pending', 'waiting', 'running']:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class TaskManagerUpdateOnLaunchMixin(TaskManagerUnifiedJobMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
return list(self.dependent_jobs.all())
|
||||
|
||||
|
||||
class TaskManagerProjectUpdateMixin(TaskManagerUpdateOnLaunchMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
# project update can be a dependency of an inventory update, in which
|
||||
# case we need to fail the job that may have spawned the inventory
|
||||
# update.
|
||||
# The inventory update will fail, but since it is not running it will
|
||||
# not cascade fail to the job from the errback logic in apply_async. As
|
||||
# such we should capture it here.
|
||||
blocked_jobs = list(self.unifiedjob_blocked_jobs.all().prefetch_related("unifiedjob_blocked_jobs"))
|
||||
other_tasks = []
|
||||
for b in blocked_jobs:
|
||||
other_tasks += list(b.unifiedjob_blocked_jobs.all())
|
||||
return blocked_jobs + other_tasks
|
||||
|
||||
|
||||
class TaskManagerInventoryUpdateMixin(TaskManagerUpdateOnLaunchMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
blocked_jobs = list(self.unifiedjob_blocked_jobs.all())
|
||||
other_updates = []
|
||||
if blocked_jobs:
|
||||
# blocked_jobs[0] is just a reference to a job that depends on this
|
||||
# inventory update.
|
||||
# We can look at the dependencies of this blocked job to find other
|
||||
# inventory sources that are safe to fail.
|
||||
# Since the dependencies could also include project updates,
|
||||
# we need to check for type.
|
||||
for dep in blocked_jobs[0].dependent_jobs.all():
|
||||
if type(dep) is type(self) and dep.id != self.id:
|
||||
other_updates.append(dep)
|
||||
return blocked_jobs + other_updates
|
||||
|
||||
|
||||
class ExecutionEnvironmentMixin(models.Model):
|
||||
class Meta:
|
||||
|
||||
@@ -408,6 +408,7 @@ class JobNotificationMixin(object):
|
||||
'inventory': 'Stub Inventory',
|
||||
'id': 42,
|
||||
'hosts': {},
|
||||
'extra_vars': {},
|
||||
'friendly_name': 'Job',
|
||||
'finished': False,
|
||||
'credential': 'Stub credential',
|
||||
@@ -421,21 +422,8 @@ class JobNotificationMixin(object):
|
||||
The context will contain allowed content retrieved from a serialized job object
|
||||
(see JobNotificationMixin.JOB_FIELDS_ALLOWED_LIST the job's friendly name,
|
||||
and a url to the job run."""
|
||||
job_context = {'host_status_counts': {}}
|
||||
summary = None
|
||||
try:
|
||||
has_event_property = any([f for f in self.event_class._meta.fields if f.name == 'event'])
|
||||
except NotImplementedError:
|
||||
has_event_property = False
|
||||
if has_event_property:
|
||||
qs = self.get_event_queryset()
|
||||
if qs:
|
||||
event = qs.only('event_data').filter(event='playbook_on_stats').first()
|
||||
if event:
|
||||
summary = event.get_host_status_counts()
|
||||
job_context['host_status_counts'] = summary
|
||||
context = {
|
||||
'job': job_context,
|
||||
'job': {'host_status_counts': self.host_status_counts},
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_metadata': json.dumps(self.notification_data(), ensure_ascii=False, indent=4),
|
||||
|
||||
@@ -114,13 +114,6 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
|
||||
def _get_related_jobs(self):
|
||||
return UnifiedJob.objects.non_polymorphic().filter(organization=self)
|
||||
|
||||
def create_default_galaxy_credential(self):
|
||||
from awx.main.models import Credential
|
||||
|
||||
public_galaxy_credential = Credential.objects.filter(managed=True, name='Ansible Galaxy').first()
|
||||
if public_galaxy_credential is not None and public_galaxy_credential not in self.galaxy_credentials.all():
|
||||
self.galaxy_credentials.add(public_galaxy_credential)
|
||||
|
||||
|
||||
class OrganizationGalaxyCredentialMembership(models.Model):
|
||||
|
||||
|
||||
@@ -354,7 +354,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
skip_update = bool(kwargs.pop('skip_update', False))
|
||||
self._skip_update = bool(kwargs.pop('skip_update', False))
|
||||
# Create auto-generated local path if project uses SCM.
|
||||
if self.pk and self.scm_type and not self.local_path.startswith('_'):
|
||||
slug_name = slugify(str(self.name)).replace(u'-', u'_')
|
||||
@@ -372,14 +372,16 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with disable_activity_stream():
|
||||
self.save(update_fields=update_fields)
|
||||
self.save(update_fields=update_fields, skip_update=self._skip_update)
|
||||
# If we just created a new project with SCM, start the initial update.
|
||||
# also update if certain fields have changed
|
||||
relevant_change = any(pre_save_vals.get(fd_name, None) != self._prior_values_store.get(fd_name, None) for fd_name in self.FIELDS_TRIGGER_UPDATE)
|
||||
if (relevant_change or new_instance) and (not skip_update) and self.scm_type:
|
||||
if (relevant_change or new_instance) and (not self._skip_update) and self.scm_type:
|
||||
self.update()
|
||||
|
||||
def _get_current_status(self):
|
||||
if getattr(self, '_skip_update', False):
|
||||
return self.status
|
||||
if self.scm_type:
|
||||
if self.current_job and self.current_job.status:
|
||||
return self.current_job.status
|
||||
|
||||
@@ -81,32 +81,41 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
dtend = models.DateTimeField(
|
||||
null=True, default=None, editable=False, help_text=_("The last occurrence of the schedule occurs before this time, afterwards the schedule expires.")
|
||||
)
|
||||
rrule = models.CharField(max_length=255, help_text=_("A value representing the schedules iCal recurrence rule."))
|
||||
rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule."))
|
||||
next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run."))
|
||||
|
||||
@classmethod
|
||||
def get_zoneinfo(self):
|
||||
def get_zoneinfo(cls):
|
||||
return sorted(get_zonefile_instance().zones)
|
||||
|
||||
@classmethod
|
||||
def get_zoneinfo_links(cls):
|
||||
return_val = {}
|
||||
zone_instance = get_zonefile_instance()
|
||||
for zone_name in zone_instance.zones:
|
||||
if str(zone_name) != str(zone_instance.zones[zone_name]._filename):
|
||||
return_val[zone_name] = zone_instance.zones[zone_name]._filename
|
||||
return return_val
|
||||
|
||||
@property
|
||||
def timezone(self):
|
||||
utc = tzutc()
|
||||
# All rules in a ruleset will have the same dtstart so we can just take the first rule
|
||||
tzinfo = Schedule.rrulestr(self.rrule)._rrule[0]._dtstart.tzinfo
|
||||
if tzinfo is utc:
|
||||
return 'UTC'
|
||||
all_zones = Schedule.get_zoneinfo()
|
||||
all_zones.sort(key=lambda x: -len(x))
|
||||
for r in Schedule.rrulestr(self.rrule)._rrule:
|
||||
if r._dtstart:
|
||||
tzinfo = r._dtstart.tzinfo
|
||||
if tzinfo is utc:
|
||||
return 'UTC'
|
||||
fname = getattr(tzinfo, '_filename', None)
|
||||
if fname:
|
||||
for zone in all_zones:
|
||||
if fname.endswith(zone):
|
||||
return zone
|
||||
fname = getattr(tzinfo, '_filename', None)
|
||||
if fname:
|
||||
for zone in all_zones:
|
||||
if fname.endswith(zone):
|
||||
return zone
|
||||
logger.warning('Could not detect valid zoneinfo for {}'.format(self.rrule))
|
||||
return ''
|
||||
|
||||
@property
|
||||
# TODO: How would we handle multiple until parameters? The UI is currently using this on the edit screen of a schedule
|
||||
def until(self):
|
||||
# The UNTIL= datestamp (if any) coerced from UTC to the local naive time
|
||||
# of the DTSTART
|
||||
@@ -134,34 +143,48 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
# timezone (America/New_York), and so we'll coerce to UTC _for you_
|
||||
# automatically.
|
||||
#
|
||||
if 'until=' in rrule.lower():
|
||||
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
|
||||
# to the proper UTC date
|
||||
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
|
||||
if not len(match_until.group('utcflag')):
|
||||
# rrule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
|
||||
# Find the UNTIL=N part of the string
|
||||
# naive_until = UNTIL=20200601T170000
|
||||
naive_until = match_until.group('until')
|
||||
# Find the DTSTART rule or raise an error, its usually the first rule but that is not strictly enforced
|
||||
start_date_rule = re.sub('^.*(DTSTART[^\s]+)\s.*$', r'\1', rrule)
|
||||
if not start_date_rule:
|
||||
raise ValueError('A DTSTART field needs to be in the rrule')
|
||||
|
||||
# What is the DTSTART timezone for:
|
||||
# DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000Z
|
||||
# local_tz = tzfile('/usr/share/zoneinfo/America/New_York')
|
||||
local_tz = dateutil.rrule.rrulestr(rrule.replace(naive_until, naive_until + 'Z'), tzinfos=UTC_TIMEZONES)._dtstart.tzinfo
|
||||
rules = re.split(r'\s+', rrule)
|
||||
for index in range(0, len(rules)):
|
||||
rule = rules[index]
|
||||
if 'until=' in rule.lower():
|
||||
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
|
||||
# to the proper UTC date
|
||||
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rule)
|
||||
if not len(match_until.group('utcflag')):
|
||||
# rule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
|
||||
# Make a datetime object with tzinfo=<the DTSTART timezone>
|
||||
# localized_until = datetime.datetime(2020, 6, 1, 17, 0, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
|
||||
localized_until = make_aware(datetime.datetime.strptime(re.sub('^UNTIL=', '', naive_until), "%Y%m%dT%H%M%S"), local_tz)
|
||||
# Find the UNTIL=N part of the string
|
||||
# naive_until = UNTIL=20200601T170000
|
||||
naive_until = match_until.group('until')
|
||||
|
||||
# Coerce the datetime to UTC and format it as a string w/ Zulu format
|
||||
# utc_until = UNTIL=20200601T220000Z
|
||||
utc_until = 'UNTIL=' + localized_until.astimezone(pytz.utc).strftime('%Y%m%dT%H%M%SZ')
|
||||
# What is the DTSTART timezone for:
|
||||
# DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000Z
|
||||
# local_tz = tzfile('/usr/share/zoneinfo/America/New_York')
|
||||
# We are going to construct a 'dummy' rule for parsing which will include the DTSTART and the rest of the rule
|
||||
temp_rule = "{} {}".format(start_date_rule, rule.replace(naive_until, naive_until + 'Z'))
|
||||
# If the rule is an EX rule we have to add an RRULE to it because an EX rule alone will not manifest into a ruleset
|
||||
if rule.lower().startswith('ex'):
|
||||
temp_rule = "{} {}".format(temp_rule, 'RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T170000Z')
|
||||
local_tz = dateutil.rrule.rrulestr(temp_rule, tzinfos=UTC_TIMEZONES, **{'forceset': True})._rrule[0]._dtstart.tzinfo
|
||||
|
||||
# rrule was: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
# rrule is now: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T220000Z
|
||||
rrule = rrule.replace(naive_until, utc_until)
|
||||
return rrule
|
||||
# Make a datetime object with tzinfo=<the DTSTART timezone>
|
||||
# localized_until = datetime.datetime(2020, 6, 1, 17, 0, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
|
||||
localized_until = make_aware(datetime.datetime.strptime(re.sub('^UNTIL=', '', naive_until), "%Y%m%dT%H%M%S"), local_tz)
|
||||
|
||||
# Coerce the datetime to UTC and format it as a string w/ Zulu format
|
||||
# utc_until = UNTIL=20200601T220000Z
|
||||
utc_until = 'UNTIL=' + localized_until.astimezone(pytz.utc).strftime('%Y%m%dT%H%M%SZ')
|
||||
|
||||
# rule was: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
# rule is now: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T220000Z
|
||||
rules[index] = rule.replace(naive_until, utc_until)
|
||||
return " ".join(rules)
|
||||
|
||||
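The UNTIL handling above amounts to localizing a naive timestamp in the DTSTART zone and re-expressing it in Zulu form; a self-contained sketch of that single conversion (using zoneinfo rather than the dateutil/pytz helpers in the code, and an arbitrary zone):

    from datetime import datetime
    from zoneinfo import ZoneInfo  # assumption: Python 3.9+

    def naive_until_to_utc(naive_until, tzid):
        # 'UNTIL=20200601T170000' interpreted in tzid, then rendered as a UTC 'Z' timestamp
        local = datetime.strptime(naive_until.removeprefix('UNTIL='), '%Y%m%dT%H%M%S').replace(tzinfo=ZoneInfo(tzid))
        return 'UNTIL=' + local.astimezone(ZoneInfo('UTC')).strftime('%Y%m%dT%H%M%SZ')

    # e.g. naive_until_to_utc('UNTIL=20200601T170000', 'America/New_York') -> 'UNTIL=20200601T210000Z'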
@classmethod
|
||||
def rrulestr(cls, rrule, fast_forward=True, **kwargs):
|
||||
@@ -176,20 +199,28 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
if r._dtstart and r._dtstart.tzinfo is None:
|
||||
raise ValueError('A valid TZID must be provided (e.g., America/New_York)')
|
||||
|
||||
if fast_forward and ('MINUTELY' in rrule or 'HOURLY' in rrule) and 'COUNT=' not in rrule:
|
||||
# Fast forward is a way for us to limit the number of events in the rruleset
|
||||
# If we are fast-forwarding and we don't have a count-limited rule that is minutely or hourly
|
||||
# We will modify the start date of the rule to last week to prevent a large number of entries
|
||||
if fast_forward:
|
||||
try:
|
||||
# All rules in a ruleset will have the same dtstart value
|
||||
# so lets compare the first event to now to see if its > 7 days old
|
||||
first_event = x[0]
|
||||
# If the first event was over a week ago...
|
||||
if (now() - first_event).days > 7:
|
||||
# hourly/minutely rrules with far-past DTSTART values
|
||||
# are *really* slow to precompute
|
||||
# start *from* one week ago to speed things up drastically
|
||||
dtstart = x._rrule[0]._dtstart.strftime(':%Y%m%dT')
|
||||
new_start = (now() - datetime.timedelta(days=7)).strftime(':%Y%m%dT')
|
||||
new_rrule = rrule.replace(dtstart, new_start)
|
||||
return Schedule.rrulestr(new_rrule, fast_forward=False)
|
||||
for rule in x._rrule:
|
||||
# If any rule has a minutely or hourly rule without a count...
|
||||
if rule._freq in [dateutil.rrule.MINUTELY, dateutil.rrule.HOURLY] and not rule._count:
|
||||
# hourly/minutely rrules with far-past DTSTART values
|
||||
# are *really* slow to precompute
|
||||
# start *from* one week ago to speed things up drastically
|
||||
new_start = (now() - datetime.timedelta(days=7)).strftime('%Y%m%d')
|
||||
# Now we want to replace the DTSTART:<value>T with the new date (which includes the T)
|
||||
new_rrule = re.sub('(DTSTART[^:]*):[^T]+T', r'\1:{0}T'.format(new_start), rrule)
|
||||
return Schedule.rrulestr(new_rrule, fast_forward=False)
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
return x
|
||||
|
||||
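A compact sketch of the DTSTART rewrite used by the fast-forward branch above; the sample rrule string is arbitrary:

    import datetime
    import re

    def fast_forward_dtstart(rrule, days=7):
        # keep everything up to the ':' of DTSTART, swap in a recent date, preserve the 'T' and the time
        new_start = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=days)).strftime('%Y%m%d')
        return re.sub(r'(DTSTART[^:]*):[^T]+T', r'\1:{0}T'.format(new_start), rrule)

    # e.g. fast_forward_dtstart('DTSTART;TZID=UTC:20150101T000000 RRULE:FREQ=MINUTELY;INTERVAL=5')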
def __str__(self):
|
||||
@@ -206,6 +237,22 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
job_kwargs['_eager_fields'] = {'launch_type': 'scheduled', 'schedule': self}
|
||||
return job_kwargs
|
||||
|
||||
def get_end_date(ruleset):
|
||||
# if we have a complex ruleset with a lot of options getting the last index of the ruleset can take some time
|
||||
# And a ruleset without a count/until can come back as datetime.datetime(9999, 12, 31, 15, 0, tzinfo=tzfile('US/Eastern'))
|
||||
# So we are going to do a quick scan to make sure we would have an end date
|
||||
for a_rule in ruleset._rrule:
|
||||
# if this rule does not have until or count in it then we have no end date
|
||||
if not a_rule._until and not a_rule._count:
|
||||
return None
|
||||
|
||||
# If we made it this far we should have an end date and can ask the ruleset what the last date is
|
||||
# However, if the until/count is before dtstart we will get an IndexError when trying to get [-1]
|
||||
try:
|
||||
return ruleset[-1].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
def update_computed_fields_no_save(self):
|
||||
affects_fields = ['next_run', 'dtstart', 'dtend']
|
||||
starting_values = {}
|
||||
@@ -229,12 +276,7 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
self.dtstart = future_rs[0].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
self.dtstart = None
|
||||
self.dtend = None
|
||||
if 'until' in self.rrule.lower() or 'count' in self.rrule.lower():
|
||||
try:
|
||||
self.dtend = future_rs[-1].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
self.dtend = None
|
||||
self.dtend = Schedule.get_end_date(future_rs)
|
||||
|
||||
changed = any(getattr(self, field_name) != starting_values[field_name] for field_name in affects_fields)
|
||||
return changed
|
||||
|
||||
@@ -533,7 +533,7 @@ class UnifiedJob(
|
||||
('workflow', _('Workflow')), # Job was started from a workflow job.
|
||||
('webhook', _('Webhook')), # Job was started from a webhook event.
|
||||
('sync', _('Sync')), # Job was started from a project sync.
|
||||
('scm', _('SCM Update')), # Job was created as an Inventory SCM sync.
|
||||
('scm', _('SCM Update')), # (deprecated) Job was created as an Inventory SCM sync.
|
||||
]
|
||||
|
||||
PASSWORD_FIELDS = ('start_args',)
|
||||
@@ -575,7 +575,8 @@ class UnifiedJob(
|
||||
dependent_jobs = models.ManyToManyField(
|
||||
'self',
|
||||
editable=False,
|
||||
related_name='%(class)s_blocked_jobs+',
|
||||
related_name='%(class)s_blocked_jobs',
|
||||
symmetrical=False,
|
||||
)
|
||||
execution_node = models.TextField(
|
||||
blank=True,
|
||||
@@ -717,6 +718,13 @@ class UnifiedJob(
|
||||
editable=False,
|
||||
help_text=_("The version of Ansible Core installed in the execution environment."),
|
||||
)
|
||||
host_status_counts = models.JSONField(
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
help_text=_("Playbook stats from the Ansible playbook_on_stats event."),
|
||||
)
|
||||
work_unit_id = models.CharField(
|
||||
max_length=255, blank=True, default=None, editable=False, null=True, help_text=_("The Receptor work unit ID associated with this job.")
|
||||
)
|
||||
@@ -1196,6 +1204,10 @@ class UnifiedJob(
|
||||
pass
|
||||
return None
|
||||
|
||||
def get_effective_artifacts(self, **kwargs):
|
||||
"""Return unified job artifacts (from set_stats) to pass downstream in workflows"""
|
||||
return {}
|
||||
|
||||
def get_passwords_needed_to_start(self):
|
||||
return []
|
||||
|
||||
|
||||
@@ -318,8 +318,8 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
for parent_node in self.get_parent_nodes():
|
||||
is_root_node = False
|
||||
aa_dict.update(parent_node.ancestor_artifacts)
|
||||
if parent_node.job and hasattr(parent_node.job, 'artifacts'):
|
||||
aa_dict.update(parent_node.job.artifacts)
|
||||
if parent_node.job:
|
||||
aa_dict.update(parent_node.job.get_effective_artifacts(parents_set=set([self.workflow_job_id])))
|
||||
if aa_dict and not is_root_node:
|
||||
self.ancestor_artifacts = aa_dict
|
||||
self.save(update_fields=['ancestor_artifacts'])
|
||||
@@ -659,6 +659,13 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
node_job_description = 'job #{0}, "{1}", which finished with status {2}.'.format(node.job.id, node.job.name, node.job.status)
|
||||
str_arr.append("- node #{0} spawns {1}".format(node.id, node_job_description))
|
||||
result['body'] = '\n'.join(str_arr)
|
||||
result.update(
|
||||
dict(
|
||||
inventory=self.inventory.name if self.inventory else None,
|
||||
limit=self.limit,
|
||||
extra_vars=self.display_extra_vars(),
|
||||
)
|
||||
)
|
||||
return result
|
||||
|
||||
@property
|
||||
@@ -682,6 +689,27 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
wj = wj.get_workflow_job()
|
||||
return ancestors
|
||||
|
||||
def get_effective_artifacts(self, **kwargs):
|
||||
"""
|
||||
For downstream jobs of a workflow nested inside of a workflow,
|
||||
we send aggregated artifacts from the nodes inside of the nested workflow
|
||||
"""
|
||||
artifacts = {}
|
||||
job_queryset = (
|
||||
UnifiedJob.objects.filter(unified_job_node__workflow_job=self)
|
||||
.defer('job_args', 'job_cwd', 'start_args', 'result_traceback')
|
||||
.order_by('finished', 'id')
|
||||
.filter(status__in=['successful', 'failed'])
|
||||
.iterator()
|
||||
)
|
||||
parents_set = kwargs.get('parents_set', set())
|
||||
new_parents_set = parents_set | {self.id}
|
||||
for job in job_queryset:
|
||||
if job.id in parents_set:
|
||||
continue
|
||||
artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set))
|
||||
return artifacts
|
||||
|
||||
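The parents_set bookkeeping above exists so a nested workflow never revisits itself; a toy version of the same guard over a plain node graph (all data structures invented for illustration):

    def collect_artifacts(workflow_id, children, artifacts_by_job, parents_set=frozenset()):
        # children maps a workflow id to the job ids it spawned; jobs already seen as parents are skipped
        collected = {}
        new_parents = parents_set | {workflow_id}
        for job_id in children.get(workflow_id, ()):
            if job_id in parents_set:
                continue
            collected.update(artifacts_by_job.get(job_id, {}))
            if job_id in children:  # the child is itself a workflow: recurse with the extended parent set
                collected.update(collect_artifacts(job_id, children, artifacts_by_job, new_parents))
        return collected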
def get_notification_templates(self):
|
||||
return self.workflow_job_template.notification_templates
|
||||
|
||||
@@ -885,3 +913,12 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
|
||||
@property
|
||||
def workflow_job(self):
|
||||
return self.unified_job_node.workflow_job
|
||||
|
||||
def notification_data(self):
|
||||
result = super(WorkflowApproval, self).notification_data()
|
||||
result.update(
|
||||
dict(
|
||||
extra_vars=self.workflow_job.display_extra_vars(),
|
||||
)
|
||||
)
|
||||
return result
|
||||
|
||||
@@ -54,8 +54,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
):
|
||||
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.grafana_key = grafana_key
|
||||
self.dashboardId = dashboardId
|
||||
self.panelId = panelId
|
||||
self.dashboardId = int(dashboardId) if dashboardId is not None else None
|
||||
self.panelId = int(panelId) if panelId is not None else None
|
||||
self.annotation_tags = annotation_tags if annotation_tags is not None else []
|
||||
self.grafana_no_verify_ssl = grafana_no_verify_ssl
|
||||
self.isRegion = isRegion
|
||||
@@ -86,8 +86,10 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
if not self.fail_silently:
|
||||
raise Exception(smart_str(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
|
||||
grafana_data['isRegion'] = self.isRegion
|
||||
grafana_data['dashboardId'] = self.dashboardId
|
||||
grafana_data['panelId'] = self.panelId
|
||||
if self.dashboardId is not None:
|
||||
grafana_data['dashboardId'] = self.dashboardId
|
||||
if self.panelId is not None:
|
||||
grafana_data['panelId'] = self.panelId
|
||||
if self.annotation_tags:
|
||||
grafana_data['tags'] = self.annotation_tags
|
||||
grafana_data['text'] = m.subject
|
||||
|
||||
@@ -8,7 +8,6 @@ import redis
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
|
||||
__all__ = ['CallbackQueueDispatcher']
|
||||
|
||||
@@ -28,7 +27,6 @@ class CallbackQueueDispatcher(object):
|
||||
self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
|
||||
self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
|
||||
self.connection = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.subsystem_metrics = s_metrics.Metrics()
|
||||
|
||||
def dispatch(self, obj):
|
||||
self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))
|
||||
|
||||
@@ -26,7 +26,7 @@ class DependencyGraph(object):
|
||||
# The reason for tracking both inventory and inventory sources:
|
||||
# Consider InvA, which has two sources, InvSource1, InvSource2.
|
||||
# JobB might depend on InvA, which launches two updates, one for each source.
|
||||
# To determine if JobB can run, we can just check InvA, which is marked in
|
||||
# To determine if JobB can run, we can just check InvA, which is marked in
|
||||
# INVENTORY_UPDATES, instead of having to check for both entries in
|
||||
# INVENTORY_SOURCE_UPDATES.
|
||||
self.data[self.INVENTORY_UPDATES] = {}
|
||||
|
||||
@@ -6,7 +6,9 @@ from datetime import timedelta
|
||||
import logging
|
||||
import uuid
|
||||
import json
|
||||
from types import SimpleNamespace
|
||||
import time
|
||||
import sys
|
||||
import signal
|
||||
|
||||
# Django
|
||||
from django.db import transaction, connection
|
||||
@@ -19,7 +21,6 @@ from awx.main.dispatch.reaper import reap_job
|
||||
from awx.main.models import (
|
||||
AdHocCommand,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
Job,
|
||||
@@ -36,13 +37,28 @@ from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
|
||||
from awx.main.utils.common import create_partition
|
||||
from awx.main.signals import disable_activity_stream
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstances
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
from awx.main.utils import decrypt_field
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
def timeit(func):
|
||||
def inner(*args, **kwargs):
|
||||
t_now = time.perf_counter()
|
||||
result = func(*args, **kwargs)
|
||||
dur = time.perf_counter() - t_now
|
||||
args[0].subsystem_metrics.inc("task_manager_" + func.__name__ + "_seconds", dur)
|
||||
return result
|
||||
|
||||
return inner
|
||||
|
||||
|
||||
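# Illustrative aside, not part of the diff: @timeit assumes it decorates bound methods whose
# owner exposes a subsystem_metrics object (it reads args[0]), e.g.
#
#     class Worker:
#         def __init__(self, metrics):
#             self.subsystem_metrics = metrics  # anything with .inc(name, value)
#
#         @timeit
#         def process_pending_tasks(self, tasks):
#             pass
#
# Each call then adds its wall-clock duration to "task_manager_process_pending_tasks_seconds".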
class TaskManager:
|
||||
def __init__(self):
|
||||
"""
|
||||
@@ -54,47 +70,29 @@ class TaskManager:
|
||||
The NOOP case is short-circuit logic. If the task manager realizes that another instance
|
||||
of the task manager is already running, then it short-circuits and decides not to run.
|
||||
"""
|
||||
self.graph = dict()
|
||||
# start task limit indicates how many pending jobs can be started on this
|
||||
# .schedule() run. Starting jobs is expensive, and there is code in place to reap
|
||||
# the task manager after 5 minutes. At scale, the task manager can easily take more than
|
||||
# 5 minutes to start pending jobs. If this limit is reached, pending jobs
|
||||
# will no longer be started and will be started on the next task manager cycle.
|
||||
self.start_task_limit = settings.START_TASK_LIMIT
|
||||
|
||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
# initialize each metric to 0 and force metric_has_changed to true. This
|
||||
# ensures each task manager metric will be overridden when pipe_execute
|
||||
# is called later.
|
||||
for m in self.subsystem_metrics.METRICS:
|
||||
if m.startswith("task_manager"):
|
||||
self.subsystem_metrics.set(m, 0)
|
||||
|
||||
def after_lock_init(self):
|
||||
def after_lock_init(self, all_sorted_tasks):
|
||||
"""
|
||||
Init AFTER we know this instance of the task manager will run because the lock is acquired.
|
||||
"""
|
||||
instances = Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop')
|
||||
self.real_instances = {i.hostname: i for i in instances}
|
||||
self.controlplane_ig = None
|
||||
self.dependency_graph = DependencyGraph()
|
||||
|
||||
instances_partial = [
|
||||
SimpleNamespace(
|
||||
obj=instance,
|
||||
node_type=instance.node_type,
|
||||
remaining_capacity=instance.remaining_capacity,
|
||||
capacity=instance.capacity,
|
||||
jobs_running=instance.jobs_running,
|
||||
hostname=instance.hostname,
|
||||
)
|
||||
for instance in instances
|
||||
]
|
||||
|
||||
instances_by_hostname = {i.hostname: i for i in instances_partial}
|
||||
|
||||
for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
|
||||
if rampart_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
|
||||
self.controlplane_ig = rampart_group
|
||||
self.graph[rampart_group.name] = dict(
|
||||
instances=[
|
||||
instances_by_hostname[instance.hostname] for instance in rampart_group.instances.all() if instance.hostname in instances_by_hostname
|
||||
],
|
||||
)
|
||||
self.instances = TaskManagerInstances(all_sorted_tasks)
|
||||
self.instance_groups = TaskManagerInstanceGroups(instances_by_hostname=self.instances)
|
||||
self.controlplane_ig = self.instance_groups.controlplane_ig
|
||||
|
||||
def job_blocked_by(self, task):
|
||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
||||
@@ -104,13 +102,27 @@ class TaskManager:
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
|
||||
if not task.dependent_jobs_finished():
|
||||
blocked_by = task.dependent_jobs.first()
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
for dep in task.dependent_jobs.all():
|
||||
if dep.status in ACTIVE_STATES:
|
||||
return dep
|
||||
# if we detect a failed or error dependency, go ahead and fail this
|
||||
# task. The errback on the dependency takes some time to trigger,
|
||||
# and we don't want the task to enter running state if its
|
||||
# dependency has failed or errored.
|
||||
elif dep.status in ("error", "failed"):
|
||||
task.status = 'failed'
|
||||
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
||||
get_type_for_model(type(dep)),
|
||||
dep.name,
|
||||
dep.id,
|
||||
)
|
||||
task.save(update_fields=['status', 'job_explanation'])
|
||||
task.websocket_emit_status('failed')
|
||||
return dep
|
||||
|
||||
return None
|
||||
|
||||
@timeit
|
||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
inventory_updates_qs = (
|
||||
@@ -136,6 +148,7 @@ class TaskManager:
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
|
||||
|
||||
@timeit
|
||||
def spawn_workflow_graph_jobs(self, workflow_jobs):
|
||||
for workflow_job in workflow_jobs:
|
||||
if workflow_job.cancel_flag:
|
||||
@@ -235,14 +248,16 @@ class TaskManager:
|
||||
workflow_job.save(update_fields=update_fields)
|
||||
status_changed = True
|
||||
if status_changed:
|
||||
if workflow_job.spawned_by_workflow:
|
||||
schedule_task_manager()
|
||||
workflow_job.websocket_emit_status(workflow_job.status)
|
||||
# Operations whose queries rely on modifications made during the atomic scheduling session
|
||||
workflow_job.send_notification_templates('succeeded' if workflow_job.status == 'successful' else 'failed')
|
||||
if workflow_job.spawned_by_workflow:
|
||||
schedule_task_manager()
|
||||
return result
|
||||
|
||||
def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
|
||||
@timeit
|
||||
def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
|
||||
self.subsystem_metrics.inc("task_manager_tasks_started", 1)
|
||||
self.start_task_limit -= 1
|
||||
if self.start_task_limit == 0:
|
||||
# schedule another run immediately after this task manager
|
||||
@@ -275,10 +290,10 @@ class TaskManager:
|
||||
schedule_task_manager()
|
||||
# at this point we already have control/execution nodes selected for the following cases
|
||||
else:
|
||||
task.instance_group = rampart_group
|
||||
task.instance_group = instance_group
|
||||
execution_node_msg = f' and execution node {task.execution_node}' if task.execution_node else ''
|
||||
logger.debug(
|
||||
f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {rampart_group.name}{execution_node_msg}.'
|
||||
f'Submitting job {task.log_format} controlled by {task.controller_node} to instance group {instance_group.name}{execution_node_msg}.'
|
||||
)
|
||||
with disable_activity_stream():
|
||||
task.celery_task_id = str(uuid.uuid4())
|
||||
@@ -302,12 +317,15 @@ class TaskManager:
|
||||
task.websocket_emit_status(task.status) # adds to on_commit
|
||||
connection.on_commit(post_commit)
|
||||
|
||||
@timeit
|
||||
def process_running_tasks(self, running_tasks):
|
||||
for task in running_tasks:
|
||||
self.dependency_graph.add_job(task)
|
||||
|
||||
def create_project_update(self, task):
|
||||
project_task = Project.objects.get(id=task.project_id).create_project_update(_eager_fields=dict(launch_type='dependency'))
|
||||
def create_project_update(self, task, project_id=None):
|
||||
if project_id is None:
|
||||
project_id = task.project_id
|
||||
project_task = Project.objects.get(id=project_id).create_project_update(_eager_fields=dict(launch_type='dependency'))
|
||||
|
||||
# Project created 1 second behind
|
||||
project_task.created = task.created - timedelta(seconds=1)
|
||||
@@ -327,14 +345,10 @@ class TaskManager:
|
||||
# self.process_inventory_sources(inventory_sources)
|
||||
return inventory_task
|
||||
|
||||
def capture_chain_failure_dependencies(self, task, dependencies):
|
||||
def add_dependencies(self, task, dependencies):
|
||||
with disable_activity_stream():
|
||||
task.dependent_jobs.add(*dependencies)
|
||||
|
||||
for dep in dependencies:
|
||||
# Add task + all deps except self
|
||||
dep.dependent_jobs.add(*([task] + [d for d in dependencies if d != dep]))
|
||||
|
||||
def get_latest_inventory_update(self, inventory_source):
|
||||
latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
|
||||
if not latest_inventory_update.exists():
|
||||
@@ -360,8 +374,8 @@ class TaskManager:
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_latest_project_update(self, job):
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=job.project, job_type='check').order_by("-created")
|
||||
def get_latest_project_update(self, project_id):
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=project_id, job_type='check').order_by("-created")
|
||||
if not latest_project_update.exists():
|
||||
return None
|
||||
return latest_project_update.first()
|
||||
@@ -401,47 +415,73 @@ class TaskManager:
|
||||
return True
|
||||
return False
|
||||
|
||||
def gen_dep_for_job(self, task):
|
||||
created_dependencies = []
|
||||
dependencies = []
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task.project_id)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
latest_project_update = self.create_project_update(task)
|
||||
created_dependencies.append(latest_project_update)
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(task, field_name="start_args"))
|
||||
except ValueError:
|
||||
start_args = dict()
|
||||
# generator for inventory sources related to this task
|
||||
task_inv_sources = (invsrc for invsrc in self.all_inventory_sources if invsrc.inventory_id == task.inventory_id)
|
||||
for inventory_source in task_inv_sources:
|
||||
if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
|
||||
continue
|
||||
if not inventory_source.update_on_launch:
|
||||
continue
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if dependencies:
|
||||
self.add_dependencies(task, dependencies)
|
||||
|
||||
return created_dependencies
|
||||
|
||||
def gen_dep_for_inventory_update(self, inventory_task):
|
||||
created_dependencies = []
|
||||
if inventory_task.source == "scm":
|
||||
invsrc = inventory_task.inventory_source
|
||||
if not invsrc.source_project.scm_update_on_launch:
|
||||
return created_dependencies
|
||||
|
||||
latest_src_project_update = self.get_latest_project_update(invsrc.source_project_id)
|
||||
if self.should_update_related_project(inventory_task, latest_src_project_update):
|
||||
latest_src_project_update = self.create_project_update(inventory_task, project_id=invsrc.source_project_id)
|
||||
created_dependencies.append(latest_src_project_update)
|
||||
self.add_dependencies(inventory_task, [latest_src_project_update])
|
||||
latest_src_project_update.scm_inventory_updates.add(inventory_task)
|
||||
return created_dependencies
|
||||
|
||||
@timeit
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
for task in undeped_tasks:
|
||||
task.log_lifecycle("acknowledged")
|
||||
dependencies = []
|
||||
if not type(task) is Job:
|
||||
if type(task) is Job:
|
||||
created_dependencies += self.gen_dep_for_job(task)
|
||||
elif type(task) is InventoryUpdate:
|
||||
created_dependencies += self.gen_dep_for_inventory_update(task)
|
||||
else:
|
||||
continue
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
project_task = self.create_project_update(task)
|
||||
created_dependencies.append(project_task)
|
||||
dependencies.append(project_task)
|
||||
else:
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(task, field_name="start_args"))
|
||||
except ValueError:
|
||||
start_args = dict()
|
||||
for inventory_source in [invsrc for invsrc in self.all_inventory_sources if invsrc.inventory == task.inventory]:
|
||||
if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
|
||||
continue
|
||||
if not inventory_source.update_on_launch:
|
||||
continue
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if len(dependencies) > 0:
|
||||
self.capture_chain_failure_dependencies(task, dependencies)
|
||||
|
||||
UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)
|
||||
|
||||
return created_dependencies
|
||||
|
||||
@timeit
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = {wf.unified_job_template_id for wf in self.get_running_workflow_jobs()}
|
||||
tasks_to_update_job_explanation = []
|
||||
@@ -450,6 +490,7 @@ class TaskManager:
|
||||
break
|
||||
blocked_by = self.job_blocked_by(task)
|
||||
if blocked_by:
|
||||
self.subsystem_metrics.inc("task_manager_tasks_blocked", 1)
|
||||
task.log_lifecycle("blocked", blocked_by=blocked_by)
|
||||
job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
|
||||
if task.job_explanation != job_explanation:
|
||||
@@ -476,8 +517,8 @@ class TaskManager:
|
||||
control_impact = task.task_impact + settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
else:
|
||||
control_impact = settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
control_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
|
||||
task, self.graph[settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]['instances'], impact=control_impact, capacity_type='control'
|
||||
control_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
|
||||
task, instance_group_name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, impact=control_impact, capacity_type='control'
|
||||
)
|
||||
if not control_instance:
|
||||
self.task_needs_capacity(task, tasks_to_update_job_explanation)
|
||||
@@ -489,33 +530,31 @@ class TaskManager:
|
||||
# All task.capacity_type == 'control' jobs should run on control plane, no need to loop over instance groups
|
||||
if task.capacity_type == 'control':
|
||||
task.execution_node = control_instance.hostname
|
||||
control_instance.remaining_capacity = max(0, control_instance.remaining_capacity - control_impact)
|
||||
control_instance.jobs_running += 1
|
||||
control_instance.consume_capacity(control_impact)
|
||||
self.dependency_graph.add_job(task)
|
||||
execution_instance = self.real_instances[control_instance.hostname]
|
||||
execution_instance = self.instances[control_instance.hostname].obj
|
||||
task.log_lifecycle("controller_node_chosen")
|
||||
task.log_lifecycle("execution_node_chosen")
|
||||
self.start_task(task, self.controlplane_ig, task.get_jobs_fail_chain(), execution_instance)
|
||||
found_acceptable_queue = True
|
||||
continue
|
||||
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if rampart_group.is_container_group:
|
||||
control_instance.jobs_running += 1
|
||||
for instance_group in preferred_instance_groups:
|
||||
if instance_group.is_container_group:
|
||||
self.dependency_graph.add_job(task)
|
||||
self.start_task(task, rampart_group, task.get_jobs_fail_chain(), None)
|
||||
self.start_task(task, instance_group, task.get_jobs_fail_chain(), None)
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
|
||||
# TODO: remove this after we have confidence that OCP control nodes are reporting node_type=control
|
||||
if settings.IS_K8S and task.capacity_type == 'execution':
|
||||
logger.debug("Skipping group {}, task cannot run on control plane".format(rampart_group.name))
|
||||
logger.debug("Skipping group {}, task cannot run on control plane".format(instance_group.name))
|
||||
continue
|
||||
# at this point we know the instance group is NOT a container group
|
||||
# because if it was, it would have started the task and broke out of the loop.
|
||||
execution_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
|
||||
task, self.graph[rampart_group.name]['instances'], add_hybrid_control_cost=True
|
||||
) or InstanceGroup.find_largest_idle_instance(self.graph[rampart_group.name]['instances'], capacity_type=task.capacity_type)
|
||||
execution_instance = self.instance_groups.fit_task_to_most_remaining_capacity_instance(
|
||||
task, instance_group_name=instance_group.name, add_hybrid_control_cost=True
|
||||
) or self.instance_groups.find_largest_idle_instance(instance_group_name=instance_group.name, capacity_type=task.capacity_type)
|
||||
|
||||
if execution_instance:
|
||||
task.execution_node = execution_instance.hostname
|
||||
@@ -524,27 +563,24 @@ class TaskManager:
|
||||
control_instance = execution_instance
|
||||
task.controller_node = execution_instance.hostname
|
||||
|
||||
control_instance.remaining_capacity = max(0, control_instance.remaining_capacity - settings.AWX_CONTROL_NODE_TASK_IMPACT)
|
||||
control_instance.consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
|
||||
task.log_lifecycle("controller_node_chosen")
|
||||
if control_instance != execution_instance:
|
||||
control_instance.jobs_running += 1
|
||||
execution_instance.remaining_capacity = max(0, execution_instance.remaining_capacity - task.task_impact)
|
||||
execution_instance.jobs_running += 1
|
||||
execution_instance.consume_capacity(task.task_impact)
|
||||
task.log_lifecycle("execution_node_chosen")
|
||||
logger.debug(
|
||||
"Starting {} in group {} instance {} (remaining_capacity={})".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname, execution_instance.remaining_capacity
|
||||
task.log_format, instance_group.name, execution_instance.hostname, execution_instance.remaining_capacity
|
||||
)
|
||||
)
|
||||
execution_instance = self.real_instances[execution_instance.hostname]
|
||||
execution_instance = self.instances[execution_instance.hostname].obj
|
||||
self.dependency_graph.add_job(task)
|
||||
self.start_task(task, rampart_group, task.get_jobs_fail_chain(), execution_instance)
|
||||
self.start_task(task, instance_group, task.get_jobs_fail_chain(), execution_instance)
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
else:
|
||||
logger.debug(
|
||||
"No instance available in group {} to run job {} w/ capacity requirement {}".format(
|
||||
rampart_group.name, task.log_format, task.task_impact
|
||||
instance_group.name, task.log_format, task.task_impact
|
||||
)
|
||||
)
|
||||
if not found_acceptable_queue:
|
||||
@@ -596,20 +632,27 @@ class TaskManager:
|
||||
|
||||
def process_tasks(self, all_sorted_tasks):
|
||||
running_tasks = [t for t in all_sorted_tasks if t.status in ['waiting', 'running']]
|
||||
|
||||
self.process_running_tasks(running_tasks)
|
||||
self.subsystem_metrics.inc("task_manager_running_processed", len(running_tasks))
|
||||
|
||||
pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
|
||||
|
||||
undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
|
||||
dependencies = self.generate_dependencies(undeped_tasks)
|
||||
deps_of_deps = self.generate_dependencies(dependencies)
|
||||
dependencies += deps_of_deps
|
||||
self.process_pending_tasks(dependencies)
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
self.subsystem_metrics.inc("task_manager_pending_processed", len(dependencies))
|
||||
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
self.subsystem_metrics.inc("task_manager_pending_processed", len(pending_tasks))
|
||||
|
||||
@timeit
|
||||
def _schedule(self):
|
||||
finished_wfjs = []
|
||||
all_sorted_tasks = self.get_tasks()
|
||||
|
||||
self.after_lock_init()
|
||||
self.after_lock_init(all_sorted_tasks)
|
||||
|
||||
if len(all_sorted_tasks) > 0:
|
||||
# TODO: Deal with
|
||||
@@ -640,6 +683,28 @@ class TaskManager:
|
||||
self.process_tasks(all_sorted_tasks)
|
||||
return finished_wfjs
|
||||
|
||||
def record_aggregate_metrics(self, *args):
|
||||
if not settings.IS_TESTING():
|
||||
# increment task_manager_schedule_calls regardless if the other
|
||||
# metrics are recorded
|
||||
s_metrics.Metrics(auto_pipe_execute=True).inc("task_manager_schedule_calls", 1)
|
||||
# Only record metrics if the last recording was more
# than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
# Prevents a short-duration task manager that runs directly after a
# long task manager from overriding useful metrics.
|
||||
current_time = time.time()
|
||||
time_last_recorded = current_time - self.subsystem_metrics.decode("task_manager_recorded_timestamp")
|
||||
if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
|
||||
logger.debug(f"recording metrics, last recorded {time_last_recorded} seconds ago")
|
||||
self.subsystem_metrics.set("task_manager_recorded_timestamp", current_time)
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
else:
|
||||
logger.debug(f"skipping recording metrics, last recorded {time_last_recorded} seconds ago")
|
||||
|
||||
def record_aggregate_metrics_and_exit(self, *args):
|
||||
self.record_aggregate_metrics()
|
||||
sys.exit(1)
|
||||
|
||||
def schedule(self):
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
@@ -649,5 +714,8 @@ class TaskManager:
|
||||
return
|
||||
logger.debug("Starting Scheduler")
|
||||
with task_manager_bulk_reschedule():
|
||||
# if sigterm due to timeout, still record metrics
|
||||
signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
|
||||
self._schedule()
|
||||
self.record_aggregate_metrics()
|
||||
logger.debug("Finishing Scheduler")
|
||||
|
||||
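Taken together, schedule() above layers three safeguards: a non-blocking advisory lock so only one task manager runs at a time, a SIGTERM handler so metrics still get flushed if the run is reaped for exceeding its time budget, and rate-limited metric recording afterwards. A stripped-down sketch of that shape (the context manager here is a stand-in, not awx.main.utils.pglock.advisory_lock):

import signal
import sys
from contextlib import contextmanager

@contextmanager
def toy_advisory_lock(name, wait=False):
    # stand-in for the Postgres advisory lock; pretend we always win it
    yield True

def run_scheduler(schedule_pass, flush_metrics):
    def flush_and_exit(signum, frame):
        flush_metrics()   # still record what we have if we are being reaped
        sys.exit(1)

    with toy_advisory_lock('task_manager_lock', wait=False) as acquired:
        if not acquired:
            return        # another task manager holds the lock: NOOP short-circuit
        signal.signal(signal.SIGTERM, flush_and_exit)
        schedule_pass()
        flush_metrics()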
awx/main/scheduler/task_manager_models.py (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
# Copyright (c) 2022 Ansible by Red Hat
|
||||
# All Rights Reserved.
|
||||
import logging
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.models import (
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
)
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
class TaskManagerInstance:
|
||||
"""A class representing minimal data the task manager needs to represent an Instance."""
|
||||
|
||||
def __init__(self, obj):
|
||||
self.obj = obj
|
||||
self.node_type = obj.node_type
|
||||
self.consumed_capacity = 0
|
||||
self.capacity = obj.capacity
|
||||
self.hostname = obj.hostname
|
||||
|
||||
def consume_capacity(self, impact):
|
||||
self.consumed_capacity += impact
|
||||
|
||||
@property
|
||||
def remaining_capacity(self):
|
||||
remaining = self.capacity - self.consumed_capacity
|
||||
if remaining < 0:
|
||||
return 0
|
||||
return remaining
|
||||
|
||||
|
||||
class TaskManagerInstances:
|
||||
def __init__(self, active_tasks, instances=None):
|
||||
self.instances_by_hostname = dict()
|
||||
if instances is None:
|
||||
instances = (
|
||||
Instance.objects.filter(hostname__isnull=False, enabled=True).exclude(node_type='hop').only('node_type', 'capacity', 'hostname', 'enabled')
|
||||
)
|
||||
for instance in instances:
|
||||
self.instances_by_hostname[instance.hostname] = TaskManagerInstance(instance)
|
||||
|
||||
# initialize remaining capacity based on currently waiting and running tasks
|
||||
for task in active_tasks:
|
||||
if task.status not in ['waiting', 'running']:
|
||||
continue
|
||||
control_instance = self.instances_by_hostname.get(task.controller_node, '')
|
||||
execution_instance = self.instances_by_hostname.get(task.execution_node, '')
|
||||
if execution_instance and execution_instance.node_type in ('hybrid', 'execution'):
|
||||
self.instances_by_hostname[task.execution_node].consume_capacity(task.task_impact)
|
||||
if control_instance and control_instance.node_type in ('hybrid', 'control'):
|
||||
self.instances_by_hostname[task.controller_node].consume_capacity(settings.AWX_CONTROL_NODE_TASK_IMPACT)
|
||||
|
||||
def __getitem__(self, hostname):
|
||||
return self.instances_by_hostname.get(hostname)
|
||||
|
||||
def __contains__(self, hostname):
|
||||
return hostname in self.instances_by_hostname
|
||||
|
||||
|
||||
class TaskManagerInstanceGroups:
|
||||
"""A class representing minimal data the task manager needs to represent an InstanceGroup."""
|
||||
|
||||
def __init__(self, instances_by_hostname=None, instance_groups=None, instance_groups_queryset=None):
|
||||
self.instance_groups = dict()
|
||||
self.controlplane_ig = None
|
||||
|
||||
if instance_groups is not None: # for testing
|
||||
self.instance_groups = instance_groups
|
||||
else:
|
||||
if instance_groups_queryset is None:
|
||||
instance_groups_queryset = InstanceGroup.objects.prefetch_related('instances').only('name', 'instances')
|
||||
for instance_group in instance_groups_queryset:
|
||||
if instance_group.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
|
||||
self.controlplane_ig = instance_group
|
||||
self.instance_groups[instance_group.name] = dict(
|
||||
instances=[
|
||||
instances_by_hostname[instance.hostname] for instance in instance_group.instances.all() if instance.hostname in instances_by_hostname
|
||||
],
|
||||
)
|
||||
|
||||
def get_remaining_capacity(self, group_name):
|
||||
instances = self.instance_groups[group_name]['instances']
|
||||
return sum(inst.remaining_capacity for inst in instances)
|
||||
|
||||
def get_consumed_capacity(self, group_name):
|
||||
instances = self.instance_groups[group_name]['instances']
|
||||
return sum(inst.consumed_capacity for inst in instances)
|
||||
|
||||
def fit_task_to_most_remaining_capacity_instance(self, task, instance_group_name, impact=None, capacity_type=None, add_hybrid_control_cost=False):
|
||||
impact = impact if impact else task.task_impact
|
||||
capacity_type = capacity_type if capacity_type else task.capacity_type
|
||||
instance_most_capacity = None
|
||||
most_remaining_capacity = -1
|
||||
instances = self.instance_groups[instance_group_name]['instances']
|
||||
|
||||
for i in instances:
|
||||
if i.node_type not in (capacity_type, 'hybrid'):
|
||||
continue
|
||||
would_be_remaining = i.remaining_capacity - impact
|
||||
# hybrid nodes _always_ control their own tasks
|
||||
if add_hybrid_control_cost and i.node_type == 'hybrid':
|
||||
would_be_remaining -= settings.AWX_CONTROL_NODE_TASK_IMPACT
|
||||
if would_be_remaining >= 0 and (instance_most_capacity is None or would_be_remaining > most_remaining_capacity):
|
||||
instance_most_capacity = i
|
||||
most_remaining_capacity = would_be_remaining
|
||||
return instance_most_capacity
|
||||
|
||||
def find_largest_idle_instance(self, instance_group_name, capacity_type='execution'):
|
||||
largest_instance = None
|
||||
instances = self.instance_groups[instance_group_name]['instances']
|
||||
for i in instances:
|
||||
if i.node_type not in (capacity_type, 'hybrid'):
|
||||
continue
|
||||
if (hasattr(i, 'jobs_running') and i.jobs_running == 0) or i.remaining_capacity == i.capacity:
|
||||
if largest_instance is None:
|
||||
largest_instance = i
|
||||
elif i.capacity > largest_instance.capacity:
|
||||
largest_instance = i
|
||||
return largest_instance
|
||||
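The placement helpers in the new module above boil down to a best-fit rule: among instances whose node_type matches the task's capacity type (or is hybrid), pick the one that keeps the most capacity after the task, charging hybrid nodes the extra control cost. Restated as a standalone function with invented test data (not the TaskManagerInstanceGroups API itself):

from types import SimpleNamespace

def best_fit(task_impact, capacity_type, instances, control_cost=0):
    # Choose the instance that would retain the most remaining capacity, or None if nothing fits.
    best, best_remaining = None, -1
    for inst in instances:
        if inst.node_type not in (capacity_type, 'hybrid'):
            continue
        would_remain = inst.remaining_capacity - task_impact
        if inst.node_type == 'hybrid':
            would_remain -= control_cost  # hybrid nodes always control their own tasks
        if would_remain >= 0 and would_remain > best_remaining:
            best, best_remaining = inst, would_remain
    return best

nodes = [
    SimpleNamespace(node_type='execution', remaining_capacity=40, hostname='exec-1'),
    SimpleNamespace(node_type='hybrid', remaining_capacity=60, hostname='hybrid-1'),
]
chosen = best_fit(task_impact=30, capacity_type='execution', instances=nodes, control_cost=15)
# hybrid-1 wins here (15 would remain vs exec-1's 10), even after paying the control cost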
@@ -409,7 +409,7 @@ def emit_activity_stream_change(instance):
|
||||
from awx.api.serializers import ActivityStreamSerializer
|
||||
|
||||
actor = None
|
||||
if instance.actor:
|
||||
if instance.actor_id:
|
||||
actor = instance.actor.username
|
||||
summary_fields = ActivityStreamSerializer(instance).get_summary_fields(instance)
|
||||
analytics_logger.info(
|
||||
|
||||
@@ -9,19 +9,19 @@ import stat
|
||||
from django.utils.timezone import now
|
||||
from django.conf import settings
|
||||
from django_guid import get_guid
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# AWX
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.main.constants import MINIMAL_EVENTS
|
||||
from awx.main.constants import MINIMAL_EVENTS, ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE
|
||||
from awx.main.utils.update_model import update_model
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.tasks.signals import signal_callback
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.callback')
|
||||
|
||||
|
||||
class RunnerCallback:
|
||||
event_data_key = 'job_id'
|
||||
|
||||
def __init__(self, model=None):
|
||||
self.parent_workflow_job_id = None
|
||||
self.host_map = {}
|
||||
@@ -33,10 +33,40 @@ class RunnerCallback:
|
||||
self.event_ct = 0
|
||||
self.model = model
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE / 5)
|
||||
self.wrapup_event_dispatched = False
|
||||
self.extra_update_fields = {}
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
return update_model(self.model, pk, _attempt=0, _max_attempts=self.update_attempts, **updates)
|
||||
|
||||
@cached_property
|
||||
def wrapup_event_type(self):
|
||||
return self.instance.event_class.WRAPUP_EVENT
|
||||
|
||||
@cached_property
|
||||
def event_data_key(self):
|
||||
return self.instance.event_class.JOB_REFERENCE
|
||||
|
||||
def delay_update(self, skip_if_already_set=False, **kwargs):
|
||||
"""Stash fields that should be saved along with the job status change"""
|
||||
for key, value in kwargs.items():
|
||||
if key in self.extra_update_fields and skip_if_already_set:
|
||||
continue
|
||||
elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'):
|
||||
if str(value) in self.extra_update_fields.get(key, ''):
|
||||
continue # if already set, avoid duplicating messages
|
||||
# In the case of these fields, we do not want to lose any prior information, so combine values
|
||||
self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)])
|
||||
else:
|
||||
self.extra_update_fields[key] = value
|
||||
|
||||
def get_delayed_update_fields(self):
|
||||
"""Return finalized dict of all fields that should be saved along with the job status change"""
|
||||
self.extra_update_fields['emitted_events'] = self.event_ct
|
||||
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
|
||||
self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
|
||||
return self.extra_update_fields
|
||||
|
||||
def event_handler(self, event_data):
|
||||
#
|
||||
# ⚠️ D-D-D-DANGER ZONE ⚠️
|
||||
@@ -130,6 +160,9 @@ class RunnerCallback:
|
||||
elif self.recent_event_timings.maxlen:
|
||||
self.recent_event_timings.append(time.time())
|
||||
|
||||
if event_data.get('event', '') == self.wrapup_event_type:
|
||||
self.wrapup_event_dispatched = True
|
||||
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
self.event_ct += 1
|
||||
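A recurring theme in the callback changes is deferring writes: artifacts, job_explanation, and result_traceback are no longer saved mid-run but stashed and written once with the final status update. A small standalone sketch of that accumulate-then-flush idea (not the RunnerCallback class itself):

class DelayedUpdates:
    """Accumulate field updates during a run and apply them in a single save at the end."""

    def __init__(self):
        self.fields = {}

    def delay_update(self, skip_if_already_set=False, **kwargs):
        for key, value in kwargs.items():
            if key in self.fields and skip_if_already_set:
                continue
            if key in self.fields and key in ('job_explanation', 'result_traceback'):
                if str(value) in str(self.fields[key]):
                    continue  # avoid repeating an identical message
                # keep prior information for these fields by appending
                self.fields[key] = '\n'.join([str(self.fields[key]), str(value)])
            else:
                self.fields[key] = value

updates = DelayedUpdates()
updates.delay_update(job_explanation='Job terminated due to timeout')
updates.delay_update(skip_if_already_set=True, job_explanation='Job terminated due to error')
# only the first explanation is kept; without the flag both would be joined on one save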
@@ -138,8 +171,7 @@ class RunnerCallback:
|
||||
Handle artifacts
|
||||
'''
|
||||
if event_data.get('event_data', {}).get('artifact_data', {}):
|
||||
self.instance.artifacts = event_data['event_data']['artifact_data']
|
||||
self.instance.save(update_fields=['artifacts'])
|
||||
self.delay_update(artifacts=event_data['event_data']['artifact_data'])
|
||||
|
||||
return False
|
||||
|
||||
@@ -148,7 +180,13 @@ class RunnerCallback:
|
||||
Ansible runner callback to tell the job when/if it is canceled
|
||||
"""
|
||||
unified_job_id = self.instance.pk
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
if signal_callback():
|
||||
return True
|
||||
try:
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
except Exception:
|
||||
logger.exception(f'Encountered error during cancel check for {unified_job_id}, canceling now')
|
||||
return True
|
||||
if not self.instance:
|
||||
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
|
||||
return True
|
||||
@@ -170,6 +208,8 @@ class RunnerCallback:
|
||||
}
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
if self.wrapup_event_type == 'EOF':
|
||||
self.wrapup_event_dispatched = True
|
||||
|
||||
def status_handler(self, status_data, runner_config):
|
||||
"""
|
||||
@@ -205,16 +245,10 @@ class RunnerCallback:
|
||||
elif status_data['status'] == 'error':
|
||||
result_traceback = status_data.get('result_traceback', None)
|
||||
if result_traceback:
|
||||
from awx.main.signals import disable_activity_stream # Circular import
|
||||
|
||||
with disable_activity_stream():
|
||||
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
|
||||
self.delay_update(result_traceback=result_traceback)
|
||||
|
||||
|
||||
class RunnerCallbackForProjectUpdate(RunnerCallback):
|
||||
|
||||
event_data_key = 'project_update_id'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RunnerCallbackForProjectUpdate, self).__init__(*args, **kwargs)
|
||||
self.playbook_new_revision = None
|
||||
@@ -231,9 +265,6 @@ class RunnerCallbackForProjectUpdate(RunnerCallback):
|
||||
|
||||
|
||||
class RunnerCallbackForInventoryUpdate(RunnerCallback):
|
||||
|
||||
event_data_key = 'inventory_update_id'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RunnerCallbackForInventoryUpdate, self).__init__(*args, **kwargs)
|
||||
self.end_line = 0
|
||||
@@ -245,9 +276,6 @@ class RunnerCallbackForInventoryUpdate(RunnerCallback):
|
||||
|
||||
|
||||
class RunnerCallbackForAdHocCommand(RunnerCallback):
|
||||
|
||||
event_data_key = 'ad_hoc_command_id'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RunnerCallbackForAdHocCommand, self).__init__(*args, **kwargs)
|
||||
self.host_map = {}
|
||||
@@ -255,4 +283,4 @@ class RunnerCallbackForAdHocCommand(RunnerCallback):
|
||||
|
||||
class RunnerCallbackForSystemJob(RunnerCallback):
|
||||
|
||||
event_data_key = 'system_job_id'
|
||||
pass
|
||||
|
||||
@@ -17,10 +17,8 @@ import time
|
||||
import urllib.parse as urlparse
|
||||
from uuid import uuid4
|
||||
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction
|
||||
|
||||
|
||||
# Runner
|
||||
@@ -32,7 +30,6 @@ from gitdb.exc import BadName as BadGitName
|
||||
|
||||
|
||||
# AWX
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.constants import (
|
||||
@@ -65,6 +62,7 @@ from awx.main.tasks.callback import (
|
||||
RunnerCallbackForProjectUpdate,
|
||||
RunnerCallbackForSystemJob,
|
||||
)
|
||||
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
||||
from awx.main.tasks.receptor import AWXReceptorJob
|
||||
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
@@ -78,7 +76,7 @@ from awx.main.utils.common import (
|
||||
)
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils.handlers import SpecialInventoryHandler
|
||||
from awx.main.tasks.system import handle_success_and_failure_notifications, update_smart_memberships_for_inventory, update_inventory_computed_fields
|
||||
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields
|
||||
from awx.main.utils.update_model import update_model
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
@@ -119,6 +117,25 @@ class BaseTask(object):
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
return update_model(self.model, pk, _attempt=0, _max_attempts=self.update_attempts, **updates)
|
||||
|
||||
def write_private_data_file(self, private_data_dir, file_name, data, sub_dir=None, file_permissions=0o600):
|
||||
base_path = private_data_dir
|
||||
if sub_dir:
|
||||
base_path = os.path.join(private_data_dir, sub_dir)
|
||||
os.makedirs(base_path, mode=0o700, exist_ok=True)
|
||||
|
||||
# If we got a file name, create it; otherwise create a temp file
|
||||
if file_name:
|
||||
file_path = os.path.join(base_path, file_name)
|
||||
else:
|
||||
handle, file_path = tempfile.mkstemp(dir=base_path)
|
||||
os.close(handle)
|
||||
|
||||
file = Path(file_path)
|
||||
file.touch(mode=file_permissions, exist_ok=True)
|
||||
with open(file_path, 'w') as f:
|
||||
f.write(data)
|
||||
return file_path
|
||||
|
||||
def get_path_to(self, *args):
|
||||
"""
|
||||
Return absolute path relative to this file.
|
||||
@@ -222,6 +239,7 @@ class BaseTask(object):
|
||||
"""
|
||||
private_data = self.build_private_data(instance, private_data_dir)
|
||||
private_data_files = {'credentials': {}}
|
||||
ssh_key_data = None
|
||||
if private_data is not None:
|
||||
for credential, data in private_data.get('credentials', {}).items():
|
||||
# OpenSSH formatted keys must have a trailing newline to be
|
||||
@@ -231,34 +249,15 @@ class BaseTask(object):
|
||||
# For credentials used with ssh-add, write to a named pipe which
|
||||
# will be read then closed, instead of leaving the SSH key on disk.
|
||||
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
|
||||
try:
|
||||
os.mkdir(os.path.join(private_data_dir, 'env'))
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
path = os.path.join(private_data_dir, 'env', 'ssh_key')
|
||||
ansible_runner.utils.open_fifo_write(path, data.encode())
|
||||
private_data_files['credentials']['ssh'] = path
|
||||
ssh_key_data = data
|
||||
# Ansible network modules do not yet support ssh-agent.
|
||||
# Instead, ssh private key file is explicitly passed via an
|
||||
# env variable.
|
||||
else:
|
||||
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
|
||||
f = os.fdopen(handle, 'w')
|
||||
f.write(data)
|
||||
f.close()
|
||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
private_data_files['credentials'][credential] = path
|
||||
private_data_files['credentials'][credential] = self.write_private_data_file(private_data_dir, None, data, sub_dir='env')
|
||||
for credential, data in private_data.get('certificates', {}).items():
|
||||
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
|
||||
if not os.path.exists(artifact_dir):
|
||||
os.makedirs(artifact_dir, mode=0o700)
|
||||
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
|
||||
with open(path, 'w') as f:
|
||||
f.write(data)
|
||||
f.close()
|
||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
return private_data_files
|
||||
self.write_private_data_file(private_data_dir, 'ssh_key_data-cert.pub', data, sub_dir=os.path.join('artifacts', str(self.instance.id)))
|
||||
return private_data_files, ssh_key_data
|
||||
|
||||
def build_passwords(self, instance, runtime_passwords):
|
||||
"""
|
||||
@@ -276,23 +275,11 @@ class BaseTask(object):
|
||||
"""
|
||||
|
||||
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
|
||||
env_path = os.path.join(private_data_dir, 'env')
|
||||
try:
|
||||
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
path = os.path.join(env_path, 'extravars')
|
||||
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
|
||||
f = os.fdopen(handle, 'w')
|
||||
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
|
||||
f.write(yaml.safe_dump(vars))
|
||||
content = yaml.safe_dump(vars)
|
||||
else:
|
||||
f.write(safe_dump(vars, safe_dict))
|
||||
f.close()
|
||||
os.chmod(path, stat.S_IRUSR)
|
||||
return path
|
||||
content = safe_dump(vars, safe_dict)
|
||||
return self.write_private_data_file(private_data_dir, 'extravars', content, sub_dir='env')
|
||||
|
||||
def add_awx_venv(self, env):
|
||||
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
|
||||
@@ -330,32 +317,14 @@ class BaseTask(object):
|
||||
# maintain a list of host_name --> host_id
|
||||
# so we can associate emitted events to Host objects
|
||||
self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
|
||||
json_data = json.dumps(script_data)
|
||||
path = os.path.join(private_data_dir, 'inventory')
|
||||
fn = os.path.join(path, 'hosts')
|
||||
with open(fn, 'w') as f:
|
||||
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
|
||||
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
|
||||
return fn
|
||||
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
|
||||
return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)
|
||||
|
||||
def build_args(self, instance, private_data_dir, passwords):
|
||||
raise NotImplementedError
|
||||
|
||||
def write_args_file(self, private_data_dir, args):
|
||||
env_path = os.path.join(private_data_dir, 'env')
|
||||
try:
|
||||
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
path = os.path.join(env_path, 'cmdline')
|
||||
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
|
||||
f = os.fdopen(handle, 'w')
|
||||
f.write(ansible_runner.utils.args2cmdline(*args))
|
||||
f.close()
|
||||
os.chmod(path, stat.S_IRUSR)
|
||||
return path
|
||||
return self.write_private_data_file(private_data_dir, 'cmdline', ansible_runner.utils.args2cmdline(*args), sub_dir='env')
|
||||
|
||||
def build_credentials_list(self, instance):
|
||||
return []
|
||||
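The hunks above, and the later RunInventoryUpdate and RunSystemJob hunks, all route their file writes through the write_private_data_file helper added earlier in this diff, replacing the repeated mkdir/os.open/chmod dance. A cut-down module-level version of the same logic, for reference (the real method lives on BaseTask):

import os
import tempfile
from pathlib import Path

def write_private_data_file(private_data_dir, file_name, data, sub_dir=None, file_permissions=0o600):
    # Ensure the target directory exists with restrictive permissions.
    base_path = os.path.join(private_data_dir, sub_dir) if sub_dir else private_data_dir
    os.makedirs(base_path, mode=0o700, exist_ok=True)
    if file_name:
        file_path = os.path.join(base_path, file_name)
    else:
        handle, file_path = tempfile.mkstemp(dir=base_path)  # anonymous temp file
        os.close(handle)
    Path(file_path).touch(mode=file_permissions, exist_ok=True)
    with open(file_path, 'w') as f:
        f.write(data)
    return file_path

# e.g. write_private_data_file('/tmp/pdd', 'cmdline', '-u root --become', sub_dir='env')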
@@ -424,6 +393,7 @@ class BaseTask(object):
|
||||
instance.save(update_fields=['ansible_version'])
|
||||
|
||||
@with_path_cleanup
|
||||
@with_signal_handling
|
||||
def run(self, pk, **kwargs):
|
||||
"""
|
||||
Run the job/task and capture its output.
|
||||
@@ -440,7 +410,6 @@ class BaseTask(object):
|
||||
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
|
||||
self.instance.websocket_emit_status("running")
|
||||
status, rc = 'error', None
|
||||
extra_update_fields = {}
|
||||
fact_modification_times = {}
|
||||
self.runner_callback.event_ct = 0
|
||||
|
||||
@@ -456,7 +425,7 @@ class BaseTask(object):
|
||||
private_data_dir = self.build_private_data_dir(self.instance)
|
||||
self.pre_run_hook(self.instance, private_data_dir)
|
||||
self.instance.log_lifecycle("preparing_playbook")
|
||||
if self.instance.cancel_flag:
|
||||
if self.instance.cancel_flag or signal_callback():
|
||||
self.instance = self.update_model(self.instance.pk, status='canceled')
|
||||
if self.instance.status != 'running':
|
||||
# Stop the task chain and prevent starting the job if it has
|
||||
@@ -477,7 +446,7 @@ class BaseTask(object):
|
||||
)
|
||||
|
||||
# May have to serialize the value
|
||||
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
|
||||
private_data_files, ssh_key_data = self.build_private_data_files(self.instance, private_data_dir)
|
||||
passwords = self.build_passwords(self.instance, kwargs)
|
||||
self.build_extra_vars_file(self.instance, private_data_dir)
|
||||
args = self.build_args(self.instance, private_data_dir, passwords)
|
||||
@@ -512,17 +481,12 @@ class BaseTask(object):
|
||||
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
|
||||
'inventory': self.build_inventory(self.instance, private_data_dir),
|
||||
'passwords': expect_passwords,
|
||||
'suppress_env_files': getattr(settings, 'AWX_RUNNER_OMIT_ENV_FILES', True),
|
||||
'envvars': env,
|
||||
'settings': {
|
||||
'job_timeout': self.get_instance_timeout(self.instance),
|
||||
'suppress_ansible_output': True,
|
||||
'suppress_output_file': True,
|
||||
},
|
||||
}
|
||||
|
||||
idle_timeout = getattr(settings, 'DEFAULT_JOB_IDLE_TIMEOUT', 0)
|
||||
if idle_timeout > 0:
|
||||
params['settings']['idle_timeout'] = idle_timeout
|
||||
if ssh_key_data is not None:
|
||||
params['ssh_key'] = ssh_key_data
|
||||
|
||||
if isinstance(self.instance, AdHocCommand):
|
||||
params['module'] = self.build_module_name(self.instance)
|
||||
@@ -545,6 +509,19 @@ class BaseTask(object):
|
||||
if not params[v]:
|
||||
del params[v]
|
||||
|
||||
runner_settings = {
|
||||
'job_timeout': self.get_instance_timeout(self.instance),
|
||||
'suppress_ansible_output': True,
|
||||
'suppress_output_file': getattr(settings, 'AWX_RUNNER_SUPPRESS_OUTPUT_FILE', True),
|
||||
}
|
||||
|
||||
idle_timeout = getattr(settings, 'DEFAULT_JOB_IDLE_TIMEOUT', 0)
|
||||
if idle_timeout > 0:
|
||||
runner_settings['idle_timeout'] = idle_timeout
|
||||
|
||||
# Write out our own settings file
|
||||
self.write_private_data_file(private_data_dir, 'settings', json.dumps(runner_settings), sub_dir='env')
|
||||
|
||||
self.instance.log_lifecycle("running_playbook")
|
||||
if isinstance(self.instance, SystemJob):
|
||||
res = ansible_runner.interface.run(
|
||||
@@ -567,20 +544,19 @@ class BaseTask(object):
|
||||
rc = res.rc
|
||||
|
||||
if status in ('timeout', 'error'):
|
||||
job_explanation = f"Job terminated due to {status}"
|
||||
self.instance.job_explanation = self.instance.job_explanation or job_explanation
|
||||
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation=f"Job terminated due to {status}")
|
||||
if status == 'timeout':
|
||||
status = 'failed'
|
||||
|
||||
extra_update_fields['job_explanation'] = self.instance.job_explanation
|
||||
# ensure failure notification sends even if playbook_on_stats event is not triggered
|
||||
handle_success_and_failure_notifications.apply_async([self.instance.id])
|
||||
|
||||
elif status == 'canceled':
|
||||
self.instance = self.update_model(pk)
|
||||
if (getattr(self.instance, 'cancel_flag', False) is False) and signal_callback():
|
||||
self.runner_callback.delay_update(job_explanation="Task was canceled due to receiving a shutdown signal.")
|
||||
status = 'failed'
|
||||
except ReceptorNodeNotFound as exc:
|
||||
extra_update_fields['job_explanation'] = str(exc)
|
||||
self.runner_callback.delay_update(job_explanation=str(exc))
|
||||
except Exception:
|
||||
# this could catch programming or file system errors
|
||||
extra_update_fields['result_traceback'] = traceback.format_exc()
|
||||
self.runner_callback.delay_update(result_traceback=traceback.format_exc())
|
||||
logger.exception('%s Exception occurred while running task', self.instance.log_format)
|
||||
finally:
|
||||
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.runner_callback.event_ct)
|
||||
@@ -590,14 +566,19 @@ class BaseTask(object):
|
||||
except PostRunError as exc:
|
||||
if status == 'successful':
|
||||
status = exc.status
|
||||
extra_update_fields['job_explanation'] = exc.args[0]
|
||||
self.runner_callback.delay_update(job_explanation=exc.args[0])
|
||||
if exc.tb:
|
||||
extra_update_fields['result_traceback'] = exc.tb
|
||||
self.runner_callback.delay_update(result_traceback=exc.tb)
|
||||
except Exception:
|
||||
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
|
||||
|
||||
self.instance = self.update_model(pk)
|
||||
self.instance = self.update_model(pk, status=status, emitted_events=self.runner_callback.event_ct, **extra_update_fields)
|
||||
self.instance = self.update_model(pk, status=status, select_for_update=True, **self.runner_callback.get_delayed_update_fields())
|
||||
|
||||
# Field host_status_counts is used as a metric to check if event processing is finished
|
||||
# we send notifications if it is, if not, callback receiver will send them
|
||||
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
|
||||
self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')
|
||||
|
||||
try:
|
||||
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
|
||||
@@ -1054,7 +1035,7 @@ class RunProjectUpdate(BaseTask):
|
||||
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
|
||||
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
|
||||
if settings.GALAXY_IGNORE_CERTS:
|
||||
env['ANSIBLE_GALAXY_IGNORE'] = True
|
||||
env['ANSIBLE_GALAXY_IGNORE'] = str(True)
|
||||
|
||||
# build out env vars for Galaxy credentials (in order)
|
||||
galaxy_server_list = []
|
||||
@@ -1161,6 +1142,7 @@ class RunProjectUpdate(BaseTask):
|
||||
'scm_track_submodules': project_update.scm_track_submodules,
|
||||
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
|
||||
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
|
||||
'galaxy_task_env': settings.GALAXY_TASK_ENV,
|
||||
}
|
||||
)
|
||||
# apply custom refspec from user for PR refs and the like
|
||||
@@ -1191,64 +1173,6 @@ class RunProjectUpdate(BaseTask):
|
||||
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
|
||||
return d
|
||||
|
||||
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
|
||||
scm_revision = project_update.project.scm_revision
|
||||
inv_update_class = InventoryUpdate._get_task_class()
|
||||
for inv_src in dependent_inventory_sources:
|
||||
if not inv_src.update_on_project_update:
|
||||
continue
|
||||
if inv_src.scm_last_revision == scm_revision:
|
||||
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
|
||||
continue
|
||||
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
|
||||
with transaction.atomic():
|
||||
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
|
||||
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
|
||||
continue
|
||||
|
||||
if settings.IS_K8S:
|
||||
instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
|
||||
else:
|
||||
instance_group = project_update.instance_group
|
||||
|
||||
local_inv_update = inv_src.create_inventory_update(
|
||||
_eager_fields=dict(
|
||||
launch_type='scm',
|
||||
status='running',
|
||||
instance_group=instance_group,
|
||||
execution_node=project_update.execution_node,
|
||||
controller_node=project_update.execution_node,
|
||||
source_project_update=project_update,
|
||||
celery_task_id=project_update.celery_task_id,
|
||||
)
|
||||
)
|
||||
local_inv_update.log_lifecycle("controller_node_chosen")
|
||||
local_inv_update.log_lifecycle("execution_node_chosen")
|
||||
try:
|
||||
create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
|
||||
inv_update_class().run(local_inv_update.id)
|
||||
except Exception:
|
||||
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
|
||||
|
||||
try:
|
||||
project_update.refresh_from_db()
|
||||
except ProjectUpdate.DoesNotExist:
|
||||
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
|
||||
break
|
||||
try:
|
||||
local_inv_update.refresh_from_db()
|
||||
except InventoryUpdate.DoesNotExist:
|
||||
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
|
||||
continue
|
||||
if project_update.cancel_flag:
|
||||
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
|
||||
break
|
||||
if local_inv_update.cancel_flag:
|
||||
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
|
||||
if local_inv_update.status == 'successful':
|
||||
inv_src.scm_last_revision = scm_revision
|
||||
inv_src.save(update_fields=['scm_last_revision'])
|
||||
|
||||
def release_lock(self, instance):
|
||||
try:
|
||||
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
|
||||
@@ -1458,12 +1382,6 @@ class RunProjectUpdate(BaseTask):
|
||||
p.inventory_files = p.inventories
|
||||
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
|
||||
|
||||
# Update any inventories that depend on this project
|
||||
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
|
||||
if len(dependent_inventory_sources) > 0:
|
||||
if status == 'successful' and instance.launch_type != 'sync':
|
||||
self._update_dependent_inventories(instance, dependent_inventory_sources)
|
||||
|
||||
def build_execution_environment_params(self, instance, private_data_dir):
|
||||
if settings.IS_K8S:
|
||||
return {}
|
||||
@@ -1474,8 +1392,8 @@ class RunProjectUpdate(BaseTask):
|
||||
params.setdefault('container_volume_mounts', [])
|
||||
params['container_volume_mounts'].extend(
|
||||
[
|
||||
f"{project_path}:{project_path}:Z",
|
||||
f"{cache_path}:{cache_path}:Z",
|
||||
f"{project_path}:{project_path}:z",
|
||||
f"{cache_path}:{cache_path}:z",
|
||||
]
|
||||
)
|
||||
return params
|
||||
@@ -1568,13 +1486,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
return env
|
||||
|
||||
def write_args_file(self, private_data_dir, args):
|
||||
path = os.path.join(private_data_dir, 'args')
|
||||
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
|
||||
f = os.fdopen(handle, 'w')
|
||||
f.write(' '.join(args))
|
||||
f.close()
|
||||
os.chmod(path, stat.S_IRUSR)
|
||||
return path
|
||||
return self.write_private_data_file(private_data_dir, 'args', ' '.join(args))
|
||||
|
||||
def build_args(self, inventory_update, private_data_dir, passwords):
|
||||
"""Build the command line argument list for running an inventory
|
||||
@@ -1630,11 +1542,7 @@ class RunInventoryUpdate(BaseTask):
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

self.write_private_data_file(private_data_dir, injector.filename, content, sub_dir='inventory', file_permissions=0o700)
rel_path = os.path.join('inventory', injector.filename)
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
@@ -1653,9 +1561,7 @@ class RunInventoryUpdate(BaseTask):
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
if inventory_update.source == 'scm' and source_project and source_project.scm_type: # never ever update manual projects

# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
@@ -1688,8 +1594,6 @@ class RunInventoryUpdate(BaseTask):
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
@@ -1700,9 +1604,6 @@ class RunInventoryUpdate(BaseTask):
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)

def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
@@ -1961,13 +1862,7 @@ class RunSystemJob(BaseTask):
return args

def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
return self.write_private_data_file(private_data_dir, 'args', ' '.join(args))

def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
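Both write_args_file hunks above (RunInventoryUpdate and RunSystemJob) collapse the manual os.open / os.fdopen / os.chmod sequence into a single self.write_private_data_file(...) call. The helper itself is not part of this diff; the following is only a minimal sketch of what such a helper plausibly does, inferred from its call sites (the name, sub_dir and file_permissions arguments appear above, everything else is assumed):

import os
import stat


def write_private_data_file(private_data_dir, file_name, data, sub_dir=None, file_permissions=stat.S_IRUSR | stat.S_IWUSR):
    """Hypothetical sketch only: write data to a private file and return its path."""
    target_dir = private_data_dir
    if sub_dir:
        target_dir = os.path.join(private_data_dir, sub_dir)
        os.makedirs(target_dir, exist_ok=True)
    path = os.path.join(target_dir, file_name)
    # Create the file with restrictive permissions from the start, then write the payload.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, file_permissions)
    with os.fdopen(fd, 'w') as f:
        f.write(data)
    return path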
@@ -26,7 +26,6 @@ from awx.main.utils.common import (
)
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER


# Receptorctl
from receptorctl.socket_interface import ReceptorControl

@@ -348,6 +347,11 @@ class AWXReceptorJob:
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if 'result_traceback' in self.task.runner_callback.extra_update_fields:
return res

try:
unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
detail = unit_status.get('Detail', None)
@@ -363,26 +367,19 @@ class AWXReceptorJob:
logger.warning(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
# If ansible-runner ran, but an error occurred at runtime, the traceback information
# is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res

if not self.task.instance.result_traceback:
try:
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
lines = resultsock.readlines()
receptor_output = b"".join(lines).decode()
if receptor_output:
self.task.instance.result_traceback = receptor_output
self.task.instance.save(update_fields=['result_traceback'])
elif detail:
self.task.instance.result_traceback = detail
self.task.instance.save(update_fields=['result_traceback'])
else:
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
except Exception:
raise RuntimeError(detail)
try:
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
lines = resultsock.readlines()
receptor_output = b"".join(lines).decode()
if receptor_output:
self.task.runner_callback.delay_update(result_traceback=receptor_output)
elif detail:
self.task.runner_callback.delay_update(result_traceback=detail)
else:
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
except Exception:
raise RuntimeError(detail)

return res
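The hunk above re-routes the captured receptor output through self.task.runner_callback.delay_update() instead of saving result_traceback directly on the instance. As a standalone illustration of the receptorctl calls it relies on, here is a rough sketch; the socket address, work unit id, and the 'StateName' key are placeholders and assumptions, not values taken from the diff:

from receptorctl.socket_interface import ReceptorControl

# Assumed socket address and unit id for illustration only; adjust to your receptor setup.
receptor_ctl = ReceptorControl('unix:///var/run/receptor/receptor.sock')
unit_id = 'qLL2JFNT'

unit_status = receptor_ctl.simple_command(f'work status {unit_id}')
detail = unit_status.get('Detail', None)
state_name = unit_status.get('StateName', None)  # assumption: key name for the work unit state

# Read whatever the work unit wrote to its results socket; this is the text the
# hunk above stores as result_traceback when the unit did not succeed.
resultsock = receptor_ctl.get_work_results(unit_id, return_sockfile=True)
receptor_output = b"".join(resultsock.readlines()).decode()
print(state_name, detail, receptor_output[:200])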
awx/main/tasks/signals.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import signal
import functools
import logging


logger = logging.getLogger('awx.main.tasks.signals')


__all__ = ['with_signal_handling', 'signal_callback']


class SignalState:
    def reset(self):
        self.sigterm_flag = False
        self.is_active = False
        self.original_sigterm = None
        self.original_sigint = None

    def __init__(self):
        self.reset()

    def set_flag(self, *args):
        """Method to pass into the python signal.signal method to receive signals"""
        self.sigterm_flag = True

    def connect_signals(self):
        self.original_sigterm = signal.getsignal(signal.SIGTERM)
        self.original_sigint = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGTERM, self.set_flag)
        signal.signal(signal.SIGINT, self.set_flag)
        self.is_active = True

    def restore_signals(self):
        signal.signal(signal.SIGTERM, self.original_sigterm)
        signal.signal(signal.SIGINT, self.original_sigint)
        self.reset()


signal_state = SignalState()


def signal_callback():
    return signal_state.sigterm_flag


def with_signal_handling(f):
    """
    Change signal handling to make signal_callback return True in event of SIGTERM or SIGINT.
    """

    @functools.wraps(f)
    def _wrapped(*args, **kwargs):
        try:
            this_is_outermost_caller = False
            if not signal_state.is_active:
                signal_state.connect_signals()
                this_is_outermost_caller = True
            return f(*args, **kwargs)
        finally:
            if this_is_outermost_caller:
                signal_state.restore_signals()

    return _wrapped
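The new module above is self-contained, so a usage illustration is straightforward. The task function and loop below are hypothetical (not part of the diff); they only show the intended pattern of decorating the outermost callable and polling signal_callback() so the work can stop cleanly when SIGTERM or SIGINT arrives:

import time

from awx.main.tasks.signals import with_signal_handling, signal_callback


@with_signal_handling
def prune_stale_entries(entries):
    # Hypothetical long-running loop; the decorator installs the flag-setting
    # SIGTERM/SIGINT handlers for the outermost call and restores the originals afterwards.
    for entry in entries:
        if signal_callback():
            # A termination signal arrived; stop starting new work so cleanup can run.
            return False
        time.sleep(0.1)  # stand-in for real per-entry work
    return True


if __name__ == '__main__':
    prune_stale_entries(range(100))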
@@ -103,7 +103,8 @@ def dispatch_startup():
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
m = Metrics()
m.reset_values()

# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
@@ -113,10 +114,6 @@ def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@@ -695,7 +692,7 @@ def handle_work_error(task_id, *args, **kwargs):
first_instance = instance
first_instance_type = each_task['type']

if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status in ('successful', 'failed'):
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
@@ -716,25 +713,6 @@ def handle_work_error(task_id, *args, **kwargs):
pass


@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < settings.AWX_NOTIFICATION_JOB_FINISH_MAX_RETRY:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)

logger.warning(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")


@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
@@ -0,0 +1,2 @@
---
- ansible.builtin.import_playbook: foo
@@ -0,0 +1,2 @@
---
- ansible.builtin.include: foo
@@ -2,8 +2,9 @@
|
||||
"ANSIBLE_JINJA2_NATIVE": "True",
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
|
||||
"GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}",
|
||||
"GCP_AUTH_KIND": "serviceaccount",
|
||||
"GCP_ENV_TYPE": "tower",
|
||||
"GCP_PROJECT": "fooo",
|
||||
"GCP_SERVICE_ACCOUNT_FILE": "{{ file_reference }}"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ def test_empty():
|
||||
"workflow_job_template": 0,
|
||||
"unified_job": 0,
|
||||
"pending_jobs": 0,
|
||||
"database_connections": 1,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ EXPECTED_VALUES = {
|
||||
'awx_license_instance_total': 0,
|
||||
'awx_license_instance_free': 0,
|
||||
'awx_pending_jobs_total': 0,
|
||||
'awx_database_connections_total': 1,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -532,6 +532,49 @@ def test_vault_password_required(post, organization, admin):
|
||||
assert 'required fields (vault_password)' in j.job_explanation
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_vault_id_immutable(post, patch, organization, admin):
|
||||
vault = CredentialType.defaults['vault']()
|
||||
vault.save()
|
||||
response = post(
|
||||
reverse('api:credential_list'),
|
||||
{
|
||||
'credential_type': vault.pk,
|
||||
'organization': organization.id,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {'vault_id': 'password', 'vault_password': 'password'},
|
||||
},
|
||||
admin,
|
||||
)
|
||||
assert response.status_code == 201
|
||||
assert Credential.objects.count() == 1
|
||||
response = patch(
|
||||
reverse('api:credential_detail', kwargs={'pk': response.data['id']}), {'inputs': {'vault_id': 'password2', 'vault_password': 'password'}}, admin
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert response.data['inputs'][0] == 'Vault IDs cannot be changed once they have been created.'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_patch_without_vault_id_valid(post, patch, organization, admin):
|
||||
vault = CredentialType.defaults['vault']()
|
||||
vault.save()
|
||||
response = post(
|
||||
reverse('api:credential_list'),
|
||||
{
|
||||
'credential_type': vault.pk,
|
||||
'organization': organization.id,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {'vault_id': 'password', 'vault_password': 'password'},
|
||||
},
|
||||
admin,
|
||||
)
|
||||
assert response.status_code == 201
|
||||
assert Credential.objects.count() == 1
|
||||
response = patch(reverse('api:credential_detail', kwargs={'pk': response.data['id']}), {'name': 'worst_credential_ever'}, admin)
|
||||
assert response.status_code == 200
|
||||
|
||||
|
||||
#
|
||||
# Net Credentials
|
||||
#
|
||||
|
||||
@@ -9,9 +9,7 @@ from awx.api.versioning import reverse
|
||||
@pytest.fixture
|
||||
def ec2_source(inventory, project):
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
return inventory.inventory_sources.create(
|
||||
name='some_source', update_on_project_update=True, source='ec2', source_project=project, scm_last_revision=project.scm_revision
|
||||
)
|
||||
return inventory.inventory_sources.create(name='some_source', source='ec2', source_project=project)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -46,3 +46,88 @@ def test_ad_hoc_events_sublist_truncation(get, organization_factory, job_templat
|
||||
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert (len(response.data['results'][0]['stdout']) == 1025) == expected
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_job_events_children_summary(get, organization_factory, job_template_factory):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template
|
||||
job = jt.create_unified_job()
|
||||
url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert response.data["event_processing_finished"] == False
|
||||
'''
|
||||
E1
|
||||
E2
|
||||
E3
|
||||
E4 (verbose)
|
||||
E5
|
||||
'''
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid1', parent_uuid='', event="playbook_on_start", counter=1, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid2', parent_uuid='uuid1', event="playbook_on_play_start", counter=2, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event="playbook_on_task_start", counter=3, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(job_id=job.pk, uuid='uuid4', parent_uuid='', event='verbose', counter=4, stdout='a' * 1024, job_created=job.created).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event="playbook_on_play_start", counter=5, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
job.emitted_events = job.get_event_queryset().count()
|
||||
job.status = "successful"
|
||||
job.save()
|
||||
url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert response.data["children_summary"] == {1: {"rowNumber": 0, "numChildren": 4}, 2: {"rowNumber": 1, "numChildren": 2}}
|
||||
assert response.data["meta_event_nested_uuid"] == {4: "uuid2"}
|
||||
assert response.data["event_processing_finished"] == True
|
||||
assert response.data["is_tree"] == True
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_job_events_children_summary_is_tree(get, organization_factory, job_template_factory):
|
||||
'''
|
||||
children_summary should return {is_tree: False} if the event structure is not tree-like
|
||||
'''
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template
|
||||
job = jt.create_unified_job()
|
||||
url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert response.data["event_processing_finished"] == False
|
||||
'''
|
||||
E1
|
||||
E2
|
||||
E3
|
||||
E4 (verbose)
|
||||
E5
|
||||
E6 <-- parent is E2, but comes after another "branch" E5
|
||||
'''
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid1', parent_uuid='', event="playbook_on_start", counter=1, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid2', parent_uuid='uuid1', event="playbook_on_play_start", counter=2, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event="playbook_on_task_start", counter=3, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(job_id=job.pk, uuid='uuid4', parent_uuid='', event='verbose', counter=4, stdout='a' * 1024, job_created=job.created).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event="playbook_on_play_start", counter=5, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid6', parent_uuid='uuid2', event="playbook_on_task_start", counter=6, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
job.emitted_events = job.get_event_queryset().count()
|
||||
job.status = "successful"
|
||||
job.save()
|
||||
url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert response.data["children_summary"] == {}
|
||||
assert response.data["meta_event_nested_uuid"] == {}
|
||||
assert response.data["event_processing_finished"] == True
|
||||
assert response.data["is_tree"] == False
|
||||
|
||||
@@ -301,3 +301,17 @@ def test_instance_group_unattach_from_instance(post, instance_group, node_type_i
|
||||
assert new_activity.instance_group.first() == instance_group
|
||||
else:
|
||||
assert not new_activity
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_cannot_remove_controlplane_hybrid_instances(post, controlplane_instance_group, node_type_instance, admin_user):
|
||||
instance = node_type_instance(hostname='hybrid_node', node_type='hybrid')
|
||||
controlplane_instance_group.instances.add(instance)
|
||||
|
||||
url = reverse('api:instance_group_instance_list', kwargs={'pk': controlplane_instance_group.pk})
|
||||
r = post(url, {'disassociate': True, 'id': instance.id}, admin_user, expect=400)
|
||||
assert 'Cannot disassociate hybrid node' in str(r.data)
|
||||
|
||||
url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk})
|
||||
r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400)
|
||||
assert f'Cannot disassociate hybrid instance' in str(r.data)
|
||||
|
||||
@@ -13,9 +13,7 @@ from awx.main.models import InventorySource, Inventory, ActivityStream
|
||||
@pytest.fixture
|
||||
def scm_inventory(inventory, project):
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
inventory.inventory_sources.create(
|
||||
name='foobar', update_on_project_update=True, source='scm', source_project=project, scm_last_revision=project.scm_revision
|
||||
)
|
||||
inventory.inventory_sources.create(name='foobar', source='scm', source_project=project)
|
||||
return inventory
|
||||
|
||||
|
||||
@@ -23,9 +21,7 @@ def scm_inventory(inventory, project):
|
||||
def factory_scm_inventory(inventory, project):
|
||||
def fn(**kwargs):
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
return inventory.inventory_sources.create(
|
||||
source_project=project, overwrite_vars=True, source='scm', scm_last_revision=project.scm_revision, **kwargs
|
||||
)
|
||||
return inventory.inventory_sources.create(source_project=project, overwrite_vars=True, source='scm', **kwargs)
|
||||
|
||||
return fn
|
||||
|
||||
@@ -76,6 +72,22 @@ def test_inventory_host_name_unique(scm_inventory, post, admin_user):
|
||||
assert "A Group with that name already exists." in json.dumps(resp.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_host_list_ordering(scm_inventory, get, admin_user):
|
||||
# create 3 hosts, hit the inventory host list view 3 times and get the order visible there each time and compare
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
host1 = inv_src.hosts.create(name='1', inventory=scm_inventory)
|
||||
host2 = inv_src.hosts.create(name='2', inventory=scm_inventory)
|
||||
host3 = inv_src.hosts.create(name='3', inventory=scm_inventory)
|
||||
expected_ids = [host1.id, host2.id, host3.id]
|
||||
resp = get(
|
||||
reverse('api:inventory_hosts_list', kwargs={'pk': scm_inventory.id}),
|
||||
admin_user,
|
||||
).data['results']
|
||||
host_list = [host['id'] for host in resp]
|
||||
assert host_list == expected_ids
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_group_name_unique(scm_inventory, post, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -94,6 +106,24 @@ def test_inventory_group_name_unique(scm_inventory, post, admin_user):
|
||||
assert "A Host with that name already exists." in json.dumps(resp.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_group_list_ordering(scm_inventory, get, put, admin_user):
|
||||
# create 3 groups, hit the inventory groups list view 3 times and get the order visible there each time and compare
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
group1 = inv_src.groups.create(name='1', inventory=scm_inventory)
|
||||
group2 = inv_src.groups.create(name='2', inventory=scm_inventory)
|
||||
group3 = inv_src.groups.create(name='3', inventory=scm_inventory)
|
||||
expected_ids = [group1.id, group2.id, group3.id]
|
||||
group_ids = {}
|
||||
for x in range(3):
|
||||
resp = get(
|
||||
reverse('api:inventory_groups_list', kwargs={'pk': scm_inventory.id}),
|
||||
admin_user,
|
||||
).data['results']
|
||||
group_ids[x] = [group['id'] for group in resp]
|
||||
assert group_ids[0] == group_ids[1] == group_ids[2] == expected_ids
|
||||
|
||||
|
||||
@pytest.mark.parametrize("role_field,expected_status_code", [(None, 403), ('admin_role', 200), ('update_role', 403), ('adhoc_role', 403), ('use_role', 403)])
|
||||
@pytest.mark.django_db
|
||||
def test_edit_inventory(put, inventory, alice, role_field, expected_status_code):
|
||||
@@ -510,15 +540,12 @@ class TestControlledBySCM:
|
||||
def test_safe_method_works(self, get, options, scm_inventory, admin_user):
|
||||
get(scm_inventory.get_absolute_url(), admin_user, expect=200)
|
||||
options(scm_inventory.get_absolute_url(), admin_user, expect=200)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision != ''
|
||||
|
||||
def test_vars_edit_reset(self, patch, scm_inventory, admin_user):
|
||||
patch(scm_inventory.get_absolute_url(), {'variables': 'hello: world'}, admin_user, expect=200)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_name_edit_allowed(self, patch, scm_inventory, admin_user):
|
||||
patch(scm_inventory.get_absolute_url(), {'variables': '---', 'name': 'newname'}, admin_user, expect=200)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision != ''
|
||||
|
||||
def test_host_associations_reset(self, post, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -526,14 +553,12 @@ class TestControlledBySCM:
|
||||
g = inv_src.groups.create(name='fooland', inventory=scm_inventory)
|
||||
post(reverse('api:host_groups_list', kwargs={'pk': h.id}), {'id': g.id}, admin_user, expect=204)
|
||||
post(reverse('api:group_hosts_list', kwargs={'pk': g.id}), {'id': h.id}, admin_user, expect=204)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_group_group_associations_reset(self, post, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
g1 = inv_src.groups.create(name='barland', inventory=scm_inventory)
|
||||
g2 = inv_src.groups.create(name='fooland', inventory=scm_inventory)
|
||||
post(reverse('api:group_children_list', kwargs={'pk': g1.id}), {'id': g2.id}, admin_user, expect=204)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_host_group_delete_reset(self, delete, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -541,7 +566,6 @@ class TestControlledBySCM:
|
||||
g = inv_src.groups.create(name='fooland', inventory=scm_inventory)
|
||||
delete(h.get_absolute_url(), admin_user, expect=204)
|
||||
delete(g.get_absolute_url(), admin_user, expect=204)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_remove_scm_inv_src(self, delete, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -554,7 +578,6 @@ class TestControlledBySCM:
|
||||
{
|
||||
'name': 'new inv src',
|
||||
'source_project': project.pk,
|
||||
'update_on_project_update': False,
|
||||
'source': 'scm',
|
||||
'overwrite_vars': True,
|
||||
'source_vars': 'plugin: a.b.c',
|
||||
@@ -563,27 +586,6 @@ class TestControlledBySCM:
|
||||
expect=201,
|
||||
)
|
||||
|
||||
def test_adding_inv_src_prohibited(self, post, scm_inventory, project, admin_user):
|
||||
post(
|
||||
reverse('api:inventory_inventory_sources_list', kwargs={'pk': scm_inventory.id}),
|
||||
{'name': 'new inv src', 'source_project': project.pk, 'update_on_project_update': True, 'source': 'scm', 'overwrite_vars': True},
|
||||
admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
def test_two_update_on_project_update_inv_src_prohibited(self, patch, scm_inventory, factory_scm_inventory, project, admin_user):
|
||||
scm_inventory2 = factory_scm_inventory(name="scm_inventory2")
|
||||
res = patch(
|
||||
reverse('api:inventory_source_detail', kwargs={'pk': scm_inventory2.id}),
|
||||
{
|
||||
'update_on_project_update': True,
|
||||
},
|
||||
admin_user,
|
||||
expect=400,
|
||||
)
|
||||
content = json.loads(res.content)
|
||||
assert content['update_on_project_update'] == ["More than one SCM-based inventory source with update on project update " "per-inventory not allowed."]
|
||||
|
||||
def test_adding_inv_src_without_proj_access_prohibited(self, post, project, inventory, rando):
|
||||
inventory.admin_role.members.add(rando)
|
||||
post(
|
||||
|
||||
@@ -220,7 +220,7 @@ class TestControllerNode:
|
||||
assert 'controller_node' not in r.data
|
||||
|
||||
r = get(reverse('api:inventory_update_detail', kwargs={'pk': inventory_update.pk}), admin_user, expect=200)
|
||||
assert 'controller_node' not in r.data
|
||||
assert 'controller_node' in r.data
|
||||
|
||||
r = get(reverse('api:system_job_detail', kwargs={'pk': system_job.pk}), admin_user, expect=200)
|
||||
assert 'controller_node' not in r.data
|
||||
|
||||
@@ -71,6 +71,17 @@ def test_organization_list_integrity(organization, get, admin, alice):
|
||||
assert field in res.data['results'][0]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_list_order_integrity(organizations, get, admin):
|
||||
# check that the order of the organization list retains integrity.
|
||||
orgs = organizations(4)
|
||||
org_ids = {}
|
||||
for x in range(3):
|
||||
res = get(reverse('api:organization_list'), user=admin).data['results']
|
||||
org_ids[x] = [org['id'] for org in res]
|
||||
assert org_ids[0] == org_ids[1] == org_ids[2] == [orgs[0].id, orgs[1].id, orgs[2].id, orgs[3].id]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_list_visibility(organizations, get, admin, alice):
|
||||
orgs = organizations(2)
|
||||
@@ -127,6 +138,18 @@ def test_organization_inventory_list(organization, inventory_factory, get, alice
|
||||
get(reverse('api:organization_inventories_list', kwargs={'pk': organization.id}), user=rando, expect=403)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_inventory_list_order_integrity(organization, admin, inventory_factory, get):
|
||||
inv1 = inventory_factory('inventory')
|
||||
inv2 = inventory_factory('inventory2')
|
||||
inv3 = inventory_factory('inventory3')
|
||||
inv_ids = {}
|
||||
for x in range(3):
|
||||
res = get(reverse('api:organization_inventories_list', kwargs={'pk': organization.id}), user=admin).data['results']
|
||||
inv_ids[x] = [inv['id'] for inv in res]
|
||||
assert inv_ids[0] == inv_ids[1] == inv_ids[2] == [inv1.id, inv2.id, inv3.id]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_organization(post, admin, alice):
|
||||
new_org = {'name': 'new org', 'description': 'my description'}
|
||||
|
||||
@@ -111,21 +111,41 @@ def test_encrypted_survey_answer(post, patch, admin_user, project, inventory, su
|
||||
[
|
||||
("", "This field may not be blank"),
|
||||
("DTSTART:NONSENSE", "Valid DTSTART required in rrule"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
("DTSTART:20300308T050000Z DTSTART:20310308T050000", "Multiple DTSTART is not supported"),
|
||||
("DTSTART:20300308T050000Z", "RRULE required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE", "INTERVAL required in rrule"),
|
||||
("DTSTART:20300308T050000Z", "One or more rule required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1; EXDATE:20220401", "EXDATE not allowed in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1; RDATE:20220401", "RDATE not allowed in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=SECONDLY;INTERVAL=5;COUNT=6", "SECONDLY is not supported"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=3,4", "Multiple BYMONTHDAYs not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=1,2", "Multiple BYMONTHs not supported"), # noqa
|
||||
# Individual rule test
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE", "INTERVAL required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO", "BYDAY with numeric prefix not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYYEARDAY=100", "BYYEARDAY not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYWEEKNO=20", "BYWEEKNO not supported"),
|
||||
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "COUNT > 999 is unsupported"), # noqa
|
||||
# Individual rule test with multiple rules
|
||||
## Bad Rule: RRULE:NONSENSE
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU", "INTERVAL required in rrule"),
|
||||
## Bad Rule: RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO
|
||||
(
|
||||
"DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO",
|
||||
"BYDAY with numeric prefix not supported",
|
||||
), # noqa
|
||||
## Bad Rule: RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z
|
||||
(
|
||||
"DTSTART:20030925T104941Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z",
|
||||
"RRULE may not contain both COUNT and UNTIL",
|
||||
), # noqa
|
||||
## Bad Rule: RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000
|
||||
(
|
||||
"DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000",
|
||||
"COUNT > 999 is unsupported",
|
||||
), # noqa
|
||||
# Multiple errors, first condition should be returned
|
||||
("DTSTART:NONSENSE RRULE:NONSENSE RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=3,4", "Valid DTSTART required in rrule"),
|
||||
# Parsing Tests
|
||||
("DTSTART;TZID=US-Eastern:19961105T090000 RRULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5", "A valid TZID must be provided"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=REGULARLY;INTERVAL=1", "rrule parsing failed validation: invalid 'FREQ': REGULARLY"), # noqa
|
||||
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
|
||||
("DTSTART;TZID=America/New_York:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1", "rrule parsing failed validation"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
],
|
||||
)
|
||||
def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
@@ -143,6 +163,29 @@ def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
assert error in smart_str(resp.content)
|
||||
|
||||
|
||||
def test_multiple_invalid_rrules(post, admin_user, project, inventory):
|
||||
job_template = JobTemplate.objects.create(name='test-jt', project=project, playbook='helloworld.yml', inventory=inventory)
|
||||
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
|
||||
resp = post(
|
||||
url,
|
||||
{
|
||||
'name': 'Some Schedule',
|
||||
'rrule': "EXRULE:FREQ=SECONDLY DTSTART;TZID=US-Eastern:19961105T090000 RRULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5;UNTIL=20220101 DTSTART;TZID=US-Eastern:19961105T090000",
|
||||
},
|
||||
admin_user,
|
||||
expect=400,
|
||||
)
|
||||
expected_result = {
|
||||
"rrule": [
|
||||
"Multiple DTSTART is not supported.",
|
||||
"INTERVAL required in rrule: RULE:FREQ=SECONDLY",
|
||||
"RRULE may not contain both COUNT and UNTIL: RULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5;UNTIL=20220101",
|
||||
"rrule parsing failed validation: 'NoneType' object has no attribute 'group'",
|
||||
]
|
||||
}
|
||||
assert expected_result == resp.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_users_can_preview_schedules(post, alice):
|
||||
url = reverse('api:schedule_rrule')
|
||||
@@ -381,11 +424,83 @@ def test_dst_rollback_duplicates(post, admin_user):
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'rrule, expected_result',
|
||||
(
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20300302T150000 RRULE:INTERVAL=1;FREQ=DAILY;UNTIL=20300304T1500 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU',
|
||||
['2030-03-02 15:00:00-05:00', '2030-03-04 15:00:00-05:00'],
|
||||
id="Every day except sundays",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=US/Eastern:20300428T170000 RRULE:INTERVAL=1;FREQ=DAILY;COUNT=4 EXRULE:INTERVAL=1;FREQ=DAILY;BYMONTH=4;BYMONTHDAY=30',
|
||||
['2030-04-28 17:00:00-04:00', '2030-04-29 17:00:00-04:00', '2030-05-01 17:00:00-04:00'],
|
||||
id="Every day except April 30th",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20300313T164500 RRULE:INTERVAL=5;FREQ=MINUTELY EXRULE:FREQ=MINUTELY;INTERVAL=5;BYDAY=WE;BYHOUR=17,18',
|
||||
[
|
||||
'2030-03-13 16:45:00-04:00',
|
||||
'2030-03-13 16:50:00-04:00',
|
||||
'2030-03-13 16:55:00-04:00',
|
||||
'2030-03-13 19:00:00-04:00',
|
||||
'2030-03-13 19:05:00-04:00',
|
||||
'2030-03-13 19:10:00-04:00',
|
||||
'2030-03-13 19:15:00-04:00',
|
||||
'2030-03-13 19:20:00-04:00',
|
||||
'2030-03-13 19:25:00-04:00',
|
||||
'2030-03-13 19:30:00-04:00',
|
||||
],
|
||||
id="Every 5 minutes but not Wednesdays from 5-7pm",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20300426T100100 RRULE:INTERVAL=15;FREQ=MINUTELY;BYDAY=MO,TU,WE,TH,FR;BYHOUR=10,11 EXRULE:INTERVAL=15;FREQ=MINUTELY;BYDAY=MO,TU,WE,TH,FR;BYHOUR=11;BYMINUTE=3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,34,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59',
|
||||
[
|
||||
'2030-04-26 10:01:00-04:00',
|
||||
'2030-04-26 10:16:00-04:00',
|
||||
'2030-04-26 10:31:00-04:00',
|
||||
'2030-04-26 10:46:00-04:00',
|
||||
'2030-04-26 11:01:00-04:00',
|
||||
'2030-04-29 10:01:00-04:00',
|
||||
'2030-04-29 10:16:00-04:00',
|
||||
'2030-04-29 10:31:00-04:00',
|
||||
'2030-04-29 10:46:00-04:00',
|
||||
'2030-04-29 11:01:00-04:00',
|
||||
],
|
||||
id="Every 15 minutes Monday - Friday from 10:01am to 11:02pm (inclusive)",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART:20301219T130551Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYMONTHDAY=12,13,14,15,16,17,18',
|
||||
[
|
||||
'2031-01-18 13:05:51+00:00',
|
||||
'2031-02-15 13:05:51+00:00',
|
||||
'2031-03-15 13:05:51+00:00',
|
||||
'2031-04-12 13:05:51+00:00',
|
||||
'2031-05-17 13:05:51+00:00',
|
||||
'2031-06-14 13:05:51+00:00',
|
||||
'2031-07-12 13:05:51+00:00',
|
||||
'2031-08-16 13:05:51+00:00',
|
||||
'2031-09-13 13:05:51+00:00',
|
||||
'2031-10-18 13:05:51+00:00',
|
||||
],
|
||||
id="Any Saturday whose month day is between 12 and 18",
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_complex_schedule(post, admin_user, rrule, expected_result):
|
||||
# Every day except Sunday, 2022-05-01 is a Sunday
|
||||
|
||||
url = reverse('api:schedule_rrule')
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert list(map(str, r.data['local'])) == expected_result
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_zoneinfo(get, admin_user):
|
||||
url = reverse('api:schedule_zoneinfo')
|
||||
r = get(url, admin_user, expect=200)
|
||||
assert {'name': 'America/New_York'} in r.data
|
||||
assert 'America/New_York' in r.data['zones']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
awx/main/tests/functional/commands/test_callback_receiver.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import pytest

from awx.main.dispatch.worker.callback import job_stats_wrapup
from awx.main.models.jobs import Job


@pytest.mark.django_db
def test_wrapup_does_not_send_notifications(mocker):
    job = Job.objects.create(status='running')
    assert job.host_status_counts is None
    mock = mocker.patch('awx.main.models.notifications.JobNotificationMixin.send_notification_templates')
    job_stats_wrapup(job.id)
    job.refresh_from_db()
    assert job.host_status_counts == {}
    mock.assert_not_called()


@pytest.mark.django_db
def test_wrapup_does_send_notifications(mocker):
    job = Job.objects.create(status='successful')
    assert job.host_status_counts is None
    mock = mocker.patch('awx.main.models.notifications.JobNotificationMixin.send_notification_templates')
    job_stats_wrapup(job.id)
    job.refresh_from_db()
    assert job.host_status_counts == {}
    mock.assert_called_once_with('succeeded')
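job_stats_wrapup itself is not shown in this diff. Purely as a reading aid, here is a hypothetical reconstruction of only the behavior the two tests above pin down (host_status_counts becomes an empty dict when the job has no events, and notifications are sent only for jobs in a final state); the compute_host_status_counts helper and the _sketch suffix mark this as illustrative, not AWX code:

from awx.main.models.jobs import Job


def compute_host_status_counts(job):
    # Hypothetical helper: tally per-host outcomes from the job's events.
    # With no events recorded, the tally is simply an empty dict.
    return {}


def job_stats_wrapup_sketch(job_id):
    # Sketch of the test-observable behavior only, not the real implementation.
    job = Job.objects.get(pk=job_id)
    job.host_status_counts = compute_host_status_counts(job)
    job.save(update_fields=['host_status_counts'])
    if job.status in ('successful', 'failed'):  # skip notifications while the job is still running
        job.send_notification_templates('succeeded' if job.status == 'successful' else 'failed')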
@@ -0,0 +1,40 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.management.commands.provision_instance import Command
|
||||
from awx.main.models.ha import InstanceGroup, Instance
|
||||
from awx.main.tasks.system import apply_cluster_membership_policies
|
||||
|
||||
from django.test.utils import override_settings
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_traditional_registration():
|
||||
assert not Instance.objects.exists()
|
||||
assert not InstanceGroup.objects.exists()
|
||||
|
||||
Command().handle(hostname='bar_node', node_type='execution', uuid='4321')
|
||||
|
||||
inst = Instance.objects.first()
|
||||
assert inst.hostname == 'bar_node'
|
||||
assert inst.node_type == 'execution'
|
||||
assert inst.uuid == '4321'
|
||||
|
||||
assert not InstanceGroup.objects.exists()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_register_self_openshift():
|
||||
assert not Instance.objects.exists()
|
||||
assert not InstanceGroup.objects.exists()
|
||||
|
||||
with override_settings(AWX_AUTO_DEPROVISION_INSTANCES=True, CLUSTER_HOST_ID='foo_node', SYSTEM_UUID='12345'):
|
||||
Command().handle()
|
||||
inst = Instance.objects.first()
|
||||
assert inst.hostname == 'foo_node'
|
||||
assert inst.uuid == '12345'
|
||||
assert inst.node_type == 'control'
|
||||
|
||||
apply_cluster_membership_policies() # populate instance list using policy rules
|
||||
|
||||
assert list(InstanceGroup.objects.get(name='default').instances.all()) == [] # container group
|
||||
assert list(InstanceGroup.objects.get(name='controlplane').instances.all()) == [inst]
|
||||
@@ -52,10 +52,12 @@ class TestKeyRegeneration:
|
||||
settings.cache.delete('REDHAT_PASSWORD')
|
||||
|
||||
# verify that the old SECRET_KEY doesn't work
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
with pytest.raises(InvalidToken):
|
||||
settings.REDHAT_PASSWORD
|
||||
|
||||
# verify that the new SECRET_KEY *does* work
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
with override_settings(SECRET_KEY=new_key):
|
||||
assert settings.REDHAT_PASSWORD == 'sensitive'
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.