Mirror of https://github.com/ansible/awx.git (synced 2026-02-10 06:04:42 -03:30)

Compare commits: 21.0.0...revert-124 (368 commits)
.github/BOTMETA.yml
@@ -1,17 +0,0 @@
---
files:
awx/ui/:
labels: component:ui
maintainers: $team_ui
awx/api/:
labels: component:api
maintainers: $team_api
awx/main/:
labels: component:api
maintainers: $team_api
installer/:
labels: component:installer

macros:
team_api: wwitzel3 matburt chrismeyersfsu cchurch AlanCoding ryanpetrello rooftopcellist
team_ui: jlmitch5 jaredevantabor mabashian marshmalien benthomasson jakemcdermott
.github/CODEOWNERS
@@ -1 +0,0 @@
workflows/e2e_test.yml @tiagodread @shanemcd @jakemcdermott
.github/ISSUE_TEMPLATE.md
@@ -6,17 +6,37 @@ practices regarding responsible disclosure, see
https://www.ansible.com/security
-->

<!--
PLEASE DO NOT USE A BLANK TEMPLATE IN THE AWX REPO.
This is a legacy template used for internal testing ONLY.

Any issues opened will this template will be automatically closed.

Instead use the bug or feature request.
-->

##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
- Feature Idea
- Documentation
- Breaking Change
- New or Enhanced Feature
- Bug or Docs Fix

##### COMPONENT NAME
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
- API
- UI
- Collection
- Docs
- CLI
- Other

##### SUMMARY
<!-- Briefly describe the problem. -->
.github/ISSUE_TEMPLATE/bug_report.yml
@@ -1,13 +1,12 @@
---
name: Bug Report
description: Create a report to help us improve
description: "🐞 Create a report to help us improve"
body:
- type: markdown
attributes:
value: |
Issues are for **concrete, actionable bugs and feature requests** only. For debugging help or technical support, please use:
- The #ansible-awx channel on irc.libera.chat
- The awx project mailing list, https://groups.google.com/forum/#!forum/awx-project
Bug Report issues are for **concrete, actionable bugs** only.
For debugging help or technical support, please see the [Get Involved section of our README](https://github.com/ansible/awx#get-involved)

- type: checkboxes
id: terms
@@ -24,7 +23,7 @@ body:
- type: textarea
id: summary
attributes:
label: Summary
label: Bug Summary
description: Briefly describe the problem.
validations:
required: false
@@ -45,6 +44,9 @@ body:
- label: UI
- label: API
- label: Docs
- label: Collection
- label: CLI
- label: Other

- type: dropdown
id: awx-install-method
@@ -57,9 +59,8 @@ body:
- minikube
- openshift
- minishift
- docker on linux
- docker for mac
- boot2docker
- docker development environment
- N/A
validations:
required: true
.github/ISSUE_TEMPLATE/config.yml (new file)
@@ -0,0 +1,12 @@
---
blank_issues_enabled: true
contact_links:
- name: For debugging help or technical support
url: https://github.com/ansible/awx#get-involved
about: For general debugging or technical support please see the Get Involved section of our readme.
- name: 📝 Ansible Code of Conduct
url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
about: AWX uses the Ansible Code of Conduct; ❤ Be nice to other members of the community. ☮ Behave.
- name: 💼 For Enterprise
url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser
about: Red Hat offers support for the Ansible Automation Platform
.github/ISSUE_TEMPLATE/feature_request.md
@@ -1,17 +0,0 @@
---
name: "✨ Feature request"
about: Suggest an idea for this project

---
<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

- http://web.libera.chat/?channels=#ansible-awx
- https://groups.google.com/forum/#!forum/awx-project

We have to limit this because of limited volunteer time to respond to issues! -->

##### ISSUE TYPE
- Feature Idea

##### SUMMARY
<!-- Briefly describe the problem or desired enhancement. -->
.github/ISSUE_TEMPLATE/feature_request.yml (new file)
@@ -0,0 +1,42 @@
---
name: ✨ Feature request
description: Suggest an idea for this project
body:
- type: markdown
attributes:
value: |
Feature Request issues are for **feature requests** only.
For debugging help or technical support, please see the [Get Involved section of our README](https://github.com/ansible/awx#get-involved)

- type: checkboxes
id: terms
attributes:
label: Please confirm the following
options:
- label: I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
required: true
- label: I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
required: true
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
required: true

- type: textarea
id: summary
attributes:
label: Feature Summary
description: Briefly describe the desired enhancement.
validations:
required: true

- type: checkboxes
id: components
attributes:
label: Select the relevant components
options:
- label: UI
- label: API
- label: Docs
- label: Collection
- label: CLI
- label: Other
Security bug report issue template
@@ -1,9 +0,0 @@
---
name: "\U0001F525 Security bug report"
about: How to report security vulnerabilities

---

For all security related bugs, email security@ansible.com instead of using this issue tracker and you will receive a prompt response.

For more information on the Ansible community's practices regarding responsible disclosure, see https://www.ansible.com/security
.github/LABEL_MAP.md
@@ -1,9 +0,0 @@
Bug Report: type:bug
Bugfix Pull Request: type:bug
Feature Request: type:enhancement
Feature Pull Request: type:enhancement
UI: component:ui
API: component:api
Installer: component:installer
Docs Pull Request: component:docs
Documentation: component:docs
.github/PULL_REQUEST_TEMPLATE.md
@@ -1,11 +1,3 @@
<!--- changelog-entry
# Fill in 'msg' below to have an entry automatically added to the next release changelog.
# Leaving 'msg' blank will not generate a changelog entry for this PR.
# Please ensure this is a simple (and readable) one-line string.
---
msg: ""
-->

##### SUMMARY
<!--- Describe the change, including rationale and design decisions -->

@@ -17,15 +9,18 @@ the change does.

##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Feature Pull Request
- Bugfix Pull Request
- Docs Pull Request
- Breaking Change
- New or Enhanced Feature
- Bug or Docs Fix

##### COMPONENT NAME
<!--- Name of the module/plugin/module/task -->
- API
- UI
- Collection
- CLI
- Docs
- Other

##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
.github/dependabot.yml (new file)
@@ -0,0 +1,19 @@
version: 2
updates:
- package-ecosystem: "npm"
directory: "/awx/ui"
schedule:
interval: "monthly"
open-pull-requests-limit: 5
allow:
- dependency-type: "production"
reviewers:
- "AlexSCorey"
- "keithjgrant"
- "kialam"
- "mabashian"
- "marshmalien"
labels:
- "component:ui"
- "dependencies"
target-branch: "devel"
.github/issue_labeler.yml
@@ -1,12 +1,16 @@
needs_triage:
- '.*'
"type:bug":
- "Please confirm the following"
- "Bug Summary"
"type:enhancement":
- "Feature Idea"
- "Feature Summary"
"component:ui":
- "\\[X\\] UI"
"component:api":
- "\\[X\\] API"
"component:docs":
- "\\[X\\] Docs"
"component:awx_collection":
- "\\[X\\] Collection"
"component:cli":
- "\\[X\\] awxkit"
.github/pr_labeler.yml
@@ -1,14 +1,19 @@
"component:api":
- any: ['awx/**/*', '!awx/ui/*']
- any: ["awx/**/*", "!awx/ui/**"]

"component:ui":
- any: ['awx/ui/**/*']
- any: ["awx/ui/**/*"]

"component:docs":
- any: ['docs/**/*']
- any: ["docs/**/*"]

"component:cli":
- any: ['awxkit/**/*']
- any: ["awxkit/**/*"]

"component:collection":
- any: ['awx_collection/**/*']
"component:awx_collection":
- any: ["awx_collection/**/*"]

"dependencies":
- any: ["awx/ui/package.json"]
- any: ["awx/requirements/*.txt"]
- any: ["awx/requirements/requirements.in"]
.github/triage_replies.md
@@ -1,31 +1,111 @@
## General
- For the roundup of all the different mailing lists available from AWX, Ansible, and beyond visit: https://docs.ansible.com/ansible/latest/community/communication.html
- For the roundup of all the different mailing lists available from AWX, Ansible, and beyond visit: https://docs.ansible.com/ansible/latest/community/communication.html
- Hello, we think your question is answered in our FAQ. Does this: https://www.ansible.com/products/awx-project/faq cover your question?
- You can find the latest documentation here: https://docs.ansible.com/automation-controller/latest/html/userguide/index.html

## Visit our mailing list
- Hello, your question seems like a good one to ask on our mailing list at https://groups.google.com/g/awx-project. You can also join #ansible-awx on https://libera.chat/ and ask your question there.

## Create an issue
- Hello, thanks for reaching out on list. We think this merits an issue on our Github, https://github.com/ansible/awx/issues. If you could open an issue up on Github it will get tagged and integrated into our planning and workflow. All future work will be tracked there.

## Create a Pull Request
- Hello, we think your idea is good, please consider contributing a PR for this, following our contributing guidelines: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
## PRs/Issues

## Receptor
- You can find the receptor docs here: https://receptor.readthedocs.io/en/latest/
- Hello, your issue seems related to receptor, could you please open an issue in the receptor repository? https://github.com/ansible/receptor. Thanks!
### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.

## Ansible Engine not AWX
- Hello, your question seems to be about Ansible development, not about AWX. Try asking on the Ansible-devel specific mailing list: https://groups.google.com/g/ansible-devel
### Denied Submission

- Hi! \
\
Thanks very much for your submission to AWX. It means a lot to us that you have taken time to contribute. \
\
At this time we do not want to merge this PR. Our reasons for this are: \
\
(A) INSERT ITEM HERE \
\
Please know that we are always up for discussion but this project is very active. Because of this, we're unlikely to see comments made on closed PRs, and we lock them after some time. If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: \
\
https://github.com/ansible/awx/#get-involved \
\
In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it is not always necessary. \
\
Thank you once again for this and your interest in AWX!

### No Progress Issue
- Hi! \
\
Thank you very much for for this issue. It means a lot to us that you have taken time to contribute by opening this report. \
\
On this issue, there were comments added but it has been some time since then without response. At this time we are closing this issue. If you get time to address the comments we can reopen the issue if you can contact us by using any of the communication methods listed in the page below: \
\
https://github.com/ansible/awx/#get-involved \
\
Thank you once again for this and your interest in AWX!

### No Progress PR
- Hi! \
\
Thank you very much for your submission to AWX. It means a lot to us that you have taken time to contribute. \
\
On this PR, changes were requested but it has been some time since then. We think this PR has merit but without the requested changes we are unable to merge it. At this time we are closing your PR. If you get time to address the changes you are welcome to open another PR or we can reopen this PR upon request if you contact us by using any of the communication methods listed in the page below: \
\
https://github.com/ansible/awx/#get-involved \
\
Thank you once again for this and your interest in AWX!

## Common

### Give us more info
- Hello, we'd love to help, but we need a little more information about the problem you're having. Screenshots, log outputs, or any reproducers would be very helpful.

### Code of Conduct
- Hello. Please keep in mind that Ansible adheres to a Code of Conduct in its community spaces. The spirit of the code of conduct is to be kind, and this is your friendly reminder to be so. Please see the full code of conduct here if you have questions: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html

### EE Contents / Community General
- Hello. The awx-ee contains the collections and dependencies needed for supported AWX features to function. Anything beyond that (like the community.general package) will require you to build your own EE. For information on how to do that, see https://ansible-builder.readthedocs.io/en/stable/ \
\
The Ansible Community is looking at building an EE that corresponds to all of the collections inside the ansible package. That may help you if and when it happens; see https://github.com/ansible-community/community-topics/issues/31 for details.

## Mailing List Triage

### Create an issue
- Hello, thanks for reaching out on list. We think this merits an issue on our Github, https://github.com/ansible/awx/issues. If you could open an issue up on Github it will get tagged and integrated into our planning and workflow. All future work will be tracked there. Issues should include as much information as possible, including screenshots, log outputs, or any reproducers.

### Create a Pull Request
- Hello, we think your idea is good! Please consider contributing a PR for this following our contributing guidelines: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md

### Receptor
- You can find the receptor docs here: https://receptor.readthedocs.io/en/latest/
- Hello, your issue seems related to receptor. Could you please open an issue in the receptor repository? https://github.com/ansible/receptor. Thanks!

### Ansible Engine not AWX
- Hello, your question seems to be about Ansible development, not about AWX. Try asking on the Ansible-devel specific mailing list: https://groups.google.com/g/ansible-devel
- Hello, your question seems to be about using Ansible, not about AWX. https://groups.google.com/g/ansible-project is the best place to visit for user questions about Ansible. Thanks!

## Ansible Galaxy not AWX
- Hey there, that sounds like an FAQ question, did this: https://www.ansible.com/products/awx-project/faq cover your question?
### Ansible Galaxy not AWX
- Hey there. That sounds like an FAQ question. Did this: https://www.ansible.com/products/awx-project/faq cover your question?

## Contributing Guidelines
- AWX: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
### Contributing Guidelines
- AWX: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
- AWX-Operator: https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md

## Code of Conduct
- Hello. Please keep in mind that Ansible adheres to a Code of Conduct in its community spaces. The spirit of the code of conduct is to be kind, and this is your friendly reminder to be so. Please see the full code of conduct here if you have questions: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html
### AWX Release
Subject: Announcing AWX Xa.Ya.za and AWX-Operator Xb.Yb.zb

- Hi all, \
\
We're happy to announce that the next release of AWX, version <b>`Xa.Ya.za`</b> is now available! \
In addition AWX Operator version <b>`Xb.Yb.zb`</b> has also been released! \
\
Please see the releases pages for more details: \
AWX: https://github.com/ansible/awx/releases/tag/Xa.Ya.za \
Operator: https://github.com/ansible/awx-operator/releases/tag/Xb.Yb.zb \
\
The AWX team.

## Try latest version
- Hello, this issue pertains to an older version of AWX. Try upgrading to the latest version and let us know if that resolves your issue.
.github/workflows/ci.yml
@@ -113,7 +113,7 @@ jobs:
- name: Install playbook dependencies
run: |
python3 -m pip install docker
python3 -m pip install docker setuptools_scm

- name: Build AWX image
working-directory: awx
.github/workflows/promote.yml
@@ -21,7 +21,7 @@ jobs:
- name: Install dependencies
run: |
python${{ env.py_version }} -m pip install wheel twine
python${{ env.py_version }} -m pip install wheel twine setuptools-scm

- name: Set official collection namespace
run: echo collection_namespace=awx >> $GITHUB_ENV
@@ -33,7 +33,7 @@ jobs:
- name: Build collection and publish to galaxy
run: |
COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
ansible-galaxy collection publish \
--token=${{ secrets.GALAXY_TOKEN }} \
awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz
@@ -70,4 +70,4 @@ jobs:
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository }}:latest
.github/workflows/stage.yml
@@ -65,7 +65,7 @@ jobs:
- name: Install playbook dependencies
run: |
python3 -m pip install docker
python3 -m pip install docker setuptools_scm

- name: Build and stage AWX
working-directory: awx
@@ -100,23 +100,10 @@ jobs:
AWX_TEST_IMAGE: ${{ github.repository }}
AWX_TEST_VERSION: ${{ github.event.inputs.version }}

- name: Generate changelog
uses: shanemcd/simple-changelog-generator@v1
id: changelog
with:
repo: "${{ github.repository }}"

- name: Write changelog to file
run: |
cat << 'EOF' > /tmp/awx-changelog
${{ steps.changelog.outputs.changelog }}
EOF

- name: Create draft release for AWX
working-directory: awx
run: |
ansible-playbook -v tools/ansible/stage.yml \
-e changelog_path=/tmp/awx-changelog \
-e repo=${{ github.repository }} \
-e awx_image=ghcr.io/${{ github.repository }} \
-e version=${{ github.event.inputs.version }} \
.gitignore
@@ -38,7 +38,6 @@ awx/ui/build
awx/ui/.env.local
awx/ui/instrumented
rsyslog.pid
tools/prometheus/data
tools/docker-compose/ansible/awx_dump.sql
tools/docker-compose/Dockerfile
tools/docker-compose/_build
@@ -154,6 +153,9 @@ use_dev_supervisor.txt
/sanity/
/awx_collection_build/

# Setup for metrics gathering
tools/prometheus/prometheus.yml

.idea/*
*.unison.tmp
*.#
Makefile
@@ -5,8 +5,8 @@ NPM_BIN ?= npm
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION := $(shell $(PYTHON) setup.py --version)
COLLECTION_VERSION := $(shell $(PYTHON) setup.py --version | cut -d . -f 1-3)
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)

# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -15,6 +15,12 @@ MAIN_NODE_TYPE ?= hybrid
KEYCLOAK ?= false
# If set to true docker-compose will also start an ldap instance
LDAP ?= false
# If set to true docker-compose will also start a splunk instance
SPLUNK ?= false
# If set to true docker-compose will also start a prometheus instance
PROMETHEUS ?= false
# If set to true docker-compose will also start a grafana instance
GRAFANA ?= false

VENV_BASE ?= /var/lib/awx/venv

@@ -43,7 +49,7 @@ I18N_FLAG_FILE = .i18n_built
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange \
receiver test test_unit test_coverage coverage_html \
dev_build release_build sdist \
sdist \
ui-release ui-devel \
VERSION PYTHON_VERSION docker-compose-sources \
.git/hooks/pre-commit
@@ -198,7 +204,7 @@ uwsgi: collectstatic
--logformat "%(addr) %(method) %(uri) - %(proto) %(status)"

awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel "$(DEV_RELOAD_COMMAND)"
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"

daphne:
@if [ "$(VENV_BASE)" ]; then \
@@ -267,7 +273,7 @@ api-lint:
yamllint -s .

awx-link:
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link

TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
@@ -286,6 +292,7 @@ COLLECTION_TEST_TARGET ?=
COLLECTION_PACKAGE ?= awx
COLLECTION_NAMESPACE ?= awx
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
COLLECTION_TEMPLATE_VERSION ?= false

test_collection:
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
@@ -313,7 +320,7 @@ awx_collection_build: $(shell find awx_collection -type f)
-e collection_package=$(COLLECTION_PACKAGE) \
-e collection_namespace=$(COLLECTION_NAMESPACE) \
-e collection_version=$(COLLECTION_VERSION) \
-e '{"awx_template_version":false}'
-e '{"awx_template_version": $(COLLECTION_TEMPLATE_VERSION)}'
ansible-galaxy collection build awx_collection_build --force --output-path=awx_collection_build

build_collection: awx_collection_build
@@ -417,21 +424,13 @@ ui-test-general:
$(NPM_BIN) run --prefix awx/ui pretest
$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand

# Build a pip-installable package into dist/ with a timestamped version number.
dev_build:
$(PYTHON) setup.py dev_build

# Build a pip-installable package into dist/ with the release version number.
release_build:
$(PYTHON) setup.py release_build

HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
endif
$(PYTHON) setup.py $(SDIST_COMMAND)
$(PYTHON) -m build -s
ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz

sdist: dist/$(SDIST_TAR_FILE)
@@ -466,7 +465,10 @@ docker-compose-sources: .git/hooks/pre-commit
-e execution_node_count=$(EXECUTION_NODE_COUNT) \
-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
-e enable_keycloak=$(KEYCLOAK) \
-e enable_ldap=$(LDAP)
-e enable_ldap=$(LDAP) \
-e enable_splunk=$(SPLUNK) \
-e enable_prometheus=$(PROMETHEUS) \
-e enable_grafana=$(GRAFANA)

docker-compose: awx/projects docker-compose-sources
@@ -514,7 +516,7 @@ docker-clean:
fi

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm tools_awx_db
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)

docker-refresh: docker-clean docker-compose

@@ -525,9 +527,6 @@ docker-compose-elk: awx/projects docker-compose-sources
docker-compose-cluster-elk: awx/projects docker-compose-sources
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

prometheus:
docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml

docker-compose-container-group:
MINIKUBE_CONTAINER_GROUP=true make docker-compose
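Note that VERSION now comes from tools/scripts/scm_version.py instead of `setup.py --version`; that helper is not included in this diff. A minimal sketch of what such a script could look like, assuming it only needs to print the setuptools_scm-derived version to stdout and is invoked from the repository root as the Makefile does (the actual script in the repo may differ):

    # Hypothetical sketch -- the real tools/scripts/scm_version.py is not shown in this diff.
    from setuptools_scm import get_version

    if __name__ == '__main__':
        # Print the version derived from git tags/commits (e.g. "21.1.0.dev3+gabc1234")
        # so that `make VERSION` and the collection build can capture it.
        print(get_version())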
@@ -6,9 +6,40 @@ import os
import sys
import warnings

from pkg_resources import get_distribution

__version__ = get_distribution('awx').version
def get_version():
version_from_file = get_version_from_file()
if version_from_file:
return version_from_file
else:
from setuptools_scm import get_version

version = get_version(root='..', relative_to=__file__)
return version


def get_version_from_file():
vf = version_file()
if vf:
with open(vf, 'r') as file:
return file.read().strip()


def version_file():
current_dir = os.path.dirname(os.path.abspath(__file__))
version_file = os.path.join(current_dir, '..', 'VERSION')

if os.path.exists(version_file):
return version_file


try:
import pkg_resources

__version__ = pkg_resources.get_distribution('awx').version
except pkg_resources.DistributionNotFound:
__version__ = get_version()

__all__ = ['__version__']

@@ -21,7 +52,6 @@ try:
except ImportError: # pragma: no cover
MODE = 'production'

import hashlib

try:
@@ -78,9 +108,10 @@ def oauth2_getattribute(self, attr):
# Custom method to override
# oauth2_provider.settings.OAuth2ProviderSettings.__getattribute__
from django.conf import settings
from oauth2_provider.settings import DEFAULTS

val = None
if 'migrate' not in sys.argv:
if (isinstance(attr, str)) and (attr in DEFAULTS) and (not attr.startswith('_')):
# certain Django OAuth Toolkit migrations actually reference
# setting lookups for references to model classes (e.g.,
# oauth2_settings.REFRESH_TOKEN_MODEL)
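In short, the version lookup above prefers the installed distribution metadata, then falls back to a VERSION file next to the package, and finally to setuptools_scm against the git checkout. A quick way to confirm which value wins in a given environment:

    python -c "import awx; print(awx.__version__)"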
@@ -1607,7 +1607,6 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):

class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):

host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
playbook_counts = serializers.SerializerMethodField(help_text=_('A count of all plays and tasks for the job run.'))

class Meta:
@@ -1622,14 +1621,6 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):

return data

def get_host_status_counts(self, obj):
try:
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except ProjectUpdateEvent.DoesNotExist:
counts = {}

return counts


class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
class Meta:
@@ -2082,7 +2073,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt

class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout', 'source_project', 'update_on_project_update') + (
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout', 'source_project') + (
'last_update_failed',
'last_updated',
) # Backwards compatibility.
@@ -2145,11 +2136,6 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value

def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value

def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
@@ -2200,7 +2186,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
else:
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'update_on_project_update']))
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path']))
if redundant_scm_fields:
raise serializers.ValidationError({"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))})

@@ -2245,7 +2231,6 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
'source_project_update',
'custom_virtualenv',
'instance_group',
'-controller_node',
)

def get_related(self, obj):
@@ -2320,7 +2305,6 @@ class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer


class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
@@ -2673,6 +2657,13 @@ class CredentialSerializer(BaseSerializer):

return credential_type

def validate_inputs(self, inputs):
if self.instance and self.instance.credential_type.kind == "vault":
if 'vault_id' in inputs and inputs['vault_id'] != self.instance.inputs['vault_id']:
raise ValidationError(_('Vault IDs cannot be changed once they have been created.'))

return inputs


class CredentialSerializerCreate(CredentialSerializer):

@@ -3107,7 +3098,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):

class JobDetailSerializer(JobSerializer):

host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
playbook_counts = serializers.SerializerMethodField(help_text=_('A count of all plays and tasks for the job run.'))
custom_virtualenv = serializers.ReadOnlyField()

@@ -3123,14 +3113,6 @@ class JobDetailSerializer(JobSerializer):

return data

def get_host_status_counts(self, obj):
try:
counts = obj.get_event_queryset().only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except JobEvent.DoesNotExist:
counts = {}

return counts


class JobCancelSerializer(BaseSerializer):

@@ -3319,21 +3301,10 @@ class AdHocCommandSerializer(UnifiedJobSerializer):


class AdHocCommandDetailSerializer(AdHocCommandSerializer):

host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))

class Meta:
model = AdHocCommand
fields = ('*', 'host_status_counts')

def get_host_status_counts(self, obj):
try:
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except AdHocCommandEvent.DoesNotExist:
counts = {}

return counts


class AdHocCommandCancelSerializer(AdHocCommandSerializer):

@@ -4502,7 +4473,10 @@ class NotificationTemplateSerializer(BaseSerializer):
body = messages[event].get('body', {})
if body:
try:
potential_body = json.loads(body)
rendered_body = (
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
)
potential_body = json.loads(rendered_body)
if not isinstance(potential_body, dict):
error_list.append(
_("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
@@ -4645,69 +4619,74 @@ class SchedulePreviewSerializer(BaseSerializer):

# We reject rrules if:
# - DTSTART is not include
# - INTERVAL is not included
# - SECONDLY is used
# - TZID is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
# - Multiple DTSTART
# - At least one of RRULE is not included
# - EXDATE or RDATE is included
# For any rule in the ruleset:
# - INTERVAL is not included
# - SECONDLY is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
match_multiple_rrule = re.findall(r".*?(RULE\:[^\s]*)", rrule_value)
errors = []
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
errors.append(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
errors.append(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
errors.append(_('Multiple DTSTART is not supported.'))
if "rrule:" not in rrule_value.lower():
errors.append(_('One or more rule required in rrule.'))
if "exdate:" in rrule_value.lower():
raise serializers.ValidationError(_('EXDATE not allowed in rrule.'))
if "rdate:" in rrule_value.lower():
raise serializers.ValidationError(_('RDATE not allowed in rrule.'))
for a_rule in match_multiple_rrule:
if 'interval' not in a_rule.lower():
errors.append("{0}: {1}".format(_('INTERVAL required in rrule'), a_rule))
elif 'secondly' in a_rule.lower():
errors.append("{0}: {1}".format(_('SECONDLY is not supported'), a_rule))
if re.match(by_day_with_numeric_prefix, a_rule):
errors.append("{0}: {1}".format(_("BYDAY with numeric prefix not supported"), a_rule))
if 'COUNT' in a_rule and 'UNTIL' in a_rule:
errors.append("{0}: {1}".format(_("RRULE may not contain both COUNT and UNTIL"), a_rule))
match_count = re.match(r".*?(COUNT\=[0-9]+)", a_rule)
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
errors.append("{0}: {1}".format(_("COUNT > 999 is unsupported"), a_rule))

try:
Schedule.rrulestr(rrule_value)
except Exception as e:
import traceback

logger.error(traceback.format_exc())
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
errors.append(_("rrule parsing failed validation: {}").format(e))

if errors:
raise serializers.ValidationError(errors)

return value


class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']

timezone = serializers.SerializerMethodField()
until = serializers.SerializerMethodField()
timezone = serializers.SerializerMethodField(
help_text=_(
'The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field.'
),
)
until = serializers.SerializerMethodField(
help_text=_('The date this schedule will end. This field is computed from the RRULE. If the schedule does not end an emptry string will be returned'),
)

class Meta:
model = Schedule
@@ -4761,13 +4740,6 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(
_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.'.format(value.source_project.name)
)
)
return value

def validate(self, attrs):
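The reworked validate_rrule above collects errors into a list and validates a ruleset rather than a single rule: it still requires exactly one non-naive DTSTART and at least one RRULE, rejects EXDATE and RDATE outright, and checks INTERVAL, SECONDLY, numeric-prefix BYDAY, COUNT combined with UNTIL, and COUNT > 999 per rule. As an illustration (not taken from the diff), a value along these lines would pass those checks:

    DTSTART;TZID=America/New_York:20220415T124133 RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=MO,WE,FR;COUNT=20 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=MO;BYMONTH=6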
@@ -115,7 +115,6 @@ from awx.api.metadata import RoleMetadata
|
||||
from awx.main.constants import ACTIVE_STATES, SURVEY_TYPE_MAPPING
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.api.views.mixin import (
|
||||
ControlledByScmMixin,
|
||||
InstanceGroupMembershipMixin,
|
||||
OrganizationCountsMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
@@ -537,6 +536,7 @@ class ScheduleList(ListCreateAPIView):
|
||||
name = _("Schedules")
|
||||
model = models.Schedule
|
||||
serializer_class = serializers.ScheduleSerializer
|
||||
ordering = ('id',)
|
||||
|
||||
|
||||
class ScheduleDetail(RetrieveUpdateDestroyAPIView):
|
||||
@@ -577,8 +577,7 @@ class ScheduleZoneInfo(APIView):
|
||||
swagger_topic = 'System Configuration'
|
||||
|
||||
def get(self, request):
|
||||
zones = [{'name': zone} for zone in models.Schedule.get_zoneinfo()]
|
||||
return Response(zones)
|
||||
return Response({'zones': models.Schedule.get_zoneinfo(), 'links': models.Schedule.get_zoneinfo_links()})
|
||||
|
||||
|
||||
class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
|
||||
@@ -1675,7 +1674,7 @@ class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
return Response(dict(error=_(str(e))), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class HostDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = models.Host
|
||||
@@ -1709,7 +1708,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
|
||||
return qs
|
||||
|
||||
|
||||
class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
|
||||
class HostGroupsList(SubListCreateAttachDetachAPIView):
|
||||
'''the list of groups a host is directly a member of'''
|
||||
|
||||
model = models.Group
|
||||
@@ -1825,7 +1824,7 @@ class EnforceParentRelationshipMixin(object):
|
||||
return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs)
|
||||
|
||||
|
||||
class GroupChildrenList(ControlledByScmMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
|
||||
class GroupChildrenList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
|
||||
|
||||
model = models.Group
|
||||
serializer_class = serializers.GroupSerializer
|
||||
@@ -1871,7 +1870,7 @@ class GroupPotentialChildrenList(SubListAPIView):
|
||||
return qs.exclude(pk__in=except_pks)
|
||||
|
||||
|
||||
class GroupHostsList(HostRelatedSearchMixin, ControlledByScmMixin, SubListCreateAttachDetachAPIView):
|
||||
class GroupHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
|
||||
'''the list of hosts directly below a group'''
|
||||
|
||||
model = models.Host
|
||||
@@ -1935,7 +1934,7 @@ class GroupActivityStreamList(SubListAPIView):
|
||||
return qs.filter(Q(group=parent) | Q(host__in=parent.hosts.all()))
|
||||
|
||||
|
||||
class GroupDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
class GroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = models.Group
|
||||
serializer_class = serializers.GroupSerializer
|
||||
@@ -3849,7 +3848,7 @@ class JobJobEventsChildrenSummary(APIView):
meta_events = ('debug', 'verbose', 'warning', 'error', 'system_warning', 'deprecated')

def get(self, request, **kwargs):
resp = dict(children_summary={}, meta_event_nested_uuid={}, event_processing_finished=False)
resp = dict(children_summary={}, meta_event_nested_uuid={}, event_processing_finished=False, is_tree=True)
job = get_object_or_404(models.Job, pk=kwargs['pk'])
if not job.event_processing_finished:
return Response(resp)
@@ -3869,13 +3868,41 @@ class JobJobEventsChildrenSummary(APIView):
# key is counter of meta events (i.e. verbose), value is uuid of the assigned parent
map_meta_counter_nested_uuid = {}

# a collapsible tree view in the UI only makes sense for a tree-like
# hierarchy. If Ansible is run with a strategy like free or host_pinned, then
# events can arrive out of sequential order and no longer follow a tree structure
# E1
# E2
# E3
# E4 <- parent is E3
# E5 <- parent is E1
# in the above, there is no clear way to collapse E1, because E5 comes after
# E3, which occurs after E1. Thus the tree view should be disabled.

# mark the last seen uuid at a given level (0-3)
# if a parent uuid is not in this list, then we know the events are not tree-like
# and return a response with is_tree: False
level_current_uuid = [None, None, None, None]

prev_non_meta_event = events[0]
for i, e in enumerate(events):
if not e['event'] in JobJobEventsChildrenSummary.meta_events:
prev_non_meta_event = e
if not e['uuid']:
continue

if not e['event'] in JobJobEventsChildrenSummary.meta_events:
level = models.JobEvent.LEVEL_FOR_EVENT[e['event']]
level_current_uuid[level] = e['uuid']
# if setting level 1, for example, set levels 2 and 3 back to None
for u in range(level + 1, len(level_current_uuid)):
level_current_uuid[u] = None

puuid = e['parent_uuid']
if puuid and puuid not in level_current_uuid:
# improper tree detected, so bail out early
resp['is_tree'] = False
return Response(resp)

# if event is verbose (or debug, etc), we need to "assign" it a
# parent. This code looks at the event level of the previous
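The ordering check described in the comments above can be reduced to a small, self-contained sketch; the event uuids and levels below are hypothetical stand-ins for real job event data, not AWX code:

# Track the last uuid seen at each nesting level; an event whose parent is not
# among them means the sequence cannot be rendered as a collapsible tree.
def is_tree(events):
    level_current_uuid = [None, None, None, None]
    for e in events:
        if e['parent_uuid'] and e['parent_uuid'] not in level_current_uuid:
            return False
        level_current_uuid[e['level']] = e['uuid']
        for lvl in range(e['level'] + 1, len(level_current_uuid)):
            level_current_uuid[lvl] = None
    return True

events = [
    {'uuid': 'E1', 'parent_uuid': None, 'level': 0},
    {'uuid': 'E2', 'parent_uuid': 'E1', 'level': 1},
    {'uuid': 'E3', 'parent_uuid': 'E2', 'level': 2},
    {'uuid': 'E4', 'parent_uuid': 'E3', 'level': 3},
    {'uuid': 'E5', 'parent_uuid': 'E1', 'level': 1},  # allowed: E1 is still the last level-0 uuid
    {'uuid': 'E6', 'parent_uuid': 'E3', 'level': 3},  # breaks the tree: E3 was cleared when E5 reset levels 2-3
]
print(is_tree(events))  # False, so the endpoint would report is_tree=False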
|
||||
|
||||
@@ -41,7 +41,7 @@ from awx.api.serializers import (
|
||||
JobTemplateSerializer,
|
||||
LabelSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin
|
||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin
|
||||
|
||||
from awx.api.pagination import UnifiedJobEventPagination
|
||||
|
||||
@@ -75,7 +75,7 @@ class InventoryList(ListCreateAPIView):
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
|
||||
class InventoryDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = Inventory
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
@@ -10,13 +10,12 @@ from django.shortcuts import get_object_or_404
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from rest_framework.permissions import SAFE_METHODS
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.utils import get_object_or_400, parse_yaml_or_json
|
||||
from awx.main.utils import get_object_or_400
|
||||
from awx.main.models.ha import Instance, InstanceGroup
|
||||
from awx.main.models.organization import Team
|
||||
from awx.main.models.projects import Project
|
||||
@@ -186,35 +185,6 @@ class OrganizationCountsMixin(object):
|
||||
return full_context
|
||||
|
||||
|
||||
class ControlledByScmMixin(object):
|
||||
"""
|
||||
Special method to reset SCM inventory commit hash
|
||||
if anything that it manages changes.
|
||||
"""
|
||||
|
||||
def _reset_inv_src_rev(self, obj):
|
||||
if self.request.method in SAFE_METHODS or not obj:
|
||||
return
|
||||
project_following_sources = obj.inventory_sources.filter(update_on_project_update=True, source='scm')
|
||||
if project_following_sources:
|
||||
# Allow inventory changes unrelated to variables
|
||||
if self.model == Inventory and (
|
||||
not self.request or not self.request.data or parse_yaml_or_json(self.request.data.get('variables', '')) == parse_yaml_or_json(obj.variables)
|
||||
):
|
||||
return
|
||||
project_following_sources.update(scm_last_revision='')
|
||||
|
||||
def get_object(self):
|
||||
obj = super(ControlledByScmMixin, self).get_object()
|
||||
self._reset_inv_src_rev(obj)
|
||||
return obj
|
||||
|
||||
def get_parent_object(self):
|
||||
obj = super(ControlledByScmMixin, self).get_parent_object()
|
||||
self._reset_inv_src_rev(obj)
|
||||
return obj
|
||||
|
||||
|
||||
class NoTruncateMixin(object):
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
|
||||
@@ -204,7 +204,7 @@ class GitlabWebhookReceiver(WebhookReceiverBase):
|
||||
return h.hexdigest()
|
||||
|
||||
def get_event_status_api(self):
|
||||
if self.get_event_type() != 'Merge Request Hook':
|
||||
if self.get_event_type() not in self.ref_keys.keys():
|
||||
return
|
||||
project = self.request.data.get('project', {})
|
||||
repo_url = project.get('web_url')
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Python
|
||||
import contextlib
|
||||
import logging
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import os
|
||||
@@ -31,7 +30,7 @@ from awx.conf.models import Setting
|
||||
|
||||
logger = logging.getLogger('awx.conf.settings')
|
||||
|
||||
SETTING_MEMORY_TTL = 5 if 'callback_receiver' in ' '.join(sys.argv) else 0
|
||||
SETTING_MEMORY_TTL = 5
|
||||
|
||||
# Store a special value to indicate when a setting is not set in the database.
|
||||
SETTING_CACHE_NOTSET = '___notset___'
|
||||
@@ -234,6 +233,8 @@ class SettingsWrapper(UserSettingsHolder):
self.__dict__['_awx_conf_init_readonly'] = False
self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
self.__dict__['registry'] = registry
self.__dict__['_awx_conf_memoizedcache'] = cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL)
self.__dict__['_awx_conf_memoizedcache_lock'] = threading.Lock()

# record the current pid so we compare it post-fork for
# processes like the dispatcher and callback receiver
@@ -396,12 +397,20 @@ class SettingsWrapper(UserSettingsHolder):
def SETTINGS_MODULE(self):
return self._get_default('SETTINGS_MODULE')

@cachetools.cached(cache=cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL))
@cachetools.cachedmethod(
cache=lambda self: self.__dict__['_awx_conf_memoizedcache'],
key=lambda *args, **kwargs: SettingsWrapper.hashkey(*args, **kwargs),
lock=lambda self: self.__dict__['_awx_conf_memoizedcache_lock'],
)
def _get_local_with_cache(self, name):
"""Get value while accepting the in-memory cache if key is available"""
with _ctit_db_wrapper(trans_safe=True):
return self._get_local(name)

def __getattr__(self, name):
value = empty
if name in self.all_supported_settings:
with _ctit_db_wrapper(trans_safe=True):
value = self._get_local(name)
value = self._get_local_with_cache(name)
if value is not empty:
return value
return self._get_default(name)
@@ -475,6 +484,23 @@ class SettingsWrapper(UserSettingsHolder):
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return set_locally or set_on_default

@classmethod
def hashkey(cls, *args, **kwargs):
"""
Usage of @cachetools.cached has changed to @cachetools.cachedmethod
The previous cachetools decorator called the hash function and passed in (self, key).
The new cachetools decorator calls the hash function with just (key).
Ideally, we would continue to pass self, however, the cachetools decorator interface
does not allow us to.

This hashkey function is to maintain that the key generated looks like
('<SettingsWrapper>', key). The thought is that maybe it is important to namespace
our cache to the SettingsWrapper scope in case some other usage of this cache exists.
I cannot think of how any other system could and would use our private cache, but
for safety's sake we are ensuring the key schema does not change.
"""
return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
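A minimal sketch (not AWX code) of the decorator pattern used above: the TTL cache and lock live on the instance, and a custom key function namespaces every entry. Class and method names are illustrative only.

import threading
import cachetools
import cachetools.keys


class Wrapper:
    def __init__(self):
        self._cache = cachetools.TTLCache(maxsize=128, ttl=5)
        self._lock = threading.Lock()

    @classmethod
    def hashkey(cls, *args, **kwargs):
        # prepend a class marker so the key schema stays namespaced
        return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)

    @cachetools.cachedmethod(
        cache=lambda self: self._cache,
        key=lambda *args, **kwargs: Wrapper.hashkey(*args, **kwargs),
        lock=lambda self: self._lock,
    )
    def lookup(self, name):
        print(f"computing {name}")   # only printed on a cache miss
        return name.lower()


w = Wrapper()
w.lookup('AWX_VAR')
w.lookup('AWX_VAR')              # second call is served from the TTL cache
print(list(w._cache.keys()))     # e.g. [('<Wrapper>', 'AWX_VAR')] with the cachetools release this change targets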
|
||||
|
||||
|
||||
def __getattr_without_cache__(self, name):
|
||||
# Django 1.10 added an optimization to settings lookup:
|
||||
|
||||
@@ -28,6 +28,9 @@ def handle_setting_change(key, for_delete=False):
|
||||
cache_keys = {Setting.get_cache_key(k) for k in setting_keys}
|
||||
cache.delete_many(cache_keys)
|
||||
|
||||
# if we have changed a setting, we want to avoid mucking with the in-memory cache entirely
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
# Send setting_changed signal with new value for each setting.
|
||||
for setting_key in setting_keys:
|
||||
setting_changed.send(sender=Setting, setting=setting_key, value=getattr(settings, setting_key, None), enter=not bool(for_delete))
|
||||
|
||||
@@ -8,6 +8,8 @@ import codecs
|
||||
from uuid import uuid4
|
||||
import time
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
@@ -299,3 +301,33 @@ def test_readonly_sensitive_cache_data_is_encrypted(settings):
|
||||
cache.set('AWX_ENCRYPTED', 'SECRET!')
|
||||
assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
|
||||
assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR='DEFAULT')
|
||||
def test_in_memory_cache_only_for_registered_settings(settings):
|
||||
"Test that we only make use of the in-memory TTL cache for registered settings"
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
settings.MIDDLEWARE
|
||||
assert len(settings._awx_conf_memoizedcache) == 0 # does not cache MIDDLEWARE
|
||||
settings.registry.register('AWX_VAR', field_class=fields.CharField, category=_('System'), category_slug='system')
|
||||
settings._wrapped.__dict__['all_supported_settings'] = ['AWX_VAR'] # because it is cached_property
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
assert len(settings._awx_conf_memoizedcache) == 1 # caches registered settings
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR='DEFAULT')
|
||||
def test_in_memory_cache_works(settings):
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
settings.registry.register('AWX_VAR', field_class=fields.CharField, category=_('System'), category_slug='system')
|
||||
settings._wrapped.__dict__['all_supported_settings'] = ['AWX_VAR']
|
||||
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('awx.conf.settings.SettingsWrapper._get_local', return_value='DEFAULT') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_called_once_with('AWX_VAR')
|
||||
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
@@ -12,8 +12,6 @@ from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from psycopg2.errors import UntranslatableCharacter
|
||||
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
|
||||
from awx.main import models
|
||||
@@ -131,7 +129,7 @@ def config(since, **kwargs):
|
||||
}
|
||||
|
||||
|
||||
@register('counts', '1.1', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||
@register('counts', '1.2', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||
def counts(since, **kwargs):
|
||||
counts = {}
|
||||
for cls in (
|
||||
@@ -174,6 +172,13 @@ def counts(since, **kwargs):
|
||||
.count()
|
||||
)
|
||||
counts['pending_jobs'] = models.UnifiedJob.objects.exclude(launch_type='sync').filter(status__in=('pending',)).count()
|
||||
if connection.vendor == 'postgresql':
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"select count(*) from pg_stat_activity where datname=\'{connection.settings_dict['NAME']}\'")
|
||||
counts['database_connections'] = cursor.fetchone()[0]
|
||||
else:
|
||||
# We should be using postgresql, but if we do that change that ever we should change the below value
|
||||
counts['database_connections'] = 1
|
||||
return counts
|
||||
|
||||
|
||||
@@ -378,10 +383,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
|
||||
WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
|
||||
return query
|
||||
|
||||
try:
|
||||
return _copy_table(table='events', query=query(f"{tbl}.event_data::jsonb"), path=full_path)
|
||||
except UntranslatableCharacter:
|
||||
return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::jsonb"), path=full_path)
|
||||
return _copy_table(table='events', query=query(fr"replace({tbl}.event_data, '\u', '\u005cu')::jsonb"), path=full_path)
|
||||
|
||||
|
||||
@register('events_table', '1.5', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||
@@ -394,7 +396,7 @@ def events_table_partitioned_modified(since, full_path, until, **kwargs):
|
||||
return _events_table(since, full_path, until, 'main_jobevent', 'modified', project_job_created=True, **kwargs)
|
||||
|
||||
|
||||
@register('unified_jobs_table', '1.3', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
||||
@register('unified_jobs_table', '1.4', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
||||
def unified_jobs_table(since, full_path, until, **kwargs):
|
||||
unified_job_query = '''COPY (SELECT main_unifiedjob.id,
|
||||
main_unifiedjob.polymorphic_ctype_id,
|
||||
@@ -420,7 +422,8 @@ def unified_jobs_table(since, full_path, until, **kwargs):
|
||||
main_unifiedjob.job_explanation,
|
||||
main_unifiedjob.instance_group_id,
|
||||
main_unifiedjob.installed_collections,
|
||||
main_unifiedjob.ansible_version
|
||||
main_unifiedjob.ansible_version,
|
||||
main_job.forks
|
||||
FROM main_unifiedjob
|
||||
JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
|
||||
LEFT JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
|
||||
|
||||
@@ -126,6 +126,8 @@ def metrics():
|
||||
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
|
||||
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
|
||||
|
||||
DATABASE_CONNECTIONS = Gauge('awx_database_connections_total', 'Number of connections to database', registry=REGISTRY)
|
||||
|
||||
license_info = get_license()
|
||||
SYSTEM_INFO.info(
|
||||
{
|
||||
@@ -163,6 +165,8 @@ def metrics():
|
||||
USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
|
||||
USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])
|
||||
|
||||
DATABASE_CONNECTIONS.set(current_counts['database_connections'])
|
||||
|
||||
all_job_data = job_counts(None)
|
||||
statuses = all_job_data.get('status', {})
|
||||
for status, value in statuses.items():
|
||||
|
||||
@@ -8,7 +8,7 @@ from django.apps import apps
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
root_key = 'awx_metrics'
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
class BaseM:
|
||||
@@ -16,16 +16,22 @@ class BaseM:
|
||||
self.field = field
|
||||
self.help_text = help_text
|
||||
self.current_value = 0
|
||||
self.metric_has_changed = False
|
||||
|
||||
def clear_value(self, conn):
|
||||
def reset_value(self, conn):
|
||||
conn.hset(root_key, self.field, 0)
|
||||
self.current_value = 0
|
||||
|
||||
def inc(self, value):
|
||||
self.current_value += value
|
||||
self.metric_has_changed = True
|
||||
|
||||
def set(self, value):
|
||||
self.current_value = value
|
||||
self.metric_has_changed = True
|
||||
|
||||
def get(self):
|
||||
return self.current_value
|
||||
|
||||
def decode(self, conn):
|
||||
value = conn.hget(root_key, self.field)
|
||||
@@ -34,7 +40,9 @@ class BaseM:
def to_prometheus(self, instance_data):
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
for instance in instance_data:
output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
if self.field in instance_data[instance]:
# on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
return output_text
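For reference, the exposition text this renders for one gauge across two nodes looks roughly like the following (metric name, node names, and values are illustrative, not taken from a real cluster):

# HELP callback_receiver_events_in_memory Current number of events in memory (in transfer from redis to db)
# TYPE callback_receiver_events_in_memory gauge
callback_receiver_events_in_memory{node="awx-1"} 0
callback_receiver_events_in_memory{node="awx-2"} 12

With the guard added above, a stale instance whose hash lacks the field simply contributes no line instead of raising a KeyError.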
|
||||
|
||||
|
||||
@@ -46,8 +54,10 @@ class FloatM(BaseM):
|
||||
return 0.0
|
||||
|
||||
def store_value(self, conn):
|
||||
conn.hincrbyfloat(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
if self.metric_has_changed:
|
||||
conn.hincrbyfloat(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
self.metric_has_changed = False
|
||||
|
||||
|
||||
class IntM(BaseM):
|
||||
@@ -58,8 +68,10 @@ class IntM(BaseM):
|
||||
return 0
|
||||
|
||||
def store_value(self, conn):
|
||||
conn.hincrby(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
if self.metric_has_changed:
|
||||
conn.hincrby(root_key, self.field, self.current_value)
|
||||
self.current_value = 0
|
||||
self.metric_has_changed = False
|
||||
|
||||
|
||||
class SetIntM(BaseM):
|
||||
@@ -70,10 +82,9 @@
return 0

def store_value(self, conn):
# do not set value if it has not changed since last time this was called
if self.current_value is not None:
if self.metric_has_changed:
conn.hset(root_key, self.field, self.current_value)
self.current_value = None
self.metric_has_changed = False
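The same dirty-flag idea, reduced to a self-contained sketch in which a plain dict stands in for the redis hash (names are illustrative):

# Only write a metric back to the store when set() actually ran since the last
# flush; unchanged metrics cost no write at all.
class Gauge:
    def __init__(self, field):
        self.field = field
        self.value = 0
        self.dirty = False

    def set(self, value):
        self.value = value
        self.dirty = True

    def store(self, store):
        if self.dirty:
            store[self.field] = self.value
            self.dirty = False

store = {}
g = Gauge('task_manager_tasks_started')
g.store(store)   # no-op, nothing changed yet
g.set(3)
g.store(store)   # writes 3
print(store)     # {'task_manager_tasks_started': 3}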
|
||||
|
||||
|
||||
class SetFloatM(SetIntM):
|
||||
@@ -94,13 +105,13 @@ class HistogramM(BaseM):
|
||||
self.sum = IntM(field + '_sum', '')
|
||||
super(HistogramM, self).__init__(field, help_text)
|
||||
|
||||
def clear_value(self, conn):
|
||||
def reset_value(self, conn):
|
||||
conn.hset(root_key, self.field, 0)
|
||||
self.inf.clear_value(conn)
|
||||
self.sum.clear_value(conn)
|
||||
self.inf.reset_value(conn)
|
||||
self.sum.reset_value(conn)
|
||||
for b in self.buckets_to_keys.values():
|
||||
b.clear_value(conn)
|
||||
super(HistogramM, self).clear_value(conn)
|
||||
b.reset_value(conn)
|
||||
super(HistogramM, self).reset_value(conn)
|
||||
|
||||
def observe(self, value):
|
||||
for b in self.buckets:
|
||||
@@ -136,7 +147,7 @@ class HistogramM(BaseM):
|
||||
|
||||
|
||||
class Metrics:
|
||||
def __init__(self, auto_pipe_execute=True, instance_name=None):
|
||||
def __init__(self, auto_pipe_execute=False, instance_name=None):
|
||||
self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
|
||||
self.conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.last_pipe_execute = time.time()
|
||||
@@ -152,6 +163,8 @@ class Metrics:
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
if instance_name:
|
||||
self.instance_name = instance_name
|
||||
elif settings.IS_TESTING():
|
||||
self.instance_name = "awx_testing"
|
||||
else:
|
||||
self.instance_name = Instance.objects.me().hostname
|
||||
|
||||
@@ -161,15 +174,29 @@ class Metrics:
|
||||
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
|
||||
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
|
||||
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Time spent saving events to database'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
|
||||
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
|
||||
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
|
||||
HistogramM(
|
||||
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
|
||||
),
|
||||
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
|
||||
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
|
||||
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
|
||||
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
|
||||
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading all tasks from db'),
|
||||
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
|
||||
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
|
||||
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
|
||||
SetFloatM('task_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
|
||||
SetFloatM('task_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow jobs'),
|
||||
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('task_manager_schedule_calls', 'Number of calls to task manager schedule'),
|
||||
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
|
||||
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
|
||||
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
|
||||
]
|
||||
# turn metric list into dictionary with the metric name as a key
|
||||
self.METRICS = {}
|
||||
@@ -179,29 +206,39 @@ class Metrics:
|
||||
# track last time metrics were sent to other nodes
|
||||
self.previous_send_metrics = SetFloatM('send_metrics_time', 'Timestamp of previous send_metrics call')
|
||||
|
||||
def clear_values(self):
|
||||
def reset_values(self):
|
||||
# intended to be called once on app startup to reset all metric
|
||||
# values to 0
|
||||
for m in self.METRICS.values():
|
||||
m.clear_value(self.conn)
|
||||
m.reset_value(self.conn)
|
||||
self.metrics_have_changed = True
|
||||
self.conn.delete(root_key + "_lock")
|
||||
for m in self.conn.scan_iter(root_key + '_instance_*'):
|
||||
self.conn.delete(m)
|
||||
|
||||
def inc(self, field, value):
|
||||
if value != 0:
|
||||
self.METRICS[field].inc(value)
|
||||
self.metrics_have_changed = True
|
||||
if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
|
||||
if self.auto_pipe_execute is True:
|
||||
self.pipe_execute()
|
||||
|
||||
def set(self, field, value):
|
||||
self.METRICS[field].set(value)
|
||||
self.metrics_have_changed = True
|
||||
if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
|
||||
if self.auto_pipe_execute is True:
|
||||
self.pipe_execute()
|
||||
|
||||
def get(self, field):
|
||||
return self.METRICS[field].get()
|
||||
|
||||
def decode(self, field):
|
||||
return self.METRICS[field].decode(self.conn)
|
||||
|
||||
def observe(self, field, value):
|
||||
self.METRICS[field].observe(value)
|
||||
self.metrics_have_changed = True
|
||||
if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
|
||||
if self.auto_pipe_execute is True:
|
||||
self.pipe_execute()
|
||||
|
||||
def serialize_local_metrics(self):
|
||||
@@ -249,8 +286,8 @@ class Metrics:
|
||||
|
||||
def send_metrics(self):
|
||||
# more than one thread could be calling this at the same time, so should
|
||||
# get acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '_lock', thread_local=False)
|
||||
# acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '_lock')
|
||||
if not lock.acquire(blocking=False):
|
||||
return
|
||||
try:
|
||||
|
||||
@@ -10,6 +10,27 @@ from awx.main.models import Instance, UnifiedJob, WorkflowJob
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def startup_reaping():
|
||||
"""
|
||||
If this particular instance is starting, then we know that any running jobs are invalid
|
||||
so we will reap those jobs as a special action here
|
||||
"""
|
||||
me = Instance.objects.me()
|
||||
jobs = UnifiedJob.objects.filter(status='running', controller_node=me.hostname)
|
||||
job_ids = []
|
||||
for j in jobs:
|
||||
job_ids.append(j.id)
|
||||
j.status = 'failed'
|
||||
j.start_args = ''
|
||||
j.job_explanation += 'Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.'
|
||||
j.save(update_fields=['status', 'start_args', 'job_explanation'])
|
||||
if hasattr(j, 'send_notification_templates'):
|
||||
j.send_notification_templates('failed')
|
||||
j.websocket_emit_status('failed')
|
||||
if job_ids:
|
||||
logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup')
|
||||
|
||||
|
||||
def reap_job(j, status):
|
||||
if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
|
||||
# just in case, don't reap jobs that aren't running
|
||||
|
||||
@@ -169,8 +169,9 @@ class AWXConsumerPG(AWXConsumerBase):
logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
self.pg_down_time = time.time()
self.pg_is_down = True
if time.time() - self.pg_down_time > self.pg_max_wait:
logger.warning(f"Postgres event consumer has not recovered in {self.pg_max_wait} s, exiting")
current_downtime = time.time() - self.pg_down_time
if current_downtime > self.pg_max_wait:
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
raise
# Wait for a second before next attempt, but still listen for any shutdown signals
for i in range(10):
@@ -179,6 +180,10 @@ class AWXConsumerPG(AWXConsumerBase):
time.sleep(0.1)
for conn in db.connections.all():
conn.close_if_unusable_or_obsolete()
except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in dispatcher main loop')
raise
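The "wait a second, but keep listening for shutdown" loop above follows a common pattern; a minimal standalone sketch (the flag name is illustrative, not the consumer's real attribute):

import time

should_stop = False   # in the real consumer a signal handler flips a flag like this

def wait_one_second_responsively():
    # sleep in ten 0.1 s slices so a shutdown request is noticed within ~100 ms
    for _ in range(10):
        if should_stop:
            return
        time.sleep(0.1)

wait_one_second_responsively()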
|
||||
|
||||
|
||||
class BaseWorker(object):
|
||||
|
||||
@@ -4,10 +4,12 @@ import os
|
||||
import signal
|
||||
import time
|
||||
import traceback
|
||||
import datetime
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.db import DatabaseError, OperationalError, connection as django_connection
|
||||
from django.db import DatabaseError, OperationalError, transaction, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django_guid import set_guid
|
||||
|
||||
@@ -16,8 +18,8 @@ import psutil
|
||||
import redis
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob, Job
|
||||
from awx.main.tasks.system import handle_success_and_failure_notifications
|
||||
from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.models.events import emit_event_detail
|
||||
from awx.main.utils.profiling import AWXProfiler
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
@@ -26,6 +28,32 @@ from .base import BaseWorker
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
|
||||
def job_stats_wrapup(job_identifier, event=None):
"""Fill in the unified job host_status_counts, fire off notifications if needed"""
try:
# empty dict (versus default of None) can still indicate that events have been processed
# for job types like system jobs, and jobs with no hosts matched
host_status_counts = {}
if event:
host_status_counts = event.get_host_status_counts()

# Update host_status_counts while holding the row lock
with transaction.atomic():
uj = UnifiedJob.objects.select_for_update().get(pk=job_identifier)
uj.host_status_counts = host_status_counts
uj.save(update_fields=['host_status_counts'])

uj.log_lifecycle("stats_wrapup_finished")

# If the status was a finished state before this update was made, send notifications
# If not, we will send notifications when the status changes
if uj.status not in ACTIVE_STATES:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')

except Exception:
logger.exception('Worker failed to save stats or emit notifications: Job {}'.format(job_identifier))
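A minimal sketch (not AWX code) of the row-lock pattern used above: select_for_update() inside transaction.atomic() blocks concurrent writers on the same row until this transaction commits, so the counts cannot be clobbered by a racing update.

from django.db import transaction

def update_counts(model, pk, counts):
    # lock the row for the duration of the transaction; concurrent writers queue up
    with transaction.atomic():
        row = model.objects.select_for_update().get(pk=pk)
        row.host_status_counts = counts
        row.save(update_fields=['host_status_counts'])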
|
||||
|
||||
|
||||
class CallbackBrokerWorker(BaseWorker):
|
||||
"""
|
||||
A worker implementation that deserializes callback event data and persists
|
||||
@@ -44,7 +72,6 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
|
||||
def __init__(self):
|
||||
self.buff = {}
|
||||
self.pid = os.getpid()
|
||||
self.redis = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
self.queue_pop = 0
|
||||
@@ -53,6 +80,11 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
|
||||
self.redis.delete(key)
|
||||
|
||||
@cached_property
def pid(self):
"""This needs to be obtained after forking, or else it will give the parent process"""
return os.getpid()
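A small sketch, independent of AWX, of why the pid is read lazily (POSIX only): a value captured before the worker forks reports the parent's pid in every child, while a call made after the fork reports the child's own pid.

import os

parent_pid = os.getpid()   # captured before fork, like an eagerly assigned self.pid
child = os.fork()
if child == 0:
    # in the worker process the eagerly captured value is stale, the lazy call is correct
    print(parent_pid == os.getpid())   # False
    os._exit(0)
os.waitpid(child, 0)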
|
||||
|
||||
def read(self, queue):
|
||||
try:
|
||||
res = self.redis.blpop(self.queue_name, timeout=1)
|
||||
@@ -120,12 +152,17 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
metrics_singular_events_saved = 0
|
||||
metrics_events_batch_save_errors = 0
|
||||
metrics_events_broadcast = 0
|
||||
metrics_events_missing_created = 0
|
||||
metrics_total_job_event_processing_seconds = datetime.timedelta(seconds=0)
|
||||
for cls, events in self.buff.items():
|
||||
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
|
||||
for e in events:
|
||||
e.modified = now # this can be set before created because now is set above on line 149
|
||||
if not e.created:
|
||||
e.created = now
|
||||
e.modified = now
|
||||
metrics_events_missing_created += 1
|
||||
else: # only calculate the seconds if the created time already has been set
|
||||
metrics_total_job_event_processing_seconds += e.modified - e.created
|
||||
metrics_duration_to_save = time.perf_counter()
|
||||
try:
|
||||
cls.objects.bulk_create(events)
|
||||
@@ -146,6 +183,8 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
if not getattr(e, '_skip_websocket_message', False):
|
||||
metrics_events_broadcast += 1
|
||||
emit_event_detail(e)
|
||||
if getattr(e, '_notification_trigger_event', False):
|
||||
job_stats_wrapup(getattr(e, e.JOB_REFERENCE), event=e)
|
||||
self.buff = {}
|
||||
self.last_flush = time.time()
|
||||
# only update metrics if we saved events
|
||||
@@ -156,6 +195,11 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', metrics_bulk_events_saved)
|
||||
self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(metrics_bulk_events_saved + metrics_singular_events_saved))
|
||||
self.subsystem_metrics.inc('callback_receiver_events_broadcast', metrics_events_broadcast)
|
||||
self.subsystem_metrics.set(
'callback_receiver_event_processing_avg_seconds',
metrics_total_job_event_processing_seconds.total_seconds()
/ (metrics_bulk_events_saved + metrics_singular_events_saved - metrics_events_missing_created),
)
if self.subsystem_metrics.should_pipe_execute() is True:
self.subsystem_metrics.pipe_execute()
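As a worked example of that average (numbers invented): if a flush saves 200 bulk-inserted events and 5 singular ones, and 25 of them arrived without a created timestamp, a summed processing delta of 36 seconds is recorded as 36 / (200 + 5 - 25) = 0.2 seconds per event. Events missing a created timestamp are excluded from the denominator because no delta was accumulated for them.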
|
||||
|
||||
@@ -165,47 +209,32 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
if flush:
|
||||
self.last_event = ''
|
||||
if not flush:
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
|
||||
job_identifier = 'unknown job'
|
||||
for key, cls in event_map.items():
|
||||
if key in body:
|
||||
job_identifier = body[key]
|
||||
for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent):
|
||||
if cls.JOB_REFERENCE in body:
|
||||
job_identifier = body[cls.JOB_REFERENCE]
|
||||
break
|
||||
|
||||
self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})' # noqa
|
||||
|
||||
notification_trigger_event = bool(body.get('event') == cls.WRAPUP_EVENT)
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
try:
|
||||
if 'guid' in body:
|
||||
set_guid(body['guid'])
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
logger.info('Starting EOF event processing for Job {}'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter))
|
||||
# Additionally, when we've processed all events, we should
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
|
||||
if isinstance(uj, Job):
|
||||
# *actual playbooks* send their success/failure
|
||||
# notifications in response to the playbook_on_stats
|
||||
# event handling code in main.models.events
|
||||
pass
|
||||
elif hasattr(uj, 'send_notification_templates'):
|
||||
handle_success_and_failure_notifications.apply_async([uj.id])
|
||||
if notification_trigger_event:
|
||||
job_stats_wrapup(job_identifier)
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
logger.exception('Worker failed to perform EOF tasks: Job {}'.format(job_identifier))
|
||||
finally:
|
||||
self.subsystem_metrics.inc('callback_receiver_events_in_memory', -1)
|
||||
set_guid('')
|
||||
@@ -215,9 +244,12 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
|
||||
event = cls.create_from_data(**body)
|
||||
|
||||
if skip_websocket_message:
|
||||
if skip_websocket_message: # if this event sends websocket messages, fire them off on flush
|
||||
event._skip_websocket_message = True
|
||||
|
||||
if notification_trigger_event: # if this is an Ansible stats event, ensure notifications on flush
|
||||
event._notification_trigger_event = True
|
||||
|
||||
self.buff.setdefault(cls, []).append(event)
|
||||
|
||||
retries = 0
|
||||
|
||||
@@ -103,7 +103,7 @@ class DeleteMeta:

with connection.cursor() as cursor:
query = "SELECT inhrelid::regclass::text AS child FROM pg_catalog.pg_inherits"
query += f" WHERE inhparent = 'public.{tbl_name}'::regclass"
query += f" WHERE inhparent = '{tbl_name}'::regclass"
query += f" AND TO_TIMESTAMP(LTRIM(inhrelid::regclass::text, '{tbl_name}_'), 'YYYYMMDD_HH24') < '{self.cutoff}'"
query += " ORDER BY inhrelid::regclass::text"
|
||||
|
||||
|
||||
@@ -32,8 +32,10 @@ class Command(BaseCommand):
|
||||
name='Demo Project',
|
||||
scm_type='git',
|
||||
scm_url='https://github.com/ansible/ansible-tower-samples',
|
||||
scm_update_on_launch=True,
|
||||
scm_update_cache_timeout=0,
|
||||
status='successful',
|
||||
scm_revision='347e44fea036c94d5f60e544de006453ee5c71ad',
|
||||
playbook_files=['hello_world.yml'],
|
||||
)
|
||||
|
||||
p.organization = o
|
||||
|
||||
@@ -53,7 +53,7 @@ class Command(BaseCommand):
|
||||
# (like the node heartbeat)
|
||||
periodic.run_continuously()
|
||||
|
||||
reaper.reap()
|
||||
reaper.startup_reaping()
|
||||
consumer = None
|
||||
|
||||
try:
|
||||
|
||||
@@ -26,6 +26,17 @@ logger = logging.getLogger('awx.main.middleware')
perf_logger = logging.getLogger('awx.analytics.performance')


class SettingsCacheMiddleware(MiddlewareMixin):
"""
Clears the in-memory settings cache at the beginning of a request.
We do this so that a script can POST to /api/v2/settings/all/ and then
right away GET /api/v2/settings/all/ and see the updated value.
"""

def process_request(self, request):
settings._awx_conf_memoizedcache.clear()


class TimingMiddleware(threading.local, MiddlewareMixin):

dest = '/var/log/tower/profile'
|
||||
|
||||
awx/main/migrations/0160_alter_schedule_rrule.py (new file)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.12 on 2022-04-18 21:29
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0159_deprecate_inventory_source_UoPU_field'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='schedule',
|
||||
name='rrule',
|
||||
field=models.TextField(help_text='A value representing the schedules iCal recurrence rule.'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0161_unifiedjob_host_status_counts.py (new file)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.12 on 2022-04-27 02:16
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0160_alter_schedule_rrule'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='host_status_counts',
|
||||
field=models.JSONField(blank=True, default=None, editable=False, help_text='Playbook stats from the Ansible playbook_on_stats event.', null=True),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0162_alter_unifiedjob_dependent_jobs.py (new file)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 3.2.13 on 2022-05-02 21:27
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0161_unifiedjob_host_status_counts'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='dependent_jobs',
|
||||
field=models.ManyToManyField(editable=False, related_name='unifiedjob_blocked_jobs', to='main.UnifiedJob'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0163_convert_job_tags_to_textfield.py (new file)
@@ -0,0 +1,23 @@
|
||||
# Generated by Django 3.2.13 on 2022-06-02 18:15
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0162_alter_unifiedjob_dependent_jobs'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='job',
|
||||
name='job_tags',
|
||||
field=models.TextField(blank=True, default=''),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='job_tags',
|
||||
field=models.TextField(blank=True, default=''),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,40 @@
|
||||
# Generated by Django 3.2.13 on 2022-06-21 21:29
|
||||
|
||||
from django.db import migrations
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger("awx")
|
||||
|
||||
|
||||
def forwards(apps, schema_editor):
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
sources = InventorySource.objects.filter(update_on_project_update=True)
|
||||
for src in sources:
|
||||
if src.update_on_launch == False:
|
||||
src.update_on_launch = True
|
||||
src.save(update_fields=['update_on_launch'])
|
||||
logger.info(f"Setting update_on_launch to True for {src}")
|
||||
proj = src.source_project
|
||||
if proj and proj.scm_update_on_launch is False:
|
||||
proj.scm_update_on_launch = True
|
||||
proj.save(update_fields=['scm_update_on_launch'])
|
||||
logger.warning(f"Setting scm_update_on_launch to True for {proj}")
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0163_convert_job_tags_to_textfield'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(forwards, migrations.RunPython.noop),
|
||||
migrations.RemoveField(
|
||||
model_name='inventorysource',
|
||||
name='scm_last_revision',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='inventorysource',
|
||||
name='update_on_project_update',
|
||||
),
|
||||
]
|
||||
@@ -35,6 +35,7 @@ def gce(cred, env, private_data_dir):
|
||||
container_path = to_container_path(path, private_data_dir)
|
||||
env['GCE_CREDENTIALS_FILE_PATH'] = container_path
|
||||
env['GCP_SERVICE_ACCOUNT_FILE'] = container_path
|
||||
env['GOOGLE_APPLICATION_CREDENTIALS'] = container_path
|
||||
|
||||
# Handle env variables for new module types.
|
||||
# This includes gcp_compute inventory plugin and
|
||||
|
||||
@@ -6,7 +6,7 @@ from collections import defaultdict
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import models, DatabaseError, connection
|
||||
from django.db import models, DatabaseError
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc, now
|
||||
@@ -126,6 +126,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
'host_name',
|
||||
'verbosity',
|
||||
]
|
||||
WRAPUP_EVENT = 'playbook_on_stats'
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
@@ -384,14 +385,6 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
job.get_event_queryset().filter(uuid__in=changed).update(changed=True)
|
||||
job.get_event_queryset().filter(uuid__in=failed).update(failed=True)
|
||||
|
||||
# send success/failure notifications when we've finished handling the playbook_on_stats event
|
||||
from awx.main.tasks.system import handle_success_and_failure_notifications # circular import
|
||||
|
||||
def _send_notifications():
|
||||
handle_success_and_failure_notifications.apply_async([job.id])
|
||||
|
||||
connection.on_commit(_send_notifications)
|
||||
|
||||
for field in ('playbook', 'play', 'task', 'role'):
|
||||
value = force_str(event_data.get(field, '')).strip()
|
||||
if value != getattr(self, field):
|
||||
@@ -470,6 +463,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
"""
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'job_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -600,6 +594,7 @@ UnpartitionedJobEvent._meta.db_table = '_unpartitioned_' + JobEvent._meta.db_tab
|
||||
class ProjectUpdateEvent(BasePlaybookEvent):
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'project_update_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -641,6 +636,7 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
"""
|
||||
|
||||
VALID_KEYS = ['event_data', 'created', 'counter', 'uuid', 'stdout', 'start_line', 'end_line', 'verbosity']
|
||||
WRAPUP_EVENT = 'EOF'
|
||||
|
||||
class Meta:
|
||||
abstract = True
|
||||
@@ -736,6 +732,8 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
class AdHocCommandEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id', 'job_created']
|
||||
WRAPUP_EVENT = 'playbook_on_stats' # exception to BaseCommandEvent
|
||||
JOB_REFERENCE = 'ad_hoc_command_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -836,6 +834,7 @@ UnpartitionedAdHocCommandEvent._meta.db_table = '_unpartitioned_' + AdHocCommand
|
||||
class InventoryUpdateEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'inventory_update_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -881,6 +880,7 @@ UnpartitionedInventoryUpdateEvent._meta.db_table = '_unpartitioned_' + Inventory
|
||||
class SystemJobEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id', 'job_created']
|
||||
JOB_REFERENCE = 'system_job_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
|
||||
@@ -985,22 +985,11 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
default=None,
|
||||
null=True,
|
||||
)
|
||||
scm_last_revision = models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
update_on_project_update = models.BooleanField(
|
||||
default=False,
|
||||
help_text=_(
|
||||
'This field is deprecated and will be removed in a future release. '
|
||||
'In future release, functionality will be migrated to source project update_on_launch.'
|
||||
),
|
||||
)
|
||||
|
||||
update_on_launch = models.BooleanField(
|
||||
default=False,
|
||||
)
|
||||
|
||||
update_cache_timeout = models.PositiveIntegerField(
|
||||
default=0,
|
||||
)
|
||||
@@ -1038,14 +1027,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
self.name = 'inventory source (%s)' % replace_text
|
||||
if 'name' not in update_fields:
|
||||
update_fields.append('name')
|
||||
# Reset revision if SCM source has changed parameters
|
||||
if self.source == 'scm' and not is_new_instance:
|
||||
before_is = self.__class__.objects.get(pk=self.pk)
|
||||
if before_is.source_path != self.source_path or before_is.source_project_id != self.source_project_id:
|
||||
# Reset the scm_revision if file changed to force update
|
||||
self.scm_last_revision = ''
|
||||
if 'scm_last_revision' not in update_fields:
|
||||
update_fields.append('scm_last_revision')
|
||||
|
||||
# Do the actual save.
|
||||
super(InventorySource, self).save(*args, **kwargs)
|
||||
@@ -1054,10 +1035,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
if replace_text in self.name:
|
||||
self.name = self.name.replace(replace_text, str(self.pk))
|
||||
super(InventorySource, self).save(update_fields=['name'])
|
||||
if self.source == 'scm' and is_new_instance and self.update_on_project_update:
|
||||
# Schedule a new Project update if one is not already queued
|
||||
if self.source_project and not self.source_project.project_updates.filter(status__in=['new', 'pending', 'waiting']).exists():
|
||||
self.update()
|
||||
if not getattr(_inventory_updates, 'is_updating', False):
|
||||
if self.inventory is not None:
|
||||
self.inventory.update_computed_fields()
|
||||
@@ -1147,25 +1124,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
)
|
||||
return dict(error=list(error_notification_templates), started=list(started_notification_templates), success=list(success_notification_templates))
|
||||
|
||||
def clean_update_on_project_update(self):
|
||||
if (
|
||||
self.update_on_project_update is True
|
||||
and self.source == 'scm'
|
||||
and InventorySource.objects.filter(Q(inventory=self.inventory, update_on_project_update=True, source='scm') & ~Q(id=self.id)).exists()
|
||||
):
|
||||
raise ValidationError(_("More than one SCM-based inventory source with update on project update per-inventory not allowed."))
|
||||
return self.update_on_project_update
|
||||
|
||||
def clean_update_on_launch(self):
|
||||
if self.update_on_project_update is True and self.source == 'scm' and self.update_on_launch is True:
|
||||
raise ValidationError(
|
||||
_(
|
||||
"Cannot update SCM-based inventory source on launch if set to update on project update. "
|
||||
"Instead, configure the corresponding source project to update on launch."
|
||||
)
|
||||
)
|
||||
return self.update_on_launch
|
||||
|
||||
def clean_source_path(self):
|
||||
if self.source != 'scm' and self.source_path:
|
||||
raise ValidationError(_("Cannot set source_path if not SCM type."))
|
||||
@@ -1301,13 +1259,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
return self.global_instance_groups
|
||||
return selected_groups
|
||||
|
||||
def cancel(self, job_explanation=None, is_chain=False):
|
||||
res = super(InventoryUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
|
||||
if res:
|
||||
if self.launch_type != 'scm' and self.source_project_update:
|
||||
self.source_project_update.cancel(job_explanation=job_explanation)
|
||||
return res
|
||||
|
||||
|
||||
class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):
|
||||
class Meta:
|
||||
|
||||
@@ -130,8 +130,7 @@ class JobOptions(BaseModel):
|
||||
)
|
||||
)
|
||||
)
|
||||
job_tags = models.CharField(
|
||||
max_length=1024,
|
||||
job_tags = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
)
|
||||
@@ -744,6 +743,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
return "$hidden due to Ansible no_log flag$"
|
||||
return artifacts
|
||||
|
||||
def get_effective_artifacts(self, **kwargs):
|
||||
"""Return unified job artifacts (from set_stats) to pass downstream in workflows"""
|
||||
if isinstance(self.artifacts, dict):
|
||||
return self.artifacts
|
||||
return {}
|
||||
|
||||
@property
|
||||
def is_container_group_task(self):
|
||||
return bool(self.instance_group and self.instance_group.is_container_group)
|
||||
|
||||
@@ -407,41 +407,54 @@ class TaskManagerUnifiedJobMixin(models.Model):
|
||||
def get_jobs_fail_chain(self):
|
||||
return []
|
||||
|
||||
def dependent_jobs_finished(self):
|
||||
return True
|
||||
|
||||
|
||||
class TaskManagerJobMixin(TaskManagerUnifiedJobMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
return [self.project_update] if self.project_update else []
|
||||
|
||||
def dependent_jobs_finished(self):
|
||||
for j in self.dependent_jobs.all():
|
||||
if j.status in ['pending', 'waiting', 'running']:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class TaskManagerUpdateOnLaunchMixin(TaskManagerUnifiedJobMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
return list(self.dependent_jobs.all())
|
||||
|
||||
|
||||
class TaskManagerProjectUpdateMixin(TaskManagerUpdateOnLaunchMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
# project update can be a dependency of an inventory update, in which
|
||||
# case we need to fail the job that may have spawned the inventory
|
||||
# update.
|
||||
# The inventory update will fail, but since it is not running it will
|
||||
# not cascade fail to the job from the errback logic in apply_async. As
|
||||
# such we should capture it here.
|
||||
blocked_jobs = list(self.unifiedjob_blocked_jobs.all().prefetch_related("unifiedjob_blocked_jobs"))
|
||||
other_tasks = []
|
||||
for b in blocked_jobs:
|
||||
other_tasks += list(b.unifiedjob_blocked_jobs.all())
|
||||
return blocked_jobs + other_tasks
|
||||
|
||||
|
||||
class TaskManagerInventoryUpdateMixin(TaskManagerUpdateOnLaunchMixin):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
def get_jobs_fail_chain(self):
|
||||
blocked_jobs = list(self.unifiedjob_blocked_jobs.all())
|
||||
other_updates = []
|
||||
if blocked_jobs:
|
||||
# blocked_jobs[0] is just a reference to a job that depends on this
|
||||
# inventory update.
|
||||
# We can look at the dependencies of this blocked job to find other
|
||||
# inventory sources that are safe to fail.
|
||||
# Since the dependencies could also include project updates,
|
||||
# we need to check for type.
|
||||
for dep in blocked_jobs[0].dependent_jobs.all():
|
||||
if type(dep) is type(self) and dep.id != self.id:
|
||||
other_updates.append(dep)
|
||||
return blocked_jobs + other_updates
|
||||
|
||||
|
||||
class ExecutionEnvironmentMixin(models.Model):
|
||||
class Meta:
|
||||
|
||||
@@ -408,6 +408,7 @@ class JobNotificationMixin(object):
|
||||
'inventory': 'Stub Inventory',
|
||||
'id': 42,
|
||||
'hosts': {},
|
||||
'extra_vars': {},
|
||||
'friendly_name': 'Job',
|
||||
'finished': False,
|
||||
'credential': 'Stub credential',
|
||||
@@ -421,21 +422,8 @@ class JobNotificationMixin(object):
|
||||
The context will contain allowed content retrieved from a serialized job object
|
||||
(see JobNotificationMixin.JOB_FIELDS_ALLOWED_LIST the job's friendly name,
|
||||
and a url to the job run."""
|
||||
job_context = {'host_status_counts': {}}
|
||||
summary = None
|
||||
try:
|
||||
has_event_property = any([f for f in self.event_class._meta.fields if f.name == 'event'])
|
||||
except NotImplementedError:
|
||||
has_event_property = False
|
||||
if has_event_property:
|
||||
qs = self.get_event_queryset()
|
||||
if qs:
|
||||
event = qs.only('event_data').filter(event='playbook_on_stats').first()
|
||||
if event:
|
||||
summary = event.get_host_status_counts()
|
||||
job_context['host_status_counts'] = summary
|
||||
context = {
|
||||
'job': job_context,
|
||||
'job': {'host_status_counts': self.host_status_counts},
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_metadata': json.dumps(self.notification_data(), ensure_ascii=False, indent=4),
|
||||
|
||||
@@ -354,7 +354,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
skip_update = bool(kwargs.pop('skip_update', False))
|
||||
self._skip_update = bool(kwargs.pop('skip_update', False))
|
||||
# Create auto-generated local path if project uses SCM.
|
||||
if self.pk and self.scm_type and not self.local_path.startswith('_'):
|
||||
slug_name = slugify(str(self.name)).replace(u'-', u'_')
|
||||
@@ -372,14 +372,16 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with disable_activity_stream():
|
||||
self.save(update_fields=update_fields)
|
||||
self.save(update_fields=update_fields, skip_update=self._skip_update)
|
||||
# If we just created a new project with SCM, start the initial update.
|
||||
# also update if certain fields have changed
|
||||
relevant_change = any(pre_save_vals.get(fd_name, None) != self._prior_values_store.get(fd_name, None) for fd_name in self.FIELDS_TRIGGER_UPDATE)
|
||||
if (relevant_change or new_instance) and (not skip_update) and self.scm_type:
|
||||
if (relevant_change or new_instance) and (not self._skip_update) and self.scm_type:
|
||||
self.update()
|
||||
|
||||
def _get_current_status(self):
|
||||
if getattr(self, '_skip_update', False):
|
||||
return self.status
|
||||
if self.scm_type:
|
||||
if self.current_job and self.current_job.status:
|
||||
return self.current_job.status
|
||||
|
||||
@@ -81,32 +81,41 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
dtend = models.DateTimeField(
|
||||
null=True, default=None, editable=False, help_text=_("The last occurrence of the schedule occurs before this time, afterwards the schedule expires.")
|
||||
)
|
||||
rrule = models.CharField(max_length=255, help_text=_("A value representing the schedules iCal recurrence rule."))
|
||||
rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule."))
|
||||
next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run."))
|
||||
|
||||
@classmethod
|
||||
def get_zoneinfo(self):
|
||||
def get_zoneinfo(cls):
|
||||
return sorted(get_zonefile_instance().zones)
|
||||
|
||||
@classmethod
|
||||
def get_zoneinfo_links(cls):
|
||||
return_val = {}
|
||||
zone_instance = get_zonefile_instance()
|
||||
for zone_name in zone_instance.zones:
|
||||
if str(zone_name) != str(zone_instance.zones[zone_name]._filename):
|
||||
return_val[zone_name] = zone_instance.zones[zone_name]._filename
|
||||
return return_val
|
||||
|
||||
@property
|
||||
def timezone(self):
|
||||
utc = tzutc()
|
||||
# All rules in a ruleset will have the same dtstart so we can just take the first rule
|
||||
tzinfo = Schedule.rrulestr(self.rrule)._rrule[0]._dtstart.tzinfo
|
||||
if tzinfo is utc:
|
||||
return 'UTC'
|
||||
all_zones = Schedule.get_zoneinfo()
|
||||
all_zones.sort(key=lambda x: -len(x))
|
||||
for r in Schedule.rrulestr(self.rrule)._rrule:
|
||||
if r._dtstart:
|
||||
tzinfo = r._dtstart.tzinfo
|
||||
if tzinfo is utc:
|
||||
return 'UTC'
|
||||
fname = getattr(tzinfo, '_filename', None)
|
||||
if fname:
|
||||
for zone in all_zones:
|
||||
if fname.endswith(zone):
|
||||
return zone
|
||||
fname = getattr(tzinfo, '_filename', None)
|
||||
if fname:
|
||||
for zone in all_zones:
|
||||
if fname.endswith(zone):
|
||||
return zone
|
||||
logger.warning('Could not detect valid zoneinfo for {}'.format(self.rrule))
|
||||
return ''
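A standalone sketch of the same suffix-matching idea, assuming dateutil with its bundled zone file and a typical Linux tz install; like the property above, it leans on dateutil's private _dtstart and _filename attributes, so treat it as illustrative only:

```python
import dateutil.rrule
from dateutil.zoneinfo import get_zonefile_instance

rule = dateutil.rrule.rrulestr(
    'DTSTART;TZID=America/New_York:20300308T050000\nRRULE:FREQ=DAILY;INTERVAL=1'
)
tzinfo = rule._dtstart.tzinfo                # private, set by dateutil's parser
fname = getattr(tzinfo, '_filename', None)   # often a full zoneinfo path
zones = sorted(get_zonefile_instance().zones, key=len, reverse=True)
print(next((z for z in zones if fname and fname.endswith(z)), ''))  # 'America/New_York'
```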
|
||||
|
||||
@property
|
||||
# TODO: How would we handle multiple until parameters? The UI is currently using this on the edit screen of a schedule
|
||||
def until(self):
|
||||
# The UNTIL= datestamp (if any) coerced from UTC to the local naive time
|
||||
# of the DTSTART
|
||||
@@ -134,34 +143,48 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
# timezone (America/New_York), and so we'll coerce to UTC _for you_
|
||||
# automatically.
|
||||
#
|
||||
if 'until=' in rrule.lower():
|
||||
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
|
||||
# to the proper UTC date
|
||||
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
|
||||
if not len(match_until.group('utcflag')):
|
||||
# rrule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
|
||||
# Find the UNTIL=N part of the string
|
||||
# naive_until = UNTIL=20200601T170000
|
||||
naive_until = match_until.group('until')
|
||||
# Find the DTSTART rule or raise an error; it's usually the first rule, but that is not strictly enforced
|
||||
start_date_rule = re.sub('^.*(DTSTART[^\s]+)\s.*$', r'\1', rrule)
|
||||
if not start_date_rule:
|
||||
raise ValueError('A DTSTART field needs to be in the rrule')
|
||||
|
||||
# What is the DTSTART timezone for:
|
||||
# DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000Z
|
||||
# local_tz = tzfile('/usr/share/zoneinfo/America/New_York')
|
||||
local_tz = dateutil.rrule.rrulestr(rrule.replace(naive_until, naive_until + 'Z'), tzinfos=UTC_TIMEZONES)._dtstart.tzinfo
|
||||
rules = re.split(r'\s+', rrule)
|
||||
for index in range(0, len(rules)):
|
||||
rule = rules[index]
|
||||
if 'until=' in rule.lower():
|
||||
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
|
||||
# to the proper UTC date
|
||||
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rule)
|
||||
if not len(match_until.group('utcflag')):
|
||||
# rule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
|
||||
# Make a datetime object with tzinfo=<the DTSTART timezone>
|
||||
# localized_until = datetime.datetime(2020, 6, 1, 17, 0, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
|
||||
localized_until = make_aware(datetime.datetime.strptime(re.sub('^UNTIL=', '', naive_until), "%Y%m%dT%H%M%S"), local_tz)
|
||||
# Find the UNTIL=N part of the string
|
||||
# naive_until = UNTIL=20200601T170000
|
||||
naive_until = match_until.group('until')
|
||||
|
||||
# Coerce the datetime to UTC and format it as a string w/ Zulu format
|
||||
# utc_until = UNTIL=20200601T220000Z
|
||||
utc_until = 'UNTIL=' + localized_until.astimezone(pytz.utc).strftime('%Y%m%dT%H%M%SZ')
|
||||
# What is the DTSTART timezone for:
|
||||
# DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000Z
|
||||
# local_tz = tzfile('/usr/share/zoneinfo/America/New_York')
|
||||
# We are going to construct a 'dummy' rule for parsing which will include the DTSTART and the rest of the rule
|
||||
temp_rule = "{} {}".format(start_date_rule, rule.replace(naive_until, naive_until + 'Z'))
|
||||
# If the rule is an EX rule we have to add an RRULE to it because an EX rule alone will not manifest into a ruleset
|
||||
if rule.lower().startswith('ex'):
|
||||
temp_rule = "{} {}".format(temp_rule, 'RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T170000Z')
|
||||
local_tz = dateutil.rrule.rrulestr(temp_rule, tzinfos=UTC_TIMEZONES, **{'forceset': True})._rrule[0]._dtstart.tzinfo
|
||||
|
||||
# rrule was: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
# rrule is now: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T220000Z
|
||||
rrule = rrule.replace(naive_until, utc_until)
|
||||
return rrule
|
||||
# Make a datetime object with tzinfo=<the DTSTART timezone>
|
||||
# localized_until = datetime.datetime(2020, 6, 1, 17, 0, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York'))
|
||||
localized_until = make_aware(datetime.datetime.strptime(re.sub('^UNTIL=', '', naive_until), "%Y%m%dT%H%M%S"), local_tz)
|
||||
|
||||
# Coerce the datetime to UTC and format it as a string w/ Zulu format
|
||||
# utc_until = UNTIL=20200601T220000Z
|
||||
utc_until = 'UNTIL=' + localized_until.astimezone(pytz.utc).strftime('%Y%m%dT%H%M%SZ')
|
||||
|
||||
# rule was: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
# rule is now: DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T220000Z
|
||||
rules[index] = rule.replace(naive_until, utc_until)
|
||||
return " ".join(rules)
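A small standalone sketch of the coercion this loop performs, with pytz's localize standing in for the make_aware call (equivalent under the Django/pytz combination this code targets); the date and zone here are illustrative only:

```python
import datetime
import pytz

local_tz = pytz.timezone('America/New_York')    # stands in for the DTSTART TZID
naive_until = 'UNTIL=20200115T170000'
localized = local_tz.localize(datetime.datetime.strptime(naive_until[6:], '%Y%m%dT%H%M%S'))
utc_until = 'UNTIL=' + localized.astimezone(pytz.utc).strftime('%Y%m%dT%H%M%SZ')
print(utc_until)   # UNTIL=20200115T220000Z (New York is UTC-5 in January)
```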
|
||||
|
||||
@classmethod
|
||||
def rrulestr(cls, rrule, fast_forward=True, **kwargs):
|
||||
@@ -176,20 +199,28 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
if r._dtstart and r._dtstart.tzinfo is None:
|
||||
raise ValueError('A valid TZID must be provided (e.g., America/New_York)')
|
||||
|
||||
if fast_forward and ('MINUTELY' in rrule or 'HOURLY' in rrule) and 'COUNT=' not in rrule:
|
||||
# Fast forward is a way for us to limit the number of events in the rruleset
|
||||
# If we are fast-forwarding and we don't have a count-limited rule that is minutely or hourly
|
||||
# We will modify the start date of the rule to last week to prevent a large number of entries
|
||||
if fast_forward:
|
||||
try:
|
||||
# All rules in a ruleset will have the same dtstart value
|
||||
# so let's compare the first event to now to see if it's > 7 days old
|
||||
first_event = x[0]
|
||||
# If the first event was over a week ago...
|
||||
if (now() - first_event).days > 7:
|
||||
# hourly/minutely rrules with far-past DTSTART values
|
||||
# are *really* slow to precompute
|
||||
# start *from* one week ago to speed things up drastically
|
||||
dtstart = x._rrule[0]._dtstart.strftime(':%Y%m%dT')
|
||||
new_start = (now() - datetime.timedelta(days=7)).strftime(':%Y%m%dT')
|
||||
new_rrule = rrule.replace(dtstart, new_start)
|
||||
return Schedule.rrulestr(new_rrule, fast_forward=False)
|
||||
for rule in x._rrule:
|
||||
# If any rule is a minutely or hourly rule without a count...
|
||||
if rule._freq in [dateutil.rrule.MINUTELY, dateutil.rrule.HOURLY] and not rule._count:
|
||||
# hourly/minutely rrules with far-past DTSTART values
|
||||
# are *really* slow to precompute
|
||||
# start *from* one week ago to speed things up drastically
|
||||
new_start = (now() - datetime.timedelta(days=7)).strftime('%Y%m%d')
|
||||
# Now we want to replace the DTSTART:<value>T with the new date (which includes the T)
|
||||
new_rrule = re.sub('(DTSTART[^:]*):[^T]+T', r'\1:{0}T'.format(new_start), rrule)
|
||||
return Schedule.rrulestr(new_rrule, fast_forward=False)
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
return x
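An isolated illustration of the DTSTART rewrite used for fast-forwarding above; only the date portion before the 'T' changes, the rest of the rule is preserved (the rule string here is made up):

```python
import re
import datetime

rrule_str = 'DTSTART;TZID=America/New_York:20150101T000000 RRULE:FREQ=MINUTELY;INTERVAL=5'
new_start = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=7)).strftime('%Y%m%d')
print(re.sub(r'(DTSTART[^:]*):[^T]+T', r'\1:{0}T'.format(new_start), rrule_str))
# DTSTART;TZID=America/New_York:<a week ago>T000000 RRULE:FREQ=MINUTELY;INTERVAL=5
```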
|
||||
|
||||
def __str__(self):
|
||||
@@ -206,6 +237,22 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
job_kwargs['_eager_fields'] = {'launch_type': 'scheduled', 'schedule': self}
|
||||
return job_kwargs
|
||||
|
||||
def get_end_date(ruleset):
|
||||
# if we have a complex ruleset with a lot of options, getting the last index of the ruleset can take some time
|
||||
# And a ruleset without a count/until can come back as datetime.datetime(9999, 12, 31, 15, 0, tzinfo=tzfile('US/Eastern'))
|
||||
# So we are going to do a quick scan to make sure we would have an end date
|
||||
for a_rule in ruleset._rrule:
|
||||
# if this rule does not have until or count in it then we have no end date
|
||||
if not a_rule._until and not a_rule._count:
|
||||
return None
|
||||
|
||||
# If we made it this far we should have an end date and can ask the ruleset what the last date is
|
||||
# However, if the until/count is before dtstart we will get an IndexError when trying to get [-1]
|
||||
try:
|
||||
return ruleset[-1].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
return None
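A quick sketch of the bounded case, assuming dateutil and pytz: a COUNT- or UNTIL-limited ruleset has a concrete last occurrence that [-1] can return, which is exactly what the unbounded scan above guards against:

```python
import dateutil.rrule
import pytz

rs = dateutil.rrule.rrulestr('DTSTART:20300101T090000Z\nRRULE:FREQ=DAILY;COUNT=3', forceset=True)
print(rs[-1].astimezone(pytz.utc))   # 2030-01-03 09:00:00+00:00
```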
|
||||
|
||||
def update_computed_fields_no_save(self):
|
||||
affects_fields = ['next_run', 'dtstart', 'dtend']
|
||||
starting_values = {}
|
||||
@@ -229,12 +276,7 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
self.dtstart = future_rs[0].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
self.dtstart = None
|
||||
self.dtend = None
|
||||
if 'until' in self.rrule.lower() or 'count' in self.rrule.lower():
|
||||
try:
|
||||
self.dtend = future_rs[-1].astimezone(pytz.utc)
|
||||
except IndexError:
|
||||
self.dtend = None
|
||||
self.dtend = Schedule.get_end_date(future_rs)
|
||||
|
||||
changed = any(getattr(self, field_name) != starting_values[field_name] for field_name in affects_fields)
|
||||
return changed
|
||||
|
||||
@@ -533,7 +533,7 @@ class UnifiedJob(
|
||||
('workflow', _('Workflow')), # Job was started from a workflow job.
|
||||
('webhook', _('Webhook')), # Job was started from a webhook event.
|
||||
('sync', _('Sync')), # Job was started from a project sync.
|
||||
('scm', _('SCM Update')), # Job was created as an Inventory SCM sync.
|
||||
('scm', _('SCM Update')), # (deprecated) Job was created as an Inventory SCM sync.
|
||||
]
|
||||
|
||||
PASSWORD_FIELDS = ('start_args',)
|
||||
@@ -575,7 +575,8 @@ class UnifiedJob(
|
||||
dependent_jobs = models.ManyToManyField(
|
||||
'self',
|
||||
editable=False,
|
||||
related_name='%(class)s_blocked_jobs+',
|
||||
related_name='%(class)s_blocked_jobs',
|
||||
symmetrical=False,
|
||||
)
|
||||
execution_node = models.TextField(
|
||||
blank=True,
|
||||
@@ -717,6 +718,13 @@ class UnifiedJob(
|
||||
editable=False,
|
||||
help_text=_("The version of Ansible Core installed in the execution environment."),
|
||||
)
|
||||
host_status_counts = models.JSONField(
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
help_text=_("Playbook stats from the Ansible playbook_on_stats event."),
|
||||
)
|
||||
work_unit_id = models.CharField(
|
||||
max_length=255, blank=True, default=None, editable=False, null=True, help_text=_("The Receptor work unit ID associated with this job.")
|
||||
)
|
||||
@@ -1196,6 +1204,10 @@ class UnifiedJob(
|
||||
pass
|
||||
return None
|
||||
|
||||
def get_effective_artifacts(self, **kwargs):
|
||||
"""Return unified job artifacts (from set_stats) to pass downstream in workflows"""
|
||||
return {}
|
||||
|
||||
def get_passwords_needed_to_start(self):
|
||||
return []
|
||||
|
||||
|
||||
@@ -318,8 +318,8 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
for parent_node in self.get_parent_nodes():
|
||||
is_root_node = False
|
||||
aa_dict.update(parent_node.ancestor_artifacts)
|
||||
if parent_node.job and hasattr(parent_node.job, 'artifacts'):
|
||||
aa_dict.update(parent_node.job.artifacts)
|
||||
if parent_node.job:
|
||||
aa_dict.update(parent_node.job.get_effective_artifacts(parents_set=set([self.workflow_job_id])))
|
||||
if aa_dict and not is_root_node:
|
||||
self.ancestor_artifacts = aa_dict
|
||||
self.save(update_fields=['ancestor_artifacts'])
|
||||
@@ -659,6 +659,13 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
node_job_description = 'job #{0}, "{1}", which finished with status {2}.'.format(node.job.id, node.job.name, node.job.status)
|
||||
str_arr.append("- node #{0} spawns {1}".format(node.id, node_job_description))
|
||||
result['body'] = '\n'.join(str_arr)
|
||||
result.update(
|
||||
dict(
|
||||
inventory=self.inventory.name if self.inventory else None,
|
||||
limit=self.limit,
|
||||
extra_vars=self.display_extra_vars(),
|
||||
)
|
||||
)
|
||||
return result
|
||||
|
||||
@property
|
||||
@@ -682,6 +689,27 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
wj = wj.get_workflow_job()
|
||||
return ancestors
|
||||
|
||||
def get_effective_artifacts(self, **kwargs):
|
||||
"""
|
||||
For downstream jobs of a workflow nested inside of a workflow,
|
||||
we send aggregated artifacts from the nodes inside of the nested workflow
|
||||
"""
|
||||
artifacts = {}
|
||||
job_queryset = (
|
||||
UnifiedJob.objects.filter(unified_job_node__workflow_job=self)
|
||||
.defer('job_args', 'job_cwd', 'start_args', 'result_traceback')
|
||||
.order_by('finished', 'id')
|
||||
.filter(status__in=['successful', 'failed'])
|
||||
.iterator()
|
||||
)
|
||||
parents_set = kwargs.get('parents_set', set())
|
||||
new_parents_set = parents_set | {self.id}
|
||||
for job in job_queryset:
|
||||
if job.id in parents_set:
|
||||
continue
|
||||
artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set))
|
||||
return artifacts
|
||||
|
||||
def get_notification_templates(self):
|
||||
return self.workflow_job_template.notification_templates
|
||||
|
||||
@@ -885,3 +913,12 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
|
||||
@property
|
||||
def workflow_job(self):
|
||||
return self.unified_job_node.workflow_job
|
||||
|
||||
def notification_data(self):
|
||||
result = super(WorkflowApproval, self).notification_data()
|
||||
result.update(
|
||||
dict(
|
||||
extra_vars=self.workflow_job.display_extra_vars(),
|
||||
)
|
||||
)
|
||||
return result
|
||||
|
||||
@@ -8,7 +8,6 @@ import redis
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
|
||||
__all__ = ['CallbackQueueDispatcher']
|
||||
|
||||
@@ -28,7 +27,6 @@ class CallbackQueueDispatcher(object):
|
||||
self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
|
||||
self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
|
||||
self.connection = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.subsystem_metrics = s_metrics.Metrics()
|
||||
|
||||
def dispatch(self, obj):
|
||||
self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))
|
||||
|
||||
@@ -26,7 +26,7 @@ class DependencyGraph(object):
|
||||
# The reason for tracking both inventory and inventory sources:
|
||||
# Consider InvA, which has two sources, InvSource1, InvSource2.
|
||||
# JobB might depend on InvA, which launches two updates, one for each source.
|
||||
# To determine if JobB can run, we can just check InvA, which is marked in
|
||||
# To determine if JobB can run, we can just check InvA, which is marked in
|
||||
# INVENTORY_UPDATES, instead of having to check for both entries in
|
||||
# INVENTORY_SOURCE_UPDATES.
|
||||
self.data[self.INVENTORY_UPDATES] = {}
|
||||
|
||||
@@ -6,6 +6,9 @@ from datetime import timedelta
|
||||
import logging
|
||||
import uuid
|
||||
import json
|
||||
import time
|
||||
import sys
|
||||
import signal
|
||||
|
||||
# Django
|
||||
from django.db import transaction, connection
|
||||
@@ -34,15 +37,28 @@ from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
|
||||
from awx.main.utils.common import create_partition
|
||||
from awx.main.signals import disable_activity_stream
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstances
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
from awx.main.utils import decrypt_field
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
def timeit(func):
|
||||
def inner(*args, **kwargs):
|
||||
t_now = time.perf_counter()
|
||||
result = func(*args, **kwargs)
|
||||
dur = time.perf_counter() - t_now
|
||||
args[0].subsystem_metrics.inc("task_manager_" + func.__name__ + "_seconds", dur)
|
||||
return result
|
||||
|
||||
return inner
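A hedged usage sketch for the decorator above: it assumes it wraps a bound method, so args[0] is the instance, and that the instance carries a subsystem_metrics object with a matching task_manager_<name>_seconds metric (a fake stand-in is used here so the sketch runs anywhere the decorator is in scope):

```python
class FakeMetrics:
    def inc(self, name, value):
        print('{} += {:.6f}'.format(name, value))

class DemoManager:
    def __init__(self):
        self.subsystem_metrics = FakeMetrics()

    @timeit
    def process_pending_tasks(self, tasks):
        return len(tasks)

DemoManager().process_pending_tasks([1, 2, 3])
# prints something like: task_manager_process_pending_tasks_seconds += 0.000003
```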
|
||||
|
||||
|
||||
class TaskManager:
|
||||
def __init__(self):
|
||||
"""
|
||||
@@ -61,6 +77,13 @@ class TaskManager:
|
||||
# will no longer be started and will be started on the next task manager cycle.
|
||||
self.start_task_limit = settings.START_TASK_LIMIT
|
||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
# initialize each metric to 0 and force metric_has_changed to true. This
|
||||
# ensures each task manager metric will be overridden when pipe_execute
|
||||
# is called later.
|
||||
for m in self.subsystem_metrics.METRICS:
|
||||
if m.startswith("task_manager"):
|
||||
self.subsystem_metrics.set(m, 0)
|
||||
|
||||
def after_lock_init(self, all_sorted_tasks):
|
||||
"""
|
||||
@@ -79,13 +102,27 @@ class TaskManager:
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
|
||||
if not task.dependent_jobs_finished():
|
||||
blocked_by = task.dependent_jobs.first()
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
for dep in task.dependent_jobs.all():
|
||||
if dep.status in ACTIVE_STATES:
|
||||
return dep
|
||||
# if we detect a failed or error dependency, go ahead and fail this
|
||||
# task. The errback on the dependency takes some time to trigger,
|
||||
# and we don't want the task to enter running state if its
|
||||
# dependency has failed or errored.
|
||||
elif dep.status in ("error", "failed"):
|
||||
task.status = 'failed'
|
||||
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
||||
get_type_for_model(type(dep)),
|
||||
dep.name,
|
||||
dep.id,
|
||||
)
|
||||
task.save(update_fields=['status', 'job_explanation'])
|
||||
task.websocket_emit_status('failed')
|
||||
return dep
|
||||
|
||||
return None
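The job_explanation written in the failure branch above embeds a JSON fragment after a fixed prefix; a consumer could recover it roughly like this (the values shown are hypothetical):

```python
import json

explanation = 'Previous Task Failed: {"job_type": "project_update", "job_name": "Demo Project", "job_id": "42"}'
payload = json.loads(explanation.split('Previous Task Failed: ', 1)[1])
print(payload['job_type'], payload['job_id'])   # project_update 42
```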
|
||||
|
||||
@timeit
|
||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
inventory_updates_qs = (
|
||||
@@ -111,6 +148,7 @@ class TaskManager:
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
|
||||
|
||||
@timeit
|
||||
def spawn_workflow_graph_jobs(self, workflow_jobs):
|
||||
for workflow_job in workflow_jobs:
|
||||
if workflow_job.cancel_flag:
|
||||
@@ -210,14 +248,16 @@ class TaskManager:
|
||||
workflow_job.save(update_fields=update_fields)
|
||||
status_changed = True
|
||||
if status_changed:
|
||||
if workflow_job.spawned_by_workflow:
|
||||
schedule_task_manager()
|
||||
workflow_job.websocket_emit_status(workflow_job.status)
|
||||
# Operations whose queries rely on modifications made during the atomic scheduling session
|
||||
workflow_job.send_notification_templates('succeeded' if workflow_job.status == 'successful' else 'failed')
|
||||
if workflow_job.spawned_by_workflow:
|
||||
schedule_task_manager()
|
||||
return result
|
||||
|
||||
@timeit
|
||||
def start_task(self, task, instance_group, dependent_tasks=None, instance=None):
|
||||
self.subsystem_metrics.inc("task_manager_tasks_started", 1)
|
||||
self.start_task_limit -= 1
|
||||
if self.start_task_limit == 0:
|
||||
# schedule another run immediately after this task manager
|
||||
@@ -277,12 +317,15 @@ class TaskManager:
|
||||
task.websocket_emit_status(task.status) # adds to on_commit
|
||||
connection.on_commit(post_commit)
|
||||
|
||||
@timeit
|
||||
def process_running_tasks(self, running_tasks):
|
||||
for task in running_tasks:
|
||||
self.dependency_graph.add_job(task)
|
||||
|
||||
def create_project_update(self, task):
|
||||
project_task = Project.objects.get(id=task.project_id).create_project_update(_eager_fields=dict(launch_type='dependency'))
|
||||
def create_project_update(self, task, project_id=None):
|
||||
if project_id is None:
|
||||
project_id = task.project_id
|
||||
project_task = Project.objects.get(id=project_id).create_project_update(_eager_fields=dict(launch_type='dependency'))
|
||||
|
||||
# Project update created 1 second behind the task
|
||||
project_task.created = task.created - timedelta(seconds=1)
|
||||
@@ -302,14 +345,10 @@ class TaskManager:
|
||||
# self.process_inventory_sources(inventory_sources)
|
||||
return inventory_task
|
||||
|
||||
def capture_chain_failure_dependencies(self, task, dependencies):
|
||||
def add_dependencies(self, task, dependencies):
|
||||
with disable_activity_stream():
|
||||
task.dependent_jobs.add(*dependencies)
|
||||
|
||||
for dep in dependencies:
|
||||
# Add task + all deps except self
|
||||
dep.dependent_jobs.add(*([task] + [d for d in dependencies if d != dep]))
|
||||
|
||||
def get_latest_inventory_update(self, inventory_source):
|
||||
latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
|
||||
if not latest_inventory_update.exists():
|
||||
@@ -335,8 +374,8 @@ class TaskManager:
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_latest_project_update(self, job):
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=job.project, job_type='check').order_by("-created")
|
||||
def get_latest_project_update(self, project_id):
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=project_id, job_type='check').order_by("-created")
|
||||
if not latest_project_update.exists():
|
||||
return None
|
||||
return latest_project_update.first()
|
||||
@@ -376,47 +415,73 @@ class TaskManager:
|
||||
return True
|
||||
return False
|
||||
|
||||
def gen_dep_for_job(self, task):
|
||||
created_dependencies = []
|
||||
dependencies = []
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task.project_id)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
latest_project_update = self.create_project_update(task)
|
||||
created_dependencies.append(latest_project_update)
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(task, field_name="start_args"))
|
||||
except ValueError:
|
||||
start_args = dict()
|
||||
# generator for inventory sources related to this task
|
||||
task_inv_sources = (invsrc for invsrc in self.all_inventory_sources if invsrc.inventory_id == task.inventory_id)
|
||||
for inventory_source in task_inv_sources:
|
||||
if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
|
||||
continue
|
||||
if not inventory_source.update_on_launch:
|
||||
continue
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if dependencies:
|
||||
self.add_dependencies(task, dependencies)
|
||||
|
||||
return created_dependencies
|
||||
|
||||
def gen_dep_for_inventory_update(self, inventory_task):
|
||||
created_dependencies = []
|
||||
if inventory_task.source == "scm":
|
||||
invsrc = inventory_task.inventory_source
|
||||
if not invsrc.source_project.scm_update_on_launch:
|
||||
return created_dependencies
|
||||
|
||||
latest_src_project_update = self.get_latest_project_update(invsrc.source_project_id)
|
||||
if self.should_update_related_project(inventory_task, latest_src_project_update):
|
||||
latest_src_project_update = self.create_project_update(inventory_task, project_id=invsrc.source_project_id)
|
||||
created_dependencies.append(latest_src_project_update)
|
||||
self.add_dependencies(inventory_task, [latest_src_project_update])
|
||||
latest_src_project_update.scm_inventory_updates.add(inventory_task)
|
||||
return created_dependencies
|
||||
|
||||
@timeit
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
for task in undeped_tasks:
|
||||
task.log_lifecycle("acknowledged")
|
||||
dependencies = []
|
||||
if not type(task) is Job:
|
||||
if type(task) is Job:
|
||||
created_dependencies += self.gen_dep_for_job(task)
|
||||
elif type(task) is InventoryUpdate:
|
||||
created_dependencies += self.gen_dep_for_inventory_update(task)
|
||||
else:
|
||||
continue
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
project_task = self.create_project_update(task)
|
||||
created_dependencies.append(project_task)
|
||||
dependencies.append(project_task)
|
||||
else:
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(task, field_name="start_args"))
|
||||
except ValueError:
|
||||
start_args = dict()
|
||||
for inventory_source in [invsrc for invsrc in self.all_inventory_sources if invsrc.inventory == task.inventory]:
|
||||
if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
|
||||
continue
|
||||
if not inventory_source.update_on_launch:
|
||||
continue
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if len(dependencies) > 0:
|
||||
self.capture_chain_failure_dependencies(task, dependencies)
|
||||
|
||||
UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)
|
||||
|
||||
return created_dependencies
|
||||
|
||||
@timeit
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = {wf.unified_job_template_id for wf in self.get_running_workflow_jobs()}
|
||||
tasks_to_update_job_explanation = []
|
||||
@@ -425,6 +490,7 @@ class TaskManager:
|
||||
break
|
||||
blocked_by = self.job_blocked_by(task)
|
||||
if blocked_by:
|
||||
self.subsystem_metrics.inc("task_manager_tasks_blocked", 1)
|
||||
task.log_lifecycle("blocked", blocked_by=blocked_by)
|
||||
job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
|
||||
if task.job_explanation != job_explanation:
|
||||
@@ -566,15 +632,22 @@ class TaskManager:
|
||||
|
||||
def process_tasks(self, all_sorted_tasks):
|
||||
running_tasks = [t for t in all_sorted_tasks if t.status in ['waiting', 'running']]
|
||||
|
||||
self.process_running_tasks(running_tasks)
|
||||
self.subsystem_metrics.inc("task_manager_running_processed", len(running_tasks))
|
||||
|
||||
pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
|
||||
|
||||
undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
|
||||
dependencies = self.generate_dependencies(undeped_tasks)
|
||||
deps_of_deps = self.generate_dependencies(dependencies)
|
||||
dependencies += deps_of_deps
|
||||
self.process_pending_tasks(dependencies)
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
self.subsystem_metrics.inc("task_manager_pending_processed", len(dependencies))
|
||||
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
self.subsystem_metrics.inc("task_manager_pending_processed", len(pending_tasks))
|
||||
|
||||
@timeit
|
||||
def _schedule(self):
|
||||
finished_wfjs = []
|
||||
all_sorted_tasks = self.get_tasks()
|
||||
@@ -610,6 +683,28 @@ class TaskManager:
|
||||
self.process_tasks(all_sorted_tasks)
|
||||
return finished_wfjs
|
||||
|
||||
def record_aggregate_metrics(self, *args):
|
||||
if not settings.IS_TESTING():
|
||||
# increment task_manager_schedule_calls regardless if the other
|
||||
# metrics are recorded
|
||||
s_metrics.Metrics(auto_pipe_execute=True).inc("task_manager_schedule_calls", 1)
|
||||
# Only record metrics if the last recording was more
|
||||
# than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
|
||||
# Prevents a short-duration task manager that runs directly after a
|
||||
# long task manager from overriding useful metrics.
|
||||
current_time = time.time()
|
||||
time_last_recorded = current_time - self.subsystem_metrics.decode("task_manager_recorded_timestamp")
|
||||
if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
|
||||
logger.debug(f"recording metrics, last recorded {time_last_recorded} seconds ago")
|
||||
self.subsystem_metrics.set("task_manager_recorded_timestamp", current_time)
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
else:
|
||||
logger.debug(f"skipping recording metrics, last recorded {time_last_recorded} seconds ago")
|
||||
|
||||
def record_aggregate_metrics_and_exit(self, *args):
|
||||
self.record_aggregate_metrics()
|
||||
sys.exit(1)
|
||||
|
||||
def schedule(self):
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
@@ -619,5 +714,8 @@ class TaskManager:
|
||||
return
|
||||
logger.debug("Starting Scheduler")
|
||||
with task_manager_bulk_reschedule():
|
||||
# if sigterm due to timeout, still record metrics
|
||||
signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
|
||||
self._schedule()
|
||||
self.record_aggregate_metrics()
|
||||
logger.debug("Finishing Scheduler")
|
||||
|
||||
@@ -9,19 +9,19 @@ import stat
|
||||
from django.utils.timezone import now
|
||||
from django.conf import settings
|
||||
from django_guid import get_guid
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# AWX
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.main.constants import MINIMAL_EVENTS
|
||||
from awx.main.constants import MINIMAL_EVENTS, ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE
|
||||
from awx.main.utils.update_model import update_model
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.tasks.signals import signal_callback
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.callback')
|
||||
|
||||
|
||||
class RunnerCallback:
|
||||
event_data_key = 'job_id'
|
||||
|
||||
def __init__(self, model=None):
|
||||
self.parent_workflow_job_id = None
|
||||
self.host_map = {}
|
||||
@@ -33,10 +33,40 @@ class RunnerCallback:
|
||||
self.event_ct = 0
|
||||
self.model = model
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE / 5)
|
||||
self.wrapup_event_dispatched = False
|
||||
self.extra_update_fields = {}
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
return update_model(self.model, pk, _attempt=0, _max_attempts=self.update_attempts, **updates)
|
||||
|
||||
@cached_property
|
||||
def wrapup_event_type(self):
|
||||
return self.instance.event_class.WRAPUP_EVENT
|
||||
|
||||
@cached_property
|
||||
def event_data_key(self):
|
||||
return self.instance.event_class.JOB_REFERENCE
|
||||
|
||||
def delay_update(self, skip_if_already_set=False, **kwargs):
|
||||
"""Stash fields that should be saved along with the job status change"""
|
||||
for key, value in kwargs.items():
|
||||
if key in self.extra_update_fields and skip_if_already_set:
|
||||
continue
|
||||
elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'):
|
||||
if str(value) in self.extra_update_fields.get(key, ''):
|
||||
continue # if already set, avoid duplicating messages
|
||||
# In the case of these fields, we do not want to lose any prior information, so combine values
|
||||
self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)])
|
||||
else:
|
||||
self.extra_update_fields[key] = value
|
||||
|
||||
def get_delayed_update_fields(self):
|
||||
"""Return finalized dict of all fields that should be saved along with the job status change"""
|
||||
self.extra_update_fields['emitted_events'] = self.event_ct
|
||||
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
|
||||
self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
|
||||
return self.extra_update_fields
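A hedged illustration of how delay_update and get_delayed_update_fields behave together, assuming an AWX settings environment where RunnerCallback can be instantiated: job_explanation and result_traceback accumulate distinct messages, other keys are simply overwritten, and skip_if_already_set leaves an existing value alone:

```python
cb = RunnerCallback()
cb.delay_update(job_explanation='Job terminated due to error')
cb.delay_update(job_explanation='Task was canceled due to receiving a shutdown signal.')
cb.delay_update(skip_if_already_set=True, job_explanation='ignored, a value is already set')
fields = cb.get_delayed_update_fields()
print(fields['job_explanation'])   # both messages, newline-joined
print(fields['emitted_events'])    # 0 here; the live callback counts dispatched events
```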
|
||||
|
||||
def event_handler(self, event_data):
|
||||
#
|
||||
# ⚠️ D-D-D-DANGER ZONE ⚠️
|
||||
@@ -130,6 +160,9 @@ class RunnerCallback:
|
||||
elif self.recent_event_timings.maxlen:
|
||||
self.recent_event_timings.append(time.time())
|
||||
|
||||
if event_data.get('event', '') == self.wrapup_event_type:
|
||||
self.wrapup_event_dispatched = True
|
||||
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
self.event_ct += 1
|
||||
@@ -138,8 +171,7 @@ class RunnerCallback:
|
||||
Handle artifacts
|
||||
'''
|
||||
if event_data.get('event_data', {}).get('artifact_data', {}):
|
||||
self.instance.artifacts = event_data['event_data']['artifact_data']
|
||||
self.instance.save(update_fields=['artifacts'])
|
||||
self.delay_update(artifacts=event_data['event_data']['artifact_data'])
|
||||
|
||||
return False
|
||||
|
||||
@@ -148,7 +180,13 @@ class RunnerCallback:
|
||||
Ansible runner callback to tell the job when/if it is canceled
|
||||
"""
|
||||
unified_job_id = self.instance.pk
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
if signal_callback():
|
||||
return True
|
||||
try:
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
except Exception:
|
||||
logger.exception(f'Encountered error during cancel check for {unified_job_id}, canceling now')
|
||||
return True
|
||||
if not self.instance:
|
||||
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
|
||||
return True
|
||||
@@ -170,6 +208,8 @@ class RunnerCallback:
|
||||
}
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
if self.wrapup_event_type == 'EOF':
|
||||
self.wrapup_event_dispatched = True
|
||||
|
||||
def status_handler(self, status_data, runner_config):
|
||||
"""
|
||||
@@ -205,16 +245,10 @@ class RunnerCallback:
|
||||
elif status_data['status'] == 'error':
|
||||
result_traceback = status_data.get('result_traceback', None)
|
||||
if result_traceback:
|
||||
from awx.main.signals import disable_activity_stream # Circular import
|
||||
|
||||
with disable_activity_stream():
|
||||
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
|
||||
self.delay_update(result_traceback=result_traceback)
|
||||
|
||||
|
||||
class RunnerCallbackForProjectUpdate(RunnerCallback):
|
||||
|
||||
event_data_key = 'project_update_id'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RunnerCallbackForProjectUpdate, self).__init__(*args, **kwargs)
|
||||
self.playbook_new_revision = None
|
||||
@@ -231,9 +265,6 @@ class RunnerCallbackForProjectUpdate(RunnerCallback):
|
||||
|
||||
|
||||
class RunnerCallbackForInventoryUpdate(RunnerCallback):
|
||||
|
||||
event_data_key = 'inventory_update_id'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RunnerCallbackForInventoryUpdate, self).__init__(*args, **kwargs)
|
||||
self.end_line = 0
|
||||
@@ -245,9 +276,6 @@ class RunnerCallbackForInventoryUpdate(RunnerCallback):
|
||||
|
||||
|
||||
class RunnerCallbackForAdHocCommand(RunnerCallback):
|
||||
|
||||
event_data_key = 'ad_hoc_command_id'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(RunnerCallbackForAdHocCommand, self).__init__(*args, **kwargs)
|
||||
self.host_map = {}
|
||||
@@ -255,4 +283,4 @@ class RunnerCallbackForAdHocCommand(RunnerCallback):
|
||||
|
||||
class RunnerCallbackForSystemJob(RunnerCallback):
|
||||
|
||||
event_data_key = 'system_job_id'
|
||||
pass
|
||||
|
||||
@@ -19,7 +19,6 @@ from uuid import uuid4
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction
|
||||
|
||||
|
||||
# Runner
|
||||
@@ -34,13 +33,11 @@ from gitdb.exc import BadName as BadGitName
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.constants import (
|
||||
ACTIVE_STATES,
|
||||
PRIVILEGE_ESCALATION_METHODS,
|
||||
STANDARD_INVENTORY_UPDATE_ENV,
|
||||
JOB_FOLDER_PREFIX,
|
||||
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
||||
CONTAINER_VOLUMES_MOUNT_TYPES,
|
||||
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE,
|
||||
)
|
||||
from awx.main.models import (
|
||||
Instance,
|
||||
@@ -65,6 +62,7 @@ from awx.main.tasks.callback import (
|
||||
RunnerCallbackForProjectUpdate,
|
||||
RunnerCallbackForSystemJob,
|
||||
)
|
||||
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
||||
from awx.main.tasks.receptor import AWXReceptorJob
|
||||
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
@@ -78,7 +76,7 @@ from awx.main.utils.common import (
|
||||
)
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils.handlers import SpecialInventoryHandler
|
||||
from awx.main.tasks.system import handle_success_and_failure_notifications, update_smart_memberships_for_inventory, update_inventory_computed_fields
|
||||
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields
|
||||
from awx.main.utils.update_model import update_model
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
@@ -119,12 +117,11 @@ class BaseTask(object):
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
return update_model(self.model, pk, _attempt=0, _max_attempts=self.update_attempts, **updates)
|
||||
|
||||
def write_private_data_file(self, private_data_dir, file_name, data, sub_dir=None, permissions=0o600):
|
||||
def write_private_data_file(self, private_data_dir, file_name, data, sub_dir=None, file_permissions=0o600):
|
||||
base_path = private_data_dir
|
||||
if sub_dir:
|
||||
base_path = os.path.join(private_data_dir, sub_dir)
|
||||
if not os.path.exists(base_path):
|
||||
os.mkdir(base_path, 0o700)
|
||||
os.makedirs(base_path, mode=0o700, exist_ok=True)
|
||||
|
||||
# If we got a file name, create it; otherwise we want a temp file
|
||||
if file_name:
|
||||
@@ -134,7 +131,7 @@ class BaseTask(object):
|
||||
os.close(handle)
|
||||
|
||||
file = Path(file_path)
|
||||
file.touch(mode=permissions, exist_ok=True)
|
||||
file.touch(mode=file_permissions, exist_ok=True)
|
||||
with open(file_path, 'w') as f:
|
||||
f.write(data)
|
||||
return file_path
|
||||
@@ -257,9 +254,9 @@ class BaseTask(object):
|
||||
# Instead, ssh private key file is explicitly passed via an
|
||||
# env variable.
|
||||
else:
|
||||
private_data_files['credentials'][credential] = self.write_private_data_file(private_data_dir, None, data, 'env')
|
||||
private_data_files['credentials'][credential] = self.write_private_data_file(private_data_dir, None, data, sub_dir='env')
|
||||
for credential, data in private_data.get('certificates', {}).items():
|
||||
self.write_private_data_file(private_data_dir, 'ssh_key_data-cert.pub', data, 'artifacts')
|
||||
self.write_private_data_file(private_data_dir, 'ssh_key_data-cert.pub', data, sub_dir=os.path.join('artifacts', str(self.instance.id)))
|
||||
return private_data_files, ssh_key_data
|
||||
|
||||
def build_passwords(self, instance, runtime_passwords):
|
||||
@@ -282,7 +279,7 @@ class BaseTask(object):
|
||||
content = yaml.safe_dump(vars)
|
||||
else:
|
||||
content = safe_dump(vars, safe_dict)
|
||||
return self.write_private_data_file(private_data_dir, 'extravars', content, 'env')
|
||||
return self.write_private_data_file(private_data_dir, 'extravars', content, sub_dir='env')
|
||||
|
||||
def add_awx_venv(self, env):
|
||||
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
|
||||
@@ -321,13 +318,13 @@ class BaseTask(object):
|
||||
# so we can associate emitted events to Host objects
|
||||
self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
|
||||
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
|
||||
return self.write_private_data_file(private_data_dir, 'hosts', file_content, 'inventory', 0o700)
|
||||
return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)
|
||||
|
||||
def build_args(self, instance, private_data_dir, passwords):
|
||||
raise NotImplementedError
|
||||
|
||||
def write_args_file(self, private_data_dir, args):
|
||||
return self.write_private_data_file(private_data_dir, 'cmdline', ansible_runner.utils.args2cmdline(*args), 'env')
|
||||
return self.write_private_data_file(private_data_dir, 'cmdline', ansible_runner.utils.args2cmdline(*args), sub_dir='env')
|
||||
|
||||
def build_credentials_list(self, instance):
|
||||
return []
|
||||
@@ -396,6 +393,7 @@ class BaseTask(object):
|
||||
instance.save(update_fields=['ansible_version'])
|
||||
|
||||
@with_path_cleanup
|
||||
@with_signal_handling
|
||||
def run(self, pk, **kwargs):
|
||||
"""
|
||||
Run the job/task and capture its output.
|
||||
@@ -412,7 +410,6 @@ class BaseTask(object):
|
||||
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
|
||||
self.instance.websocket_emit_status("running")
|
||||
status, rc = 'error', None
|
||||
extra_update_fields = {}
|
||||
fact_modification_times = {}
|
||||
self.runner_callback.event_ct = 0
|
||||
|
||||
@@ -428,7 +425,7 @@ class BaseTask(object):
|
||||
private_data_dir = self.build_private_data_dir(self.instance)
|
||||
self.pre_run_hook(self.instance, private_data_dir)
|
||||
self.instance.log_lifecycle("preparing_playbook")
|
||||
if self.instance.cancel_flag:
|
||||
if self.instance.cancel_flag or signal_callback():
|
||||
self.instance = self.update_model(self.instance.pk, status='canceled')
|
||||
if self.instance.status != 'running':
|
||||
# Stop the task chain and prevent starting the job if it has
|
||||
@@ -523,7 +520,7 @@ class BaseTask(object):
|
||||
runner_settings['idle_timeout'] = idle_timeout
|
||||
|
||||
# Write out our own settings file
|
||||
self.write_private_data_file(private_data_dir, 'settings', json.dumps(runner_settings), 'env')
|
||||
self.write_private_data_file(private_data_dir, 'settings', json.dumps(runner_settings), sub_dir='env')
|
||||
|
||||
self.instance.log_lifecycle("running_playbook")
|
||||
if isinstance(self.instance, SystemJob):
|
||||
@@ -547,20 +544,19 @@ class BaseTask(object):
|
||||
rc = res.rc
|
||||
|
||||
if status in ('timeout', 'error'):
|
||||
job_explanation = f"Job terminated due to {status}"
|
||||
self.instance.job_explanation = self.instance.job_explanation or job_explanation
|
||||
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation=f"Job terminated due to {status}")
|
||||
if status == 'timeout':
|
||||
status = 'failed'
|
||||
|
||||
extra_update_fields['job_explanation'] = self.instance.job_explanation
|
||||
# ensure failure notification sends even if playbook_on_stats event is not triggered
|
||||
handle_success_and_failure_notifications.apply_async([self.instance.id])
|
||||
|
||||
elif status == 'canceled':
|
||||
self.instance = self.update_model(pk)
|
||||
if (getattr(self.instance, 'cancel_flag', False) is False) and signal_callback():
|
||||
self.runner_callback.delay_update(job_explanation="Task was canceled due to receiving a shutdown signal.")
|
||||
status = 'failed'
|
||||
except ReceptorNodeNotFound as exc:
|
||||
extra_update_fields['job_explanation'] = str(exc)
|
||||
self.runner_callback.delay_update(job_explanation=str(exc))
|
||||
except Exception:
|
||||
# this could catch programming or file system errors
|
||||
extra_update_fields['result_traceback'] = traceback.format_exc()
|
||||
self.runner_callback.delay_update(result_traceback=traceback.format_exc())
|
||||
logger.exception('%s Exception occurred while running task', self.instance.log_format)
|
||||
finally:
|
||||
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.runner_callback.event_ct)
|
||||
@@ -570,18 +566,19 @@ class BaseTask(object):
|
||||
except PostRunError as exc:
|
||||
if status == 'successful':
|
||||
status = exc.status
|
||||
extra_update_fields['job_explanation'] = exc.args[0]
|
||||
self.runner_callback.delay_update(job_explanation=exc.args[0])
|
||||
if exc.tb:
|
||||
extra_update_fields['result_traceback'] = exc.tb
|
||||
self.runner_callback.delay_update(result_traceback=exc.tb)
|
||||
except Exception:
|
||||
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
|
||||
|
||||
# We really shouldn't get into this one but just in case....
|
||||
if 'got an unexpected keyword argument' in extra_update_fields.get('result_traceback', ''):
|
||||
extra_update_fields['result_traceback'] = "{}\n\n{}".format(extra_update_fields['result_traceback'], ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
|
||||
|
||||
self.instance = self.update_model(pk)
|
||||
self.instance = self.update_model(pk, status=status, emitted_events=self.runner_callback.event_ct, **extra_update_fields)
|
||||
self.instance = self.update_model(pk, status=status, select_for_update=True, **self.runner_callback.get_delayed_update_fields())
|
||||
|
||||
# Field host_status_counts is used as a metric to check if event processing is finished
|
||||
# we send notifications if it is, if not, callback receiver will send them
|
||||
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
|
||||
self.instance.send_notification_templates('succeeded' if status == 'successful' else 'failed')
|
||||
|
||||
try:
|
||||
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
|
||||
@@ -1176,64 +1173,6 @@ class RunProjectUpdate(BaseTask):
|
||||
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
|
||||
return d
|
||||
|
||||
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
|
||||
scm_revision = project_update.project.scm_revision
|
||||
inv_update_class = InventoryUpdate._get_task_class()
|
||||
for inv_src in dependent_inventory_sources:
|
||||
if not inv_src.update_on_project_update:
|
||||
continue
|
||||
if inv_src.scm_last_revision == scm_revision:
|
||||
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
|
||||
continue
|
||||
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
|
||||
with transaction.atomic():
|
||||
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
|
||||
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
|
||||
continue
|
||||
|
||||
if settings.IS_K8S:
|
||||
instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
|
||||
else:
|
||||
instance_group = project_update.instance_group
|
||||
|
||||
local_inv_update = inv_src.create_inventory_update(
|
||||
_eager_fields=dict(
|
||||
launch_type='scm',
|
||||
status='running',
|
||||
instance_group=instance_group,
|
||||
execution_node=project_update.execution_node,
|
||||
controller_node=project_update.execution_node,
|
||||
source_project_update=project_update,
|
||||
celery_task_id=project_update.celery_task_id,
|
||||
)
|
||||
)
|
||||
local_inv_update.log_lifecycle("controller_node_chosen")
|
||||
local_inv_update.log_lifecycle("execution_node_chosen")
|
||||
try:
|
||||
create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
|
||||
inv_update_class().run(local_inv_update.id)
|
||||
except Exception:
|
||||
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
|
||||
|
||||
try:
|
||||
project_update.refresh_from_db()
|
||||
except ProjectUpdate.DoesNotExist:
|
||||
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
|
||||
break
|
||||
try:
|
||||
local_inv_update.refresh_from_db()
|
||||
except InventoryUpdate.DoesNotExist:
|
||||
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
|
||||
continue
|
||||
if project_update.cancel_flag:
|
||||
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
|
||||
break
|
||||
if local_inv_update.cancel_flag:
|
||||
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
|
||||
if local_inv_update.status == 'successful':
|
||||
inv_src.scm_last_revision = scm_revision
|
||||
inv_src.save(update_fields=['scm_last_revision'])
|
||||
|
||||
def release_lock(self, instance):
|
||||
try:
|
||||
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
|
||||
@@ -1443,12 +1382,6 @@ class RunProjectUpdate(BaseTask):
|
||||
p.inventory_files = p.inventories
|
||||
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
|
||||
|
||||
# Update any inventories that depend on this project
|
||||
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
|
||||
if len(dependent_inventory_sources) > 0:
|
||||
if status == 'successful' and instance.launch_type != 'sync':
|
||||
self._update_dependent_inventories(instance, dependent_inventory_sources)
|
||||
|
||||
def build_execution_environment_params(self, instance, private_data_dir):
|
||||
if settings.IS_K8S:
|
||||
return {}
|
||||
@@ -1459,8 +1392,8 @@ class RunProjectUpdate(BaseTask):
|
||||
params.setdefault('container_volume_mounts', [])
|
||||
params['container_volume_mounts'].extend(
|
||||
[
|
||||
f"{project_path}:{project_path}:Z",
|
||||
f"{cache_path}:{cache_path}:Z",
|
||||
f"{project_path}:{project_path}:z",
|
||||
f"{cache_path}:{cache_path}:z",
|
||||
]
|
||||
)
|
||||
return params
|
||||
@@ -1609,7 +1542,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
if injector is not None:
|
||||
content = injector.inventory_contents(inventory_update, private_data_dir)
|
||||
# must be a statically named file
|
||||
self.write_private_data_file(private_data_dir, injector.filename, content, 'inventory', 0o700)
|
||||
self.write_private_data_file(private_data_dir, injector.filename, content, sub_dir='inventory', file_permissions=0o700)
|
||||
rel_path = os.path.join('inventory', injector.filename)
|
||||
elif src == 'scm':
|
||||
rel_path = os.path.join('project', inventory_update.source_path)
|
||||
@@ -1628,9 +1561,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
source_project = None
|
||||
if inventory_update.inventory_source:
|
||||
source_project = inventory_update.inventory_source.source_project
|
||||
if (
|
||||
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
|
||||
): # never ever update manual projects
|
||||
if inventory_update.source == 'scm' and source_project and source_project.scm_type: # never ever update manual projects
|
||||
|
||||
# Check if the content cache exists, so that we do not unnecessarily re-download roles
|
||||
sync_needs = ['update_{}'.format(source_project.scm_type)]
|
||||
@@ -1663,8 +1594,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
sync_task = project_update_task(job_private_data_dir=private_data_dir)
|
||||
sync_task.run(local_project_sync.id)
|
||||
local_project_sync.refresh_from_db()
|
||||
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
|
||||
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
|
||||
except Exception:
|
||||
inventory_update = self.update_model(
|
||||
inventory_update.pk,
|
||||
@@ -1675,9 +1604,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
),
|
||||
)
|
||||
raise
|
||||
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
|
||||
# This follows update, not sync, so make copy here
|
||||
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
|
||||
|
||||
def post_run_hook(self, inventory_update, status):
|
||||
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
|
||||
|
||||
@@ -24,10 +24,7 @@ from awx.main.utils.common import (
|
||||
parse_yaml_or_json,
|
||||
cleanup_new_process,
|
||||
)
|
||||
from awx.main.constants import (
|
||||
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
||||
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE,
|
||||
)
|
||||
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
|
||||
|
||||
# Receptorctl
|
||||
from receptorctl.socket_interface import ReceptorControl
|
||||
@@ -350,6 +347,11 @@ class AWXReceptorJob:
|
||||
resultsock.shutdown(socket.SHUT_RDWR)
|
||||
resultfile.close()
|
||||
elif res.status == 'error':
|
||||
# If ansible-runner ran, but an error occurred at runtime, the traceback information
|
||||
# is saved via the status_handler passed in to the processor.
|
||||
if 'result_traceback' in self.task.runner_callback.extra_update_fields:
|
||||
return res
|
||||
|
||||
try:
|
||||
unit_status = receptor_ctl.simple_command(f'work status {self.unit_id}')
|
||||
detail = unit_status.get('Detail', None)
|
||||
@@ -365,28 +367,19 @@ class AWXReceptorJob:
|
||||
logger.warning(f"Could not launch pod for {log_name}. Exceeded quota.")
|
||||
self.task.update_model(self.task.instance.pk, status='pending')
|
||||
return
|
||||
# If ansible-runner ran, but an error occurred at runtime, the traceback information
|
||||
# is saved via the status_handler passed in to the processor.
|
||||
if state_name == 'Succeeded':
|
||||
return res
|
||||
|
||||
if not self.task.instance.result_traceback:
|
||||
try:
|
||||
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
|
||||
lines = resultsock.readlines()
|
||||
receptor_output = b"".join(lines).decode()
|
||||
if receptor_output:
|
||||
self.task.instance.result_traceback = receptor_output
|
||||
if 'got an unexpected keyword argument' in receptor_output:
|
||||
self.task.instance.result_traceback = "{}\n\n{}".format(receptor_output, ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
|
||||
self.task.instance.save(update_fields=['result_traceback'])
|
||||
elif detail:
|
||||
self.task.instance.result_traceback = detail
|
||||
self.task.instance.save(update_fields=['result_traceback'])
|
||||
else:
|
||||
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
|
||||
except Exception:
|
||||
raise RuntimeError(detail)
|
||||
try:
|
||||
resultsock = receptor_ctl.get_work_results(self.unit_id, return_sockfile=True)
|
||||
lines = resultsock.readlines()
|
||||
receptor_output = b"".join(lines).decode()
|
||||
if receptor_output:
|
||||
self.task.runner_callback.delay_update(result_traceback=receptor_output)
|
||||
elif detail:
|
||||
self.task.runner_callback.delay_update(result_traceback=detail)
|
||||
else:
|
||||
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
|
||||
except Exception:
|
||||
raise RuntimeError(detail)
|
||||
|
||||
return res
|
||||
|
||||
|
||||
awx/main/tasks/signals.py (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
import signal
|
||||
import functools
|
||||
import logging
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.signals')
|
||||
|
||||
|
||||
__all__ = ['with_signal_handling', 'signal_callback']
|
||||
|
||||
|
||||
class SignalState:
|
||||
def reset(self):
|
||||
self.sigterm_flag = False
|
||||
self.is_active = False
|
||||
self.original_sigterm = None
|
||||
self.original_sigint = None
|
||||
|
||||
def __init__(self):
|
||||
self.reset()
|
||||
|
||||
def set_flag(self, *args):
|
||||
"""Method to pass into the python signal.signal method to receive signals"""
|
||||
self.sigterm_flag = True
|
||||
|
||||
def connect_signals(self):
|
||||
self.original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||
self.original_sigint = signal.getsignal(signal.SIGINT)
|
||||
signal.signal(signal.SIGTERM, self.set_flag)
|
||||
signal.signal(signal.SIGINT, self.set_flag)
|
||||
self.is_active = True
|
||||
|
||||
def restore_signals(self):
|
||||
signal.signal(signal.SIGTERM, self.original_sigterm)
|
||||
signal.signal(signal.SIGINT, self.original_sigint)
|
||||
self.reset()
|
||||
|
||||
|
||||
signal_state = SignalState()
|
||||
|
||||
|
||||
def signal_callback():
|
||||
return signal_state.sigterm_flag
|
||||
|
||||
|
||||
def with_signal_handling(f):
|
||||
"""
|
||||
Change signal handling to make signal_callback return True in event of SIGTERM or SIGINT.
|
||||
"""
|
||||
|
||||
@functools.wraps(f)
|
||||
def _wrapped(*args, **kwargs):
|
||||
try:
|
||||
this_is_outermost_caller = False
|
||||
if not signal_state.is_active:
|
||||
signal_state.connect_signals()
|
||||
this_is_outermost_caller = True
|
||||
return f(*args, **kwargs)
|
||||
finally:
|
||||
if this_is_outermost_caller:
|
||||
signal_state.restore_signals()
|
||||
|
||||
return _wrapped
|
||||
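The new awx/main/tasks/signals.py module above keeps a single module-level SignalState: only the outermost call decorated with with_signal_handling installs the SIGTERM/SIGINT handlers and restores the originals on exit, while nested decorated calls just read the shared flag through signal_callback(). A minimal usage sketch, assuming a hypothetical task function that is not part of this changeset, of how dispatched code would poll for cancellation:
from awx.main.tasks.signals import signal_callback, with_signal_handling
@with_signal_handling
def run_steps(steps):
    # Illustrative only: check the shared flag between units of work so a
    # SIGTERM/SIGINT delivered to the worker stops the task at a safe point
    # instead of killing it mid-step.
    for step in steps:
        if signal_callback():
            return 'canceled'
        step()
    return 'successful'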
@@ -103,7 +103,8 @@ def dispatch_startup():
|
||||
#
|
||||
apply_cluster_membership_policies()
|
||||
cluster_node_heartbeat()
|
||||
Metrics().clear_values()
|
||||
m = Metrics()
|
||||
m.reset_values()
|
||||
|
||||
# Update Tower's rsyslog.conf file based on logging settings in the db
|
||||
reconfigure_rsyslog()
|
||||
@@ -113,10 +114,6 @@ def inform_cluster_of_shutdown():
|
||||
try:
|
||||
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
|
||||
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
|
||||
try:
|
||||
reaper.reap(this_inst)
|
||||
except Exception:
|
||||
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
|
||||
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
|
||||
except Exception:
|
||||
logger.exception('Encountered problem with normal shutdown signal.')
|
||||
@@ -695,7 +692,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
first_instance = instance
|
||||
first_instance_type = each_task['type']
|
||||
|
||||
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
|
||||
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status in ('successful', 'failed'):
|
||||
instance.status = 'failed'
|
||||
instance.failed = True
|
||||
if not instance.job_explanation:
|
||||
@@ -716,25 +713,6 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
pass
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def handle_success_and_failure_notifications(job_id):
|
||||
uj = UnifiedJob.objects.get(pk=job_id)
|
||||
retries = 0
|
||||
while retries < settings.AWX_NOTIFICATION_JOB_FINISH_MAX_RETRY:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
return
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_id)
|
||||
|
||||
logger.warning(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
"""
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
- ansible.builtin.import_playbook: foo
|
||||
@@ -0,0 +1,2 @@
|
||||
---
|
||||
- ansible.builtin.include: foo
|
||||
@@ -2,8 +2,9 @@
|
||||
"ANSIBLE_JINJA2_NATIVE": "True",
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
|
||||
"GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}",
|
||||
"GCP_AUTH_KIND": "serviceaccount",
|
||||
"GCP_ENV_TYPE": "tower",
|
||||
"GCP_PROJECT": "fooo",
|
||||
"GCP_SERVICE_ACCOUNT_FILE": "{{ file_reference }}"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ def test_empty():
|
||||
"workflow_job_template": 0,
|
||||
"unified_job": 0,
|
||||
"pending_jobs": 0,
|
||||
"database_connections": 1,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ EXPECTED_VALUES = {
|
||||
'awx_license_instance_total': 0,
|
||||
'awx_license_instance_free': 0,
|
||||
'awx_pending_jobs_total': 0,
|
||||
'awx_database_connections_total': 1,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -532,6 +532,49 @@ def test_vault_password_required(post, organization, admin):
|
||||
assert 'required fields (vault_password)' in j.job_explanation
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_vault_id_immutable(post, patch, organization, admin):
|
||||
vault = CredentialType.defaults['vault']()
|
||||
vault.save()
|
||||
response = post(
|
||||
reverse('api:credential_list'),
|
||||
{
|
||||
'credential_type': vault.pk,
|
||||
'organization': organization.id,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {'vault_id': 'password', 'vault_password': 'password'},
|
||||
},
|
||||
admin,
|
||||
)
|
||||
assert response.status_code == 201
|
||||
assert Credential.objects.count() == 1
|
||||
response = patch(
|
||||
reverse('api:credential_detail', kwargs={'pk': response.data['id']}), {'inputs': {'vault_id': 'password2', 'vault_password': 'password'}}, admin
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert response.data['inputs'][0] == 'Vault IDs cannot be changed once they have been created.'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_patch_without_vault_id_valid(post, patch, organization, admin):
|
||||
vault = CredentialType.defaults['vault']()
|
||||
vault.save()
|
||||
response = post(
|
||||
reverse('api:credential_list'),
|
||||
{
|
||||
'credential_type': vault.pk,
|
||||
'organization': organization.id,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {'vault_id': 'password', 'vault_password': 'password'},
|
||||
},
|
||||
admin,
|
||||
)
|
||||
assert response.status_code == 201
|
||||
assert Credential.objects.count() == 1
|
||||
response = patch(reverse('api:credential_detail', kwargs={'pk': response.data['id']}), {'name': 'worst_credential_ever'}, admin)
|
||||
assert response.status_code == 200
|
||||
|
||||
|
||||
#
|
||||
# Net Credentials
|
||||
#
|
||||
|
||||
@@ -9,9 +9,7 @@ from awx.api.versioning import reverse
|
||||
@pytest.fixture
|
||||
def ec2_source(inventory, project):
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
return inventory.inventory_sources.create(
|
||||
name='some_source', update_on_project_update=True, source='ec2', source_project=project, scm_last_revision=project.scm_revision
|
||||
)
|
||||
return inventory.inventory_sources.create(name='some_source', source='ec2', source_project=project)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -70,11 +70,11 @@ def test_job_job_events_children_summary(get, organization_factory, job_template
|
||||
job_id=job.pk, uuid='uuid2', parent_uuid='uuid1', event="playbook_on_play_start", counter=2, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event="runner_on_start", counter=3, stdout='a' * 1024, job_created=job.created
|
||||
job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event="playbook_on_task_start", counter=3, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(job_id=job.pk, uuid='uuid4', parent_uuid='', event='verbose', counter=4, stdout='a' * 1024, job_created=job.created).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event="playbook_on_task_start", counter=5, stdout='a' * 1024, job_created=job.created
|
||||
job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event="playbook_on_play_start", counter=5, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
job.emitted_events = job.get_event_queryset().count()
|
||||
job.status = "successful"
|
||||
@@ -84,3 +84,50 @@ def test_job_job_events_children_summary(get, organization_factory, job_template
|
||||
assert response.data["children_summary"] == {1: {"rowNumber": 0, "numChildren": 4}, 2: {"rowNumber": 1, "numChildren": 2}}
|
||||
assert response.data["meta_event_nested_uuid"] == {4: "uuid2"}
|
||||
assert response.data["event_processing_finished"] == True
|
||||
assert response.data["is_tree"] == True
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_job_events_children_summary_is_tree(get, organization_factory, job_template_factory):
|
||||
'''
|
||||
children_summary should return {is_tree: False} if the event structure is not tree-like
|
||||
'''
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template
|
||||
job = jt.create_unified_job()
|
||||
url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert response.data["event_processing_finished"] == False
|
||||
'''
|
||||
E1
|
||||
E2
|
||||
E3
|
||||
E4 (verbose)
|
||||
E5
|
||||
E6 <-- parent is E2, but comes after another "branch" E5
|
||||
'''
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid1', parent_uuid='', event="playbook_on_start", counter=1, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid2', parent_uuid='uuid1', event="playbook_on_play_start", counter=2, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event="playbook_on_task_start", counter=3, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(job_id=job.pk, uuid='uuid4', parent_uuid='', event='verbose', counter=4, stdout='a' * 1024, job_created=job.created).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event="playbook_on_play_start", counter=5, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
JobEvent.create_from_data(
|
||||
job_id=job.pk, uuid='uuid6', parent_uuid='uuid2', event="playbook_on_task_start", counter=6, stdout='a' * 1024, job_created=job.created
|
||||
).save()
|
||||
job.emitted_events = job.get_event_queryset().count()
|
||||
job.status = "successful"
|
||||
job.save()
|
||||
url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin, expect=200)
|
||||
assert response.data["children_summary"] == {}
|
||||
assert response.data["meta_event_nested_uuid"] == {}
|
||||
assert response.data["event_processing_finished"] == True
|
||||
assert response.data["is_tree"] == False
|
||||
|
||||
@@ -13,9 +13,7 @@ from awx.main.models import InventorySource, Inventory, ActivityStream
|
||||
@pytest.fixture
|
||||
def scm_inventory(inventory, project):
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
inventory.inventory_sources.create(
|
||||
name='foobar', update_on_project_update=True, source='scm', source_project=project, scm_last_revision=project.scm_revision
|
||||
)
|
||||
inventory.inventory_sources.create(name='foobar', source='scm', source_project=project)
|
||||
return inventory
|
||||
|
||||
|
||||
@@ -23,9 +21,7 @@ def scm_inventory(inventory, project):
|
||||
def factory_scm_inventory(inventory, project):
|
||||
def fn(**kwargs):
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
return inventory.inventory_sources.create(
|
||||
source_project=project, overwrite_vars=True, source='scm', scm_last_revision=project.scm_revision, **kwargs
|
||||
)
|
||||
return inventory.inventory_sources.create(source_project=project, overwrite_vars=True, source='scm', **kwargs)
|
||||
|
||||
return fn
|
||||
|
||||
@@ -544,15 +540,12 @@ class TestControlledBySCM:
|
||||
def test_safe_method_works(self, get, options, scm_inventory, admin_user):
|
||||
get(scm_inventory.get_absolute_url(), admin_user, expect=200)
|
||||
options(scm_inventory.get_absolute_url(), admin_user, expect=200)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision != ''
|
||||
|
||||
def test_vars_edit_reset(self, patch, scm_inventory, admin_user):
|
||||
patch(scm_inventory.get_absolute_url(), {'variables': 'hello: world'}, admin_user, expect=200)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_name_edit_allowed(self, patch, scm_inventory, admin_user):
|
||||
patch(scm_inventory.get_absolute_url(), {'variables': '---', 'name': 'newname'}, admin_user, expect=200)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision != ''
|
||||
|
||||
def test_host_associations_reset(self, post, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -560,14 +553,12 @@ class TestControlledBySCM:
|
||||
g = inv_src.groups.create(name='fooland', inventory=scm_inventory)
|
||||
post(reverse('api:host_groups_list', kwargs={'pk': h.id}), {'id': g.id}, admin_user, expect=204)
|
||||
post(reverse('api:group_hosts_list', kwargs={'pk': g.id}), {'id': h.id}, admin_user, expect=204)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_group_group_associations_reset(self, post, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
g1 = inv_src.groups.create(name='barland', inventory=scm_inventory)
|
||||
g2 = inv_src.groups.create(name='fooland', inventory=scm_inventory)
|
||||
post(reverse('api:group_children_list', kwargs={'pk': g1.id}), {'id': g2.id}, admin_user, expect=204)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_host_group_delete_reset(self, delete, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -575,7 +566,6 @@ class TestControlledBySCM:
|
||||
g = inv_src.groups.create(name='fooland', inventory=scm_inventory)
|
||||
delete(h.get_absolute_url(), admin_user, expect=204)
|
||||
delete(g.get_absolute_url(), admin_user, expect=204)
|
||||
assert InventorySource.objects.get(inventory=scm_inventory.pk).scm_last_revision == ''
|
||||
|
||||
def test_remove_scm_inv_src(self, delete, scm_inventory, admin_user):
|
||||
inv_src = scm_inventory.inventory_sources.first()
|
||||
@@ -588,7 +578,6 @@ class TestControlledBySCM:
|
||||
{
|
||||
'name': 'new inv src',
|
||||
'source_project': project.pk,
|
||||
'update_on_project_update': False,
|
||||
'source': 'scm',
|
||||
'overwrite_vars': True,
|
||||
'source_vars': 'plugin: a.b.c',
|
||||
@@ -597,27 +586,6 @@ class TestControlledBySCM:
|
||||
expect=201,
|
||||
)
|
||||
|
||||
def test_adding_inv_src_prohibited(self, post, scm_inventory, project, admin_user):
|
||||
post(
|
||||
reverse('api:inventory_inventory_sources_list', kwargs={'pk': scm_inventory.id}),
|
||||
{'name': 'new inv src', 'source_project': project.pk, 'update_on_project_update': True, 'source': 'scm', 'overwrite_vars': True},
|
||||
admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
def test_two_update_on_project_update_inv_src_prohibited(self, patch, scm_inventory, factory_scm_inventory, project, admin_user):
|
||||
scm_inventory2 = factory_scm_inventory(name="scm_inventory2")
|
||||
res = patch(
|
||||
reverse('api:inventory_source_detail', kwargs={'pk': scm_inventory2.id}),
|
||||
{
|
||||
'update_on_project_update': True,
|
||||
},
|
||||
admin_user,
|
||||
expect=400,
|
||||
)
|
||||
content = json.loads(res.content)
|
||||
assert content['update_on_project_update'] == ["More than one SCM-based inventory source with update on project update " "per-inventory not allowed."]
|
||||
|
||||
def test_adding_inv_src_without_proj_access_prohibited(self, post, project, inventory, rando):
|
||||
inventory.admin_role.members.add(rando)
|
||||
post(
|
||||
|
||||
@@ -220,7 +220,7 @@ class TestControllerNode:
|
||||
assert 'controller_node' not in r.data
|
||||
|
||||
r = get(reverse('api:inventory_update_detail', kwargs={'pk': inventory_update.pk}), admin_user, expect=200)
|
||||
assert 'controller_node' not in r.data
|
||||
assert 'controller_node' in r.data
|
||||
|
||||
r = get(reverse('api:system_job_detail', kwargs={'pk': system_job.pk}), admin_user, expect=200)
|
||||
assert 'controller_node' not in r.data
|
||||
|
||||
@@ -111,21 +111,41 @@ def test_encrypted_survey_answer(post, patch, admin_user, project, inventory, su
|
||||
[
|
||||
("", "This field may not be blank"),
|
||||
("DTSTART:NONSENSE", "Valid DTSTART required in rrule"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
("DTSTART:20300308T050000Z DTSTART:20310308T050000", "Multiple DTSTART is not supported"),
|
||||
("DTSTART:20300308T050000Z", "RRULE required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE", "INTERVAL required in rrule"),
|
||||
("DTSTART:20300308T050000Z", "One or more rule required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1; EXDATE:20220401", "EXDATE not allowed in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1; RDATE:20220401", "RDATE not allowed in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=SECONDLY;INTERVAL=5;COUNT=6", "SECONDLY is not supported"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=3,4", "Multiple BYMONTHDAYs not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYMONTH=1,2", "Multiple BYMONTHs not supported"), # noqa
|
||||
# Individual rule test
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE", "INTERVAL required in rrule"),
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO", "BYDAY with numeric prefix not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYYEARDAY=100", "BYYEARDAY not supported"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=YEARLY;INTERVAL=1;BYWEEKNO=20", "BYWEEKNO not supported"),
|
||||
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000", "COUNT > 999 is unsupported"), # noqa
|
||||
# Individual rule test with multiple rules
|
||||
## Bad Rule: RRULE:NONSENSE
|
||||
("DTSTART:20300308T050000Z RRULE:NONSENSE RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU", "INTERVAL required in rrule"),
|
||||
## Bad Rule: RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO
|
||||
(
|
||||
"DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=YEARLY;INTERVAL=1;BYDAY=5MO",
|
||||
"BYDAY with numeric prefix not supported",
|
||||
), # noqa
|
||||
## Bad Rule: RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z
|
||||
(
|
||||
"DTSTART:20030925T104941Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z",
|
||||
"RRULE may not contain both COUNT and UNTIL",
|
||||
), # noqa
|
||||
## Bad Rule: RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000
|
||||
(
|
||||
"DTSTART:20300308T050000Z RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2000",
|
||||
"COUNT > 999 is unsupported",
|
||||
), # noqa
|
||||
# Multiple errors, first condition should be returned
|
||||
("DTSTART:NONSENSE RRULE:NONSENSE RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=3,4", "Valid DTSTART required in rrule"),
|
||||
# Parsing Tests
|
||||
("DTSTART;TZID=US-Eastern:19961105T090000 RRULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5", "A valid TZID must be provided"), # noqa
|
||||
("DTSTART:20300308T050000Z RRULE:FREQ=REGULARLY;INTERVAL=1", "rrule parsing failed validation: invalid 'FREQ': REGULARLY"), # noqa
|
||||
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
|
||||
("DTSTART;TZID=America/New_York:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1", "rrule parsing failed validation"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
],
|
||||
)
|
||||
def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
@@ -143,6 +163,29 @@ def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
assert error in smart_str(resp.content)
|
||||
|
||||
|
||||
def test_multiple_invalid_rrules(post, admin_user, project, inventory):
|
||||
job_template = JobTemplate.objects.create(name='test-jt', project=project, playbook='helloworld.yml', inventory=inventory)
|
||||
url = reverse('api:job_template_schedules_list', kwargs={'pk': job_template.id})
|
||||
resp = post(
|
||||
url,
|
||||
{
|
||||
'name': 'Some Schedule',
|
||||
'rrule': "EXRULE:FREQ=SECONDLY DTSTART;TZID=US-Eastern:19961105T090000 RRULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5;UNTIL=20220101 DTSTART;TZID=US-Eastern:19961105T090000",
|
||||
},
|
||||
admin_user,
|
||||
expect=400,
|
||||
)
|
||||
expected_result = {
|
||||
"rrule": [
|
||||
"Multiple DTSTART is not supported.",
|
||||
"INTERVAL required in rrule: RULE:FREQ=SECONDLY",
|
||||
"RRULE may not contain both COUNT and UNTIL: RULE:FREQ=MINUTELY;INTERVAL=10;COUNT=5;UNTIL=20220101",
|
||||
"rrule parsing failed validation: 'NoneType' object has no attribute 'group'",
|
||||
]
|
||||
}
|
||||
assert expected_result == resp.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_users_can_preview_schedules(post, alice):
|
||||
url = reverse('api:schedule_rrule')
|
||||
@@ -381,11 +424,83 @@ def test_dst_rollback_duplicates(post, admin_user):
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'rrule, expected_result',
|
||||
(
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20300302T150000 RRULE:INTERVAL=1;FREQ=DAILY;UNTIL=20300304T1500 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU',
|
||||
['2030-03-02 15:00:00-05:00', '2030-03-04 15:00:00-05:00'],
|
||||
id="Every day except sundays",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=US/Eastern:20300428T170000 RRULE:INTERVAL=1;FREQ=DAILY;COUNT=4 EXRULE:INTERVAL=1;FREQ=DAILY;BYMONTH=4;BYMONTHDAY=30',
|
||||
['2030-04-28 17:00:00-04:00', '2030-04-29 17:00:00-04:00', '2030-05-01 17:00:00-04:00'],
|
||||
id="Every day except April 30th",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20300313T164500 RRULE:INTERVAL=5;FREQ=MINUTELY EXRULE:FREQ=MINUTELY;INTERVAL=5;BYDAY=WE;BYHOUR=17,18',
|
||||
[
|
||||
'2030-03-13 16:45:00-04:00',
|
||||
'2030-03-13 16:50:00-04:00',
|
||||
'2030-03-13 16:55:00-04:00',
|
||||
'2030-03-13 19:00:00-04:00',
|
||||
'2030-03-13 19:05:00-04:00',
|
||||
'2030-03-13 19:10:00-04:00',
|
||||
'2030-03-13 19:15:00-04:00',
|
||||
'2030-03-13 19:20:00-04:00',
|
||||
'2030-03-13 19:25:00-04:00',
|
||||
'2030-03-13 19:30:00-04:00',
|
||||
],
|
||||
id="Every 5 minutes but not Wednesdays from 5-7pm",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20300426T100100 RRULE:INTERVAL=15;FREQ=MINUTELY;BYDAY=MO,TU,WE,TH,FR;BYHOUR=10,11 EXRULE:INTERVAL=15;FREQ=MINUTELY;BYDAY=MO,TU,WE,TH,FR;BYHOUR=11;BYMINUTE=3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,34,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59',
|
||||
[
|
||||
'2030-04-26 10:01:00-04:00',
|
||||
'2030-04-26 10:16:00-04:00',
|
||||
'2030-04-26 10:31:00-04:00',
|
||||
'2030-04-26 10:46:00-04:00',
|
||||
'2030-04-26 11:01:00-04:00',
|
||||
'2030-04-29 10:01:00-04:00',
|
||||
'2030-04-29 10:16:00-04:00',
|
||||
'2030-04-29 10:31:00-04:00',
|
||||
'2030-04-29 10:46:00-04:00',
|
||||
'2030-04-29 11:01:00-04:00',
|
||||
],
|
||||
id="Every 15 minutes Monday - Friday from 10:01am to 11:02pm (inclusive)",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART:20301219T130551Z RRULE:FREQ=MONTHLY;INTERVAL=1;BYDAY=SA;BYMONTHDAY=12,13,14,15,16,17,18',
|
||||
[
|
||||
'2031-01-18 13:05:51+00:00',
|
||||
'2031-02-15 13:05:51+00:00',
|
||||
'2031-03-15 13:05:51+00:00',
|
||||
'2031-04-12 13:05:51+00:00',
|
||||
'2031-05-17 13:05:51+00:00',
|
||||
'2031-06-14 13:05:51+00:00',
|
||||
'2031-07-12 13:05:51+00:00',
|
||||
'2031-08-16 13:05:51+00:00',
|
||||
'2031-09-13 13:05:51+00:00',
|
||||
'2031-10-18 13:05:51+00:00',
|
||||
],
|
||||
id="Any Saturday whose month day is between 12 and 18",
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_complex_schedule(post, admin_user, rrule, expected_result):
|
||||
# Every day except Sunday, 2022-05-01 is a Sunday
|
||||
|
||||
url = reverse('api:schedule_rrule')
|
||||
r = post(url, {'rrule': rrule}, admin_user, expect=200)
|
||||
|
||||
assert list(map(str, r.data['local'])) == expected_result
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_zoneinfo(get, admin_user):
|
||||
url = reverse('api:schedule_zoneinfo')
|
||||
r = get(url, admin_user, expect=200)
|
||||
assert {'name': 'America/New_York'} in r.data
|
||||
assert 'America/New_York' in r.data['zones']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
26
awx/main/tests/functional/commands/test_callback_receiver.py
Normal file
@@ -0,0 +1,26 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.dispatch.worker.callback import job_stats_wrapup
|
||||
from awx.main.models.jobs import Job
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_wrapup_does_not_send_notifications(mocker):
|
||||
job = Job.objects.create(status='running')
|
||||
assert job.host_status_counts is None
|
||||
mock = mocker.patch('awx.main.models.notifications.JobNotificationMixin.send_notification_templates')
|
||||
job_stats_wrapup(job.id)
|
||||
job.refresh_from_db()
|
||||
assert job.host_status_counts == {}
|
||||
mock.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_wrapup_does_send_notifications(mocker):
|
||||
job = Job.objects.create(status='successful')
|
||||
assert job.host_status_counts is None
|
||||
mock = mocker.patch('awx.main.models.notifications.JobNotificationMixin.send_notification_templates')
|
||||
job_stats_wrapup(job.id)
|
||||
job.refresh_from_db()
|
||||
assert job.host_status_counts == {}
|
||||
mock.assert_called_once_with('succeeded')
|
||||
@@ -52,10 +52,12 @@ class TestKeyRegeneration:
|
||||
settings.cache.delete('REDHAT_PASSWORD')
|
||||
|
||||
# verify that the old SECRET_KEY doesn't work
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
with pytest.raises(InvalidToken):
|
||||
settings.REDHAT_PASSWORD
|
||||
|
||||
# verify that the new SECRET_KEY *does* work
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
with override_settings(SECRET_KEY=new_key):
|
||||
assert settings.REDHAT_PASSWORD == 'sensitive'
|
||||
|
||||
|
||||
@@ -347,9 +347,7 @@ def scm_inventory_source(inventory, project):
|
||||
source_project=project,
|
||||
source='scm',
|
||||
source_path='inventory_file',
|
||||
update_on_project_update=True,
|
||||
inventory=inventory,
|
||||
scm_last_revision=project.scm_revision,
|
||||
)
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
inv_src.save()
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
import pytest
|
||||
from unittest import mock
|
||||
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
# AWX
|
||||
from awx.main.models import Host, Inventory, InventorySource, InventoryUpdate, CredentialType, Credential, Job
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
@@ -123,19 +121,6 @@ class TestActiveCount:
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestSCMUpdateFeatures:
|
||||
def test_automatic_project_update_on_create(self, inventory, project):
|
||||
inv_src = InventorySource(source_project=project, source_path='inventory_file', inventory=inventory, update_on_project_update=True, source='scm')
|
||||
with mock.patch.object(inv_src, 'update') as mck_update:
|
||||
inv_src.save()
|
||||
mck_update.assert_called_once_with()
|
||||
|
||||
def test_reset_scm_revision(self, scm_inventory_source):
|
||||
starting_rev = scm_inventory_source.scm_last_revision
|
||||
assert starting_rev != ''
|
||||
scm_inventory_source.source_path = '/newfolder/newfile.ini'
|
||||
scm_inventory_source.save()
|
||||
assert scm_inventory_source.scm_last_revision == ''
|
||||
|
||||
def test_source_location(self, scm_inventory_source):
|
||||
# Combines project directory with the inventory file specified
|
||||
inventory_update = InventoryUpdate(inventory_source=scm_inventory_source, source_path=scm_inventory_source.source_path)
|
||||
@@ -167,22 +152,6 @@ class TestRelatedJobs:
|
||||
assert job.id in [jerb.id for jerb in group._get_related_jobs()]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestSCMClean:
|
||||
def test_clean_update_on_project_update_multiple(self, inventory):
|
||||
inv_src1 = InventorySource(inventory=inventory, update_on_project_update=True, source='scm')
|
||||
inv_src1.clean_update_on_project_update()
|
||||
inv_src1.save()
|
||||
|
||||
inv_src1.source_vars = '---\nhello: world'
|
||||
inv_src1.clean_update_on_project_update()
|
||||
|
||||
inv_src2 = InventorySource(inventory=inventory, update_on_project_update=True, source='scm')
|
||||
|
||||
with pytest.raises(ValidationError):
|
||||
inv_src2.clean_update_on_project_update()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInventorySourceInjectors:
|
||||
def test_extra_credentials(self, project, credential):
|
||||
|
||||
@@ -251,18 +251,17 @@ def test_utc_until(job_template, until, dtend):
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'dtstart, until',
|
||||
'rrule, length',
|
||||
[
|
||||
['DTSTART:20380601T120000Z', '20380601T170000'], # noon UTC to 5PM UTC
|
||||
['DTSTART;TZID=America/New_York:20380601T120000', '20380601T170000'], # noon EST to 5PM EST
|
||||
['DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000', 6], # noon UTC to 5PM UTC (noon, 1pm, 2, 3, 4, 5pm)
|
||||
['DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000', 6], # noon EST to 5PM EST
|
||||
],
|
||||
)
|
||||
def test_tzinfo_naive_until(job_template, dtstart, until):
|
||||
rrule = '{} RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL={}'.format(dtstart, until) # noqa
|
||||
def test_tzinfo_naive_until(job_template, rrule, length):
|
||||
s = Schedule(name='Some Schedule', rrule=rrule, unified_job_template=job_template)
|
||||
s.save()
|
||||
gen = Schedule.rrulestr(s.rrule).xafter(now(), count=20)
|
||||
assert len(list(gen)) == 6 # noon, 1PM, 2, 3, 4, 5PM
|
||||
assert len(list(gen)) == length
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -309,6 +308,12 @@ def test_beginning_of_time(job_template):
|
||||
[
|
||||
['DTSTART:20300112T210000Z RRULE:FREQ=DAILY;INTERVAL=1', 'UTC'],
|
||||
['DTSTART;TZID=US/Eastern:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1', 'US/Eastern'],
|
||||
['DTSTART;TZID=US/Eastern:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU', 'US/Eastern'],
|
||||
# Technically the serializer should never let us get 2 DTSTARTs in a rule, but it's still valid and the rrule will prefer the last DTSTART
|
||||
[
|
||||
'DTSTART;TZID=US/Eastern:20300112T210000 RRULE:FREQ=DAILY;INTERVAL=1 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU DTSTART;TZID=US/Pacific:20300112T210000',
|
||||
'US/Pacific',
|
||||
],
|
||||
],
|
||||
)
|
||||
def test_timezone_property(job_template, rrule, tz):
|
||||
@@ -389,3 +394,163 @@ def test_duplicate_name_within_template(job_template):
|
||||
s2.save()
|
||||
|
||||
assert str(ierror.value) == "UNIQUE constraint failed: main_schedule.unified_job_template_id, main_schedule.name"
|
||||
|
||||
|
||||
# Test until with multiple entries (should only return the first)
|
||||
# NOTE: this test may change once we determine how the UI will start to handle this field
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'rrule, expected_until',
|
||||
[
|
||||
pytest.param('DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1', '', id="No until"),
|
||||
pytest.param('DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000Z', '2038-06-01T17:00:00', id="One until in UTC"),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000',
|
||||
'2038-06-01T17:00:00',
|
||||
id="One until in local TZ",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T220000 RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T170000',
|
||||
'2038-06-01T22:00:00',
|
||||
id="Multiple untils (return only the first one)",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_until_with_complex_schedules(job_template, rrule, expected_until):
|
||||
sched = Schedule(name='Some Schedule', rrule=rrule, unified_job_template=job_template)
|
||||
assert sched.until == expected_until
|
||||
|
||||
|
||||
# Test coerce_naive_until, this method takes a naive until field and forces it into utc
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'rrule, expected_result',
|
||||
[
|
||||
pytest.param(
|
||||
'DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1',
|
||||
'DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1',
|
||||
id="No untils present",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000Z',
|
||||
'DTSTART:20380601T120000Z RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000Z',
|
||||
id="One until already in UTC",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000',
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T220000Z',
|
||||
id="One until with local tz",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART:20380601T120000Z RRULE:FREQ=MINUTLEY;INTERVAL=1;UNTIL=20380601T170000Z EXRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000Z',
|
||||
'DTSTART:20380601T120000Z RRULE:FREQ=MINUTLEY;INTERVAL=1;UNTIL=20380601T170000Z EXRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000Z',
|
||||
id="Multiple untils all in UTC",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T170000 EXRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000',
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T220000Z EXRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T220000Z',
|
||||
id="Multiple untils with local tz",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T170000Z EXRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T170000',
|
||||
'DTSTART;TZID=America/New_York:20380601T120000 RRULE:FREQ=MINUTELY;INTERVAL=1;UNTIL=20380601T170000Z EXRULE:FREQ=HOURLY;INTERVAL=1;UNTIL=20380601T220000Z',
|
||||
id="Multiple untils mixed",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_coerce_naive_until(rrule, expected_result):
|
||||
new_rrule = Schedule.coerce_naive_until(rrule)
|
||||
assert new_rrule == expected_result
|
||||
|
||||
|
||||
# Test skipping days with exclusion
|
||||
@pytest.mark.django_db
|
||||
def test_skip_sundays():
|
||||
rrule = '''
|
||||
DTSTART;TZID=America/New_York:20220310T150000
|
||||
RRULE:INTERVAL=1;FREQ=DAILY
|
||||
EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU
|
||||
'''
|
||||
timezone = pytz.timezone("America/New_York")
|
||||
friday_apr_29th = datetime(2022, 4, 29, 0, 0, 0, 0, timezone)
|
||||
monday_may_2nd = datetime(2022, 5, 2, 23, 59, 59, 999, timezone)
|
||||
ruleset = Schedule.rrulestr(rrule)
|
||||
gen = ruleset.between(friday_apr_29th, monday_may_2nd, True)
|
||||
# We should only get Fri, Sat and Mon (skipping Sunday)
|
||||
assert len(list(gen)) == 3
|
||||
saturday_night = datetime(2022, 4, 30, 23, 59, 59, 9999, timezone)
|
||||
monday_morning = datetime(2022, 5, 2, 0, 0, 0, 0, timezone)
|
||||
gen = ruleset.between(saturday_night, monday_morning, True)
|
||||
assert len(list(gen)) == 0
|
||||
|
||||
|
||||
# Test the get_end_date function
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'rrule, expected_result',
|
||||
[
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20210310T150000 RRULE:INTERVAL=1;FREQ=DAILY;UNTIL=20210430T150000Z EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU;COUNT=5',
|
||||
datetime(2021, 4, 29, 19, 0, 0, tzinfo=pytz.utc),
|
||||
id="Single rule in rule set with UTC TZ aware until",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;UNTIL=20220430T150000 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU;COUNT=5',
|
||||
datetime(2022, 4, 30, 19, 0, tzinfo=pytz.utc),
|
||||
id="Single rule in ruleset with naive until",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;COUNT=4 EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU;COUNT=5',
|
||||
datetime(2022, 3, 12, 20, 0, tzinfo=pytz.utc),
|
||||
id="Single rule in ruleset with count",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY EXRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU;COUNT=5',
|
||||
None,
|
||||
id="Single rule in ruleset with no end",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY',
|
||||
None,
|
||||
id="Single rule in rule with no end",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;UNTIL=20220430T150000Z',
|
||||
datetime(2022, 4, 29, 19, 0, tzinfo=pytz.utc),
|
||||
id="Single rule in rule with UTC TZ aware until",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;UNTIL=20220430T150000',
|
||||
datetime(2022, 4, 30, 19, 0, tzinfo=pytz.utc),
|
||||
id="Single rule in rule with naive until",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=SU RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=MO',
|
||||
None,
|
||||
id="Multi rule with no end",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=SU RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=MO;COUNT=4',
|
||||
None,
|
||||
id="Multi rule one with no end and one with a count",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220310T150000 RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=SU;UNTIL=20220430T1500Z RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=MO;COUNT=4',
|
||||
datetime(2022, 4, 24, 19, 0, tzinfo=pytz.utc),
|
||||
id="Multi rule one with until and one with a count",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20010430T1500 RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=SU;COUNT=1',
|
||||
datetime(2001, 5, 6, 19, 0, tzinfo=pytz.utc),
|
||||
id="Rule with count but ends in the past",
|
||||
),
|
||||
pytest.param(
|
||||
'DTSTART;TZID=America/New_York:20220430T1500 RRULE:INTERVAL=1;FREQ=DAILY;BYDAY=SU;UNTIL=20010430T1500',
|
||||
None,
|
||||
id="Rule with until that ends in the past",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_end_date(rrule, expected_result):
|
||||
ruleset = Schedule.rrulestr(rrule)
|
||||
assert expected_result == Schedule.get_end_date(ruleset)
|
||||
|
||||
@@ -19,6 +19,7 @@ from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList
|
||||
# Django
|
||||
from django.test import TransactionTestCase
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.utils.timezone import now
|
||||
|
||||
|
||||
class TestWorkflowDAGFunctional(TransactionTestCase):
|
||||
@@ -381,3 +382,38 @@ def test_workflow_ancestors_recursion_prevention(organization):
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj, unified_job_template=wfjt, job=wfj) # well, this is a problem
|
||||
# mostly, we just care that this assertion finishes in finite time
|
||||
assert wfj.get_ancestor_workflows() == []
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestCombinedArtifacts:
|
||||
@pytest.fixture
|
||||
def wfj_artifacts(self, job_template, organization):
|
||||
wfjt = WorkflowJobTemplate.objects.create(organization=organization, name='has_artifacts')
|
||||
wfj = WorkflowJob.objects.create(workflow_job_template=wfjt, launch_type='workflow')
|
||||
job = job_template.create_unified_job(_eager_fields=dict(artifacts={'foooo': 'bar'}, status='successful', finished=now()))
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj, unified_job_template=job_template, job=job)
|
||||
return wfj
|
||||
|
||||
def test_multiple_types(self, project, wfj_artifacts):
|
||||
project_update = project.create_unified_job()
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj_artifacts, unified_job_template=project, job=project_update)
|
||||
|
||||
assert wfj_artifacts.get_effective_artifacts() == {'foooo': 'bar'}
|
||||
|
||||
def test_precedence_based_on_time(self, wfj_artifacts, job_template):
|
||||
later_job = job_template.create_unified_job(
|
||||
_eager_fields=dict(artifacts={'foooo': 'zoo'}, status='successful', finished=now()) # finished later, should win
|
||||
)
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj_artifacts, unified_job_template=job_template, job=later_job)
|
||||
|
||||
assert wfj_artifacts.get_effective_artifacts() == {'foooo': 'zoo'}
|
||||
|
||||
def test_bad_data_with_artifacts(self, organization):
|
||||
# This is toxic database data, this tests that it doesn't create an infinite loop
|
||||
wfjt = WorkflowJobTemplate.objects.create(organization=organization, name='child')
|
||||
wfj = WorkflowJob.objects.create(workflow_job_template=wfjt, launch_type='workflow')
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj, unified_job_template=wfjt, job=wfj)
|
||||
job = Job.objects.create(artifacts={'foo': 'bar'}, status='successful')
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj, job=job)
|
||||
# mostly, we just care that this assertion finishes in finite time
|
||||
assert wfj.get_effective_artifacts() == {'foo': 'bar'}
|
||||
|
||||
@@ -324,6 +324,22 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
|
||||
TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, [], instance)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_update_launches_project_update(controlplane_instance_group, scm_inventory_source):
|
||||
ii = scm_inventory_source
|
||||
project = scm_inventory_source.source_project
|
||||
project.scm_update_on_launch = True
|
||||
project.save()
|
||||
iu = ii.create_inventory_update()
|
||||
iu.status = "pending"
|
||||
iu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
tm = TaskManager()
|
||||
with mock.patch.object(TaskManager, "create_project_update", wraps=tm.create_project_update) as mock_pu:
|
||||
tm.schedule()
|
||||
mock_pu.assert_called_with(iu, project_id=project.id)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_dependency_with_already_updated(controlplane_instance_group, job_template_factory, mocker, inventory_source_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj', inventory='inv', credential='cred', jobs=["job_should_start"])
|
||||
@@ -382,7 +398,7 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
|
||||
pu = p.project_updates.first()
|
||||
iu = ii.inventory_updates.first()
|
||||
TaskManager.start_task.assert_has_calls(
|
||||
[mock.call(iu, controlplane_instance_group, [j1, j2, pu], instance), mock.call(pu, controlplane_instance_group, [j1, j2, iu], instance)]
|
||||
[mock.call(iu, controlplane_instance_group, [j1, j2], instance), mock.call(pu, controlplane_instance_group, [j1, j2], instance)]
|
||||
)
|
||||
pu.status = "successful"
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
@@ -464,7 +480,6 @@ def test_generate_dependencies_only_once(job_template_factory):
|
||||
job.status = "pending"
|
||||
job.name = "job_gen_dep"
|
||||
job.save()
|
||||
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
# job starts with dependencies_processed as False
|
||||
assert not job.dependencies_processed
|
||||
@@ -478,10 +493,6 @@ def test_generate_dependencies_only_once(job_template_factory):
|
||||
# Run ._schedule() again, but make sure .generate_dependencies() is not
|
||||
# called with job in the argument list
|
||||
tm = TaskManager()
|
||||
tm.generate_dependencies = mock.MagicMock()
|
||||
tm.generate_dependencies = mock.MagicMock(return_value=[])
|
||||
tm._schedule()
|
||||
|
||||
# .call_args is tuple, (positional_args, kwargs), [0][0] then is
|
||||
# the first positional arg, i.e. the first argument of
|
||||
# .generate_dependencies()
|
||||
assert tm.generate_dependencies.call_args[0][0] == []
|
||||
tm.generate_dependencies.assert_has_calls([mock.call([]), mock.call([])])
|
||||
|
||||
@@ -10,6 +10,8 @@ from awx.main.models.notifications import NotificationTemplate, Notification
|
||||
from awx.main.models.inventory import Inventory, InventorySource
|
||||
from awx.main.models.jobs import JobTemplate
|
||||
|
||||
from django.test.utils import override_settings
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_get_notification_template_list(get, user, notification_template):
|
||||
@@ -163,7 +165,7 @@ def test_custom_environment_injection(post, user, organization):
|
||||
)
|
||||
assert response.status_code == 201
|
||||
template = NotificationTemplate.objects.get(pk=response.data['id'])
|
||||
with pytest.raises(ConnectionError), mock.patch('django.conf.settings.AWX_TASK_ENV', {'HTTPS_PROXY': '192.168.50.100:1234'}), mock.patch.object(
|
||||
with pytest.raises(ConnectionError), override_settings(AWX_TASK_ENV={'HTTPS_PROXY': '192.168.50.100:1234'}), mock.patch.object(
|
||||
HTTPAdapter, 'send'
|
||||
) as fake_send:
|
||||
|
||||
|
||||
@@ -4,9 +4,8 @@ import os
|
||||
import tempfile
|
||||
import shutil
|
||||
|
||||
from awx.main.tasks.jobs import RunProjectUpdate, RunInventoryUpdate
|
||||
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
|
||||
from awx.main.models import ProjectUpdate, InventoryUpdate, InventorySource, Instance, Job
|
||||
from awx.main.models import Instance, Job
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -27,63 +26,6 @@ def test_no_worker_info_on_AWX_nodes(node_type):
|
||||
execution_node_health_check(hostname)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestDependentInventoryUpdate:
|
||||
def test_dependent_inventory_updates_is_called(self, scm_inventory_source, scm_revision_file, mock_me):
|
||||
task = RunProjectUpdate()
|
||||
task.revision_path = scm_revision_file
|
||||
proj_update = scm_inventory_source.source_project.create_project_update()
|
||||
with mock.patch.object(RunProjectUpdate, '_update_dependent_inventories') as inv_update_mck:
|
||||
with mock.patch.object(RunProjectUpdate, 'release_lock'):
|
||||
task.post_run_hook(proj_update, 'successful')
|
||||
inv_update_mck.assert_called_once_with(proj_update, mock.ANY)
|
||||
|
||||
def test_no_unwanted_dependent_inventory_updates(self, project, scm_revision_file, mock_me):
|
||||
task = RunProjectUpdate()
|
||||
task.revision_path = scm_revision_file
|
||||
proj_update = project.create_project_update()
|
||||
with mock.patch.object(RunProjectUpdate, '_update_dependent_inventories') as inv_update_mck:
|
||||
with mock.patch.object(RunProjectUpdate, 'release_lock'):
|
||||
task.post_run_hook(proj_update, 'successful')
|
||||
assert not inv_update_mck.called
|
||||
|
||||
def test_dependent_inventory_updates(self, scm_inventory_source, default_instance_group, mock_me):
|
||||
task = RunProjectUpdate()
|
||||
scm_inventory_source.scm_last_revision = ''
|
||||
proj_update = ProjectUpdate.objects.create(project=scm_inventory_source.source_project)
|
||||
with mock.patch.object(RunInventoryUpdate, 'run') as iu_run_mock:
|
||||
with mock.patch('awx.main.tasks.jobs.create_partition'):
|
||||
task._update_dependent_inventories(proj_update, [scm_inventory_source])
|
||||
assert InventoryUpdate.objects.count() == 1
|
||||
inv_update = InventoryUpdate.objects.first()
|
||||
iu_run_mock.assert_called_once_with(inv_update.id)
|
||||
assert inv_update.source_project_update_id == proj_update.pk
|
||||
|
||||
def test_dependent_inventory_project_cancel(self, project, inventory, default_instance_group, mock_me):
|
||||
"""
|
||||
Test that dependent inventory updates exhibit good behavior on cancel
|
||||
of the source project update
|
||||
"""
|
||||
task = RunProjectUpdate()
|
||||
proj_update = ProjectUpdate.objects.create(project=project)
|
||||
|
||||
kwargs = dict(source_project=project, source='scm', source_path='inventory_file', update_on_project_update=True, inventory=inventory)
|
||||
|
||||
is1 = InventorySource.objects.create(name="test-scm-inv", **kwargs)
|
||||
is2 = InventorySource.objects.create(name="test-scm-inv2", **kwargs)
|
||||
|
||||
def user_cancels_project(pk):
|
||||
ProjectUpdate.objects.all().update(cancel_flag=True)
|
||||
|
||||
with mock.patch.object(RunInventoryUpdate, 'run') as iu_run_mock:
|
||||
with mock.patch('awx.main.tasks.jobs.create_partition'):
|
||||
iu_run_mock.side_effect = user_cancels_project
|
||||
task._update_dependent_inventories(proj_update, [is1, is2])
|
||||
# Verify that it bails after 1st update, detecting a cancel
|
||||
assert is2.inventory_updates.count() == 0
|
||||
iu_run_mock.assert_called_once()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_job_folder(request):
|
||||
pdd_path = tempfile.mkdtemp(prefix='awx_123_')
|
||||
|
||||
@@ -1,16 +1,10 @@
|
||||
# Python
|
||||
from collections import namedtuple
|
||||
import pytest
|
||||
from unittest import mock
|
||||
import json
|
||||
|
||||
# AWX
|
||||
from awx.api.serializers import (
|
||||
JobDetailSerializer,
|
||||
JobSerializer,
|
||||
JobOptionsSerializer,
|
||||
ProjectUpdateDetailSerializer,
|
||||
)
|
||||
from awx.api.serializers import JobSerializer, JobOptionsSerializer
|
||||
|
||||
from awx.main.models import (
|
||||
Label,
|
||||
@@ -108,7 +102,7 @@ class TestJobOptionsSerializerGetSummaryFields:
|
||||
|
||||
|
||||
class TestJobDetailSerializerGetHostStatusCountFields(object):
|
||||
def test_hosts_are_counted_once(self, job, mocker):
|
||||
def test_hosts_are_counted_once(self):
|
||||
mock_event = JobEvent(
|
||||
**{
|
||||
'event': 'playbook_on_stats',
|
||||
@@ -133,26 +127,11 @@ class TestJobDetailSerializerGetHostStatusCountFields(object):
|
||||
}
|
||||
)
|
||||
|
||||
mock_qs = namedtuple('mock_qs', ['get'])(mocker.MagicMock(return_value=mock_event))
|
||||
only = mocker.MagicMock(return_value=mock_qs)
|
||||
job.get_event_queryset = lambda *args, **kwargs: mocker.MagicMock(only=only)
|
||||
|
||||
serializer = JobDetailSerializer()
|
||||
host_status_counts = serializer.get_host_status_counts(job)
|
||||
|
||||
assert host_status_counts == {'ok': 1, 'changed': 1, 'dark': 2}
|
||||
|
||||
def test_host_status_counts_is_empty_dict_without_stats_event(self, job):
|
||||
job.get_event_queryset = lambda *args, **kwargs: JobEvent.objects.none()
|
||||
|
||||
serializer = JobDetailSerializer()
|
||||
host_status_counts = serializer.get_host_status_counts(job)
|
||||
|
||||
assert host_status_counts == {}
|
||||
assert mock_event.get_host_status_counts() == {'ok': 1, 'changed': 1, 'dark': 2}
|
||||
|
||||
|
||||
class TestProjectUpdateDetailSerializerGetHostStatusCountFields(object):
|
||||
def test_hosts_are_counted_once(self, project_update, mocker):
|
||||
def test_hosts_are_counted_once(self):
|
||||
mock_event = ProjectUpdateEvent(
|
||||
**{
|
||||
'event': 'playbook_on_stats',
|
||||
@@ -177,18 +156,4 @@ class TestProjectUpdateDetailSerializerGetHostStatusCountFields(object):
|
||||
}
|
||||
)
|
||||
|
||||
mock_qs = namedtuple('mock_qs', ['get'])(mocker.MagicMock(return_value=mock_event))
|
||||
project_update.project_update_events.only = mocker.MagicMock(return_value=mock_qs)
|
||||
|
||||
serializer = ProjectUpdateDetailSerializer()
|
||||
host_status_counts = serializer.get_host_status_counts(project_update)
|
||||
|
||||
assert host_status_counts == {'ok': 1, 'changed': 1, 'dark': 2}
|
||||
|
||||
def test_host_status_counts_is_empty_dict_without_stats_event(self, project_update):
|
||||
project_update.project_update_events = ProjectUpdateEvent.objects.none()
|
||||
|
||||
serializer = ProjectUpdateDetailSerializer()
|
||||
host_status_counts = serializer.get_host_status_counts(project_update)
|
||||
|
||||
assert host_status_counts == {}
|
||||
assert mock_event.get_host_status_counts() == {'ok': 1, 'changed': 1, 'dark': 2}
|
||||
|
||||
@@ -69,21 +69,21 @@ class TestJobTemplateLabelList:
|
||||
|
||||
class TestInventoryInventorySourcesUpdate:
|
||||
@pytest.mark.parametrize(
|
||||
"can_update, can_access, is_source, is_up_on_proj, expected",
|
||||
"can_update, can_access, is_source, expected",
|
||||
[
|
||||
(True, True, "ec2", False, [{'status': 'started', 'inventory_update': 1, 'inventory_source': 1}]),
|
||||
(False, True, "gce", False, [{'status': 'Could not start because `can_update` returned False', 'inventory_source': 1}]),
|
||||
(True, False, "scm", True, [{'status': 'started', 'inventory_update': 1, 'inventory_source': 1}]),
|
||||
(True, True, "ec2", [{'status': 'started', 'inventory_update': 1, 'inventory_source': 1}]),
|
||||
(False, True, "gce", [{'status': 'Could not start because `can_update` returned False', 'inventory_source': 1}]),
|
||||
(True, False, "scm", [{'status': 'started', 'inventory_update': 1, 'inventory_source': 1}]),
|
||||
],
|
||||
)
|
||||
def test_post(self, mocker, can_update, can_access, is_source, is_up_on_proj, expected):
|
||||
def test_post(self, mocker, can_update, can_access, is_source, expected):
|
||||
class InventoryUpdate:
|
||||
id = 1
|
||||
|
||||
class Project:
|
||||
name = 'project'
|
||||
|
||||
InventorySource = namedtuple('InventorySource', ['source', 'update_on_project_update', 'pk', 'can_update', 'update', 'source_project'])
|
||||
InventorySource = namedtuple('InventorySource', ['source', 'pk', 'can_update', 'update', 'source_project'])
|
||||
|
||||
class InventorySources(object):
|
||||
def all(self):
|
||||
@@ -92,7 +92,6 @@ class TestInventoryInventorySourcesUpdate:
|
||||
pk=1,
|
||||
source=is_source,
|
||||
source_project=Project,
|
||||
update_on_project_update=is_up_on_proj,
|
||||
can_update=can_update,
|
||||
update=lambda: InventoryUpdate,
|
||||
)
|
||||
|
||||
@@ -1,28 +1,13 @@
|
||||
import pytest
|
||||
from unittest import mock
|
||||
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
from awx.main.models import (
|
||||
UnifiedJob,
|
||||
InventoryUpdate,
|
||||
InventorySource,
|
||||
)
|
||||
|
||||
|
||||
def test_cancel(mocker):
|
||||
with mock.patch.object(UnifiedJob, 'cancel', return_value=True) as parent_cancel:
|
||||
iu = InventoryUpdate()
|
||||
|
||||
iu.save = mocker.MagicMock()
|
||||
build_job_explanation_mock = mocker.MagicMock()
|
||||
iu._build_job_explanation = mocker.MagicMock(return_value=build_job_explanation_mock)
|
||||
|
||||
iu.cancel()
|
||||
|
||||
parent_cancel.assert_called_with(is_chain=False, job_explanation=None)
|
||||
|
||||
|
||||
def test__build_job_explanation():
|
||||
iu = InventoryUpdate(id=3, name='I_am_an_Inventory_Update')
|
||||
|
||||
@@ -53,9 +38,3 @@ class TestControlledBySCM:
|
||||
|
||||
with pytest.raises(ValidationError):
|
||||
inv_src.clean_source_path()
|
||||
|
||||
def test_clean_update_on_launch_update_on_project_update(self):
|
||||
inv_src = InventorySource(update_on_project_update=True, update_on_launch=True, source='scm')
|
||||
|
||||
with pytest.raises(ValidationError):
|
||||
inv_src.clean_update_on_launch()
|
||||
|
||||
52  awx/main/tests/unit/tasks/test_runner_callback.py  Normal file
@@ -0,0 +1,52 @@
from awx.main.tasks.callback import RunnerCallback
from awx.main.constants import ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE

from django.utils.translation import gettext_lazy as _


def test_delay_update(mock_me):
    rc = RunnerCallback()
    rc.delay_update(foo='bar')
    assert rc.extra_update_fields == {'foo': 'bar'}
    rc.delay_update(foo='foobar')
    assert rc.extra_update_fields == {'foo': 'foobar'}
    rc.delay_update(bar='foo')
    assert rc.get_delayed_update_fields() == {'foo': 'foobar', 'bar': 'foo', 'emitted_events': 0}


def test_delay_update_skip_if_set(mock_me):
    rc = RunnerCallback()
    rc.delay_update(foo='bar', skip_if_already_set=True)
    assert rc.extra_update_fields == {'foo': 'bar'}
    rc.delay_update(foo='foobar', skip_if_already_set=True)
    assert rc.extra_update_fields == {'foo': 'bar'}


def test_delay_update_failure_fields(mock_me):
    rc = RunnerCallback()
    rc.delay_update(job_explanation='1')
    rc.delay_update(job_explanation=_('2'))
    assert rc.extra_update_fields == {'job_explanation': '1\n2'}
    rc.delay_update(result_traceback='1')
    rc.delay_update(result_traceback=_('2'))
    rc.delay_update(result_traceback=_('3'), skip_if_already_set=True)
    assert rc.extra_update_fields == {'job_explanation': '1\n2', 'result_traceback': '1\n2'}


def test_duplicate_updates(mock_me):
    rc = RunnerCallback()
    rc.delay_update(job_explanation='really long summary...')
    rc.delay_update(job_explanation='really long summary...')
    rc.delay_update(job_explanation='really long summary...')
    assert rc.extra_update_fields == {'job_explanation': 'really long summary...'}


def test_special_ansible_runner_message(mock_me):
    rc = RunnerCallback()
    rc.delay_update(result_traceback='Traceback:\ngot an unexpected keyword argument\nFile: foo.py')
    rc.delay_update(result_traceback='Traceback:\ngot an unexpected keyword argument\nFile: bar.py')
    assert rc.get_delayed_update_fields().get('result_traceback') == (
        'Traceback:\ngot an unexpected keyword argument\nFile: foo.py\n'
        'Traceback:\ngot an unexpected keyword argument\nFile: bar.py\n'
        f'{ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE}'
    )
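The tests above pin down the `delay_update()` contract: plain keys overwrite the previous value, `skip_if_already_set=True` keeps the first value, and the failure fields (`job_explanation`, `result_traceback`) accumulate newline-joined, de-duplicated text. Below is a minimal standalone sketch of that contract, not the AWX implementation; the `DelayedUpdates` name is invented here, and the `ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE` special case exercised by the last test is omitted.

```python
# Standalone sketch of the delay_update contract exercised above (illustrative only).
FAILURE_FIELDS = ('job_explanation', 'result_traceback')


class DelayedUpdates:
    def __init__(self):
        self.extra_update_fields = {}

    def delay_update(self, skip_if_already_set=False, **kwargs):
        for key, value in kwargs.items():
            if key in self.extra_update_fields and skip_if_already_set:
                continue  # first value wins when the caller asks for it
            if key in FAILURE_FIELDS and key in self.extra_update_fields:
                if str(value) in self.extra_update_fields[key]:
                    continue  # drop duplicate messages
                value = f'{self.extra_update_fields[key]}\n{value}'  # append new text
            self.extra_update_fields[key] = str(value)

    def get_delayed_update_fields(self):
        # the real callback also folds in bookkeeping such as emitted_events
        return dict(self.extra_update_fields, emitted_events=0)


updates = DelayedUpdates()
updates.delay_update(job_explanation='first reason')
updates.delay_update(job_explanation='second reason')
assert updates.get_delayed_update_fields()['job_explanation'] == 'first reason\nsecond reason'
```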
50  awx/main/tests/unit/tasks/test_signals.py  Normal file
@@ -0,0 +1,50 @@
import signal

from awx.main.tasks.signals import signal_state, signal_callback, with_signal_handling


def test_outer_inner_signal_handling():
    """
    Even if the flag is set in the outer context, its value should persist in the inner context
    """

    @with_signal_handling
    def f2():
        assert signal_callback()

    @with_signal_handling
    def f1():
        assert signal_callback() is False
        signal_state.set_flag()
        assert signal_callback()
        f2()

    original_sigterm = signal.getsignal(signal.SIGTERM)
    assert signal_callback() is False
    f1()
    assert signal_callback() is False
    assert signal.getsignal(signal.SIGTERM) is original_sigterm


def test_inner_outer_signal_handling():
    """
    Even if the flag is set in the inner context, its value should persist in the outer context
    """

    @with_signal_handling
    def f2():
        assert signal_callback() is False
        signal_state.set_flag()
        assert signal_callback()

    @with_signal_handling
    def f1():
        assert signal_callback() is False
        f2()
        assert signal_callback()

    original_sigterm = signal.getsignal(signal.SIGTERM)
    assert signal_callback() is False
    f1()
    assert signal_callback() is False
    assert signal.getsignal(signal.SIGTERM) is original_sigterm
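These tests rely on a specific nesting contract for `with_signal_handling`: only the outermost decorated call installs the SIGTERM/SIGINT handlers, the flag set by `signal_state.set_flag()` stays visible for the rest of that outermost call, and the original handlers are restored (and the flag cleared) when it returns. The following is a self-contained sketch of that contract under those assumptions, not a quotation of awx/main/tasks/signals.py, which does more.

```python
import functools
import signal


class SignalState:
    def __init__(self):
        self.sigterm_flag = False
        self.level = 0
        self.original_sigterm = None
        self.original_sigint = None

    def set_flag(self, *args):
        # usable both as a signal handler (signum, frame) and called directly
        self.sigterm_flag = True

    def connect(self):
        if self.level == 0:
            # outermost entry: remember and replace the current handlers
            self.original_sigterm = signal.signal(signal.SIGTERM, self.set_flag)
            self.original_sigint = signal.signal(signal.SIGINT, self.set_flag)
        self.level += 1

    def disconnect(self):
        self.level -= 1
        if self.level == 0:
            # outermost exit: restore handlers and clear the flag
            signal.signal(signal.SIGTERM, self.original_sigterm)
            signal.signal(signal.SIGINT, self.original_sigint)
            self.sigterm_flag = False


signal_state = SignalState()


def signal_callback():
    return signal_state.sigterm_flag


def with_signal_handling(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        signal_state.connect()
        try:
            return f(*args, **kwargs)
        finally:
            signal_state.disconnect()

    return wrapper
```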
@@ -4,6 +4,7 @@ import json
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import fcntl
|
||||
from unittest import mock
|
||||
@@ -36,12 +37,23 @@ from awx.main.models.credential import HIDDEN_PASSWORD, ManagedCredentialType
|
||||
from awx.main.tasks import jobs, system
|
||||
from awx.main.utils import encrypt_field, encrypt_value
|
||||
from awx.main.utils.safe_yaml import SafeLoader
|
||||
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_host_path
|
||||
from awx.main.utils.execution_environments import CONTAINER_ROOT
|
||||
|
||||
from awx.main.utils.licensing import Licenser
|
||||
from awx.main.constants import JOB_VARIABLE_PREFIXES
|
||||
|
||||
|
||||
def to_host_path(path, private_data_dir):
|
||||
"""Given a path inside of the EE container, this gives the absolute path
|
||||
on the host machine within the private_data_dir
|
||||
"""
|
||||
if not os.path.isabs(private_data_dir):
|
||||
raise RuntimeError('The private_data_dir path must be absolute')
|
||||
if CONTAINER_ROOT != path and Path(CONTAINER_ROOT) not in Path(path).resolve().parents:
|
||||
raise RuntimeError(f'Cannot convert path {path} unless it is a subdir of {CONTAINER_ROOT}')
|
||||
return path.replace(CONTAINER_ROOT, private_data_dir, 1)
|
||||
|
||||
|
||||
class TestJobExecution(object):
|
||||
EXAMPLE_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nxyz==\n-----END PRIVATE KEY-----'
|
||||
|
||||
@@ -910,7 +922,8 @@ class TestJobCredentials(TestJobExecution):
|
||||
assert env['AWS_SECURITY_TOKEN'] == 'token'
|
||||
assert safe_env['AWS_SECRET_ACCESS_KEY'] == HIDDEN_PASSWORD
|
||||
|
||||
def test_gce_credentials(self, private_data_dir, job, mock_me):
|
||||
@pytest.mark.parametrize("cred_env_var", ['GCE_CREDENTIALS_FILE_PATH', 'GOOGLE_APPLICATION_CREDENTIALS'])
|
||||
def test_gce_credentials(self, cred_env_var, private_data_dir, job, mock_me):
|
||||
gce = CredentialType.defaults['gce']()
|
||||
credential = Credential(pk=1, credential_type=gce, inputs={'username': 'bob', 'project': 'some-project', 'ssh_key_data': self.EXAMPLE_PRIVATE_KEY})
|
||||
credential.inputs['ssh_key_data'] = encrypt_field(credential, 'ssh_key_data')
|
||||
@@ -919,7 +932,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
env = {}
|
||||
safe_env = {}
|
||||
credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir)
|
||||
runner_path = env['GCE_CREDENTIALS_FILE_PATH']
|
||||
runner_path = env[cred_env_var]
|
||||
local_path = to_host_path(runner_path, private_data_dir)
|
||||
json_data = json.load(open(local_path, 'rb'))
|
||||
assert json_data['type'] == 'service_account'
|
||||
@@ -1304,6 +1317,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
assert env['AZURE_AD_USER'] == 'bob'
|
||||
assert env['AZURE_PASSWORD'] == 'secret'
|
||||
|
||||
# Because this is testing a mix of multiple cloud creds, we are not going to test the GOOGLE_APPLICATION_CREDENTIALS here
|
||||
path = to_host_path(env['GCE_CREDENTIALS_FILE_PATH'], private_data_dir)
|
||||
json_data = json.load(open(path, 'rb'))
|
||||
assert json_data['type'] == 'service_account'
|
||||
@@ -1633,7 +1647,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
|
||||
assert safe_env['AZURE_PASSWORD'] == HIDDEN_PASSWORD
|
||||
|
||||
def test_gce_source(self, inventory_update, private_data_dir, mocker, mock_me):
|
||||
@pytest.mark.parametrize("cred_env_var", ['GCE_CREDENTIALS_FILE_PATH', 'GOOGLE_APPLICATION_CREDENTIALS'])
|
||||
def test_gce_source(self, cred_env_var, inventory_update, private_data_dir, mocker, mock_me):
|
||||
task = jobs.RunInventoryUpdate()
|
||||
task.instance = inventory_update
|
||||
gce = CredentialType.defaults['gce']()
|
||||
@@ -1657,7 +1672,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir)
|
||||
|
||||
assert env['GCE_ZONE'] == expected_gce_zone
|
||||
json_data = json.load(open(env['GCE_CREDENTIALS_FILE_PATH'], 'rb'))
|
||||
json_data = json.load(open(env[cred_env_var], 'rb'))
|
||||
assert json_data['type'] == 'service_account'
|
||||
assert json_data['private_key'] == self.EXAMPLE_PRIVATE_KEY
|
||||
assert json_data['client_email'] == 'bob'
|
||||
@@ -1919,26 +1934,6 @@ def test_managed_injector_redaction(injector_cls):
|
||||
assert 'very_secret_value' not in str(build_safe_env(env))
|
||||
|
||||
|
||||
@mock.patch('logging.getLogger')
|
||||
def test_notification_job_not_finished(logging_getLogger, mocker):
|
||||
uj = mocker.MagicMock()
|
||||
uj.finished = False
|
||||
logger = mocker.Mock()
|
||||
logging_getLogger.return_value = logger
|
||||
|
||||
with mocker.patch('awx.main.models.UnifiedJob.objects.get', uj):
|
||||
system.handle_success_and_failure_notifications(1)
|
||||
assert logger.warning.called_with(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
|
||||
|
||||
|
||||
def test_notification_job_finished(mocker):
|
||||
uj = mocker.MagicMock(send_notification_templates=mocker.MagicMock(), finished=True)
|
||||
|
||||
with mocker.patch('awx.main.models.UnifiedJob.objects.get', mocker.MagicMock(return_value=uj)):
|
||||
system.handle_success_and_failure_notifications(1)
|
||||
uj.send_notification_templates.assert_called()
|
||||
|
||||
|
||||
def test_job_run_no_ee(mock_me):
|
||||
org = Organization(pk=1)
|
||||
proj = Project(pk=1, organization=org)
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import shutil
|
||||
import os
|
||||
from uuid import uuid4
|
||||
|
||||
import pytest
|
||||
|
||||
from awx.main.utils.execution_environments import to_container_path, to_host_path
|
||||
from awx.main.utils.execution_environments import to_container_path
|
||||
|
||||
|
||||
private_data_dir = '/tmp/pdd_iso/awx_xxx'
|
||||
@@ -10,26 +14,33 @@ private_data_dir = '/tmp/pdd_iso/awx_xxx'
|
||||
'container_path,host_path',
|
||||
[
|
||||
('/runner', private_data_dir),
|
||||
('/runner/foo', '{0}/foo'.format(private_data_dir)),
|
||||
('/runner/foo/bar', '{0}/foo/bar'.format(private_data_dir)),
|
||||
('/runner{0}'.format(private_data_dir), '{0}{0}'.format(private_data_dir)),
|
||||
('/runner/foo', f'{private_data_dir}/foo'),
|
||||
('/runner', f'{private_data_dir}/foobar/..'), # private_data_dir path needs to be resolved
|
||||
('/runner/bar', f'{private_data_dir}/bar/foo/..'),
|
||||
('/runner/foo/bar', f'{private_data_dir}/foo/bar'),
|
||||
(f'/runner{private_data_dir}', f'{private_data_dir}{private_data_dir}'),
|
||||
],
|
||||
)
|
||||
def test_switch_paths(container_path, host_path):
|
||||
assert to_container_path(host_path, private_data_dir) == container_path
|
||||
assert to_host_path(container_path, private_data_dir) == host_path
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'container_path',
|
||||
[
|
||||
('/foobar'),
|
||||
('/runner/..'),
|
||||
],
|
||||
)
|
||||
def test_invalid_container_path(container_path):
|
||||
with pytest.raises(RuntimeError):
|
||||
to_host_path(container_path, private_data_dir)
|
||||
def test_symlink_isolation_dir(request):
|
||||
rand_str = str(uuid4())[:8]
|
||||
dst_path = f'/tmp/ee_{rand_str}_symlink_dst'
|
||||
src_path = f'/tmp/ee_{rand_str}_symlink_src'
|
||||
|
||||
def remove_folders():
|
||||
os.unlink(dst_path)
|
||||
shutil.rmtree(src_path)
|
||||
|
||||
request.addfinalizer(remove_folders)
|
||||
os.mkdir(src_path)
|
||||
os.symlink(src_path, dst_path)
|
||||
|
||||
pdd = f'{dst_path}/awx_xxx'
|
||||
|
||||
assert to_container_path(f'{pdd}/env/tmp1234', pdd) == '/runner/env/tmp1234'
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
|
||||
@@ -17,7 +17,7 @@ logger = logging.getLogger('awx.main.utils.ansible')
__all__ = ['skip_directory', 'could_be_playbook', 'could_be_inventory']


valid_playbook_re = re.compile(r'^\s*?-?\s*?(?:hosts|include|import_playbook):\s*?.*?$')
valid_playbook_re = re.compile(r'^\s*?-?\s*?(?:hosts|(ansible\.builtin\.)?include|(ansible\.builtin\.)?import_playbook):\s*?.*?$')
valid_inventory_re = re.compile(r'^[a-zA-Z0-9_.=\[\]]')

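This change widens the playbook detection regex to accept the fully-qualified `ansible.builtin.` collection form. A quick standalone illustration (the sample lines are invented, and the pattern is simply repeated here for demonstration):

```python
import re

# Same pattern as the new valid_playbook_re above, reproduced for illustration.
valid_playbook_re = re.compile(r'^\s*?-?\s*?(?:hosts|(ansible\.builtin\.)?include|(ansible\.builtin\.)?import_playbook):\s*?.*?$')

candidates = [
    '- hosts: all',                                  # matched before and after the change
    '- import_playbook: site.yml',                   # matched before and after the change
    '- ansible.builtin.import_playbook: site.yml',   # only matched by the new pattern
    '- tasks:',                                      # never treated as a playbook marker
]
for line in candidates:
    print(bool(valid_playbook_re.match(line)), line)
```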
@@ -58,17 +58,9 @@ def to_container_path(path, private_data_dir):
    """
    if not os.path.isabs(private_data_dir):
        raise RuntimeError('The private_data_dir path must be absolute')
    if private_data_dir != path and Path(private_data_dir) not in Path(path).resolve().parents:
        raise RuntimeError(f'Cannot convert path {path} unless it is a subdir of {private_data_dir}')
    return path.replace(private_data_dir, CONTAINER_ROOT, 1)


def to_host_path(path, private_data_dir):
    """Given a path inside of the EE container, this gives the absolute path
    on the host machine within the private_data_dir
    """
    if not os.path.isabs(private_data_dir):
        raise RuntimeError('The private_data_dir path must be absolute')
    if CONTAINER_ROOT != path and Path(CONTAINER_ROOT) not in Path(path).resolve().parents:
        raise RuntimeError(f'Cannot convert path {path} unless it is a subdir of {CONTAINER_ROOT}')
    return path.replace(CONTAINER_ROOT, private_data_dir, 1)
    # due to how tempfile.mkstemp works, we are probably passed a resolved path, but unresolved private_data_dir
    resolved_path = Path(path).resolve()
    resolved_pdd = Path(private_data_dir).resolve()
    if resolved_pdd != resolved_path and resolved_pdd not in resolved_path.parents:
        raise RuntimeError(f'Cannot convert path {resolved_path} unless it is a subdir of {resolved_pdd}')
    return str(resolved_path).replace(str(resolved_pdd), CONTAINER_ROOT, 1)

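The new `to_container_path()` body resolves both the candidate path and `private_data_dir` before comparing and rewriting them, so `..` segments and symlinked temp directories no longer defeat the subdirectory check. Here is a standalone restatement of that logic with example paths (the `/tmp/pdd/job_1` directory and the hard-coded `/runner` root are assumptions made for illustration):

```python
import os
from pathlib import Path

CONTAINER_ROOT = '/runner'  # stand-in for the real constant


def to_container_path(path, private_data_dir):
    if not os.path.isabs(private_data_dir):
        raise RuntimeError('The private_data_dir path must be absolute')
    # resolve both sides so '..' segments and symlinked tmp dirs compare equal
    resolved_path = Path(path).resolve()
    resolved_pdd = Path(private_data_dir).resolve()
    if resolved_pdd != resolved_path and resolved_pdd not in resolved_path.parents:
        raise RuntimeError(f'Cannot convert path {resolved_path} unless it is a subdir of {resolved_pdd}')
    return str(resolved_path).replace(str(resolved_pdd), CONTAINER_ROOT, 1)


print(to_container_path('/tmp/pdd/job_1/env/tmpvars', '/tmp/pdd/job_1'))  # /runner/env/tmpvars
print(to_container_path('/tmp/pdd/job_1/foobar/..', '/tmp/pdd/job_1'))    # /runner
```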
@@ -3,18 +3,23 @@ from django.db import transaction, DatabaseError, InterfaceError
import logging
import time

from awx.main.tasks.signals import signal_callback


logger = logging.getLogger('awx.main.tasks.utils')


def update_model(model, pk, _attempt=0, _max_attempts=5, **updates):
def update_model(model, pk, _attempt=0, _max_attempts=5, select_for_update=False, **updates):
    """Reload the model instance from the database and update the
    given fields.
    """
    try:
        with transaction.atomic():
            # Retrieve the model instance.
            instance = model.objects.get(pk=pk)
            if select_for_update:
                instance = model.objects.select_for_update().get(pk=pk)
            else:
                instance = model.objects.get(pk=pk)

            # Update the appropriate fields and save the model
            # instance, then return the new instance.
@@ -34,7 +39,10 @@ def update_model(model, pk, _attempt=0, _max_attempts=5, **updates):
        # Attempt to retry the update, assuming we haven't already
        # tried too many times.
        if _attempt < _max_attempts:
            time.sleep(5)
            for i in range(5):
                time.sleep(1)
                if signal_callback():
                    raise RuntimeError(f'Could not fetch {pk} because of receiving abort signal')
            return update_model(model, pk, _attempt=_attempt + 1, _max_attempts=_max_attempts, **updates)
        else:
            logger.error('Failed to update %s after %d retries.', model._meta.object_name, _attempt)

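Two things change here: callers can now ask for a row lock with `select_for_update=True` (a caller might write `update_model(UnifiedJob, pk, select_for_update=True, status='running')` — a hypothetical call shown only for illustration), and the single five-second back-off is split into one-second slices so an abort signal is noticed quickly. A standalone sketch of that polling pattern, using a plain callable in place of `signal_callback`:

```python
import time


def wait_with_abort(abort_requested, total_seconds=5, poll_seconds=1):
    """Sleep in short slices, bailing out early if abort_requested() turns true."""
    for _ in range(int(total_seconds / poll_seconds)):
        time.sleep(poll_seconds)
        if abort_requested():
            raise RuntimeError('Aborting retry loop because an abort signal was received')


# Example: no abort requested, so this simply waits about two seconds.
wait_with_abort(lambda: False, total_seconds=2, poll_seconds=1)
```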
@@ -26,7 +26,9 @@
  tasks:

    - name: delete project directory before update
      command: "rm -rf {{project_path}}/*" # volume mounted, cannot delete folder itself
      command: "find -delete" # volume mounted, cannot delete folder itself
      args:
        chdir: "{{ project_path }}"
      tags:
        - delete

@@ -241,6 +241,10 @@ SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS = 3
# Interval in seconds for saving local metrics to redis
SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS = 2

# Record task manager metrics at the following interval in seconds
# If using Prometheus, it is recommended to be => the Prometheus scrape interval
SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL = 15

# The maximum allowed jobs to start on a given task manager cycle
START_TASK_LIMIT = 100

@@ -955,6 +959,7 @@ RECEPTOR_RELEASE_WORK = True

MIDDLEWARE = [
    'django_guid.middleware.guid_middleware',
    'awx.main.middleware.SettingsCacheMiddleware',
    'awx.main.middleware.TimingMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'awx.main.middleware.MigrationRanCheckMiddleware',
@@ -997,9 +1002,6 @@ BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS = 10
# How often websocket process will generate stats
BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS = 5

# Number of times to retry sending a notification when waiting on a job to finish.
AWX_NOTIFICATION_JOB_FINISH_MAX_RETRY = 5

DJANGO_GUID = {'GUID_HEADER_NAME': 'X-API-Request-Id'}

# Name of the default task queue

@@ -11,7 +11,7 @@
|
||||
},
|
||||
"babelOptions": {
|
||||
"presets": ["@babel/preset-react"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"plugins": ["react-hooks", "jsx-a11y", "i18next", "@babel"],
|
||||
"extends": [
|
||||
@@ -96,9 +96,18 @@
|
||||
"modifier",
|
||||
"data-cy",
|
||||
"fieldName",
|
||||
"splitButtonVariant"
|
||||
"splitButtonVariant",
|
||||
"pageKey"
|
||||
],
|
||||
"ignore": [
|
||||
"Ansible",
|
||||
"Tower",
|
||||
"JSON",
|
||||
"YAML",
|
||||
"lg",
|
||||
"hh:mm AM/PM",
|
||||
"Twilio"
|
||||
],
|
||||
"ignore": ["Ansible", "Tower", "JSON", "YAML", "lg", "hh:mm AM/PM", "Twilio"],
|
||||
"ignoreComponent": [
|
||||
"AboutModal",
|
||||
"code",
|
||||
@@ -139,7 +148,7 @@
|
||||
"object-curly-newline": "off",
|
||||
"no-trailing-spaces": ["error"],
|
||||
"no-unused-expressions": ["error", { "allowShortCircuit": true }],
|
||||
"react/jsx-props-no-spreading":["off"],
|
||||
"react/jsx-props-no-spreading": ["off"],
|
||||
"react/prefer-stateless-function": "off",
|
||||
"react/prop-types": "off",
|
||||
"react/sort-comp": ["error", {}],
|
||||
|
||||
391  awx/ui/SEARCH.md
@@ -2,26 +2,27 @@
|
||||
|
||||
## UX Considerations
|
||||
|
||||
Historically, the code that powers search in the AngularJS version of the AWX UI is very complex and prone to bugs. In order to reduce that complexity, we've made some UX decisions to help make the code easier to maintain.
|
||||
Historically, the code that powers search in the AngularJS version of the AWX UI is very complex and prone to bugs. In order to reduce that complexity, we've made some UX decisions to help make the code easier to maintain.
|
||||
|
||||
**ALL query params namespaced and in url bar**
|
||||
|
||||
This includes lists that aren't necessarily hyperlinked, like lookup lists. The reason behind this is so we can treat the url bar as the source of truth for queries always. Any params that have both a key AND value that is in the defaultParams section of the qs config are stripped out of the search string (see "Encoding for UI vs. API" for more info on this point)
|
||||
This includes lists that aren't necessarily hyperlinked, like lookup lists. The reason behind this is so we can treat the url bar as the source of truth for queries always. Any params that have both a key AND value that is in the defaultParams section of the qs config are stripped out of the search string (see "Encoding for UI vs. API" for more info on this point)
|
||||
|
||||
**Django fuzzy search (`?search=`) is not accessible outside of "advanced search"**
|
||||
|
||||
In current smart search typing a term with no key utilizes `?search=` i.e. for "foo" tag, `?search=foo` is given. `?search=` looks on a static list of field name "guesses" (such as name, description, etc.), as well as specific fields as defined for each endpoint (for example, the events endpoint looks for a "stdout" field as well). Due to the fact a key will always be present on the left-hand of simple search, it doesn't make sense to use `?search=` as the default.
|
||||
In current smart search typing a term with no key utilizes `?search=` i.e. for "foo" tag, `?search=foo` is given. `?search=` looks on a static list of field name "guesses" (such as name, description, etc.), as well as specific fields as defined for each endpoint (for example, the events endpoint looks for a "stdout" field as well). Due to the fact a key will always be present on the left-hand of simple search, it doesn't make sense to use `?search=` as the default.
|
||||
|
||||
We may allow passing of `?search=` through our future advanced search interface. Some details that were gathered in planning phases about `?search=` that might be helpful in the future:
|
||||
|
||||
We may allow passing of `?search=` through our future advanced search interface. Some details that were gathered in planning phases about `?search=` that might be helpful in the future:
|
||||
- `?search=` tags are OR'd together (union is returned).
|
||||
- `?search=foo&name=bar` returns items that have a name field of bar (not case insensitive) AND some text field with foo on it
|
||||
- `?search=foo&search=bar&name=baz` returns (foo in name OR foo in description OR ...) AND (bar in name OR bar in description OR ...) AND (baz in name)
|
||||
- similarly `?related__search=` looks on the static list of "guesses" for models related to the endpoint. The specific fields are not "searched" for `?related__search=`.
|
||||
- similarly `?related__search=` looks on the static list of "guesses" for models related to the endpoint. The specific fields are not "searched" for `?related__search=`.
|
||||
- `?related__search=` not currently used in awx ui
|
||||
|
||||
**A note on clicking a tag to putting it back into the search bar**
|
||||
|
||||
This was brought up as a nice to have when we were discussing our initial implementation of search in the new application. Since there isn't a way we would be able to know if the user created the tag from the simple or advanced search interface, we wouldn't know where to put it back. This breaks our idea of using the query params as the exclusive source of truth, so we've decided against implementing it for now.
|
||||
This was brought up as a nice to have when we were discussing our initial implementation of search in the new application. Since there isn't a way we would be able to know if the user created the tag from the simple or advanced search interface, we wouldn't know where to put it back. This breaks our idea of using the query params as the exclusive source of truth, so we've decided against implementing it for now.
|
||||
|
||||
## Tasklist
|
||||
|
||||
@@ -50,171 +51,197 @@ This was brought up as a nice to have when we were discussing our initial implem
|
||||
- DONE remove button for search tags of duplicate keys are broken, fix that
|
||||
|
||||
### TODO pre-holiday break
|
||||
|
||||
- Update COLUMNS to SORT_COLUMNS and SEARCH_COLUMNS
|
||||
- Update to using new PF Toolbar component (currently an experimental component)
|
||||
- Change the right-hand input based on the type of key selected on the left-hand side. In addition to text input, for our MVP we will support:
|
||||
- Change the right-hand input based on the type of key selected on the left-hand side. In addition to text input, for our MVP we will support:
|
||||
- number input
|
||||
- select input (multiple-choice configured from UI or Options)
|
||||
- Update the following lists to have the following keys:
|
||||
|
||||
**Jobs list** (signed off earlier in chat)
|
||||
- Name (which is also the name of the job template) - search is ?name=jt
|
||||
- Job ID - search is ?id=13
|
||||
- Label name - search is ?labels__name=foo
|
||||
- Job type (dropdown on right with the different types) ?type = job
|
||||
- Created by (username) - search is ?created_by__username=admin
|
||||
- Status - search (dropdown on right with different statuses) is ?status=successful
|
||||
|
||||
- Name (which is also the name of the job template) - search is ?name=jt
|
||||
- Job ID - search is ?id=13
|
||||
- Label name - search is ?labels\_\_name=foo
|
||||
- Job type (dropdown on right with the different types) ?type = job
|
||||
- Created by (username) - search is ?created_by\_\_username=admin
|
||||
- Status - search (dropdown on right with different statuses) is ?status=successful
|
||||
|
||||
Instances of jobs list include:
|
||||
- Jobs list
|
||||
- Host completed jobs list
|
||||
- JT completed jobs list
|
||||
|
||||
- Jobs list
|
||||
- Host completed jobs list
|
||||
- JT completed jobs list
|
||||
|
||||
**Organization list**
|
||||
- Name - search is ?name=org
|
||||
- ? Team name (of a team in the org) - search is ?teams__name=ansible
|
||||
- ? Username (of a user in the org) - search is ?users__username=johndoe
|
||||
|
||||
- Name - search is ?name=org
|
||||
- ? Team name (of a team in the org) - search is ?teams\_\_name=ansible
|
||||
- ? Username (of a user in the org) - search is ?users\_\_username=johndoe
|
||||
|
||||
Instances of orgs list include:
|
||||
- Orgs list
|
||||
- User orgs list
|
||||
- Lookup on Project
|
||||
- Lookup on Credential
|
||||
- Lookup on Inventory
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
- Orgs list
|
||||
- User orgs list
|
||||
- Lookup on Project
|
||||
- Lookup on Credential
|
||||
- Lookup on Inventory
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
**Instance Groups list**
|
||||
- Name - search is ?name=ig
|
||||
- ? is_container_group boolean choice (doesn't work right now in API but will soon) - search is ?is_container_group=true
|
||||
- ? credential name - search is ?credentials__name=kubey
|
||||
|
||||
- Name - search is ?name=ig
|
||||
- ? is_container_group boolean choice (doesn't work right now in API but will soon) - search is ?is_container_group=true
|
||||
- ? credential name - search is ?credentials\_\_name=kubey
|
||||
|
||||
Instance of instance groups list include:
|
||||
- Lookup on Org
|
||||
- Lookup on JT
|
||||
- Lookup on Inventory
|
||||
|
||||
- Lookup on Org
|
||||
- Lookup on JT
|
||||
- Lookup on Inventory
|
||||
|
||||
**Users list**
|
||||
- Username - search is ?username=johndoe
|
||||
- First Name - search is ?first_name=John
|
||||
- Last Name - search is ?last_name=Doe
|
||||
- ? (if not superfluous, would not include on Team users list) Team Name - search is ?teams__name=team_of_john_does (note API issue: User has no field named "teams")
|
||||
- ? (only for access or permissions list) Role Name - search is ?roles__name=Admin (note API issue: Role has no field "name")
|
||||
- ? (if not superfluous, would not include on Organization users list) Org Name - search is ?organizations__name=org_of_john_does
|
||||
|
||||
- Username - search is ?username=johndoe
|
||||
- First Name - search is ?first_name=John
|
||||
- Last Name - search is ?last_name=Doe
|
||||
- ? (if not superfluous, would not include on Team users list) Team Name - search is ?teams\_\_name=team_of_john_does (note API issue: User has no field named "teams")
|
||||
- ? (only for access or permissions list) Role Name - search is ?roles\_\_name=Admin (note API issue: Role has no field "name")
|
||||
- ? (if not superfluous, would not include on Organization users list) Org Name - search is ?organizations\_\_name=org_of_john_does
|
||||
|
||||
Instance of user lists include:
|
||||
- User list
|
||||
- Org user list
|
||||
- Access list for Org, JT, Project, Credential, Inventory, User and Team
|
||||
- Access list for JT
|
||||
- Access list Project
|
||||
- Access list for Credential
|
||||
- Access list for Inventory
|
||||
- Access list for User
|
||||
- Access list for Team
|
||||
- Team add users list
|
||||
- Users list in access wizard (to add new roles for a particular list) for Org
|
||||
- Users list in access wizard (to add new roles for a particular list) for JT
|
||||
- Users list in access wizard (to add new roles for a particular list) for Project
|
||||
- Users list in access wizard (to add new roles for a particular list) for Credential
|
||||
- Users list in access wizard (to add new roles for a particular list) for Inventory
|
||||
|
||||
- User list
|
||||
- Org user list
|
||||
- Access list for Org, JT, Project, Credential, Inventory, User and Team
|
||||
- Access list for JT
|
||||
- Access list Project
|
||||
- Access list for Credential
|
||||
- Access list for Inventory
|
||||
- Access list for User
|
||||
- Access list for Team
|
||||
- Team add users list
|
||||
- Users list in access wizard (to add new roles for a particular list) for Org
|
||||
- Users list in access wizard (to add new roles for a particular list) for JT
|
||||
- Users list in access wizard (to add new roles for a particular list) for Project
|
||||
- Users list in access wizard (to add new roles for a particular list) for Credential
|
||||
- Users list in access wizard (to add new roles for a particular list) for Inventory
|
||||
|
||||
**Teams list**
|
||||
- Name - search is ?name=teamname
|
||||
- ? Username (of a user in the team) - search is ?users__username=johndoe
|
||||
- ? (if not superfluous, would not include on Organizations teams list) Org Name - search is ?organizations__name=org_of_john_does
|
||||
|
||||
- Name - search is ?name=teamname
|
||||
- ? Username (of a user in the team) - search is ?users\_\_username=johndoe
|
||||
- ? (if not superfluous, would not include on Organizations teams list) Org Name - search is ?organizations\_\_name=org_of_john_does
|
||||
|
||||
Instance of team lists include:
|
||||
- Team list
|
||||
- Org team list
|
||||
- User team list
|
||||
- Team list in access wizard (to add new roles for a particular list) for Org
|
||||
- Team list in access wizard (to add new roles for a particular list) for JT
|
||||
- Team list in access wizard (to add new roles for a particular list) for Project
|
||||
- Team list in access wizard (to add new roles for a particular list) for Credential
|
||||
- Team list in access wizard (to add new roles for a particular list) for Inventory
|
||||
|
||||
- Team list
|
||||
- Org team list
|
||||
- User team list
|
||||
- Team list in access wizard (to add new roles for a particular list) for Org
|
||||
- Team list in access wizard (to add new roles for a particular list) for JT
|
||||
- Team list in access wizard (to add new roles for a particular list) for Project
|
||||
- Team list in access wizard (to add new roles for a particular list) for Credential
|
||||
- Team list in access wizard (to add new roles for a particular list) for Inventory
|
||||
|
||||
**Credentials list**
|
||||
- Name
|
||||
- ? Type (dropdown on right with different types)
|
||||
- ? Created by (username)
|
||||
- ? Modified by (username)
|
||||
|
||||
- Name
|
||||
- ? Type (dropdown on right with different types)
|
||||
- ? Created by (username)
|
||||
- ? Modified by (username)
|
||||
|
||||
Instance of credential lists include:
|
||||
- Credential list
|
||||
- Lookup for JT
|
||||
- Lookup for Project
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
- Credential list
|
||||
- Lookup for JT
|
||||
- Lookup for Project
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
**Projects list**
|
||||
- Name - search is ?name=proj
|
||||
- ? Type (dropdown on right with different types) - search is scm_type=git
|
||||
- ? SCM URL - search is ?scm_url=github.com/ansible/test-playbooks
|
||||
- ? Created by (username) - search is ?created_by__username=admin
|
||||
- ? Modified by (username) - search is ?modified_by__username=admin
|
||||
|
||||
- Name - search is ?name=proj
|
||||
- ? Type (dropdown on right with different types) - search is scm_type=git
|
||||
- ? SCM URL - search is ?scm_url=github.com/ansible/test-playbooks
|
||||
- ? Created by (username) - search is ?created_by\_\_username=admin
|
||||
- ? Modified by (username) - search is ?modified_by\_\_username=admin
|
||||
|
||||
Instance of project lists include:
|
||||
- Project list
|
||||
- Lookup for JT
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
- Project list
|
||||
- Lookup for JT
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
**Templates list**
|
||||
- Name - search is ?name=cleanup
|
||||
- ? Type (dropdown on right with different types) - search is ?type=playbook_run
|
||||
- ? Playbook name - search is ?job_template__playbook=debug.yml
|
||||
- ? Created by (username) - search is ?created_by__username=admin
|
||||
- ? Modified by (username) - search is ?modified_by__username=admin
|
||||
|
||||
- Name - search is ?name=cleanup
|
||||
- ? Type (dropdown on right with different types) - search is ?type=playbook_run
|
||||
- ? Playbook name - search is ?job_template\_\_playbook=debug.yml
|
||||
- ? Created by (username) - search is ?created_by\_\_username=admin
|
||||
- ? Modified by (username) - search is ?modified_by\_\_username=admin
|
||||
|
||||
Instance of template lists include:
|
||||
- Template list
|
||||
- Project Templates list
|
||||
|
||||
- Template list
|
||||
- Project Templates list
|
||||
|
||||
**Inventories list**
|
||||
- Name - search is ?name=inv
|
||||
- ? Created by (username) - search is ?created_by__username=admin
|
||||
- ? Modified by (username) - search is ?modified_by__username=admin
|
||||
|
||||
- Name - search is ?name=inv
|
||||
- ? Created by (username) - search is ?created_by\_\_username=admin
|
||||
- ? Modified by (username) - search is ?modified_by\_\_username=admin
|
||||
|
||||
Instance of inventory lists include:
|
||||
- Inventory list
|
||||
- Lookup for JT
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
- Inventory list
|
||||
- Lookup for JT
|
||||
- User access add wizard list
|
||||
- Team access add wizard list
|
||||
|
||||
**Groups list**
|
||||
- Name - search is ?name=group_name
|
||||
- ? Created by (username) - search is ?created_by__username=admin
|
||||
- ? Modified by (username) - search is ?modified_by__username=admin
|
||||
|
||||
- Name - search is ?name=group_name
|
||||
- ? Created by (username) - search is ?created_by\_\_username=admin
|
||||
- ? Modified by (username) - search is ?modified_by\_\_username=admin
|
||||
|
||||
Instance of group lists include:
|
||||
- Group list
|
||||
|
||||
- Group list
|
||||
|
||||
**Hosts list**
|
||||
- Name - search is ?name=hostname
|
||||
- ? Created by (username) - search is ?created_by__username=admin
|
||||
- ? Modified by (username) - search is ?modified_by__username=admin
|
||||
|
||||
- Name - search is ?name=hostname
|
||||
- ? Created by (username) - search is ?created_by\_\_username=admin
|
||||
- ? Modified by (username) - search is ?modified_by\_\_username=admin
|
||||
|
||||
Instance of host lists include:
|
||||
- Host list
|
||||
|
||||
- Host list
|
||||
|
||||
**Notifications list**
|
||||
- Name - search is ?name=notification_template_name
|
||||
- ? Type (dropdown on right with different types) - search is ?type=slack
|
||||
- ? Created by (username) - search is ?created_by__username=admin
|
||||
- ? Modified by (username) - search is ?modified_by__username=admin
|
||||
|
||||
- Name - search is ?name=notification_template_name
|
||||
- ? Type (dropdown on right with different types) - search is ?type=slack
|
||||
- ? Created by (username) - search is ?created_by\_\_username=admin
|
||||
- ? Modified by (username) - search is ?modified_by\_\_username=admin
|
||||
|
||||
Instance of notification lists include:
|
||||
- Org notification list
|
||||
- JT notification list
|
||||
- Project notification list
|
||||
|
||||
- Org notification list
|
||||
- JT notification list
|
||||
- Project notification list
|
||||
|
||||
### TODO backlog
|
||||
- Change the right-hand input based on the type of key selected on the left-hand side. We will eventually want to support:
|
||||
|
||||
- Change the right-hand input based on the type of key selected on the left-hand side. We will eventually want to support:
|
||||
- lookup input (selection of particular resources, based on API list endpoints)
|
||||
- date picker input
|
||||
- Update the following lists to have the following keys:
|
||||
- Update all __name and __username related field search-based keys to be type-ahead lookup based searches
|
||||
- Update all **name and **username related field search-based keys to be type-ahead lookup based searches
|
||||
|
||||
## Code Details
|
||||
|
||||
@@ -230,13 +257,13 @@ The component looks like this:
|
||||
/>
|
||||
```
|
||||
|
||||
**qsConfig** is used to get namespace so that multiple lists can be on the page. When tags are modified they append namespace to query params. The qsConfig is also used to get "type" of fields in order to correctly parse values as int or date as it is translating.
|
||||
**qsConfig** is used to get namespace so that multiple lists can be on the page. When tags are modified they append namespace to query params. The qsConfig is also used to get "type" of fields in order to correctly parse values as int or date as it is translating.
|
||||
|
||||
**columns** are passed as an array, as defined in the screen where the list is located. You pass a bool `isDefault` to indicate that should be the key that shows up in the left-hand dropdown as default in the UI. If you don't pass any columns, a default of `isDefault=true` will be added to a name column, which is nearly universally shared throughout the models of awx.
|
||||
**columns** are passed as an array, as defined in the screen where the list is located. You pass a bool `isDefault` to indicate that should be the key that shows up in the left-hand dropdown as default in the UI. If you don't pass any columns, a default of `isDefault=true` will be added to a name column, which is nearly universally shared throughout the models of awx.
|
||||
|
||||
There is a type attribute that can be `'string'`, `'number'` or `'choice'` (and in the future, `'date'` and `'lookup'`), which will change the type of input on the right-hand side of the search bar. For a key that has a set number of choices, you will pass a choices attribute, which is an array in the format choices: [{label: 'Foo', value: 'foo'}]
|
||||
There is a type attribute that can be `'string'`, `'number'` or `'choice'` (and in the future, `'date'` and `'lookup'`), which will change the type of input on the right-hand side of the search bar. For a key that has a set number of choices, you will pass a choices attribute, which is an array in the format choices: [{label: 'Foo', value: 'foo'}]
|
||||
|
||||
**onSearch** calls the `mergeParams` qs util in order to add new tags to the queryset. mergeParams is used so that we can support duplicate keys (see mergeParams vs. replaceParams for more info).
|
||||
**onSearch** calls the `mergeParams` qs util in order to add new tags to the queryset. mergeParams is used so that we can support duplicate keys (see mergeParams vs. replaceParams for more info).
|
||||
|
||||
### ListHeader component
|
||||
|
||||
@@ -253,15 +280,16 @@ All of these functions act on the react-router history using the `pushHistorySta
|
||||
|
||||
**a note on sort_columns and search_columns**
|
||||
|
||||
We have split out column configuration into separate search and sort column array props--these are passed to the search and sort columns. Both accept an isDefault prop for one of the items in the array to be the default option selected when going to the page. Sort column items can pass an isNumeric boolean in order to change the iconography of the sort UI element. Search column items can pass type and if applicable choices, in order to configure the right-hand side of the search bar.
We have split out column configuration into separate search and sort column array props--these are passed to the search and sort columns. Both accept an isDefault prop for one of the items in the array to be the default option selected when going to the page. Sort column items can pass an isNumeric boolean in order to change the iconography of the sort UI element. Search column items can pass type and if applicable choices, in order to configure the right-hand side of the search bar.
|
||||
|
||||
### FilterTags component
|
||||
|
||||
Similar to the way the list grabs data based on changes to the react-router params, the `FilterTags` component updates when new params are added. This component is a fairly straight-forward map (only slightly complex, because it needed to do a nested map over any values with duplicate keys that were represented by an inner-array). Both key and value are displayed for the tag.
|
||||
Similar to the way the list grabs data based on changes to the react-router params, the `FilterTags` component updates when new params are added. This component is a fairly straight-forward map (only slightly complex, because it needed to do a nested map over any values with duplicate keys that were represented by an inner-array). Both key and value are displayed for the tag.
|
||||
|
||||
### qs utility
|
||||
|
||||
The qs (queryset) utility is used to make the search speak the language of the REST API. The main functions of the utilities are to:
|
||||
The qs (queryset) utility is used to make the search speak the language of the REST API. The main functions of the utilities are to:
|
||||
|
||||
- add, replace and remove filters
|
||||
- translate filters as url params (for linking and maintaining state), in-memory representation (as JS objects), and params that Django REST Framework understands.
|
||||
|
||||
@@ -269,7 +297,7 @@ More info in the below sections:
|
||||
|
||||
#### Encoding for UI vs. API
|
||||
|
||||
For the UI url params, we want to only encode those params that aren't defaults, as the default behavior was defined through configuration and we don't need these in the url as a source of truth. For the API, we need to pass these params so that they are taken into account when the response is built.
|
||||
For the UI url params, we want to only encode those params that aren't defaults, as the default behavior was defined through configuration and we don't need these in the url as a source of truth. For the API, we need to pass these params so that they are taken into account when the response is built.
|
||||
|
||||
#### mergeParams vs. replaceParams
|
||||
|
||||
@@ -283,13 +311,13 @@ From a UX perspective, we wanted to be able to support searching on the same key
|
||||
}
|
||||
```
|
||||
|
||||
Concatenating terms in this way gives you the intersection of both terms (i.e. foo must be "bar" and "baz"). This is helpful for the most-common type of searching, substring (`__icontains`) searches. This will increase filtering, allowing the user to drill-down into the list as terms are added.
|
||||
Concatenating terms in this way gives you the intersection of both terms (i.e. foo must be "bar" and "baz"). This is helpful for the most-common type of searching, substring (`__icontains`) searches. This will increase filtering, allowing the user to drill-down into the list as terms are added.
|
||||
|
||||
**replaceParams** is used to support sorting, setting page_size, etc. These params only allow one choice, and we need to replace a particular key's value if one is passed.
|
||||
**replaceParams** is used to support sorting, setting page_size, etc. These params only allow one choice, and we need to replace a particular key's value if one is passed.
|
||||
|
||||
#### Working with REST API
|
||||
|
||||
The REST API is coupled with the qs util through the `paramsSerializer`, due to the fact we need axios to support the array for duplicate key values in the object representation of the params to pass to the get request. This is done where axios is configured in the Base.js file, so all requests and request types should support our array syntax for duplicate keys automatically.
|
||||
The REST API is coupled with the qs util through the `paramsSerializer`, due to the fact we need axios to support the array for duplicate key values in the object representation of the params to pass to the get request. This is done where axios is configured in the Base.js file, so all requests and request types should support our array syntax for duplicate keys automatically.
|
||||
|
||||
# Advanced Search - this section is a mess, update eventually
|
||||
|
||||
@@ -305,85 +333,84 @@ Current thinking is Advanced Search will be post-3.6, or at least late 3.6 after
|
||||
|
||||
That being said, we want to plan it out so we make sure the infrastructure of how we set up adding/removing tags, what shows up in the url bar, etc. all doesn't have to be redone.
|
||||
|
||||
Users will get to advanced search with a button to the right of search bar. When selected type-ahead key thing opens, left dropdown of search bar goes away, and x is given to get back to regular search (this is in the mockups)
|
||||
Users will get to advanced search with a button to the right of search bar. When selected type-ahead key thing opens, left dropdown of search bar goes away, and x is given to get back to regular search (this is in the mockups)
|
||||
|
||||
It is okay to only make this typing representation available initially (i.e. they start doing stuff with the type-ahead and the phases, no more typing in to make a query that way).
|
||||
|
||||
when you click through or type in the search bar for the various phases of crafting the query ("not", "related resource project", "related resource key name", "value foo") which might be represented in the top bar as a series of tags that can be added and removed before submitting the tag.
|
||||
|
||||
We will try to form options data from a static file. Because options data is static, we may be able to generate and store as a static file of some sort (that we can use for managing smart search). Alan had ideas around this. If we do this it will mean we don't have to make a ton of requests as we craft smart search filters. It sounds like the cli may start using something similar.
|
||||
We will try to form options data from a static file. Because options data is static, we may be able to generate and store as a static file of some sort (that we can use for managing smart search). Alan had ideas around this. If we do this it will mean we don't have to make a ton of requests as we craft smart search filters. It sounds like the cli may start using something similar.
|
||||
|
||||
## Smart search flow
|
||||
|
||||
Smart search will be able to craft the tag through various states. Note that the phases don't necessarily need to be completed in sequential order.
|
||||
Smart search will be able to craft the tag through various states. Note that the phases don't necessarily need to be completed in sequential order.
|
||||
|
||||
PHASE 1: prefix operators
|
||||
PHASE 1: prefix operators
|
||||
|
||||
**TODO: Double check there's no reason we need to include or__ and chain__ and can just do not__**
|
||||
**TODO: Double check there's no reason we need to include or** and chain** and can just do not\_\_**
|
||||
|
||||
- not__
|
||||
- or__
|
||||
- chain__
|
||||
- not\_\_
|
||||
- or\_\_
|
||||
- chain\_\_
|
||||
|
||||
how these work:
|
||||
how these work:
|
||||
|
||||
To exclude results matching certain criteria, prefix the field parameter with not__:
|
||||
To exclude results matching certain criteria, prefix the field parameter with not\_\_:
|
||||
|
||||
?not__field=value
|
||||
By default, all query string filters are AND'ed together, so only the results matching all filters will be returned. To combine results matching any one of multiple criteria, prefix each query string parameter with or__:
|
||||
?not**field=value
|
||||
By default, all query string filters are AND'ed together, so only the results matching all filters will be returned. To combine results matching any one of multiple criteria, prefix each query string parameter with or**:
|
||||
|
||||
?or__field=value&or__field=othervalue
|
||||
?or__not__field=value&or__field=othervalue
|
||||
(Added in Ansible Tower 1.4.5) The default AND filtering applies all filters simultaneously to each related object being filtered across database relationships. The chain filter instead applies filters separately for each related object. To use, prefix the query string parameter with chain__:
|
||||
?or**field=value&or**field=othervalue
|
||||
?or**not**field=value&or**field=othervalue
|
||||
(Added in Ansible Controller 1.4.5) The default AND filtering applies all filters simultaneously to each related object being filtered across database relationships. The chain filter instead applies filters separately for each related object. To use, prefix the query string parameter with chain**:
|
||||
|
||||
?chain__related__field=value&chain__related__field2=othervalue
|
||||
?chain__not__related__field=value&chain__related__field2=othervalue
|
||||
If the first query above were written as ?related__field=value&related__field2=othervalue, it would return only the primary objects where the same related object satisfied both conditions. As written using the chain filter, it would return the intersection of primary objects matching each condition.
|
||||
?chain**related**field=value&chain**related**field2=othervalue
|
||||
?chain**not**related**field=value&chain**related**field2=othervalue
|
||||
If the first query above were written as ?related**field=value&related\_\_field2=othervalue, it would return only the primary objects where the same related object satisfied both conditions. As written using the chain filter, it would return the intersection of primary objects matching each condition.
|
||||
|
||||
PHASE 2: related fields, given by array, where __search is appended to them, i.e.
|
||||
PHASE 2: related fields, given by array, where \_\_search is appended to them, i.e.
|
||||
|
||||
```
|
||||
"related_search_fields": [
|
||||
"credentials__search",
|
||||
"labels__search",
|
||||
"created_by__search",
|
||||
"modified_by__search",
|
||||
"notification_templates__search",
|
||||
"custom_inventory_scripts__search",
|
||||
"notification_templates_error__search",
|
||||
"notification_templates_success__search",
|
||||
"notification_templates_any__search",
|
||||
"teams__search",
|
||||
"projects__search",
|
||||
"inventories__search",
|
||||
"applications__search",
|
||||
"workflows__search",
|
||||
"instance_groups__search"
|
||||
],
|
||||
```
|
||||
```
|
||||
"related_search_fields": [
|
||||
"credentials__search",
|
||||
"labels__search",
|
||||
"created_by__search",
|
||||
"modified_by__search",
|
||||
"notification_templates__search",
|
||||
"custom_inventory_scripts__search",
|
||||
"notification_templates_error__search",
|
||||
"notification_templates_success__search",
|
||||
"notification_templates_any__search",
|
||||
"teams__search",
|
||||
"projects__search",
|
||||
"inventories__search",
|
||||
"applications__search",
|
||||
"workflows__search",
|
||||
"instance_groups__search"
|
||||
],
|
||||
```
|
||||
|
||||
PHASE 3: keys, give by object key names for data.actions.GET
|
||||
- type is given for each key which we could use to help craft the value
|
||||
PHASE 3: keys, given by object key names for data.actions.GET - type is given for each key, which we could use to help craft the value
|
||||
|
||||
PHASE 4: after key postfix operators can be
|
||||
PHASE 4: after key postfix operators can be
|
||||
|
||||
**TODO: will need to figure out which ones we support**
|
||||
|
||||
- exact: Exact match (default lookup if not specified).
|
||||
- iexact: Case-insensitive version of exact.
|
||||
- contains: Field contains value.
|
||||
- icontains: Case-insensitive version of contains.
|
||||
- startswith: Field starts with value.
|
||||
- istartswith: Case-insensitive version of startswith.
|
||||
- endswith: Field ends with value.
|
||||
- iendswith: Case-insensitive version of endswith.
|
||||
- regex: Field matches the given regular expression.
|
||||
- iregex: Case-insensitive version of regex.
|
||||
- gt: Greater than comparison.
|
||||
- gte: Greater than or equal to comparison.
|
||||
- lt: Less than comparison.
|
||||
- lte: Less than or equal to comparison.
|
||||
- isnull: Check whether the given field or related object is null; expects a boolean value.
|
||||
- in: Check whether the given field's value is present in the list provided; expects a list of items.
|
||||
- exact: Exact match (default lookup if not specified).
|
||||
- iexact: Case-insensitive version of exact.
|
||||
- contains: Field contains value.
|
||||
- icontains: Case-insensitive version of contains.
|
||||
- startswith: Field starts with value.
|
||||
- istartswith: Case-insensitive version of startswith.
|
||||
- endswith: Field ends with value.
|
||||
- iendswith: Case-insensitive version of endswith.
|
||||
- regex: Field matches the given regular expression.
|
||||
- iregex: Case-insensitive version of regex.
|
||||
- gt: Greater than comparison.
|
||||
- gte: Greater than or equal to comparison.
|
||||
- lt: Less than comparison.
|
||||
- lte: Less than or equal to comparison.
|
||||
- isnull: Check whether the given field or related object is null; expects a boolean value.
|
||||
- in: Check whether the given field's value is present in the list provided; expects a list of items.
|
||||
|
||||
PHASE 5: The value. Based on options, we can give hints or validation based on type of value (like number fields don't accept "foo" or whatever)
|
||||
PHASE 5: The value. Based on options, we can give hints or validation based on type of value (like number fields don't accept "foo" or whatever)
|
||||
|
||||
@@ -2,15 +2,39 @@ This document is meant to provide some guidance into the functionality of Job Ou
|
||||
|
||||
## Overview of the feature/screen. Summary of what it does/is
|
||||
|
||||
1. Elapsed time / unfollow button
|
||||
2. Page up and page down buttons
|
||||
3. Unique qualities of the different job types.
|
||||
Job output is a feature that allows users to see how their job is doing as it is being run.
|
||||
This feature displays data sent to the UI via websockets that are connected to several
|
||||
different endpoints in the API.
|
||||
|
||||
- Some don’t allow search by event data and thus Event is not an option in the drop down
|
||||
- Some don’t have expand, collapse
|
||||
The job output has two different states that result in different functionality. One state
is when the job is actively running. Functionality is limited because of how the
job events are processed when they reach the UI. While the job is running, and
output is coming into the UI, the following features are turned off:
|
||||
|
||||
4. Differences in the output from when a job is running and when a job is complete.
|
||||
5. Which features are enabled when it’s running and which aren’t.
|
||||
1. [Search](#Search)- The ability to search the output of a job.
|
||||
2. [Expand/Collapse](#Expand/Collapse)- The ability to expand and collapse job events, tasks, plays, or even the
|
||||
job itself. The only part of the job output that is not collapsible is the playbook summary (only jobs that
|
||||
are executed from a Job Template have Expand/Collapse functionality).
|
||||
|
||||
The following features are enabled:
|
||||
|
||||
1. Follow/unfollow - `Follow` indicates you are streaming the output on the screen
|
||||
as it comes into the UI. If you see some output that you want to examine closer while the job is running
|
||||
scroll to it, and click `Unfollow`, and the output will stop streaming onto the screen. This feature is only
|
||||
enabled when the job is running and is not complete. If the user scrolls up in the output the UI will unfollow.
|
||||
2. Page up and page down buttons- Use these buttons to navigate quickly up and down the output.
|
||||
|
||||

|
||||
|
||||
After the job is complete, the Follow/Unfollow button is disabled, and Expand/Collapse and Search become enabled.
|
||||

|
||||
|
||||
Not all job types are created equal. Some jobs have a concept of parent-child events. Job events can be inside a Task,
|
||||
a Task can be inside a Play, and a Play inside a Playbook. Leveraging this concept to enable Expand/Collapse for these
|
||||
job types, allows you to collapse and hide the children of a particular line of output. This parent-child event
|
||||
relationship only exists on jobs executed from a job template. All other types of jobs do not
|
||||
have this event concept, and therefore, do not have Expand/Collapse functionality. By default all job
|
||||
events are expanded.

## How output works generally.

@@ -26,11 +50,13 @@ This document is meant to provide some guidance into the functionality of Job Ou

## Non-standard cases

1. When an event comes into the output that has a parent, but the parent hasn’t arrived yet.
2. When an event that has children arrives in output, but the children are not present yet
2. When an event with children arrives in output, but the children are not yet present.
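
One plausible way to handle both cases is to buffer an event until its parent has arrived, then flush any children that were waiting on it; this is only a sketch under assumed names, not the actual handling in the UI:

```ts
// Hypothetical buffering of events that arrive before their parent.
interface JobEvent {
  uuid: string;
  parentUuid: string | null;
  stdout: string;
}

const received = new Map<string, JobEvent>();
const pendingByParent = new Map<string, JobEvent[]>();

function onEvent(event: JobEvent, render: (e: JobEvent) => void) {
  if (event.parentUuid && !received.has(event.parentUuid)) {
    // Parent missing: hold the child until the parent shows up.
    const waiting = pendingByParent.get(event.parentUuid) ?? [];
    pendingByParent.set(event.parentUuid, [...waiting, event]);
    return;
  }

  received.set(event.uuid, event);
  render(event);

  // Flush any children that were queued behind this event.
  const children = pendingByParent.get(event.uuid) ?? [];
  pendingByParent.delete(event.uuid);
  children.forEach((child) => onEvent(child, render));
}
```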

## Expand collapse a single event- how it works and how it changes the state object
## Expand/Collapse

## Expand collapse all- how it works and how it changes the state object
### Expand collapse a single event - how it works and how it changes the state object

### Expand collapse all - how it works and how it changes the state object

## Search

BIN awx/ui/docs/images/JobOutput-complete.png (new file; binary not shown; after: 22 KiB)
BIN awx/ui/docs/images/JobOutput-running.png (new file; binary not shown; after: 26 KiB)
awx/ui/package-lock.json (generated; 1404 lines changed; diff suppressed because it is too large)
@@ -6,32 +6,32 @@
"node": ">=16.13.1"
},
"dependencies": {
"@lingui/react": "3.9.0",
"@patternfly/patternfly": "4.183.1",
"@patternfly/react-core": "4.198.19",
"@patternfly/react-icons": "4.49.19",
"@patternfly/react-table": "4.67.19",
"ace-builds": "^1.4.12",
"@lingui/react": "3.14.0",
"@patternfly/patternfly": "4.202.1",
"@patternfly/react-core": "^4.221.3",
"@patternfly/react-icons": "4.75.1",
"@patternfly/react-table": "4.93.1",
"ace-builds": "^1.6.0",
"ansi-to-html": "0.7.2",
"axios": "0.22.0",
"codemirror": "^5.47.0",
"d3": "7.1.1",
"axios": "0.27.2",
"codemirror": "^6.0.1",
"d3": "7.4.4",
"dagre": "^0.8.4",
"dompurify": "2.3.8",
"formik": "2.2.9",
"has-ansi": "5.0.1",
"html-entities": "2.3.2",
"js-yaml": "^3.13.1",
"luxon": "^2.0.1",
"js-yaml": "4.1.0",
"luxon": "^2.4.0",
"prop-types": "^15.6.2",
"react": "17.0.2",
"react-ace": "^9.3.0",
"react-ace": "^10.1.0",
"react-dom": "17.0.2",
"react-error-boundary": "^3.1.3",
"react-router-dom": "^5.1.2",
"react-error-boundary": "^3.1.4",
"react-router-dom": "^5.3.3",
"react-virtualized": "^9.21.1",
"rrule": "2.6.4",
"sanitize-html": "2.4.0",
"styled-components": "5.3.0"
"rrule": "2.7.0",
"styled-components": "5.3.5"
},
"devDependencies": {
"@babel/core": "^7.16.10",
@@ -46,7 +46,7 @@
"@lingui/macro": "^3.7.1",
"@nteract/mockument": "^1.0.4",
"@testing-library/jest-dom": "^5.16.2",
"@testing-library/react": "^12.1.4",
"@testing-library/react": "^12.1.5",
"@wojtekmaj/enzyme-adapter-react-17": "0.6.5",
"babel-plugin-macros": "3.1.0",
"enzyme": "^3.10.0",
@@ -56,14 +56,14 @@
"eslint-config-airbnb": "19.0.4",
"eslint-config-prettier": "8.3.0",
"eslint-import-resolver-webpack": "0.13.2",
"eslint-plugin-i18next": "5.1.2",
"eslint-plugin-i18next": "5.2.1",
"eslint-plugin-import": "2.25.4",
"eslint-plugin-jsx-a11y": "6.5.1",
"eslint-plugin-react": "7.28.0",
"eslint-plugin-react-hooks": "4.3.0",
"http-proxy-middleware": "^1.0.3",
"jest-websocket-mock": "^2.0.2",
"mock-socket": "^9.0.3",
"mock-socket": "^9.1.3",
"prettier": "2.3.2",
"react-scripts": "5.0.0"
},
Some files were not shown because too many files have changed in this diff.