Mirror of https://github.com/ansible/awx.git (synced 2026-02-07 04:28:23 -03:30)
Compare commits
505 Commits
(Commit table: 505 commits, c09cad3e6d through 9bbaa6993f; the author, date, and message columns were not captured, so only the SHAs survive.)
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 2 changes

@@ -5,7 +5,7 @@ about: Create a report to help us improve
 ---
 <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

-- http://webchat.freenode.net/?channels=ansible-awx
+- http://web.libera.chat/?channels=#ansible-awx
 - https://groups.google.com/forum/#!forum/awx-project

 We have to limit this because of limited volunteer time to respond to issues! -->
.github/ISSUE_TEMPLATE/feature_request.md (vendored): 2 changes

@@ -5,7 +5,7 @@ about: Suggest an idea for this project
 ---
 <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

-- http://webchat.freenode.net/?channels=ansible-awx
+- http://web.libera.chat/?channels=#ansible-awx
 - https://groups.google.com/forum/#!forum/awx-project

 We have to limit this because of limited volunteer time to respond to issues! -->
CHANGELOG.md: 25 changes

@@ -1,6 +1,29 @@
 # Changelog

 This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

+# 19.2.2 (June 28, 2021)
+
+- Fixed bug where symlinks pointing to directories were not preserved (https://github.com/ansible/ansible-runner/pull/736)
+- Various bugfixes found during testing (https://github.com/ansible/awx/pull/10532)
+
+# 19.2.1 (June 17, 2021)
+
+- There are now 2 default Instance Groups: 'controlplane' and 'default' (https://github.com/ansible/awx/pull/10324)
+- Removed deprecated modules: `tower_send`, `tower_receive`, `tower_workflow_template` (https://github.com/ansible/awx/pull/9980)
+- Improved UI performance when a large amount of events are being emitted by jobs (https://github.com/ansible/awx/pull/10053)
+- Settings UI Revert All button now issues a DELETE instead of PATCHing all fields (https://github.com/ansible/awx/pull/10376)
+- Fixed a bug with the schedule date/time picker in Firefox (https://github.com/ansible/awx/pull/10291)
+- UI now preselects the system default Galaxy credential when creating a new organization (https://github.com/ansible/awx/pull/10395)
+- Added favicon (https://github.com/ansible/awx/pull/10388)
+- Removed `not` option from smart inventory host filter search as it's not supported by the API (https://github.com/ansible/awx/pull/10380)
+- Added button to allow user to refetch project revision after project sync has finished (https://github.com/ansible/awx/pull/10334)
+- Fixed bug where extraneous CONFIG requests were made on logout (https://github.com/ansible/awx/pull/10379)
+- Fixed bug where users were unable to cancel inventory syncs (https://github.com/ansible/awx/pull/10346)
+- Added missing dashboard graph filters (https://github.com/ansible/awx/pull/10349)
+- Added support for typing in to single select lookup form fields (https://github.com/ansible/awx/pull/10257)
+- Fixed various bugs related to user sessions (https://github.com/ansible/awx/pull/9908)
+- Fixed bug where sorting in modals would close the modal (https://github.com/ansible/awx/pull/10215)
+- Added support for Red Hat Insights as an inventory source (https://github.com/ansible/awx/pull/8650)
+- Fixed bugs when selecting items in a list then sorting/paginating (https://github.com/ansible/awx/pull/10329)
+
 # 19.2.0 (June 1, 2021)
 - Fixed race condition that would sometimes cause jobs to error out at the very end of an otherwise successful run (https://github.com/ansible/receptor/pull/328)
Makefile: 18 changes

@@ -173,7 +173,7 @@ init:
 	. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
 	$(MANAGEMENT_COMMAND) provision_instance --hostname=$(COMPOSE_HOST); \
-	$(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;
+	$(MANAGEMENT_COMMAND) register_queue --queuename=controlplane --instance_percent=100;

 # Refresh development environment after pulling new code.
 refresh: clean requirements_dev version_file develop migrate

@@ -288,6 +288,11 @@ swagger: reports

 check: black

+api-lint:
+	BLACK_ARGS="--check" make black
+	flake8 awx
+	yamllint -s .
+
 awx-link:
 	[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
 	cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link

@@ -315,7 +320,7 @@ test_collection:
 	if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi && \
-	pip install ansible && \
+	pip install ansible-core && \
 	py.test $(COLLECTION_TEST_DIRS) -v
 	# The python path needs to be modified so that the tests can find Ansible within the container
 	# First we will use anything explicitly set as PYTHONPATH

@@ -551,10 +556,13 @@ awx-kube-dev-build: Dockerfile.kube-dev
 # Translation TASKS
 # --------------------------------------

-# generate UI .pot
+# generate UI .pot file, an empty template of strings yet to be translated
 pot: $(UI_BUILD_FLAG_FILE)
-	$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings
-	$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template
+	$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean

+# generate UI .po files for each locale (will update translated strings for `en`)
+po: $(UI_BUILD_FLAG_FILE)
+	$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean

 # generate API django .pot .po
 LANG = "en-us"
@@ -1,5 +1,5 @@
 [](https://ansible.softwarefactory-project.io/zuul/status) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
-[](irc.libera.chat - #ansible-awx)
+[](https://libera.chat)

 <img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

@@ -20,7 +20,7 @@ Contributing
 - All code submissions are made through pull requests against the `devel` branch.
 - All contributors must use git commit --signoff for any commit to be merged and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md)
 - Take care to make sure no merge commits are in the submission, and use `git rebase` vs. `git merge` for this reason.
-- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on webchat.freenode.net and talk about what you would like to do or add first. This not only helps everyone know what's going on, but it also helps save time and effort if the community decides some changes are needed.
+- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on web.libera.chat and talk about what you would like to do or add first. This not only helps everyone know what's going on, but it also helps save time and effort if the community decides some changes are needed.

 Reporting Issues
 ----------------

@@ -37,5 +37,5 @@ Get Involved

 We welcome your feedback and ideas. Here's how to reach us with feedback and questions:

-- Join the `#ansible-awx` channel on webchat.freenode.net
+- Join the `#ansible-awx` channel on irc.libera.chat
 - Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
@@ -34,6 +34,7 @@ else:
     from django.db.backends.base import schema
     from django.db.models import indexes
     from django.db.backends.utils import names_digest
+    from django.db import connection


 if HAS_DJANGO is True:

@@ -149,6 +150,12 @@ def manage():
     from django.conf import settings
     from django.core.management import execute_from_command_line

+    # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
+    if not MODE == 'development':
+        if (connection.pg_version // 10000) < 12:
+            sys.stderr.write("Postgres version 12 is required\n")
+            sys.exit(1)
+
     if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'):  # pragma: no cover
         sys.stdout.write('%s\n' % __version__)
     # If running as a user without permission to read settings, display an
@@ -133,7 +133,7 @@ class FieldLookupBackend(BaseFilterBackend):
     Filter using field lookups provided via query string parameters.
     """

-    RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate')
+    RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')

     SUPPORTED_LOOKUPS = (
         'exact',
@@ -39,6 +39,7 @@ from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credenti
 from awx.main.access import access_registry
 from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
 from awx.main.utils.db import get_all_field_names
+from awx.main.utils.licensing import server_product_name
 from awx.main.views import ApiErrorView
 from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
 from awx.api.versioning import URLPathVersioning

@@ -184,9 +185,6 @@ class APIView(views.APIView):
     """
     Log warning for 400 requests. Add header with elapsed time.
     """
-    from awx.main.utils import get_licenser
-    from awx.main.utils.licensing import OpenLicense
-
     #
     # If the URL was rewritten, and we get a 404, we should entirely
     # replace the view in the request context with an ApiErrorView()

@@ -226,7 +224,7 @@ class APIView(views.APIView):
         response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
         time_started = getattr(self, 'time_started', None)
         response['X-API-Product-Version'] = get_awx_version()
-        response['X-API-Product-Name'] = 'AWX' if isinstance(get_licenser(), OpenLicense) else 'Red Hat Ansible Tower'
+        response['X-API-Product-Name'] = server_product_name()

         response['X-API-Node'] = settings.CLUSTER_HOST_ID
         if time_started:
@@ -24,7 +24,7 @@ from rest_framework.request import clone_request
 from awx.api.fields import ChoiceNullField
 from awx.main.fields import JSONField, ImplicitRoleField
 from awx.main.models import NotificationTemplate
-from awx.main.tasks import AWXReceptorJob
+from awx.main.utils.execution_environments import get_default_pod_spec

 # Polymorphic
 from polymorphic.models import PolymorphicModel

@@ -211,7 +211,7 @@ class Metadata(metadata.SimpleMetadata):
                 continue

             if field == "pod_spec_override":
-                meta['default'] = AWXReceptorJob().pod_definition
+                meta['default'] = get_default_pod_spec()

             # Add type choices if available from the serializer.
             if field == 'type' and hasattr(serializer, 'get_type_choices'):
@@ -1,12 +1,16 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.

+from collections import OrderedDict

 # Django REST Framework
+from django.conf import settings
 from django.core.paginator import Paginator as DjangoPaginator
 from rest_framework import pagination
 from rest_framework.response import Response
 from rest_framework.utils.urls import replace_query_param
+from rest_framework.settings import api_settings
+from django.utils.translation import gettext_lazy as _


 class DisabledPaginator(DjangoPaginator):

@@ -65,3 +69,65 @@ class Pagination(pagination.PageNumberPagination):
         if self.count_disabled:
             return Response({'results': data})
         return super(Pagination, self).get_paginated_response(data)
+
+
+class LimitPagination(pagination.BasePagination):
+    default_limit = api_settings.PAGE_SIZE
+    limit_query_param = 'limit'
+    limit_query_description = _('Number of results to return per page.')
+    max_page_size = settings.MAX_PAGE_SIZE
+
+    def paginate_queryset(self, queryset, request, view=None):
+        self.limit = self.get_limit(request)
+        self.request = request
+
+        return list(queryset[0 : self.limit])
+
+    def get_paginated_response(self, data):
+        return Response(OrderedDict([('results', data)]))
+
+    def get_paginated_response_schema(self, schema):
+        return {
+            'type': 'object',
+            'properties': {
+                'results': schema,
+            },
+        }
+
+    def get_limit(self, request):
+        try:
+            return pagination._positive_int(request.query_params[self.limit_query_param], strict=True)
+        except (KeyError, ValueError):
+            pass
+
+        return self.default_limit
+
+
+class UnifiedJobEventPagination(Pagination):
+    """
+    By default, use Pagination for all operations.
+    If `limit` query parameter specified use LimitPagination
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.use_limit_paginator = False
+        self.limit_pagination = LimitPagination()
+        return super().__init__(*args, **kwargs)
+
+    def paginate_queryset(self, queryset, request, view=None):
+        if 'limit' in request.query_params:
+            self.use_limit_paginator = True
+
+        if self.use_limit_paginator:
+            return self.limit_pagination.paginate_queryset(queryset, request, view=view)
+        return super().paginate_queryset(queryset, request, view=view)
+
+    def get_paginated_response(self, data):
+        if self.use_limit_paginator:
+            return self.limit_pagination.get_paginated_response(data)
+        return super().get_paginated_response(data)
+
+    def get_paginated_response_schema(self, schema):
+        if self.use_limit_paginator:
+            return self.limit_pagination.get_paginated_response_schema(schema)
+        return super().get_paginated_response_schema(schema)
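Taken together, these classes let event list views trade the full pagination envelope for a cheaper, bounded slice. A minimal client-side sketch of the behavior (hypothetical host, job id, and token; the `requests` library is an assumption, not part of this change):

```python
import requests

# With `limit`, the body contains only `results`: the `count`, `next`, and
# `previous` keys produced by the default paginator are omitted entirely.
resp = requests.get(
    "https://awx.example.com/api/v2/jobs/42/job_events/",
    params={"limit": 25},
    headers={"Authorization": "Bearer TOKEN"},
)
events = resp.json()["results"]  # at most 25 events

# Without `limit`, the same endpoint returns the usual paginated envelope:
# {"count": ..., "next": ..., "previous": ..., "results": [...]}
```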
@@ -4,6 +4,8 @@
 # Python
 import logging

+from django.conf import settings
+
 # Django REST Framework
 from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
 from rest_framework import permissions

@@ -245,7 +247,7 @@ class IsSuperUser(permissions.BasePermission):

 class InstanceGroupTowerPermission(ModelAccessPermission):
     def has_object_permission(self, request, view, obj):
-        if request.method == 'DELETE' and obj.name == "tower":
+        if request.method == 'DELETE' and obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
            return False
        return super(InstanceGroupTowerPermission, self).has_object_permission(request, view, obj)
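In effect, the guard that used to protect only the legacy 'tower' group now protects both default queues. A simplified restatement (illustrative only; the concrete names 'default' and 'controlplane' come from the changelog entry above, the settings names from the hunk itself):

```python
# Stand-ins for settings.DEFAULT_EXECUTION_QUEUE_NAME and
# settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME ('default' / 'controlplane'
# in this release).
PROTECTED_QUEUES = {"default", "controlplane"}


def may_delete_instance_group(name: str) -> bool:
    # DELETE is refused for either protected queue and allowed otherwise.
    return name not in PROTECTED_QUEUES


assert not may_delete_instance_group("controlplane")
assert may_delete_instance_group("my-custom-group")
```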
@@ -144,7 +144,6 @@ SUMMARIZABLE_FK_FIELDS = {
         'inventory_sources_with_failures',
         'organization_id',
         'kind',
-        'insights_credential_id',
     ),
     'host': DEFAULT_SUMMARY_FIELDS,
     'group': DEFAULT_SUMMARY_FIELDS,

@@ -171,7 +170,6 @@ SUMMARIZABLE_FK_FIELDS = {
     'role': ('id', 'role_field'),
     'notification_template': DEFAULT_SUMMARY_FIELDS,
     'instance_group': ('id', 'name', 'is_container_group'),
-    'insights_credential': DEFAULT_SUMMARY_FIELDS,
     'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
     'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
     'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),

@@ -724,6 +722,20 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
         else:
             return super(UnifiedJobTemplateSerializer, self).to_representation(obj)

+    def get_summary_fields(self, obj):
+        summary_fields = super().get_summary_fields(obj)
+
+        if self.is_detail_view:
+            resolved_ee = obj.resolve_execution_environment()
+            if resolved_ee is not None:
+                summary_fields['resolved_environment'] = {
+                    field: getattr(resolved_ee, field, None)
+                    for field in SUMMARIZABLE_FK_FIELDS['execution_environment']
+                    if getattr(resolved_ee, field, None) is not None
+                }
+
+        return summary_fields
+

 class UnifiedJobSerializer(BaseSerializer):
     show_capabilities = ['start', 'delete']
@@ -754,6 +766,7 @@ class UnifiedJobSerializer(BaseSerializer):
             'result_traceback',
             'event_processing_finished',
             'launched_by',
+            'work_unit_id',
         )

         extra_kwargs = {

@@ -1396,11 +1409,11 @@ class ProjectOptionsSerializer(BaseSerializer):

 class ExecutionEnvironmentSerializer(BaseSerializer):
     show_capabilities = ['edit', 'delete', 'copy']
-    managed_by_tower = serializers.ReadOnlyField()
+    managed = serializers.ReadOnlyField()

     class Meta:
         model = ExecutionEnvironment
-        fields = ('*', 'organization', 'image', 'managed_by_tower', 'credential', 'pull')
+        fields = ('*', 'organization', 'image', 'managed', 'credential', 'pull')

     def get_related(self, obj):
         res = super(ExecutionEnvironmentSerializer, self).get_related(obj)

@@ -1646,7 +1659,6 @@ class InventorySerializer(BaseSerializerWithVariables):
             'has_inventory_sources',
             'total_inventory_sources',
             'inventory_sources_with_failures',
-            'insights_credential',
             'pending_deletion',
         )

@@ -1671,8 +1683,6 @@ class InventorySerializer(BaseSerializerWithVariables):
                 copy=self.reverse('api:inventory_copy', kwargs={'pk': obj.pk}),
             )
         )
-        if obj.insights_credential:
-            res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
         if obj.organization:
             res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
         return res

@@ -1740,10 +1750,9 @@ class HostSerializer(BaseSerializerWithVariables):
             'has_inventory_sources',
             'last_job',
             'last_job_host_summary',
-            'insights_system_id',
             'ansible_facts_modified',
         )
-        read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified')
+        read_only_fields = ('last_job', 'last_job_host_summary', 'ansible_facts_modified')

     def build_relational_field(self, field_name, relation_info):
         field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)

@@ -1767,7 +1776,6 @@ class HostSerializer(BaseSerializerWithVariables):
                 smart_inventories=self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
                 ad_hoc_commands=self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
                 ad_hoc_command_events=self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
-                insights=self.reverse('api:host_insights', kwargs={'pk': obj.pk}),
                 ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
             )
         )
@@ -2473,14 +2481,14 @@ class ResourceAccessListElementSerializer(UserSerializer):

 class CredentialTypeSerializer(BaseSerializer):
     show_capabilities = ['edit', 'delete']
-    managed_by_tower = serializers.ReadOnlyField()
+    managed = serializers.ReadOnlyField()

     class Meta:
         model = CredentialType
-        fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs', 'injectors')
+        fields = ('*', 'kind', 'namespace', 'name', 'managed', 'inputs', 'injectors')

     def validate(self, attrs):
-        if self.instance and self.instance.managed_by_tower:
+        if self.instance and self.instance.managed:
             raise PermissionDenied(detail=_("Modifications not allowed for managed credential types"))

         old_inputs = {}

@@ -2512,8 +2520,8 @@ class CredentialTypeSerializer(BaseSerializer):
     def to_representation(self, data):
         value = super(CredentialTypeSerializer, self).to_representation(data)

-        # translate labels and help_text for credential fields "managed by Tower"
-        if value.get('managed_by_tower'):
+        # translate labels and help_text for credential fields "managed"
+        if value.get('managed'):
             value['name'] = _(value['name'])
             for field in value.get('inputs', {}).get('fields', []):
                 field['label'] = _(field['label'])

@@ -2532,11 +2540,11 @@ class CredentialTypeSerializer(BaseSerializer):
 class CredentialSerializer(BaseSerializer):
     show_capabilities = ['edit', 'delete', 'copy', 'use']
     capabilities_prefetch = ['admin', 'use']
-    managed_by_tower = serializers.ReadOnlyField()
+    managed = serializers.ReadOnlyField()

     class Meta:
         model = Credential
-        fields = ('*', 'organization', 'credential_type', 'managed_by_tower', 'inputs', 'kind', 'cloud', 'kubernetes')
+        fields = ('*', 'organization', 'credential_type', 'managed', 'inputs', 'kind', 'cloud', 'kubernetes')
         extra_kwargs = {'credential_type': {'label': _('Credential Type')}}

     def to_representation(self, data):

@@ -2603,7 +2611,7 @@ class CredentialSerializer(BaseSerializer):
         return summary_dict

     def validate(self, attrs):
-        if self.instance and self.instance.managed_by_tower:
+        if self.instance and self.instance.managed:
             raise PermissionDenied(detail=_("Modifications not allowed for managed credentials"))
         return super(CredentialSerializer, self).validate(attrs)

@@ -2615,7 +2623,7 @@ class CredentialSerializer(BaseSerializer):
         return ret

     def validate_organization(self, org):
-        if self.instance and self.instance.credential_type.kind == 'galaxy' and org is None:
+        if self.instance and (not self.instance.managed) and self.instance.credential_type.kind == 'galaxy' and org is None:
             raise serializers.ValidationError(_("Galaxy credentials must be owned by an Organization."))
         return org

@@ -2623,7 +2631,6 @@ class CredentialSerializer(BaseSerializer):
         if self.instance and credential_type.pk != self.instance.credential_type.pk:
             for related_objects in (
                 'ad_hoc_commands',
-                'insights_inventories',
                 'unifiedjobs',
                 'unifiedjobtemplates',
                 'projects',

@@ -3031,7 +3038,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
         res = super(JobSerializer, self).get_related(obj)
         res.update(
             dict(
-                job_events=self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
+                job_events=self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),  # TODO: consider adding job_created
                 job_host_summaries=self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
                 activity_stream=self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
                 notifications=self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),

@@ -3098,8 +3105,8 @@ class JobDetailSerializer(JobSerializer):
         fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')

     def get_playbook_counts(self, obj):
-        task_count = obj.job_events.filter(event='playbook_on_task_start').count()
-        play_count = obj.job_events.filter(event='playbook_on_play_start').count()
+        task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
+        play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()

         data = {'play_count': play_count, 'task_count': task_count}

@@ -3107,7 +3114,7 @@ class JobDetailSerializer(JobSerializer):

     def get_host_status_counts(self, obj):
         try:
-            counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
+            counts = obj.get_event_queryset().only('event_data').get(event='playbook_on_stats').get_host_status_counts()
         except JobEvent.DoesNotExist:
             counts = {}

@@ -3414,6 +3421,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
             'ask_limit_on_launch',
             'webhook_service',
             'webhook_credential',
+            '-execution_environment',
         )

     def get_related(self, obj):

@@ -3440,6 +3448,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
             survey_spec=self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
             copy=self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
         )
+        res.pop('execution_environment', None)  # EEs aren't meaningful for workflows
         if obj.organization:
             res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
         if obj.webhook_credential_id:

@@ -3491,6 +3500,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
             'allow_simultaneous',
             'job_template',
             'is_sliced_job',
+            '-execution_environment',
             '-execution_node',
             '-event_processing_finished',
             '-controller_node',

@@ -3504,6 +3514,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):

     def get_related(self, obj):
         res = super(WorkflowJobSerializer, self).get_related(obj)
+        res.pop('execution_environment', None)  # EEs aren't meaningful for workflows
         if obj.workflow_job_template:
             res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
         res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})

@@ -3528,7 +3539,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):

 class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
     class Meta:
-        fields = ('*', '-execution_node', '-controller_node')
+        fields = ('*', '-execution_environment', '-execution_node', '-controller_node')


 class WorkflowJobCancelSerializer(WorkflowJobSerializer):

@@ -4178,7 +4189,7 @@ class JobLaunchSerializer(BaseSerializer):
         elif field_name == 'credentials':
             for cred in obj.credentials.all():
                 cred_dict = dict(id=cred.id, name=cred.name, credential_type=cred.credential_type.pk, passwords_needed=cred.passwords_needed)
-                if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
+                if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
                     cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
                 defaults_dict.setdefault(field_name, []).append(cred_dict)
         else:

@@ -4905,8 +4916,12 @@ class InstanceGroupSerializer(BaseSerializer):
         return value

     def validate_name(self, value):
-        if self.instance and self.instance.name == 'tower' and value != 'tower':
-            raise serializers.ValidationError(_('tower instance group name may not be changed.'))
+        if self.instance and self.instance.name == settings.DEFAULT_EXECUTION_QUEUE_NAME and value != settings.DEFAULT_EXECUTION_QUEUE_NAME:
+            raise serializers.ValidationError(_('%s instance group name may not be changed.' % settings.DEFAULT_EXECUTION_QUEUE_NAME))
+
+        if self.instance and self.instance.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME and value != settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
+            raise serializers.ValidationError(_('%s instance group name may not be changed.' % settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME))

         return value

     def validate_credential(self, value):

@@ -4973,7 +4988,7 @@ class ActivityStreamSerializer(BaseSerializer):
         ('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
         ('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
         ('o_auth2_application', ('id', 'name', 'description')),
-        ('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
+        ('credential_type', ('id', 'name', 'description', 'kind', 'managed')),
         ('ad_hoc_command', ('id', 'name', 'status', 'limit')),
         ('workflow_approval', ('id', 'name', 'unified_job_id')),
     ]
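Several of the hunks above rename the read-only `managed_by_tower` field to `managed` in API responses. A hedged client-side compatibility sketch (a hypothetical helper, not part of this change) for consumers that must handle both server versions:

```python
def is_managed(resource: dict) -> bool:
    # Newer servers expose `managed`; older ones expose `managed_by_tower`.
    # Check both spellings so the client works against either version.
    return bool(resource.get("managed", resource.get("managed_by_tower", False)))

assert is_managed({"managed": True})
assert is_managed({"managed_by_tower": True})
assert not is_managed({})
```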
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}

awx/api/templates/api/inventory_update_events_list.md (new file): 1 line
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}

awx/api/templates/api/job_job_events_list.md (new file): 21 lines
@@ -0,0 +1,21 @@
+{% include "api/sub_list_api_view.md" %}
+{% ifmeth GET %}
+## Special limit feature for event list views
+
+Use the `limit` query string parameter to opt out of the pagination keys.
+Doing this can improve response times for jobs that produce a large volume
+of outputs.
+
+    ?limit=25
+
+This will set the page size to 25 and the `previous` and `next` keys will be
+omitted from the response data. The data structure will look like this.
+
+    {
+        "results": [
+            ...
+        ]
+    }
+
+{% endifmeth %}

@@ -1,25 +0,0 @@
-Make a GET request to retrieve the list of aggregated play data associated with a job
-
-## Filtering
-
-This endpoints supports a limited filtering subset:
-
-    ?event_id__in=1,2,3
-
-Will show only the given ids.
-
-    ?event_id__gt=1
-
-Will show ids greater than the given one.
-
-    ?event_id__lt=3
-
-Will show ids less than the given one.
-
-    ?failed=true
-
-Will show only failed plays. Alternatively `false` may be used.
-
-    ?play__icontains=test
-
-Will filter plays matching the substring `test`

@@ -1,27 +0,0 @@
-Make a GET request to retrieve the list of aggregated task data associated with the play given by event_id.
-
-`event_id` is a required query parameter and must match the job event id of the parent play in order to receive the list of tasks associated with the play
-
-## Filtering
-
-This endpoints supports a limited filtering subset:
-
-    ?event_id__in=1,2,3
-
-Will show only the given task ids under the play given by `event_id`.
-
-    ?event_id__gt=1
-
-Will show ids greater than the given one.
-
-    ?event_id__lt=3
-
-Will show ids less than the given one.
-
-    ?failed=true
-
-Will show only failed plays. Alternatively `false` may be used.
-
-    ?task__icontains=test
-
-Will filter tasks matching the substring `test`

awx/api/templates/api/project_update_events_list.md (new file): 1 line
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}

awx/api/templates/api/system_job_events_list.md (new file): 1 line
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}
@@ -16,7 +16,6 @@ from awx.api.views import (
     HostSmartInventoriesList,
     HostAdHocCommandsList,
     HostAdHocCommandEventsList,
-    HostInsights,
 )


@@ -33,7 +32,6 @@ urls = [
     url(r'^(?P<pk>[0-9]+)/smart_inventories/$', HostSmartInventoriesList.as_view(), name='host_smart_inventories_list'),
     url(r'^(?P<pk>[0-9]+)/ad_hoc_commands/$', HostAdHocCommandsList.as_view(), name='host_ad_hoc_commands_list'),
     url(r'^(?P<pk>[0-9]+)/ad_hoc_command_events/$', HostAdHocCommandEventsList.as_view(), name='host_ad_hoc_command_events_list'),
-    url(r'^(?P<pk>[0-9]+)/insights/$', HostInsights.as_view(), name='host_insights'),
 ]

 __all__ = ['urls']
@@ -3,14 +3,11 @@

 from django.conf.urls import url

-from awx.api.views import JobEventList, JobEventDetail, JobEventChildrenList, JobEventHostsList
+from awx.api.views import JobEventDetail, JobEventChildrenList

 urls = [
-    url(r'^$', JobEventList.as_view(), name='job_event_list'),
     url(r'^(?P<pk>[0-9]+)/$', JobEventDetail.as_view(), name='job_event_detail'),
     url(r'^(?P<pk>[0-9]+)/children/$', JobEventChildrenList.as_view(), name='job_event_children_list'),
-    url(r'^(?P<pk>[0-9]+)/hosts/$', JobEventHostsList.as_view(), name='job_event_hosts_list'),
 ]

 __all__ = ['urls']
@@ -21,7 +21,7 @@ from urllib3.exceptions import ConnectTimeoutError
 from django.conf import settings
 from django.core.exceptions import FieldError, ObjectDoesNotExist
 from django.db.models import Q, Sum
-from django.db import IntegrityError, transaction, connection
+from django.db import IntegrityError, ProgrammingError, transaction, connection
 from django.shortcuts import get_object_or_404
 from django.utils.safestring import mark_safe
 from django.utils.timezone import now

@@ -90,17 +90,14 @@ from awx.main import models
 from awx.main.utils import (
     camelcase_to_underscore,
     extract_ansible_vars,
-    get_awx_http_client_headers,
     get_object_or_400,
     getattrd,
     get_pk_from_dict,
     schedule_task_manager,
     ignore_inventory_computed_fields,
-    set_environ,
 )
 from awx.main.utils.encryption import encrypt_value
 from awx.main.utils.filters import SmartFilter
-from awx.main.utils.insights import filter_insights_api_response
 from awx.main.redact import UriCleaner
 from awx.api.permissions import (
     JobTemplateCallbackPermission,

@@ -172,11 +169,21 @@ from awx.api.views.root import (  # noqa
     ApiV2AttachView,
 )
 from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver  # noqa
+from awx.api.pagination import UnifiedJobEventPagination


 logger = logging.getLogger('awx.api.views')


+def unpartitioned_event_horizon(cls):
+    with connection.cursor() as cursor:
+        try:
+            cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
+            return cursor.fetchone()[0] or -1
+        except ProgrammingError:
+            return 0
+
+
 def api_exception_handler(exc, context):
     """
     Override default API exception handler to catch IntegrityError exceptions.
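The horizon value is the highest id left in the legacy unpartitioned event table; event pks above it live in the new partitioned table. A schematic restatement of the routing (illustrative only, not AWX code):

```python
def pick_event_model(pk: int, horizon: int) -> str:
    # horizon: MAX(id) from the _unpartitioned_* table, or -1/0 when that
    # table is empty or absent, in which case every pk routes to the new table.
    return "JobEvent" if pk > horizon else "UnpartitionedJobEvent"

assert pick_event_model(1500, horizon=1000) == "JobEvent"               # written after the migration
assert pick_event_model(900, horizon=1000) == "UnpartitionedJobEvent"   # legacy row
```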
@@ -685,6 +692,7 @@ class TeamAccessList(ResourceAccessList):

 class ExecutionEnvironmentList(ListCreateAPIView):

+    always_allow_superuser = False
     model = models.ExecutionEnvironment
     serializer_class = serializers.ExecutionEnvironmentSerializer
     swagger_topic = "Execution Environments"

@@ -692,10 +700,26 @@ class ExecutionEnvironmentList(ListCreateAPIView):

 class ExecutionEnvironmentDetail(RetrieveUpdateDestroyAPIView):

+    always_allow_superuser = False
     model = models.ExecutionEnvironment
     serializer_class = serializers.ExecutionEnvironmentSerializer
     swagger_topic = "Execution Environments"

+    def update(self, request, *args, **kwargs):
+        instance = self.get_object()
+        fields_to_check = ['name', 'description', 'organization', 'image', 'credential']
+        if instance.managed and request.user.can_access(models.ExecutionEnvironment, 'change', instance):
+            for field in fields_to_check:
+                if kwargs.get('partial') and field not in request.data:
+                    continue
+                left = getattr(instance, field, None)
+                if hasattr(left, 'id'):
+                    left = left.id
+                right = request.data.get(field)
+                if left != right:
+                    raise PermissionDenied(_("Only the 'pull' field can be edited for managed execution environments."))
+        return super().update(request, *args, **kwargs)
+

 class ExecutionEnvironmentJobTemplateList(SubListAPIView):
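The update override above means that, for a managed execution environment, only `pull` may change. A hedged illustration (hypothetical host, id, and token; the `requests` library is an assumption):

```python
import requests

HEADERS = {"Authorization": "Bearer TOKEN"}  # hypothetical token
EE_URL = "https://awx.example.com/api/v2/execution_environments/1/"  # a managed EE

# Changing only `pull` passes the field check and succeeds.
requests.patch(EE_URL, json={"pull": "always"}, headers=HEADERS)  # expect 200

# Changing name/description/organization/image/credential is rejected with
# "Only the 'pull' field can be edited for managed execution environments."
requests.patch(EE_URL, json={"image": "quay.io/example/ee:latest"}, headers=HEADERS)  # expect 403
```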
@@ -878,11 +902,17 @@ class ProjectUpdateEventsList(SubListAPIView):
     relationship = 'project_update_events'
     name = _('Project Update Events List')
     search_fields = ('stdout',)
+    pagination_class = UnifiedJobEventPagination

     def finalize_response(self, request, response, *args, **kwargs):
         response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
         return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)

+    def get_queryset(self):
+        pu = self.get_parent_object()
+        self.check_parent_access(pu)
+        return pu.get_event_queryset()
+

 class SystemJobEventsList(SubListAPIView):

@@ -892,11 +922,17 @@ class SystemJobEventsList(SubListAPIView):
     relationship = 'system_job_events'
     name = _('System Job Events List')
     search_fields = ('stdout',)
+    pagination_class = UnifiedJobEventPagination

     def finalize_response(self, request, response, *args, **kwargs):
         response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
         return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)

+    def get_queryset(self):
+        job = self.get_parent_object()
+        self.check_parent_access(job)
+        return job.get_event_queryset()
+

 class ProjectUpdateCancel(RetrieveAPIView):

@@ -1274,7 +1310,7 @@ class CredentialTypeDetail(RetrieveUpdateDestroyAPIView):

     def destroy(self, request, *args, **kwargs):
         instance = self.get_object()
-        if instance.managed_by_tower:
+        if instance.managed:
             raise PermissionDenied(detail=_("Deletion not allowed for managed credential types"))
         if instance.credentials.exists():
             raise PermissionDenied(detail=_("Credential types that are in use cannot be deleted"))

@@ -1389,7 +1425,7 @@ class CredentialDetail(RetrieveUpdateDestroyAPIView):

     def destroy(self, request, *args, **kwargs):
         instance = self.get_object()
-        if instance.managed_by_tower:
+        if instance.managed:
             raise PermissionDenied(detail=_("Deletion not allowed for managed credentials"))
         return super(CredentialDetail, self).destroy(request, *args, **kwargs)
@@ -1665,106 +1701,6 @@
     default_code = 'gateway_timeout'


-class HostInsights(GenericAPIView):
-
-    model = models.Host
-    serializer_class = serializers.EmptySerializer
-
-    def _call_insights_api(self, url, session, headers):
-        try:
-            with set_environ(**settings.AWX_TASK_ENV):
-                res = session.get(url, headers=headers, timeout=120)
-        except requests.exceptions.SSLError:
-            raise BadGateway(_('SSLError while trying to connect to {}').format(url))
-        except requests.exceptions.Timeout:
-            raise GatewayTimeout(_('Request to {} timed out.').format(url))
-        except requests.exceptions.RequestException as e:
-            raise BadGateway(_('Unknown exception {} while trying to GET {}').format(e, url))
-
-        if res.status_code == 401:
-            raise BadGateway(_('Unauthorized access. Please check your Insights Credential username and password.'))
-        elif res.status_code != 200:
-            raise BadGateway(
-                _('Failed to access the Insights API at URL {}.' ' Server responded with {} status code and message {}').format(
-                    url, res.status_code, res.content
-                )
-            )
-
-        try:
-            return res.json()
-        except ValueError:
-            raise BadGateway(_('Expected JSON response from Insights at URL {}' ' but instead got {}').format(url, res.content))
-
-    def _get_session(self, username, password):
-        session = requests.Session()
-        session.auth = requests.auth.HTTPBasicAuth(username, password)
-
-        return session
-
-    def _get_platform_info(self, host, session, headers):
-        url = '{}/api/inventory/v1/hosts?insights_id={}'.format(settings.INSIGHTS_URL_BASE, host.insights_system_id)
-        res = self._call_insights_api(url, session, headers)
-        try:
-            res['results'][0]['id']
-        except (IndexError, KeyError):
-            raise NotFound(_('Could not translate Insights system ID {}' ' into an Insights platform ID.').format(host.insights_system_id))
-
-        return res['results'][0]
-
-    def _get_reports(self, platform_id, session, headers):
-        url = '{}/api/insights/v1/system/{}/reports/'.format(settings.INSIGHTS_URL_BASE, platform_id)
-
-        return self._call_insights_api(url, session, headers)
-
-    def _get_remediations(self, platform_id, session, headers):
-        url = '{}/api/remediations/v1/remediations?system={}'.format(settings.INSIGHTS_URL_BASE, platform_id)
-
-        remediations = []
-
-        # Iterate over all of the pages of content.
-        while url:
-            data = self._call_insights_api(url, session, headers)
-            remediations.extend(data['data'])
-
-            url = data['links']['next']  # Will be `None` if this is the last page.
-
-        return remediations
-
-    def _get_insights(self, host, session, headers):
-        platform_info = self._get_platform_info(host, session, headers)
-        platform_id = platform_info['id']
-        reports = self._get_reports(platform_id, session, headers)
-        remediations = self._get_remediations(platform_id, session, headers)
-
-        return {'insights_content': filter_insights_api_response(platform_info, reports, remediations)}
-
-    def get(self, request, *args, **kwargs):
-        host = self.get_object()
-        cred = None
-
-        if host.insights_system_id is None:
-            return Response(dict(error=_('This host is not recognized as an Insights host.')), status=status.HTTP_404_NOT_FOUND)
-
-        if host.inventory and host.inventory.insights_credential:
-            cred = host.inventory.insights_credential
-        else:
-            return Response(dict(error=_('The Insights Credential for "{}" was not found.').format(host.inventory.name)), status=status.HTTP_404_NOT_FOUND)
-
-        username = cred.get_input('username', default='')
-        password = cred.get_input('password', default='')
-        session = self._get_session(username, password)
-        headers = get_awx_http_client_headers()
-
-        data = self._get_insights(host, session, headers)
-        return Response(data, status=status.HTTP_200_OK)
-
-    def handle_exception(self, exc):
-        # Continue supporting the slightly different way we have handled error responses on this view.
-        response = super().handle_exception(exc)
-        response.data['error'] = response.data.pop('detail')
-        return response


 class GroupList(ListCreateAPIView):

     model = models.Group
@@ -3602,7 +3538,7 @@ class JobRelaunch(RetrieveAPIView):
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
host_qs = obj.retry_qs(retry_hosts)
|
||||
if not obj.job_events.filter(event='playbook_on_stats').exists():
|
||||
if not obj.get_event_queryset().filter(event='playbook_on_stats').exists():
|
||||
return Response(
|
||||
{'hosts': _('Cannot retry on {status_value} hosts, playbook stats not available.').format(status_value=retry_hosts)},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
@@ -3729,18 +3665,22 @@ class JobHostSummaryDetail(RetrieveAPIView):
|
||||
serializer_class = serializers.JobHostSummarySerializer
|
||||
|
||||
|
||||
class JobEventList(NoTruncateMixin, ListAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
search_fields = ('stdout',)
|
||||
|
||||
|
||||
class JobEventDetail(RetrieveAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
|
||||
@property
|
||||
def is_partitioned(self):
|
||||
if 'pk' not in self.kwargs:
|
||||
return True
|
||||
return int(self.kwargs['pk']) > unpartitioned_event_horizon(models.JobEvent)
|
||||
|
||||
@property
|
||||
def model(self):
|
||||
if self.is_partitioned:
|
||||
return models.JobEvent
|
||||
return models.UnpartitionedJobEvent
|
||||
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
context.update(no_truncate=True)
|
||||
@@ -3749,33 +3689,31 @@ class JobEventDetail(RetrieveAPIView):

class JobEventChildrenList(NoTruncateMixin, SubListAPIView):

    model = models.JobEvent
    serializer_class = serializers.JobEventSerializer
    parent_model = models.JobEvent
    relationship = 'children'
    name = _('Job Event Children List')
    search_fields = ('stdout',)

    def get_queryset(self):
        parent_event = self.get_parent_object()
        self.check_parent_access(parent_event)
        qs = self.request.user.get_queryset(self.model).filter(parent_uuid=parent_event.uuid)
        return qs
    @property
    def is_partitioned(self):
        if 'pk' not in self.kwargs:
            return True
        return int(self.kwargs['pk']) > unpartitioned_event_horizon(models.JobEvent)

    @property
    def model(self):
        if self.is_partitioned:
            return models.JobEvent
        return models.UnpartitionedJobEvent
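
The is_partitioned/model property pair above is the routing pattern for reads that may predate the event-partitioning migration. A minimal sketch of the same decision, assuming only that unpartitioned_event_horizon() returns the highest primary key still stored in the legacy table:

# Sketch only, not part of the diff. `horizon` is the highest pk that still
# lives in the legacy (unpartitioned) event table.
def resolve_event_model(pk, horizon):
    # Newer events live in the partitioned table; anything at or below the
    # horizon must be read from the legacy table instead.
    if pk > horizon:
        return 'models.JobEvent'
    return 'models.UnpartitionedJobEvent'

assert resolve_event_model(pk=500, horizon=100) == 'models.JobEvent'
assert resolve_event_model(pk=100, horizon=100) == 'models.UnpartitionedJobEvent'
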
class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):

    model = models.Host
    serializer_class = serializers.HostSerializer
    parent_model = models.JobEvent
    relationship = 'hosts'
    name = _('Job Event Hosts List')
    @property
    def parent_model(self):
        return self.model

    def get_queryset(self):
        parent_event = self.get_parent_object()
        self.check_parent_access(parent_event)
        qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
        return qs
        return parent_event.job.get_event_queryset().filter(parent_uuid=parent_event.uuid)


class BaseJobEventsList(NoTruncateMixin, SubListAPIView):
@@ -3811,12 +3749,12 @@ class GroupJobEventsList(BaseJobEventsList):
class JobJobEventsList(BaseJobEventsList):

    parent_model = models.Job
    pagination_class = UnifiedJobEventPagination

    def get_queryset(self):
        job = self.get_parent_object()
        self.check_parent_access(job)
        qs = job.job_events.select_related('host').order_by('start_line')
        return qs.all()
        return job.get_event_queryset().select_related('host').order_by('start_line')


class AdHocCommandList(ListCreateAPIView):
@@ -3974,6 +3912,11 @@ class AdHocCommandEventList(NoTruncateMixin, ListAPIView):
    serializer_class = serializers.AdHocCommandEventSerializer
    search_fields = ('stdout',)

    def get_queryset(self):
        adhoc = self.get_parent_object()
        self.check_parent_access(adhoc)
        return adhoc.get_event_queryset()


class AdHocCommandEventDetail(RetrieveAPIView):

@@ -3994,12 +3937,21 @@ class BaseAdHocCommandEventsList(NoTruncateMixin, SubListAPIView):
    relationship = 'ad_hoc_command_events'
    name = _('Ad Hoc Command Events List')
    search_fields = ('stdout',)
    pagination_class = UnifiedJobEventPagination

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        return parent.get_event_queryset()


class HostAdHocCommandEventsList(BaseAdHocCommandEventsList):

    parent_model = models.Host

    def get_queryset(self):
        return super(BaseAdHocCommandEventsList, self).get_queryset()


# class GroupJobEventsList(BaseJobEventsList):
#    parent_model = Group

@@ -38,6 +38,9 @@ from awx.api.serializers import (
)
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin

from awx.api.pagination import UnifiedJobEventPagination


logger = logging.getLogger('awx.api.views.organization')


@@ -49,6 +52,12 @@ class InventoryUpdateEventsList(SubListAPIView):
    relationship = 'inventory_update_events'
    name = _('Inventory Update Events List')
    search_fields = ('stdout',)
    pagination_class = UnifiedJobEventPagination

    def get_queryset(self):
        iu = self.get_parent_object()
        self.check_parent_access(iu)
        return iu.get_event_queryset()

    def finalize_response(self, request, response, *args, **kwargs):
        response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS

@@ -52,6 +52,11 @@ class UnifiedJobDeletionMixin(object):
            else:
                # if it has been > 1 minute, events are probably lost
                logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))

        # Manually cascade delete events if unpartitioned job
        if obj.has_unpartitioned_events:
            obj.get_event_queryset().delete()

        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

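
The new has_unpartitioned_events branch exists because rows in the legacy event table carry no database-level cascade back to the job row. A hedged sketch of the delete ordering this hunk enforces, using the helper names from the diff:

# Sketch of the deletion ordering above: legacy events first, then the job.
# `obj` is a unified job; get_event_queryset() and has_unpartitioned_events
# are the model helpers referenced in the hunk.
def delete_unified_job(obj):
    if obj.has_unpartitioned_events:
        # Legacy rows are not covered by FK cascades, so remove them manually.
        obj.get_event_queryset().delete()
    obj.delete()
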
@@ -30,6 +30,7 @@ from awx.api.versioning import reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
from awx.main.utils import set_environ
from awx.main.utils.licensing import get_licenser

logger = logging.getLogger('awx.api.views.root')

@@ -106,7 +107,6 @@ class ApiVersionRootView(APIView):
        data['hosts'] = reverse('api:host_list', request=request)
        data['job_templates'] = reverse('api:job_template_list', request=request)
        data['jobs'] = reverse('api:job_list', request=request)
        data['job_events'] = reverse('api:job_event_list', request=request)
        data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
        data['system_job_templates'] = reverse('api:system_job_template_list', request=request)
        data['system_jobs'] = reverse('api:system_job_list', request=request)
@@ -174,8 +174,6 @@ class ApiV2SubscriptionView(APIView):
            self.permission_denied(request)  # Raises PermissionDenied exception.

    def post(self, request):
        from awx.main.utils.common import get_licenser

        data = request.data.copy()
        if data.get('subscriptions_password') == '$encrypted$':
            data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
@@ -223,7 +221,6 @@ class ApiV2AttachView(APIView):
        user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
        pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
        if pool_id and user and pw:
            from awx.main.utils.common import get_licenser

            data = request.data.copy()
            try:
@@ -265,8 +262,6 @@ class ApiV2ConfigView(APIView):
    def get(self, request, format=None):
        '''Return various sitewide configuration settings'''

        from awx.main.utils.common import get_licenser

        license_data = get_licenser().validate()

        if not license_data.get('valid_key', False):
@@ -302,7 +297,9 @@ class ApiV2ConfigView(APIView):
        ):
            data.update(
                dict(
                    project_base_dir=settings.PROJECTS_ROOT, project_local_paths=Project.get_local_path_choices(), custom_virtualenvs=get_custom_venv_choices()
                    project_base_dir=settings.PROJECTS_ROOT,
                    project_local_paths=Project.get_local_path_choices(),
                    custom_virtualenvs=get_custom_venv_choices(),
                )
            )
        elif JobTemplate.accessible_objects(request.user, 'admin_role').exists():
@@ -319,8 +316,6 @@ class ApiV2ConfigView(APIView):
            logger.info(smart_text(u"Invalid JSON submitted for license."), extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)

        from awx.main.utils.common import get_licenser

        license_data = json.loads(data_actual)
        if 'license_key' in license_data:
            return Response({"error": _('Legacy license submitted. A subscription manifest is now required.')}, status=status.HTTP_400_BAD_REQUEST)

@@ -23,6 +23,7 @@ import cachetools
# AWX
from awx.main.utils import encrypt_field, decrypt_field
from awx.conf import settings_registry
from awx.conf.fields import PrimaryKeyRelatedField
from awx.conf.models import Setting
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field

@@ -420,9 +421,9 @@ class SettingsWrapper(UserSettingsHolder):
            raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))

        try:
            data = field.to_representation(value)
            data = None if value is None and isinstance(field, PrimaryKeyRelatedField) else field.to_representation(value)
            setting_value = field.run_validation(data)
            db_value = field.to_representation(setting_value)
            db_value = None if setting_value is None and isinstance(field, PrimaryKeyRelatedField) else field.to_representation(setting_value)
        except Exception as e:
            logger.exception('Unable to assign value "%r" to setting "%s".', value, name, exc_info=True)
            raise e

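
The change above avoids calling to_representation(None) on a PrimaryKeyRelatedField, which would otherwise try to dereference a related object that does not exist. Extracted into a standalone guard (a sketch, with the field class passed in rather than imported):

# Sketch of the None-guard introduced above. `field` is a DRF-style field;
# only pk-related fields need the special case, because their
# to_representation() assumes a real related instance.
def safe_to_representation(field, value, pk_related_cls):
    if value is None and isinstance(field, pk_related_cls):
        return None
    return field.to_representation(value)
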
@@ -45,6 +45,7 @@ from awx.main.models import (
    InventoryUpdateEvent,
    Job,
    JobEvent,
    UnpartitionedJobEvent,
    JobHostSummary,
    JobLaunchConfig,
    JobTemplate,
@@ -464,7 +465,7 @@ class BaseAccess(object):
            if display_method == 'schedule':
                user_capabilities['schedule'] = user_capabilities['start']
                continue
            elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource)):
            elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource, ExecutionEnvironment)):
                user_capabilities['delete'] = user_capabilities['edit']
                continue
            elif display_method == 'copy' and isinstance(obj, (Group, Host)):
@@ -866,13 +867,11 @@ class InventoryAccess(BaseAccess):
        # If no data is specified, just checking for generic add permission?
        if not data:
            return Organization.accessible_objects(self.user, 'inventory_admin_role').exists()
        return self.check_related('organization', Organization, data, role_field='inventory_admin_role') and self.check_related(
            'insights_credential', Credential, data, role_field='use_role'
        )
        return self.check_related('organization', Organization, data, role_field='inventory_admin_role')

    @check_superuser
    def can_change(self, obj, data):
        return self.can_admin(obj, data) and self.check_related('insights_credential', Credential, data, obj=obj, role_field='use_role')
        return self.can_admin(obj, data)

    @check_superuser
    def can_admin(self, obj, data):
@@ -1037,7 +1036,7 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess):

    def can_add(self, data):
        if not data or 'inventory' not in data:
            return Organization.accessible_objects(self.user, 'admin_role').exists()
            return Inventory.accessible_objects(self.user, 'admin_role').exists()

        if not self.check_related('source_project', Project, data, role_field='use_role'):
            return False
@@ -1120,7 +1119,7 @@ class CredentialTypeAccess(BaseAccess):
    I can create when:
    - I'm a superuser:
    I can change when:
    - I'm a superuser and the type is not "managed by Tower"
    - I'm a superuser and the type is not "managed"
    """

    model = CredentialType
@@ -1206,7 +1205,7 @@ class CredentialAccess(BaseAccess):
    def get_user_capabilities(self, obj, **kwargs):
        user_capabilities = super(CredentialAccess, self).get_user_capabilities(obj, **kwargs)
        user_capabilities['use'] = self.can_use(obj)
        if getattr(obj, 'managed_by_tower', False) is True:
        if getattr(obj, 'managed', False) is True:
            user_capabilities['edit'] = user_capabilities['delete'] = False
        return user_capabilities

@@ -1369,6 +1368,8 @@ class ExecutionEnvironmentAccess(BaseAccess):
        return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')

    def can_delete(self, obj):
        if obj.managed:
            raise PermissionDenied
        return self.can_change(obj, None)


@@ -2352,6 +2353,11 @@ class JobEventAccess(BaseAccess):
        return False


class UnpartitionedJobEventAccess(JobEventAccess):

    model = UnpartitionedJobEvent


class ProjectUpdateEventAccess(BaseAccess):
    """
    I can see project update event records whenever I can access the project update
@@ -2895,3 +2901,4 @@ class WorkflowApprovalTemplateAccess(BaseAccess):

for cls in BaseAccess.__subclasses__():
    access_registry[cls.model] = cls
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess

@@ -6,7 +6,7 @@ import platform
import distro

from django.db import connection
from django.db.models import Count, Max, Min
from django.db.models import Count
from django.conf import settings
from django.contrib.sessions.models import Session
from django.utils.timezone import now, timedelta
@@ -15,7 +15,7 @@ from django.utils.translation import ugettext_lazy as _
from psycopg2.errors import UntranslatableCharacter

from awx.conf.license import get_license
from awx.main.utils import get_awx_version, get_custom_venv_choices, camelcase_to_underscore, datetime_hook
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
from awx.main import models
from awx.main.analytics import register

@@ -58,7 +58,10 @@ def four_hour_slicing(key, since, until, last_gather):
    horizon = until - timedelta(weeks=4)
    last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
    last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
    last_entry = max(last_entries.get(key) or last_gather, horizon)
    try:
        last_entry = max(last_entries.get(key) or last_gather, horizon)
    except TypeError:  # last_entries has a stale non-datetime entry for this collector
        last_entry = max(last_gather, horizon)

    start, end = last_entry, None
    while start < until:
@@ -67,7 +70,7 @@ def four_hour_slicing(key, since, until, last_gather):
        start = end


def events_slicing(key, since, until, last_gather):
def _identify_lower(key, since, until, last_gather):
    from awx.conf.models import Setting

    last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
@@ -77,16 +80,8 @@ def events_slicing(key, since, until, last_gather):
    lower = since or last_gather
    if not since and last_entries.get(key):
        lower = horizon
    pk_values = models.JobEvent.objects.filter(created__gte=lower, created__lte=until).aggregate(Min('pk'), Max('pk'))

    previous_pk = pk_values['pk__min'] - 1 if pk_values['pk__min'] is not None else 0
    if not since and last_entries.get(key):
        previous_pk = max(last_entries[key], previous_pk)
    final_pk = pk_values['pk__max'] or 0

    step = 100000
    for start in range(previous_pk, final_pk + 1, step):
        yield (start, min(start + step, final_pk))
    return lower, last_entries

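
four_hour_slicing above yields (start, end) windows so a collector never queries more than a bounded time range at once. A self-contained sketch of just the windowing step, assuming a fixed four-hour stride:

import datetime

# Sketch of the slicing generator's windowing logic (fixed 4-hour step).
def four_hour_windows(last_entry, until):
    start = last_entry
    while start < until:
        end = min(start + datetime.timedelta(hours=4), until)
        yield (start, end)
        start = end

# Example: a 10-hour range produces 4h + 4h + 2h windows.
t0 = datetime.datetime(2021, 3, 18, 0, 0)
windows = list(four_hour_windows(t0, t0 + datetime.timedelta(hours=10)))
assert len(windows) == 3
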
@register('config', '1.3', description=_('General platform configuration.'))
@@ -120,7 +115,7 @@ def config(since, **kwargs):
    }


@register('counts', '1.0', description=_('Counts of objects such as organizations, inventories, and projects'))
@register('counts', '1.1', description=_('Counts of objects such as organizations, inventories, and projects'))
def counts(since, **kwargs):
    counts = {}
    for cls in (
@@ -138,9 +133,6 @@ def counts(since, **kwargs):
    ):
        counts[camelcase_to_underscore(cls.__name__)] = cls.objects.count()

    venvs = get_custom_venv_choices()
    counts['custom_virtualenvs'] = len([v for v in venvs if os.path.basename(v.rstrip('/')) != 'ansible'])

    inv_counts = dict(models.Inventory.objects.order_by().values_list('kind').annotate(Count('kind')))
    inv_counts['normal'] = inv_counts.get('', 0)
    inv_counts.pop('', None)
@@ -183,12 +175,12 @@ def org_counts(since, **kwargs):
def cred_type_counts(since, **kwargs):
    counts = {}
    for cred_type in models.CredentialType.objects.annotate(num_credentials=Count('credentials', distinct=True)).values(
        'name', 'id', 'managed_by_tower', 'num_credentials'
        'name', 'id', 'managed', 'num_credentials'
    ):
        counts[cred_type['id']] = {
            'name': cred_type['name'],
            'credential_count': cred_type['num_credentials'],
            'managed_by_tower': cred_type['managed_by_tower'],
            'managed': cred_type['managed'],
        }
    return counts

@@ -335,39 +327,49 @@ def _copy_table(table, query, path):
    return file.file_list()


@register('events_table', '1.2', format='csv', description=_('Automation task records'), expensive=events_slicing)
def events_table(since, full_path, until, **kwargs):
def _events_table(since, full_path, until, tbl, where_column, project_job_created=False, **kwargs):
    def query(event_data):
        return f'''COPY (SELECT main_jobevent.id,
                          main_jobevent.created,
                          main_jobevent.modified,
                          main_jobevent.uuid,
                          main_jobevent.parent_uuid,
                          main_jobevent.event,
                          {event_data}->'task_action' AS task_action,
                          (CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
                          main_jobevent.failed,
                          main_jobevent.changed,
                          main_jobevent.playbook,
                          main_jobevent.play,
                          main_jobevent.task,
                          main_jobevent.role,
                          main_jobevent.job_id,
                          main_jobevent.host_id,
                          main_jobevent.host_name,
                          CAST({event_data}->>'start' AS TIMESTAMP WITH TIME ZONE) AS start,
                          CAST({event_data}->>'end' AS TIMESTAMP WITH TIME ZONE) AS end,
                          {event_data}->'duration' AS duration,
                          {event_data}->'res'->'warnings' AS warnings,
                          {event_data}->'res'->'deprecations' AS deprecations
                          FROM main_jobevent
                          WHERE (main_jobevent.id > {since} AND main_jobevent.id <= {until})
                          ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''
    query = f'''COPY (SELECT {tbl}.id,
                      {tbl}.created,
                      {tbl}.modified,
                      {tbl + '.job_created' if project_job_created else 'NULL'} as job_created,
                      {tbl}.uuid,
                      {tbl}.parent_uuid,
                      {tbl}.event,
                      task_action,
                      (CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
                      {tbl}.failed,
                      {tbl}.changed,
                      {tbl}.playbook,
                      {tbl}.play,
                      {tbl}.task,
                      {tbl}.role,
                      {tbl}.job_id,
                      {tbl}.host_id,
                      {tbl}.host_name,
                      CAST(x.start AS TIMESTAMP WITH TIME ZONE) AS start,
                      CAST(x.end AS TIMESTAMP WITH TIME ZONE) AS end,
                      x.duration AS duration,
                      x.res->'warnings' AS warnings,
                      x.res->'deprecations' AS deprecations
                      FROM {tbl}, json_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "start" text, "end" text)
                      WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
    return query

    try:
        return _copy_table(table='events', query=query("main_jobevent.event_data::json"), path=full_path)
        return _copy_table(table='events', query=query(f"{tbl}.event_data::json"), path=full_path)
    except UntranslatableCharacter:
        return _copy_table(table='events', query=query("replace(main_jobevent.event_data::text, '\\u0000', '')::json"), path=full_path)
        return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::json"), path=full_path)


@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
def events_table_unpartitioned(since, full_path, until, **kwargs):
    return _events_table(since, full_path, until, '_unpartitioned_main_jobevent', 'created', **kwargs)


@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
def events_table_partitioned_modified(since, full_path, until, **kwargs):
    return _events_table(since, full_path, until, 'main_jobevent', 'modified', project_job_created=True, **kwargs)


@register('unified_jobs_table', '1.2', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)

@@ -270,7 +270,8 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
                if not files:
                    if collection_type != 'dry-run':
                        with disable_activity_stream():
                            last_entries[key] = max(last_entries[key], end) if last_entries.get(key) else end
                            entry = last_entries.get(key)
                            last_entries[key] = max(entry, end) if entry and type(entry) == type(end) else end
                            settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
                    continue

@@ -293,7 +294,8 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti

                if slice_succeeded and collection_type != 'dry-run':
                    with disable_activity_stream():
                        last_entries[key] = max(last_entries[key], end) if last_entries.get(key) else end
                        entry = last_entries.get(key)
                        last_entries[key] = max(entry, end) if entry and type(entry) == type(end) else end
                        settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
            except Exception:
                succeeded = False

@@ -39,7 +39,6 @@ def metrics():
        ],
        registry=REGISTRY,
    )
    CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
    RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
    PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the system', registry=REGISTRY)
    STATUS = Gauge(
@@ -159,7 +158,6 @@ def metrics():
    HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])

    SCHEDULE_COUNT.set(current_counts['schedule'])
    CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])

    USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
    USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])

@@ -177,6 +177,24 @@ register(
    read_only=True,
)

register(
    'DEFAULT_CONTROL_PLANE_QUEUE_NAME',
    field_class=fields.CharField,
    label=_('The instance group where control plane tasks run'),
    category=_('System'),
    category_slug='system',
    read_only=True,
)

register(
    'DEFAULT_EXECUTION_QUEUE_NAME',
    field_class=fields.CharField,
    label=_('The instance group where user jobs run (currently only on non-VM installs)'),
    category=_('System'),
    category_slug='system',
    read_only=True,
)

register(
    'DEFAULT_EXECUTION_ENVIRONMENT',
    field_class=fields.PrimaryKeyRelatedField,
@@ -344,6 +362,17 @@ register(
    category_slug='jobs',
)

register(
    'MAX_WEBSOCKET_EVENT_RATE',
    field_class=fields.IntegerField,
    min_value=0,
    default=30,
    label=_('Job Event Maximum Websocket Messages Per Second'),
    help_text=_('Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit.'),
    category=_('Jobs'),
    category_slug='jobs',
)
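
MAX_WEBSOCKET_EVENT_RATE caps UI live-output messages per second, with 0 meaning unlimited. A minimal sketch of how such a per-second cap can be enforced on the emitting side (illustrative only; this is not the code AWX ships):

import time

# Illustrative per-second message cap; 0 disables limiting, matching the
# semantics of the MAX_WEBSOCKET_EVENT_RATE setting registered above.
class EventRateLimiter:
    def __init__(self, max_per_second):
        self.max_per_second = max_per_second
        self.window_start = time.monotonic()
        self.sent_in_window = 0

    def allow(self):
        if self.max_per_second == 0:
            return True
        now = time.monotonic()
        if now - self.window_start >= 1.0:
            # start a new one-second window
            self.window_start = now
            self.sent_in_window = 0
        if self.sent_in_window < self.max_per_second:
            self.sent_in_window += 1
            return True
        return False
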
register(
    'SCHEDULE_MAX_JOBS',
    field_class=fields.IntegerField,
@@ -663,6 +692,15 @@ register(
    unit=_('seconds'),
)

register(
    'IS_K8S',
    field_class=fields.BooleanField,
    read_only=True,
    category=_('System'),
    category_slug='system',
    help_text=_('Indicates whether the instance is part of a kubernetes-based deployment.'),
)


def logging_validate(serializer, attrs):
    if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

@@ -14,7 +14,7 @@ __all__ = [
    'STANDARD_INVENTORY_UPDATE_ENV',
]

CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
PRIVILEGE_ESCALATION_METHODS = [
    ('sudo', _('Sudo')),
    ('su', _('Su')),
@@ -41,6 +41,7 @@ STANDARD_INVENTORY_UPDATE_ENV = {
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
CENSOR_VALUE = '************'
ENV_BLOCKLIST = frozenset(
    (

awx/main/credential_plugins/dsv.py (new file, 56 lines)
@@ -0,0 +1,56 @@
from .plugin import CredentialPlugin

from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from thycotic.secrets.vault import SecretsVault


dsv_inputs = {
    'fields': [
        {
            'id': 'tenant',
            'label': _('Tenant'),
            'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
            'type': 'string',
        },
        {
            'id': 'tld',
            'label': _('Top-level Domain (TLD)'),
            'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
            'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
            'default': 'com',
        },
        {'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
        {
            'id': 'client_secret',
            'label': _('Client Secret'),
            'type': 'string',
            'secret': True,
        },
    ],
    'metadata': [
        {
            'id': 'path',
            'label': _('Secret Path'),
            'type': 'string',
            'help_text': _('The secret path e.g. /test/secret1'),
        },
    ],
    'required': ['tenant', 'client_id', 'client_secret', 'path'],
}

if settings.DEBUG:
    dsv_inputs['fields'].append(
        {
            'id': 'url_template',
            'label': _('URL template'),
            'type': 'string',
            'default': 'https://{}.secretsvaultcloud.{}/v1',
        }
    )

dsv_plugin = CredentialPlugin(
    'Thycotic DevOps Secrets Vault',
    dsv_inputs,
    lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
)
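
The dsv_plugin backend above filters the collected credential inputs down to the declared fields before constructing the SecretsVault client, then resolves the metadata path. The same lambda, unrolled for readability (a sketch with identical behavior; SecretsVault is the thycotic client imported above):

# Unrolled equivalent of the backend lambda above (sketch, same behavior).
def dsv_backend(**kwargs):
    field_ids = [field['id'] for field in dsv_inputs['fields']]
    # Keep only declared fields (tenant, tld, client_id, client_secret, ...);
    # metadata such as 'path' is not a client constructor argument.
    client_kwargs = {k: v for (k, v) in kwargs.items() if k in field_ids}
    vault = SecretsVault(**client_kwargs)
    return vault.get_secret(kwargs['path'])
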
@@ -63,7 +63,15 @@ base_inputs = {
        'id': 'secret_path',
        'label': _('Path to Secret'),
        'type': 'string',
        'help_text': _('The path to the secret stored in the secret backend e.g, /some/secret/'),
        'help_text': _(
            (
                'The path to the secret stored in the secret backend e.g, /some/secret/. It is recommended'
                ' that you use the secret backend field to identify the storage backend and to use this field'
                ' for locating a specific secret within that store. However, if you prefer to fully identify'
                ' both the secret backend and one of its secrets using only this field, join their locations'
                ' into a single path without any additional separators, e.g, /location/of/backend/some/secret.'
            )
        ),
    },
    {
        'id': 'auth_path',

@@ -142,7 +142,8 @@ class CallbackBrokerWorker(BaseWorker):
                logger.exception('Database Error Saving Job Event')
        duration_to_save = time.perf_counter() - duration_to_save
        for e in events:
            emit_event_detail(e)
            if not getattr(e, '_skip_websocket_message', False):
                emit_event_detail(e)
        self.buff = {}
        self.last_flush = time.time()
        # only update metrics if we saved events
@@ -207,7 +208,13 @@ class CallbackBrokerWorker(BaseWorker):
                GuidMiddleware.set_guid('')
                return

            skip_websocket_message = body.pop('skip_websocket_message', False)

            event = cls.create_from_data(**body)

            if skip_websocket_message:
                event._skip_websocket_message = True

            self.buff.setdefault(cls, []).append(event)

            retries = 0
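
The two hunks above thread a transient skip_websocket_message flag from the message body onto the saved event object, so bulk-saved events can bypass UI emission at flush time. A compact sketch of the producer/consumer handshake (emit stands in for emit_event_detail):

# Sketch of the flag handshake above. `body` is the raw callback payload,
# `events` the flushed batch; emit(e) stands in for emit_event_detail(e).
def attach_flag(cls, body):
    skip = body.pop('skip_websocket_message', False)
    event = cls.create_from_data(**body)
    if skip:
        event._skip_websocket_message = True
    return event

def flush(events, emit):
    for e in events:
        if not getattr(e, '_skip_websocket_message', False):
            emit(e)
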
@@ -642,7 +642,7 @@ class CredentialInputField(JSONSchemaField):

        # `ssh_key_unlock` requirements are very specific and can't be
        # represented without complicated JSON schema
        if model_instance.credential_type.managed_by_tower is True and 'ssh_key_unlock' in defined_fields:
        if model_instance.credential_type.managed is True and 'ssh_key_unlock' in defined_fields:

            # in order to properly test the necessity of `ssh_key_unlock`, we
            # need to know the real value of `ssh_key_data`; for a payload like:
@@ -711,7 +711,7 @@ class CredentialTypeInputField(JSONSchemaField):
        }

    def validate(self, value, model_instance):
        if isinstance(value, dict) and 'dependencies' in value and not model_instance.managed_by_tower:
        if isinstance(value, dict) and 'dependencies' in value and not model_instance.managed:
            raise django_exceptions.ValidationError(
                _("'dependencies' is not supported for custom credentials."),
                code='invalid',

@@ -4,11 +4,13 @@
# Python
import datetime
import logging
import pytz
import re


# Django
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db import transaction, connection
from django.utils.timezone import now

# AWX
@@ -18,6 +20,132 @@ from awx.main.signals import disable_activity_stream, disable_computed_fields
from awx.main.utils.deletion import AWXCollector, pre_delete


def unified_job_class_to_event_table_name(job_class):
    return f'main_{job_class().event_class.__name__.lower()}'


def partition_table_name(job_class, dt):
    suffix = dt.replace(microsecond=0, second=0, minute=0).strftime('%Y%m%d_%H')

    event_tbl_name = unified_job_class_to_event_table_name(job_class)
    event_tbl_name += f'_{suffix}'
    return event_tbl_name


def partition_name_dt(part_name):
    """
    part_name examples:
        main_jobevent_20210318_09
        main_projectupdateevent_20210318_11
        main_inventoryupdateevent_20210318_03
    """
    if '_unpartitioned' in part_name:
        return None
    p = re.compile('([a-z]+)_([a-z]+)_([0-9]+)_([0-9][0-9])')
    m = p.match(part_name)
    if not m:
        return m
    dt_str = f"{m.group(3)}_{m.group(4)}"
    dt = datetime.datetime.strptime(dt_str, '%Y%m%d_%H').replace(tzinfo=pytz.UTC)
    return dt


def dt_to_partition_name(tbl_name, dt):
    return f"{tbl_name}_{dt.strftime('%Y%m%d_%H')}"
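
The three helpers above convert between a job's event table name, an hourly partition suffix, and back. A worked round trip under those definitions (note the mapping is lossy below hour precision, since partition_table_name truncates to the hour):

import datetime
import pytz

# Round trip using the helpers defined above (sketch).
dt = datetime.datetime(2021, 3, 18, 9, 0, tzinfo=pytz.UTC)
name = dt_to_partition_name('main_jobevent', dt)
assert name == 'main_jobevent_20210318_09'
# partition_name_dt() parses the suffix back out of the partition name.
assert partition_name_dt(name) == dt
# Legacy tables are explicitly skipped:
assert partition_name_dt('_unpartitioned_main_jobevent') is None
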
class DeleteMeta:
    def __init__(self, logger, job_class, cutoff, dry_run):
        self.logger = logger
        self.job_class = job_class
        self.cutoff = cutoff
        self.dry_run = dry_run

        self.jobs_qs = None  # Set by find_jobs_to_delete()

        self.parts_no_drop = set()  # Set in identify_excluded_partitions()
        self.parts_to_drop = set()  # Set in find_partitions_to_drop()
        self.jobs_pk_list = []  # Set in find_jobs_to_delete()
        self.jobs_to_delete_count = 0  # Set in find_jobs_to_delete()
        self.jobs_no_delete_count = 0  # Set in find_jobs_to_delete()

    def find_jobs_to_delete(self):
        self.jobs_qs = self.job_class.objects.filter(created__lt=self.cutoff).values_list('pk', 'status', 'created')
        for pk, status, created in self.jobs_qs:
            if status not in ['pending', 'waiting', 'running']:
                self.jobs_to_delete_count += 1
                self.jobs_pk_list.append(pk)
        self.jobs_no_delete_count = (
            self.job_class.objects.filter(created__gte=self.cutoff) | self.job_class.objects.filter(status__in=['pending', 'waiting', 'running'])
        ).count()

    def identify_excluded_partitions(self):

        part_drop = {}

        for pk, status, created in self.jobs_qs:

            part_key = partition_table_name(self.job_class, created)
            if status in ['pending', 'waiting', 'running']:
                part_drop[part_key] = False
            else:
                part_drop.setdefault(part_key, True)

        # Note that parts_no_drop _may_ contain the names of partitions that don't exist.
        # This can happen when the cleanup of _unpartitioned_* tables leaves behind jobs in the
        # pending, waiting, or running states; find_jobs_to_delete() will pick those jobs up.
        self.parts_no_drop = set([k for k, v in part_drop.items() if v is False])

    def delete_jobs(self):
        if not self.dry_run:
            self.job_class.objects.filter(pk__in=self.jobs_pk_list).delete()

    def find_partitions_to_drop(self):
        tbl_name = unified_job_class_to_event_table_name(self.job_class)

        with connection.cursor() as cursor:
            query = "SELECT inhrelid::regclass::text AS child FROM pg_catalog.pg_inherits"
            query += f" WHERE inhparent = 'public.{tbl_name}'::regclass"
            query += f" AND TO_TIMESTAMP(LTRIM(inhrelid::regclass::text, '{tbl_name}_'), 'YYYYMMDD_HH24') < '{self.cutoff}'"
            query += " ORDER BY inhrelid::regclass::text"

            cursor.execute(query)
            partitions_from_db = [r[0] for r in cursor.fetchall()]

        # parse partition names to datetimes, dropping legacy/unparseable names
        partitions_dt = [partition_name_dt(p) for p in partitions_from_db]
        partitions_dt = [p for p in partitions_dt if p is not None]

        # convert datetime partition back to string partition
        partitions_maybe_drop = set([dt_to_partition_name(tbl_name, dt) for dt in partitions_dt])

        # Do not drop a partition if a job that will not be deleted still points at it
        self.parts_to_drop = partitions_maybe_drop - self.parts_no_drop

    def drop_partitions(self):
        if len(self.parts_to_drop) > 0:
            parts_to_drop = list(self.parts_to_drop)
            parts_to_drop.sort()  # sort it to make reading it easier for humans
            parts_to_drop_str = ','.join(parts_to_drop)
            if self.dry_run:
                self.logger.debug(f"Would drop event partition(s) {parts_to_drop_str}")
            else:
                self.logger.debug(f"Dropping event partition(s) {parts_to_drop_str}")

            if not self.dry_run:
                with connection.cursor() as cursor:
                    cursor.execute(f"DROP TABLE {parts_to_drop_str}")
        else:
            self.logger.debug("No event partitions to drop")

    def delete(self):
        self.find_jobs_to_delete()
        self.identify_excluded_partitions()
        self.find_partitions_to_drop()
        self.drop_partitions()
        self.delete_jobs()
        return (self.jobs_no_delete_count, self.jobs_to_delete_count)

class Command(BaseCommand):
    """
    Management command to cleanup old jobs and project updates.
@@ -36,6 +164,43 @@ class Command(BaseCommand):
        parser.add_argument('--notifications', dest='only_notifications', action='store_true', default=False, help='Remove notifications')
        parser.add_argument('--workflow-jobs', default=False, action='store_true', dest='only_workflow_jobs', help='Remove workflow jobs')

    def cleanup(self, job_class):
        delete_meta = DeleteMeta(self.logger, job_class, self.cutoff, self.dry_run)
        skipped, deleted = delete_meta.delete()

        return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)

    def cleanup_jobs_partition(self):
        return self.cleanup(Job)

    def cleanup_ad_hoc_commands_partition(self):
        return self.cleanup(AdHocCommand)

    def cleanup_project_updates_partition(self):
        return self.cleanup(ProjectUpdate)

    def cleanup_inventory_updates_partition(self):
        return self.cleanup(InventoryUpdate)

    def cleanup_management_jobs_partition(self):
        return self.cleanup(SystemJob)

    def cleanup_workflow_jobs_partition(self):
        delete_meta = DeleteMeta(self.logger, WorkflowJob, self.cutoff, self.dry_run)

        delete_meta.find_jobs_to_delete()
        delete_meta.delete_jobs()
        return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)

    def _cascade_delete_job_events(self, model, pk_list):
        if len(pk_list) > 0:
            with connection.cursor() as cursor:
                tblname = unified_job_class_to_event_table_name(model)

                pk_list_csv = ','.join(map(str, pk_list))
                rel_name = model().event_parent_key
                cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")

    def cleanup_jobs(self):
        skipped, deleted = 0, 0

@@ -45,12 +210,14 @@ class Command(BaseCommand):
            # get queryset for available jobs to remove
            qs = Job.objects.filter(created__lt=self.cutoff).exclude(status__in=['pending', 'waiting', 'running'])
            # get pk list for the first N (batch_size) objects
            pk_list = qs[0:batch_size].values_list('pk')
            pk_list = qs[0:batch_size].values_list('pk', flat=True)
            # You cannot delete queries with sql LIMIT set, so we must
            # create a new query from this pk_list
            qs_batch = Job.objects.filter(pk__in=pk_list)
            just_deleted = 0
            if not self.dry_run:
                self._cascade_delete_job_events(Job, pk_list)

                del_query = pre_delete(qs_batch)
                collector = AWXCollector(del_query.db)
                collector.collect(del_query)
@@ -71,6 +238,7 @@ class Command(BaseCommand):
    def cleanup_ad_hoc_commands(self):
        skipped, deleted = 0, 0
        ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for ad_hoc_command in ad_hoc_commands.iterator():
            ad_hoc_command_display = '"%s" (%d events)' % (str(ad_hoc_command), ad_hoc_command.ad_hoc_command_events.count())
            if ad_hoc_command.status in ('pending', 'waiting', 'running'):
@@ -81,15 +249,20 @@ class Command(BaseCommand):
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, ad_hoc_command_display)
            if not self.dry_run:
                pk_list.append(ad_hoc_command.pk)
                ad_hoc_command.delete()
                deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(AdHocCommand, pk_list)

        skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

    def cleanup_project_updates(self):
        skipped, deleted = 0, 0
        project_updates = ProjectUpdate.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for pu in project_updates.iterator():
            pu_display = '"%s" (type %s)' % (str(pu), str(pu.launch_type))
            if pu.status in ('pending', 'waiting', 'running'):
@@ -104,15 +277,20 @@ class Command(BaseCommand):
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, pu_display)
            if not self.dry_run:
                pk_list.append(pu.pk)
                pu.delete()
                deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(ProjectUpdate, pk_list)

        skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

    def cleanup_inventory_updates(self):
        skipped, deleted = 0, 0
        inventory_updates = InventoryUpdate.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for iu in inventory_updates.iterator():
            iu_display = '"%s" (source %s)' % (str(iu), str(iu.source))
            if iu.status in ('pending', 'waiting', 'running'):
@@ -127,15 +305,20 @@ class Command(BaseCommand):
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, iu_display)
            if not self.dry_run:
                pk_list.append(iu.pk)
                iu.delete()
                deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(InventoryUpdate, pk_list)

        skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

    def cleanup_management_jobs(self):
        skipped, deleted = 0, 0
        system_jobs = SystemJob.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for sj in system_jobs.iterator():
            sj_display = '"%s" (type %s)' % (str(sj), str(sj.job_type))
            if sj.status in ('pending', 'waiting', 'running'):
@@ -146,9 +329,13 @@ class Command(BaseCommand):
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, sj_display)
            if not self.dry_run:
                pk_list.append(sj.pk)
                sj.delete()
                deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(SystemJob, pk_list)

        skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

@@ -222,6 +409,13 @@ class Command(BaseCommand):
        for m in model_names:
            if m in models_to_cleanup:
                skipped, deleted = getattr(self, 'cleanup_%s' % m)()

                func = getattr(self, 'cleanup_%s_partition' % m, None)
                if func:
                    skipped_partition, deleted_partition = func()
                    skipped += skipped_partition
                    deleted += deleted_partition

                if self.dry_run:
                    self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
                else:
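
The batching pattern in cleanup_jobs above works around the fact that Django refuses to .delete() a sliced queryset: the slice is materialized into a pk list and a fresh, unsliced queryset is built from it. The same idea in isolation (a sketch; `model` is any Django model with created/status fields):

# Sketch of the slice-then-requery batching used in cleanup_jobs above.
# sliced_qs.delete() raises in Django, so materialize pks and requery.
def delete_batch(model, cutoff, batch_size):
    qs = model.objects.filter(created__lt=cutoff).exclude(status__in=['pending', 'waiting', 'running'])
    pk_list = list(qs[0:batch_size].values_list('pk', flat=True))
    model.objects.filter(pk__in=pk_list).delete()
    return len(pk_list)
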
@@ -2,9 +2,8 @@
# All Rights Reserved

from django.core.management.base import BaseCommand
from django.conf import settings
from crum import impersonate
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate, ExecutionEnvironment
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
from awx.main.signals import disable_computed_fields


@@ -45,7 +44,7 @@ class Command(BaseCommand):

        public_galaxy_credential = Credential(
            name='Ansible Galaxy',
            managed_by_tower=True,
            managed=True,
            credential_type=CredentialType.objects.get(kind='galaxy'),
            inputs={'url': 'https://galaxy.ansible.com/'},
        )
@@ -68,13 +67,6 @@ class Command(BaseCommand):
            print('Demo Credential, Inventory, and Job Template added.')
            changed = True

        for ee in reversed(settings.DEFAULT_EXECUTION_ENVIRONMENTS):
            _, created = ExecutionEnvironment.objects.update_or_create(name=ee['name'], defaults={'image': ee['image'], 'managed_by_tower': True})

            if created:
                changed = True
                print('Default Execution Environment(s) registered.')

        if changed:
            print('(changed: True)')
        else:

awx/main/management/commands/custom_venv_associations.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# Copyright (c) 2021 Ansible, Inc.
# All Rights Reserved

from django.core.management.base import BaseCommand
from awx.main.utils.common import get_custom_venv_choices
from awx.main.models import Organization, InventorySource, JobTemplate, Project


import yaml


class Command(BaseCommand):
    """Returns the objects associated with the custom virtual environment at the path passed in the argument"""

    def add_arguments(self, parser):
        parser.add_argument(
            'path',
            type=str,
            nargs=1,
            default='',
            help='run this with a path to a virtual environment as an argument to see the associated Job Templates, Organizations, Projects, and Inventory Sources.',
        )
        parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')

    def handle(self, *args, **options):
        # look up organizations and unified job templates (which include JTs, workflows, and Inventory updates)
        super(Command, self).__init__()
        results = {}
        path = options.get('path')
        if path:
            all_venvs = get_custom_venv_choices()
            if path[0] in all_venvs:  # verify this is a valid path
                path = path[0]
                orgs = [{"name": org.name, "id": org.id} for org in Organization.objects.filter(custom_virtualenv=path)]
                jts = [{"name": jt.name, "id": jt.id} for jt in JobTemplate.objects.filter(custom_virtualenv=path)]
                proj = [{"name": proj.name, "id": proj.id} for proj in Project.objects.filter(custom_virtualenv=path)]
                invsrc = [{"name": inv.name, "id": inv.id} for inv in InventorySource.objects.filter(custom_virtualenv=path)]
                results["organizations"] = orgs
                results["job_templates"] = jts
                results["projects"] = proj
                results["inventory_sources"] = invsrc
                if not options.get('q'):
                    msg = [
                        '# Virtual Environments Associations:',
                        yaml.dump(results),
                        '- To list all (now deprecated) custom virtual environments run:',
                        'awx-manage list_custom_venvs',
                        '',
                        '- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
                        'awx-manage export_custom_venv /path/to/venv',
                        '',
                        '- Run these commands with `-q` to remove tool tips.',
                        '',
                    ]
                    print('\n'.join(msg))
                else:
                    print(yaml.dump(results))

            else:
                print('\n', '# Incorrect path, verify your path is from the following list:')
                print('\n'.join(all_venvs), '\n')
awx/main/management/commands/export_custom_venv.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# Copyright (c) 2021 Ansible, Inc.
# All Rights Reserved

from awx.main.utils.common import get_custom_venv_pip_freeze, get_custom_venv_choices
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Returns the pip freeze from the path passed in the argument"""

    def add_arguments(self, parser):
        parser.add_argument(
            'path',
            type=str,
            nargs=1,
            default='',
            help='run this with a path to a virtual environment as an argument to see the pip freeze data',
        )
        parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')

    def handle(self, *args, **options):
        super(Command, self).__init__()
        if options.get('path'):
            path = options.get('path')
            all_venvs = get_custom_venv_choices()
            if path[0] in all_venvs:
                pip_data = get_custom_venv_pip_freeze(options.get('path')[0])
                if pip_data:
                    if not options.get('q'):
                        msg = [
                            '# Virtual environment contents:',
                            pip_data,
                            '- To list all (now deprecated) custom virtual environments run:',
                            'awx-manage list_custom_venvs',
                            '',
                            '- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
                            'awx-manage custom_venv_associations /path/to/venv',
                            '',
                            '- Run these commands with `-q` to remove tool tips.',
                            '',
                        ]
                        print('\n'.join(msg))
                    else:
                        print(pip_data)

            else:
                print('\n', '# Incorrect path, verify your path is from the following list:')
                print('\n'.join(all_venvs))
@@ -36,20 +36,20 @@ from awx.main.utils.pglock import advisory_lock
logger = logging.getLogger('awx.main.commands.inventory_import')

LICENSE_EXPIRED_MESSAGE = '''\
License expired.
See http://www.ansible.com/renew for license extension information.'''
Subscription expired.
Contact us (https://www.redhat.com/contact) for subscription extension information.'''

LICENSE_NON_EXISTANT_MESSAGE = '''\
No license.
See http://www.ansible.com/renew for license information.'''
No subscription.
Contact us (https://www.redhat.com/contact) for subscription information.'''

LICENSE_MESSAGE = '''\
Number of licensed instances exceeded, would bring available instances to %(new_count)d, system is licensed for %(instance_count)d.
See http://www.ansible.com/renew for license extension information.'''
%(new_count)d instances have been automated, system is subscribed for %(instance_count)d.
Contact us (https://www.redhat.com/contact) for upgrade information.'''

DEMO_LICENSE_MESSAGE = '''\
Demo mode free license count exceeded, would bring available instances to %(new_count)d, demo mode allows %(instance_count)d.
See http://www.ansible.com/renew for licensing information.'''
Demo mode free subscription count exceeded. Current automated instances are %(new_count)d, demo mode allows %(instance_count)d.
Contact us (https://www.redhat.com/contact) for subscription information.'''


def functioning_dir(path):
@@ -66,13 +66,9 @@ class AnsibleInventoryLoader(object):
    /usr/bin/ansible/ansible-inventory -i hosts --list
    """

    def __init__(self, source, venv_path=None, verbosity=0):
    def __init__(self, source, verbosity=0):
        self.source = source
        self.verbosity = verbosity
        if venv_path:
            self.venv_path = venv_path
        else:
            self.venv_path = settings.ANSIBLE_VENV_PATH

    def get_base_args(self):
        bargs = ['podman', 'run', '--user=root', '--quiet']
@@ -131,7 +127,6 @@ class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('--inventory-name', dest='inventory_name', type=str, default=None, metavar='n', help='name of inventory to sync')
        parser.add_argument('--inventory-id', dest='inventory_id', type=int, default=None, metavar='i', help='id of inventory to sync')
        parser.add_argument('--venv', dest='venv', type=str, default=None, help='absolute path to the AWX custom virtualenv to use')
        parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False, help='overwrite the destination hosts and groups')
        parser.add_argument('--overwrite-vars', dest='overwrite_vars', action='store_true', default=False, help='overwrite (rather than merge) variables')
        parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False, help='DEPRECATED legacy option, has no effect')
@@ -761,29 +756,22 @@ class Command(BaseCommand):
        instance_count = license_info.get('instance_count', 0)
        free_instances = license_info.get('free_instances', 0)
        time_remaining = license_info.get('time_remaining', 0)
        automated_count = license_info.get('automated_instances', 0)
        hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
        new_count = Host.objects.active_count()
        if time_remaining <= 0:
            if hard_error:
                logger.error(LICENSE_EXPIRED_MESSAGE)
                raise PermissionDenied("License has expired!")
                raise PermissionDenied("Subscription has expired!")
            else:
                logger.warning(LICENSE_EXPIRED_MESSAGE)
        # special check for tower-type inventory sources
        # but only if running the plugin
        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
        if self.inventory_source.source == 'tower' and any(f in self.inventory_source.source_path for f in TOWER_SOURCE_FILES):
            # only if this is the 2nd call to license check, we cannot compare before running plugin
            if hasattr(self, 'all_group'):
                self.remote_tower_license_compare(local_license_type)
        if free_instances < 0:
            d = {
                'new_count': new_count,
                'new_count': automated_count,
                'instance_count': instance_count,
            }
            if hard_error:
                logger.error(LICENSE_MESSAGE % d)
                raise PermissionDenied('License count exceeded!')
                raise PermissionDenied('Subscription count exceeded!')
            else:
                logger.warning(LICENSE_MESSAGE % d)

@@ -824,7 +812,6 @@ class Command(BaseCommand):
            raise CommandError('--source is required')
        verbosity = int(options.get('verbosity', 1))
        self.set_logging_level(verbosity)
        venv_path = options.get('venv', None)

        # Load inventory object based on name or ID.
        if inventory_id:
@@ -854,7 +841,7 @@ class Command(BaseCommand):
            _eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
        )

        data = AnsibleInventoryLoader(source=source, venv_path=venv_path, verbosity=verbosity).load()
        data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()

        logger.debug('Finished loading from source: %s', source)

awx/main/management/commands/list_custom_venvs.py (new file, 43 lines)
@@ -0,0 +1,43 @@
# Copyright (c) 2021 Ansible, Inc.
# All Rights Reserved
import sys

from awx.main.utils.common import get_custom_venv_choices
from django.core.management.base import BaseCommand
from django.conf import settings


class Command(BaseCommand):
    """Returns a list of custom venv paths from the path passed in the argument"""

    def add_arguments(self, parser):
        parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')

    def handle(self, *args, **options):
        super(Command, self).__init__()
        venvs = get_custom_venv_choices()
        if venvs:
            if not options.get('q'):
                msg = [
                    '# Discovered Virtual Environments:',
                    '\n'.join(venvs),
                    '',
                    '- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
                    'awx-manage export_custom_venv /path/to/venv',
                    '',
                    '- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
                    'awx-manage custom_venv_associations /path/to/venv',
                    '',
                    '- Run these commands with `-q` to remove tool tips.',
                    '',
                ]
                print('\n'.join(msg))
            else:
                print('\n'.join(venvs), '\n')
        else:
            msg = ["No custom virtual environments detected in:", settings.BASE_VENV_PATH]

            for path in settings.CUSTOM_VENV_PATHS:
                msg.append(path)

            print('\n'.join(msg), file=sys.stderr)
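
Taken together, the three new commands above give a migration path off the now-deprecated custom virtualenvs. The tool-tip strings they print describe the intended sequence; as a usage sketch (the /path/to/venv argument is a placeholder):

# Intended sequence, per the tool tips printed by the commands above:
#   awx-manage list_custom_venvs                       # discover deprecated venvs
#   awx-manage export_custom_venv /path/to/venv        # dump its pip freeze
#   awx-manage custom_venv_associations /path/to/venv  # see what still uses it
# Add -q to any of these to print only the query results.
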
@@ -0,0 +1,135 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
import sys
from distutils.util import strtobool
from argparse import RawTextHelpFormatter

from django.core.management.base import BaseCommand
from django.conf import settings
from awx.main.models import CredentialType, Credential, ExecutionEnvironment


class Command(BaseCommand):
    """Create default execution environments, intended for new installs"""

    help = """
    Creates or updates the execution environments set in settings.DEFAULT_EXECUTION_ENVIRONMENTS if they are not yet created.
    Optionally provide authentication details to create or update a container registry credential that will be set on all of these default execution environments.
    Note that settings.DEFAULT_EXECUTION_ENVIRONMENTS is an ordered list; the first in the list will be used for project updates.
    """

    # Preserves newlines in the help text
    def create_parser(self, *args, **kwargs):
        parser = super(Command, self).create_parser(*args, **kwargs)
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser):
        parser.add_argument(
            "--registry-url",
            type=str,
            default="",
            help="URL for the container registry",
        )
        parser.add_argument(
            "--registry-username",
            type=str,
            default="",
            help="Username for the container registry",
        )
        parser.add_argument(
            "--registry-password",
            type=str,
            default="",
            help="Password or token for CLI authentication with the container registry",
        )
        parser.add_argument(
            "--verify-ssl",
            type=lambda x: bool(strtobool(str(x))),
            default=True,
            help="Verify SSL when authenticating with the container registry",
        )

    def handle(self, *args, **options):
        changed = False
        registry_cred = None

        if options.get("registry_username"):
            if not options.get("registry_password"):
                sys.stderr.write("Registry password must be provided when providing registry username\n")
                sys.exit(1)

            if not options.get("registry_url"):
                sys.stderr.write("Registry url must be provided when providing registry username\n")
                sys.exit(1)

            registry_cred_type = CredentialType.objects.filter(kind="registry")
            if not registry_cred_type.exists():
                sys.stderr.write("No registry credential type found")
                sys.exit(1)

            inputs = {
                "host": options.get("registry_url"),
                "password": options.get("registry_password"),
                "username": options.get("registry_username"),
                "verify_ssl": options.get("verify_ssl"),
            }
            registry_cred, cred_created = Credential.objects.get_or_create(
                name="Default Execution Environment Registry Credential",
                managed=True,
                credential_type=registry_cred_type[0],
                defaults={'inputs': inputs},
            )

            if cred_created:
                changed = True
                print("'Default Execution Environment Credential' registered.")

            for key, value in inputs.items():
                if not registry_cred.inputs.get(key) or registry_cred.get_input(key) != value:
                    registry_cred.inputs[key] = value
                    changed = True

            if changed:
                registry_cred.save()
                print("'Default Execution Environment Credential' updated.")

        # Create default globally available Execution Environments
        for ee in reversed(settings.GLOBAL_JOB_EXECUTION_ENVIRONMENTS):
            _this_ee, ee_created = ExecutionEnvironment.objects.get_or_create(name=ee["name"], defaults={'image': ee["image"], 'credential': registry_cred})
            if ee_created:
                changed = True
                print(f"'{ee['name']}' Default Execution Environment registered.")
            else:
                if _this_ee.image != ee["image"]:
                    _this_ee.image = ee["image"]
                    changed = True
                if _this_ee.credential != registry_cred:
                    _this_ee.credential = registry_cred
                    changed = True
                if changed:
                    _this_ee.save()
                    print(f"'{ee['name']}' Default Execution Environment updated.")

        # Create the control plane execution environment that is used for project updates and system jobs
        ee = settings.CONTROL_PLANE_EXECUTION_ENVIRONMENT
        _this_ee, cp_created = ExecutionEnvironment.objects.get_or_create(
            name="Control Plane Execution Environment", defaults={'image': ee, 'managed': True, 'credential': registry_cred}
        )
        if cp_created:
            changed = True
            print("Control Plane Execution Environment registered.")
        else:
            if _this_ee.image != ee:
                _this_ee.image = ee
                changed = True
            if _this_ee.credential != registry_cred:
                _this_ee.credential = registry_cred
                changed = True
            if changed:
                _this_ee.save()

        if changed:
            print("(changed: True)")
        else:
            print("(changed: False)")
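A usage sketch (illustrative, not part of the commit): on a fresh install this command is typically run once, optionally seeding a registry credential that all of the default EEs then share. The registry values below are placeholders; argparse maps each --registry-* flag to the underscore-named keyword.

    # hypothetical invocation via Django's call_command
    from django.core.management import call_command

    call_command(
        'register_default_execution_environments',
        registry_url='https://registry.example.com',
        registry_username='svc-account',
        registry_password='s3cret',
        verify_ssl=True,
    )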
@@ -11,11 +11,16 @@ from django.conf import settings
from awx.main.utils.filters import SmartFilter
from awx.main.utils.pglock import advisory_lock

__all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager']
__all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager', 'DeferJobCreatedManager']

logger = logging.getLogger('awx.main.managers')


class DeferJobCreatedManager(models.Manager):
    def get_queryset(self):
        return super(DeferJobCreatedManager, self).get_queryset().defer('job_created')

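A brief sketch of what defer() buys here (illustrative; the job id and field accesses are assumptions): the new partition-key column job_created is left out of the initial SELECT and only fetched if something actually touches it.

    # hypothetical illustration of DeferJobCreatedManager behavior
    events = JobEvent.objects.filter(job__id=42)  # SELECT omits job_created
    ev = events.first()
    ev.stdout        # already loaded, no extra query
    ev.job_created   # deferred field: triggers one additional query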
class HostManager(models.Manager):
    """Custom manager class for Hosts model."""

@@ -28,7 +33,7 @@ class HostManager(models.Manager):
        - Only consider results that are unique
        - Return the count of this query
        """
        return self.order_by().exclude(inventory_sources__source='tower').values('name').distinct().count()
        return self.order_by().exclude(inventory_sources__source='controller').values('name').distinct().count()

    def org_active_count(self, org_id):
        """Return count of active, unique hosts used by an organization.
@@ -40,7 +45,7 @@ class HostManager(models.Manager):
        - Only consider results that are unique
        - Return the count of this query
        """
        return self.order_by().exclude(inventory_sources__source='tower').filter(inventory__organization=org_id).values('name').distinct().count()
        return self.order_by().exclude(inventory_sources__source='controller').filter(inventory__organization=org_id).values('name').distinct().count()

    def get_queryset(self):
        """When the parent instance of the host query set has a `kind=smart` and a `host_filter`
@@ -141,8 +146,8 @@ class InstanceManager(models.Manager):

            pod_ip = os.environ.get('MY_POD_IP')
            registered = self.register(ip_address=pod_ip)
            is_container_group = settings.IS_K8S
            RegisterQueue('tower', 100, 0, [], is_container_group).register()
            RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
            RegisterQueue(settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True).register()
            return registered
        else:
            return (False, self.me())
@@ -151,10 +156,6 @@ class InstanceManager(models.Manager):
        """Return count of active Tower nodes for licensing."""
        return self.all().count()

    def my_role(self):
        # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
        return "tower"


class InstanceGroupManager(models.Manager):
    """A custom manager class for the Instance model.

@@ -46,7 +46,7 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
        response['X-API-Total-Time'] = '%0.3fs' % total_time
        if settings.AWX_REQUEST_PROFILE:
            response['X-API-Profile-File'] = self.prof.stop()
        perf_logger.info(
        perf_logger.debug(
            f'request: {request}, response_time: {response["X-API-Total-Time"]}',
            extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"])),
        )

@@ -10,7 +10,7 @@ from awx.main.utils.common import set_current_apps

def migrate_to_static_inputs(apps, schema_editor):
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults()
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):

@@ -14,7 +14,7 @@ from awx.main.utils.common import set_current_apps

def setup_tower_managed_defaults(apps, schema_editor):
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults()
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):

@@ -8,7 +8,7 @@ from awx.main.utils.common import set_current_apps

def setup_tower_managed_defaults(apps, schema_editor):
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults()
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):

@@ -9,7 +9,7 @@ from awx.main.utils.common import set_current_apps

def create_new_credential_types(apps, schema_editor):
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults()
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):

@@ -5,7 +5,7 @@ from awx.main.models import CredentialType


def update_cyberark_aim_name(apps, schema_editor):
    CredentialType.setup_tower_managed_defaults()
    CredentialType.setup_tower_managed_defaults(apps)
    aim_types = apps.get_model('main', 'CredentialType').objects.filter(namespace='aim').order_by('id')

    if aim_types.count() == 2:

@@ -10,15 +10,6 @@ def migrate_event_data(apps, schema_editor):
    # that have a bigint primary key (because the old usage of an integer
    # numeric isn't enough, as its range is about 2.1B, see:
    # https://www.postgresql.org/docs/9.1/datatype-numeric.html)

    # unfortunately, we can't do this with a simple ALTER TABLE, because
    # for tables with hundreds of millions or billions of rows, the ALTER TABLE
    # can take *hours* on modest hardware.
    #
    # the approach in this migration means that post-migration, event data will
    # *not* immediately show up, but will be repopulated over time progressively
    # the trade-off here is not having to wait hours for the full data migration
    # before you can start and run AWX again (including new playbook runs)
    for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
        with connection.cursor() as cursor:
            # rename the current event table
@@ -35,30 +26,7 @@ def migrate_event_data(apps, schema_editor):
            cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
            cursor.execute(f'ALTER TABLE "{tblname}" ALTER COLUMN "id" ' f"SET DEFAULT nextval('{tblname}_id_seq');")
            cursor.execute(f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);")

            # replace the BTREE index on main_jobevent.job_id with
            # a BRIN index to drastically improve per-UJ lookup performance
            # see: https://info.crunchydata.com/blog/postgresql-brin-indexes-big-data-performance-with-minimal-storage
            if tblname == 'main_jobevent':
                cursor.execute("SELECT indexname FROM pg_indexes WHERE tablename='main_jobevent' AND indexdef LIKE '%USING btree (job_id)';")
                old_index = cursor.fetchone()[0]
                cursor.execute(f'DROP INDEX {old_index}')
                cursor.execute('CREATE INDEX main_jobevent_job_id_brin_idx ON main_jobevent USING brin (job_id);')

            # remove all of the indexes and constraints from the old table
            # (they just slow down the data migration)
            cursor.execute(f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename='_old_{tblname}' AND indexname != '{tblname}_pkey';")
            indexes = cursor.fetchall()

            cursor.execute(
                f"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = '_old_{tblname}'::regclass AND conname != '{tblname}_pkey';"
            )
            constraints = cursor.fetchall()

            for indexname, indexdef in indexes:
                cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
            for conname, contype, condef in constraints:
                cursor.execute(f'ALTER TABLE _old_{tblname} DROP CONSTRAINT IF EXISTS {conname}')
            cursor.execute(f'DROP TABLE _old_{tblname};')


class FakeAlterField(migrations.AlterField):

@@ -6,7 +6,7 @@ from awx.main.utils.common import set_current_apps

def setup_tower_managed_defaults(apps, schema_editor):
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults()
    CredentialType.setup_tower_managed_defaults(apps)


class Migration(migrations.Migration):

@@ -1,15 +1,33 @@
# Generated by Django 2.2.16 on 2021-04-21 15:02

from django.db import migrations, models
from django.db import migrations, models, transaction


def remove_iso_instances(apps, schema_editor):
    Instance = apps.get_model('main', 'Instance')
    with transaction.atomic():
        Instance.objects.filter(rampart_groups__controller__isnull=False).delete()


def remove_iso_groups(apps, schema_editor):
    InstanceGroup = apps.get_model('main', 'InstanceGroup')
    UnifiedJob = apps.get_model('main', 'UnifiedJob')
    with transaction.atomic():
        for ig in InstanceGroup.objects.filter(controller__isnull=False):
            UnifiedJob.objects.filter(instance_group=ig).update(instance_group=None)
            ig.delete()


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ('main', '0138_custom_inventory_scripts_removal'),
    ]

    operations = [
        migrations.RunPython(remove_iso_instances),
        migrations.RunPython(remove_iso_groups),
        migrations.RemoveField(
            model_name='instance',
            name='last_isolated_check',

268
awx/main/migrations/0144_event_partitions.py
Normal file
@@ -0,0 +1,268 @@
from django.db import migrations, models, connection


def migrate_event_data(apps, schema_editor):
    # see: https://github.com/ansible/awx/issues/9039
    #
    # the goal of this function is -- for each job event table -- to:
    # - create a parent partition table
    # - .. with a single partition
    # - .. that includes all existing job events
    #
    # the new main_jobevent_parent table should have a new
    # denormalized column, job_created, this is used as a
    # basis for partitioning job event rows
    #
    # The initial partition will be a unique case. After
    # the migration is completed, awx should create
    # new partitions on an hourly basis, as needed.
    # All events for a given job should be placed in
    # a partition based on the job's _created time_.

    for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
        with connection.cursor() as cursor:
            # mark existing table as _unpartitioned_*
            # we will drop this table after its data
            # has been moved over
            cursor.execute(f'ALTER TABLE {tblname} RENAME TO _unpartitioned_{tblname}')

            # create a copy of the table that we will use as a reference for schema
            # otherwise, the schema changes we would make on the old jobevents table
            # (namely, dropping the primary key constraint) would cause the migration
            # to suffer a serious performance degradation
            cursor.execute(f'CREATE TABLE tmp_{tblname} ' f'(LIKE _unpartitioned_{tblname} INCLUDING ALL)')

            # drop primary key constraint; in a partitioned table
            # constraints must include the partition key itself
            # TODO: do more generic search for pkey constraints
            # instead of hardcoding this one that applies to main_jobevent
            cursor.execute(f'ALTER TABLE tmp_{tblname} DROP CONSTRAINT tmp_{tblname}_pkey')

            # create parent table
            cursor.execute(
                f'CREATE TABLE {tblname} '
                f'(LIKE tmp_{tblname} INCLUDING ALL, job_created TIMESTAMP WITH TIME ZONE NOT NULL) '
                f'PARTITION BY RANGE(job_created);'
            )

            cursor.execute(f'DROP TABLE tmp_{tblname}')

            # recreate primary key constraint
            cursor.execute(f'ALTER TABLE ONLY {tblname} ' f'ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')

    with connection.cursor() as cursor:
        """
        The big int migration introduced the BRIN index main_jobevent_job_id_brin_idx. For upgrades, we drop the index; new installs do nothing.
        I have seen the second index in my dev environment. I cannot find where in the code it was created. Drop it just in case.
        """
        cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_brin_idx')
        cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')

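For orientation, a sketch of the DDL the new scheme implies once the parent tables exist (editorial illustration: the partition name and time bounds are hypothetical, and the runtime partition-creation code is not part of this hunk):

    # hypothetical hourly partition for jobs created during one hour window
    with connection.cursor() as cursor:
        cursor.execute(
            "CREATE TABLE main_jobevent_20210621_14 "
            "PARTITION OF main_jobevent "
            "FOR VALUES FROM ('2021-06-21 14:00:00+00') TO ('2021-06-21 15:00:00+00');"
        )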
class FakeAddField(migrations.AddField):
    def database_forwards(self, *args):
        # this is intentionally left blank, because we're
        # going to accomplish the migration with some custom raw SQL
        pass


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0143_hostmetric'),
    ]

    operations = [
        migrations.RunPython(migrate_event_data),
        FakeAddField(
            model_name='jobevent',
            name='job_created',
            field=models.DateTimeField(null=True, editable=False),
        ),
        FakeAddField(
            model_name='inventoryupdateevent',
            name='job_created',
            field=models.DateTimeField(null=True, editable=False),
        ),
        FakeAddField(
            model_name='projectupdateevent',
            name='job_created',
            field=models.DateTimeField(null=True, editable=False),
        ),
        FakeAddField(
            model_name='adhoccommandevent',
            name='job_created',
            field=models.DateTimeField(null=True, editable=False),
        ),
        FakeAddField(
            model_name='systemjobevent',
            name='job_created',
            field=models.DateTimeField(null=True, editable=False),
        ),
        migrations.CreateModel(
            name='UnpartitionedAdHocCommandEvent',
            fields=[],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('main.adhoccommandevent',),
        ),
        migrations.CreateModel(
            name='UnpartitionedInventoryUpdateEvent',
            fields=[],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('main.inventoryupdateevent',),
        ),
        migrations.CreateModel(
            name='UnpartitionedJobEvent',
            fields=[],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('main.jobevent',),
        ),
        migrations.CreateModel(
            name='UnpartitionedProjectUpdateEvent',
            fields=[],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('main.projectupdateevent',),
        ),
        migrations.CreateModel(
            name='UnpartitionedSystemJobEvent',
            fields=[],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('main.systemjobevent',),
        ),
        migrations.AlterField(
            model_name='adhoccommandevent',
            name='ad_hoc_command',
            field=models.ForeignKey(
                db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='ad_hoc_command_events', to='main.AdHocCommand'
            ),
        ),
        migrations.AlterField(
            model_name='adhoccommandevent',
            name='created',
            field=models.DateTimeField(default=None, editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='adhoccommandevent',
            name='modified',
            field=models.DateTimeField(db_index=True, default=None, editable=False),
        ),
        migrations.AlterField(
            model_name='inventoryupdateevent',
            name='created',
            field=models.DateTimeField(default=None, editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='inventoryupdateevent',
            name='inventory_update',
            field=models.ForeignKey(
                db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='inventory_update_events', to='main.InventoryUpdate'
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdateevent',
            name='modified',
            field=models.DateTimeField(db_index=True, default=None, editable=False),
        ),
        migrations.AlterField(
            model_name='jobevent',
            name='created',
            field=models.DateTimeField(default=None, editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='jobevent',
            name='job',
            field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=models.deletion.DO_NOTHING, related_name='job_events', to='main.Job'),
        ),
        migrations.AlterField(
            model_name='jobevent',
            name='modified',
            field=models.DateTimeField(db_index=True, default=None, editable=False),
        ),
        migrations.AlterField(
            model_name='projectupdateevent',
            name='created',
            field=models.DateTimeField(default=None, editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='projectupdateevent',
            name='modified',
            field=models.DateTimeField(db_index=True, default=None, editable=False),
        ),
        migrations.AlterField(
            model_name='projectupdateevent',
            name='project_update',
            field=models.ForeignKey(
                db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='project_update_events', to='main.ProjectUpdate'
            ),
        ),
        migrations.AlterField(
            model_name='systemjobevent',
            name='created',
            field=models.DateTimeField(default=None, editable=False, null=True),
        ),
        migrations.AlterField(
            model_name='systemjobevent',
            name='modified',
            field=models.DateTimeField(db_index=True, default=None, editable=False),
        ),
        migrations.AlterField(
            model_name='systemjobevent',
            name='system_job',
            field=models.ForeignKey(
                db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='system_job_events', to='main.SystemJob'
            ),
        ),
        migrations.AlterIndexTogether(
            name='adhoccommandevent',
            index_together={
                ('ad_hoc_command', 'job_created', 'event'),
                ('ad_hoc_command', 'job_created', 'counter'),
                ('ad_hoc_command', 'job_created', 'uuid'),
            },
        ),
        migrations.AlterIndexTogether(
            name='inventoryupdateevent',
            index_together={('inventory_update', 'job_created', 'counter'), ('inventory_update', 'job_created', 'uuid')},
        ),
        migrations.AlterIndexTogether(
            name='jobevent',
            index_together={
                ('job', 'job_created', 'counter'),
                ('job', 'job_created', 'uuid'),
                ('job', 'job_created', 'event'),
                ('job', 'job_created', 'parent_uuid'),
            },
        ),
        migrations.AlterIndexTogether(
            name='projectupdateevent',
            index_together={
                ('project_update', 'job_created', 'uuid'),
                ('project_update', 'job_created', 'event'),
                ('project_update', 'job_created', 'counter'),
            },
        ),
        migrations.AlterIndexTogether(
            name='systemjobevent',
            index_together={('system_job', 'job_created', 'uuid'), ('system_job', 'job_created', 'counter')},
        ),
    ]
21
awx/main/migrations/0145_deregister_managed_ee_objs.py
Normal file
@@ -0,0 +1,21 @@
# Generated by Django 2.2.16 on 2021-06-07 19:36

from django.db import migrations


def forwards(apps, schema_editor):
    ExecutionEnvironment = apps.get_model('main', 'ExecutionEnvironment')
    for row in ExecutionEnvironment.objects.filter(managed_by_tower=True):
        row.managed_by_tower = False
        row.save(update_fields=['managed_by_tower'])


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0144_event_partitions'),
    ]

    operations = [
        migrations.RunPython(forwards),
    ]
59
awx/main/migrations/0146_add_insights_inventory.py
Normal file
@@ -0,0 +1,59 @@
# Generated by Django 2.2.16 on 2021-06-08 18:59

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0145_deregister_managed_ee_objs'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='insights_system_id',
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('tower', 'Ansible Tower'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('tower', 'Ansible Tower'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
    ]
24
awx/main/migrations/0147_validate_ee_image_field.py
Normal file
@@ -0,0 +1,24 @@
# Generated by Django 2.2.16 on 2021-06-15 02:49

import awx.main.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0146_add_insights_inventory'),
    ]

    operations = [
        migrations.AlterField(
            model_name='executionenvironment',
            name='image',
            field=models.CharField(
                help_text='The full image location, including the container registry, image name, and version tag.',
                max_length=1024,
                validators=[awx.main.validators.validate_container_image_name],
                verbose_name='image location',
            ),
        ),
    ]
20
awx/main/migrations/0148_unifiedjob_receptor_unit_id.py
Normal file
@@ -0,0 +1,20 @@
# Generated by Django 2.2.16 on 2021-06-11 04:50

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0147_validate_ee_image_field'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='work_unit_id',
            field=models.CharField(
                blank=True, default=None, editable=False, help_text='The Receptor work unit ID associated with this job.', max_length=255, null=True
            ),
        ),
    ]
@@ -0,0 +1,17 @@
# Generated by Django 2.2.16 on 2021-06-16 21:00

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0148_unifiedjob_receptor_unit_id'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inventory',
            name='insights_credential',
        ),
    ]
113
awx/main/migrations/0150_rename_inv_sources_inv_updates.py
Normal file
@@ -0,0 +1,113 @@
# Generated by Django 2.2.16 on 2021-06-17 13:12
import logging

from django.db import migrations, models

from awx.main.models.credential import ManagedCredentialType, CredentialType as ModernCredentialType


logger = logging.getLogger(__name__)


def forwards(apps, schema_editor):
    InventoryUpdate = apps.get_model('main', 'InventoryUpdate')
    InventorySource = apps.get_model('main', 'InventorySource')

    r = InventoryUpdate.objects.filter(source='tower').update(source='controller')
    if r:
        logger.warn(f'Renamed {r} tower inventory updates to controller')
    InventorySource.objects.filter(source='tower').update(source='controller')
    if r:
        logger.warn(f'Renamed {r} tower inventory sources to controller')

    CredentialType = apps.get_model('main', 'CredentialType')

    tower_type = CredentialType.objects.filter(managed_by_tower=True, namespace='tower').first()
    if tower_type is not None:
        controller_type = CredentialType.objects.filter(managed_by_tower=True, namespace='controller', kind='cloud').first()
        if controller_type:
            # this gets created by prior migrations in upgrade scenarios
            controller_type.delete()

        registry_type = ManagedCredentialType.registry.get('controller')
        if not registry_type:
            raise RuntimeError('Expected to find controller credential, this may need to be edited in the future!')
        logger.warn('Renaming the Ansible Tower credential type for existing install')
        tower_type.name = registry_type.name  # sensitive to translations
        tower_type.namespace = 'controller'  # if not done, setup_tower_managed_defaults will error
        tower_type.save(update_fields=['name', 'namespace'])

    ModernCredentialType.setup_tower_managed_defaults(apps)


def backwards(apps, schema_editor):
    InventoryUpdate = apps.get_model('main', 'InventoryUpdate')
    InventorySource = apps.get_model('main', 'InventorySource')

    r = InventoryUpdate.objects.filter(source='controller').update(source='tower')
    if r:
        logger.warn(f'Renamed {r} controller inventory updates to tower')
    r = InventorySource.objects.filter(source='controller').update(source='tower')
    if r:
        logger.warn(f'Renamed {r} controller inventory sources to tower')

    CredentialType = apps.get_model('main', 'CredentialType')

    tower_type = CredentialType.objects.filter(managed_by_tower=True, namespace='controller', kind='cloud').first()
    if tower_type is not None and not CredentialType.objects.filter(managed_by_tower=True, namespace='tower').exists():
        logger.info('Renaming the controller credential type back')
        tower_type.namespace = 'tower'
        tower_type.name = 'Ansible Tower'
        tower_type.save(update_fields=['namespace', 'name'])


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0149_remove_inventory_insights_credential'),
    ]
    operations = [
        migrations.RunPython(migrations.RunPython.noop, backwards),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]
28
awx/main/migrations/0151_rename_managed_by_tower.py
Normal file
@@ -0,0 +1,28 @@
# Generated by Django 2.2.16 on 2021-06-17 18:32

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0150_rename_inv_sources_inv_updates'),
    ]

    operations = [
        migrations.RenameField(
            model_name='credential',
            old_name='managed_by_tower',
            new_name='managed',
        ),
        migrations.RenameField(
            model_name='credentialtype',
            old_name='managed_by_tower',
            new_name='managed',
        ),
        migrations.RenameField(
            model_name='executionenvironment',
            old_name='managed_by_tower',
            new_name='managed',
        ),
    ]
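Worth noting (a general Django/PostgreSQL fact, not part of the commit): RenameField on a plain column is metadata-cheap, so this migration should not rewrite the large credential tables. A sketch of the statement Django emits for the first rename above:

    # hypothetical illustration of the catalog-only SQL behind RenameField
    with connection.cursor() as cursor:
        cursor.execute('ALTER TABLE main_credential RENAME COLUMN managed_by_tower TO managed;')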
@@ -19,7 +19,7 @@ def migrate_galaxy_settings(apps, schema_editor):
        # nothing to migrate
        return
    set_current_apps(apps)
    ModernCredentialType.setup_tower_managed_defaults()
    ModernCredentialType.setup_tower_managed_defaults(apps)
    CredentialType = apps.get_model('main', 'CredentialType')
    Credential = apps.get_model('main', 'Credential')
    Setting = apps.get_model('conf', 'Setting')
@@ -34,10 +34,21 @@ def migrate_galaxy_settings(apps, schema_editor):
    if public_galaxy_setting and public_galaxy_setting.value is False:
        # ...UNLESS this behavior was explicitly disabled via this setting
        public_galaxy_enabled = False

    public_galaxy_credential = Credential(
        created=now(), modified=now(), name='Ansible Galaxy', managed_by_tower=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'}
    )
    try:
        # Needed for old migrations
        public_galaxy_credential = Credential(
            created=now(),
            modified=now(),
            name='Ansible Galaxy',
            managed_by_tower=True,
            credential_type=galaxy_type,
            inputs={'url': 'https://galaxy.ansible.com/'},
        )
    except:
        # Needed for new migrations, tests
        public_galaxy_credential = Credential(
            created=now(), modified=now(), name='Ansible Galaxy', managed=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'}
        )
    public_galaxy_credential.save()

    for org in Organization.objects.all():

@@ -3,7 +3,6 @@

# Django
from django.conf import settings  # noqa
from django.db import connection
from django.db.models.signals import pre_delete  # noqa

# AWX
@@ -36,6 +35,11 @@ from awx.main.models.events import (  # noqa
    JobEvent,
    ProjectUpdateEvent,
    SystemJobEvent,
    UnpartitionedAdHocCommandEvent,
    UnpartitionedInventoryUpdateEvent,
    UnpartitionedJobEvent,
    UnpartitionedProjectUpdateEvent,
    UnpartitionedSystemJobEvent,
)
from awx.main.models.ad_hoc_commands import AdHocCommand  # noqa
from awx.main.models.schedules import Schedule  # noqa
@@ -92,27 +96,6 @@ User.add_to_class('can_access_with_errors', check_user_access_with_errors)
User.add_to_class('accessible_objects', user_accessible_objects)


def enforce_bigint_pk_migration():
    #
    # NOTE: this function is not actually in use anymore,
    # but has been intentionally kept for historical purposes,
    # and to serve as an illustration if we ever need to perform
    # bulk modification/migration of event data in the future.
    #
    # see: https://github.com/ansible/awx/issues/6010
    # look at all the event tables and verify that they have been fully migrated
    # from the *old* int primary key table to the replacement bigint table
    # if not, attempt to migrate them in the background
    #
    for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
        with connection.cursor() as cursor:
            cursor.execute('SELECT 1 FROM information_schema.tables WHERE table_name=%s', (f'_old_{tblname}',))
            if bool(cursor.rowcount):
                from awx.main.tasks import migrate_legacy_event_data

                migrate_legacy_event_data.apply_async([tblname])


def cleanup_created_modified_by(sender, **kwargs):
    # work around a bug in django-polymorphic that doesn't properly
    # handle cascades for reverse foreign keys on the polymorphic base model

@@ -15,7 +15,7 @@ from django.core.exceptions import ValidationError
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
from awx.main.models.events import AdHocCommandEvent
from awx.main.models.events import AdHocCommandEvent, UnpartitionedAdHocCommandEvent
from awx.main.models.unified_jobs import UnifiedJob
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate

@@ -127,6 +127,8 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):

    @property
    def event_class(self):
        if self.has_unpartitioned_events:
            return UnpartitionedAdHocCommandEvent
        return AdHocCommandEvent

    @property
@@ -215,9 +217,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
            self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
            if 'name' not in update_fields:
                update_fields.append('name')
        if not self.execution_environment_id:
            self.execution_environment = self.resolve_execution_environment()
            update_fields.append('execution_environment')
        super(AdHocCommand, self).save(*args, **kwargs)

    @property

@@ -19,6 +19,7 @@ from django.utils.translation import ugettext_lazy as _, ugettext_noop
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.timezone import now

# AWX
from awx.api.versioning import reverse
@@ -92,7 +93,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
        on_delete=models.CASCADE,
        help_text=_('Specify the type of credential you want to create. Refer ' 'to the documentation for details on each type.'),
    )
    managed_by_tower = models.BooleanField(default=False, editable=False)
    managed = models.BooleanField(default=False, editable=False)
    organization = models.ForeignKey(
        'Organization',
        null=True,
@@ -341,7 +342,7 @@ class CredentialType(CommonModelNameNotUnique):
    )

    kind = models.CharField(max_length=32, choices=KIND_CHOICES)
    managed_by_tower = models.BooleanField(default=False, editable=False)
    managed = models.BooleanField(default=False, editable=False)
    namespace = models.CharField(max_length=1024, null=True, default=None, editable=False)
    inputs = CredentialTypeInputField(
        blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
@@ -355,7 +356,7 @@ class CredentialType(CommonModelNameNotUnique):
    @classmethod
    def from_db(cls, db, field_names, values):
        instance = super(CredentialType, cls).from_db(db, field_names, values)
        if instance.managed_by_tower and instance.namespace:
        if instance.managed and instance.namespace:
            native = ManagedCredentialType.registry[instance.namespace]
            instance.inputs = native.inputs
            instance.injectors = native.injectors
@@ -395,9 +396,13 @@ class CredentialType(CommonModelNameNotUnique):
        return dict((k, functools.partial(v.create)) for k, v in ManagedCredentialType.registry.items())

    @classmethod
    def setup_tower_managed_defaults(cls):
    def setup_tower_managed_defaults(cls, apps=None):
        if apps is not None:
            ct_class = apps.get_model('main', 'CredentialType')
        else:
            ct_class = CredentialType
        for default in ManagedCredentialType.registry.values():
            existing = CredentialType.objects.filter(name=default.name, kind=default.kind).first()
            existing = ct_class.objects.filter(name=default.name, kind=default.kind).first()
            if existing is not None:
                existing.namespace = default.namespace
                existing.inputs = {}
@@ -405,7 +410,11 @@ class CredentialType(CommonModelNameNotUnique):
                existing.save()
                continue
            logger.debug(_("adding %s credential type" % default.name))
            created = default.create()
            params = default.get_creation_params()
            if 'managed' not in [f.name for f in ct_class._meta.get_fields()]:
                params['managed_by_tower'] = params.pop('managed')
            params['created'] = params['modified'] = now()  # CreatedModifiedModel service
            created = ct_class(**params)
            created.inputs = created.injectors = {}
            created.save()

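The optional `apps` argument above is what lets the migrations earlier in this diff call setup_tower_managed_defaults(apps): inside a migration, models must come from the historical app registry rather than live code, since the live model may have fields (here, `managed`) that do not exist yet at that point in migration history. A minimal sketch of the pattern (the function body is illustrative, not AWX code):

    # hypothetical migration step using the historical registry
    def seed_defaults(apps, schema_editor):
        # reflects the schema as of *this* migration, not current models.py
        CredentialType = apps.get_model('main', 'CredentialType')
        CredentialType.objects.get_or_create(name='Machine', kind='ssh')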
@@ -439,7 +448,7 @@ class CredentialType(CommonModelNameNotUnique):
                 files)
        """
        if not self.injectors:
            if self.managed_by_tower and credential.credential_type.namespace in dir(builtin_injectors):
            if self.managed and credential.credential_type.namespace in dir(builtin_injectors):
                injected_env = {}
                getattr(builtin_injectors, credential.credential_type.namespace)(credential, injected_env, private_data_dir)
                env.update(injected_env)
@@ -556,16 +565,19 @@ class ManagedCredentialType(SimpleNamespace):
        )
        ManagedCredentialType.registry[namespace] = self

    def create(self):
        return CredentialType(
    def get_creation_params(self):
        return dict(
            namespace=self.namespace,
            kind=self.kind,
            name=self.name,
            managed_by_tower=True,
            managed=True,
            inputs=self.inputs,
            injectors=self.injectors,
        )

    def create(self):
        return CredentialType(**self.get_creation_params())


ManagedCredentialType(
    namespace='ssh',
@@ -606,7 +618,7 @@ ManagedCredentialType(
    namespace='scm',
    kind='scm',
    name=ugettext_noop('Source Control'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
@@ -621,7 +633,7 @@ ManagedCredentialType(
    namespace='vault',
    kind='vault',
    name=ugettext_noop('Vault'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'vault_password', 'label': ugettext_noop('Vault Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
@@ -647,7 +659,7 @@ ManagedCredentialType(
    namespace='net',
    kind='net',
    name=ugettext_noop('Network'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
@@ -687,7 +699,7 @@ ManagedCredentialType(
    namespace='aws',
    kind='cloud',
    name=ugettext_noop('Amazon Web Services'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'username', 'label': ugettext_noop('Access Key'), 'type': 'string'},
@@ -718,7 +730,7 @@ ManagedCredentialType(
    namespace='openstack',
    kind='cloud',
    name=ugettext_noop('OpenStack'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
@@ -776,7 +788,7 @@ ManagedCredentialType(
    namespace='vmware',
    kind='cloud',
    name=ugettext_noop('VMware vCenter'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {
@@ -801,7 +813,7 @@ ManagedCredentialType(
    namespace='satellite6',
    kind='cloud',
    name=ugettext_noop('Red Hat Satellite 6'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {
@@ -826,7 +838,7 @@ ManagedCredentialType(
    namespace='gce',
    kind='cloud',
    name=ugettext_noop('Google Compute Engine'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {
@@ -864,7 +876,7 @@ ManagedCredentialType(
    namespace='azure_rm',
    kind='cloud',
    name=ugettext_noop('Microsoft Azure Resource Manager'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {
@@ -903,7 +915,7 @@ ManagedCredentialType(
    namespace='github_token',
    kind='token',
    name=ugettext_noop('GitHub Personal Access Token'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {
@@ -922,7 +934,7 @@ ManagedCredentialType(
    namespace='gitlab_token',
    kind='token',
    name=ugettext_noop('GitLab Personal Access Token'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {
@@ -941,7 +953,7 @@ ManagedCredentialType(
    namespace='insights',
    kind='insights',
    name=ugettext_noop('Insights'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
@@ -954,6 +966,10 @@ ManagedCredentialType(
            "scm_username": "{{username}}",
            "scm_password": "{{password}}",
        },
        'env': {
            'INSIGHTS_USER': '{{username}}',
            'INSIGHTS_PASSWORD': '{{password}}',
        },
    },
)

@@ -961,7 +977,7 @@ ManagedCredentialType(
    namespace='rhv',
    kind='cloud',
    name=ugettext_noop('Red Hat Virtualization'),
    managed_by_tower=True,
    managed=True,
    inputs={
        'fields': [
            {'id': 'host', 'label': ugettext_noop('Host (Authentication URL)'), 'type': 'string', 'help_text': ugettext_noop('The host to authenticate with.')},
@@ -1002,23 +1018,25 @@ ManagedCredentialType(
)

ManagedCredentialType(
    namespace='tower',
    namespace='controller',
    kind='cloud',
    name=ugettext_noop('Ansible Tower'),
    managed_by_tower=True,
    name=ugettext_noop('Red Hat Ansible Automation Platform'),
    managed=True,
    inputs={
        'fields': [
            {
                'id': 'host',
                'label': ugettext_noop('Ansible Tower Hostname'),
                'label': ugettext_noop('Red Hat Ansible Automation Platform'),
                'type': 'string',
                'help_text': ugettext_noop('The Ansible Tower base URL to authenticate with.'),
                'help_text': ugettext_noop('Red Hat Ansible Automation Platform base URL to authenticate with.'),
            },
            {
                'id': 'username',
                'label': ugettext_noop('Username'),
                'type': 'string',
                'help_text': ugettext_noop('The Ansible Tower user to authenticate as.' 'This should not be set if an OAuth token is being used.'),
                'help_text': ugettext_noop(
                    'Red Hat Ansible Automation Platform username id to authenticate as.' 'This should not be set if an OAuth token is being used.'
                ),
            },
            {
                'id': 'password',
@@ -1044,6 +1062,11 @@ ManagedCredentialType(
            'TOWER_PASSWORD': '{{password}}',
            'TOWER_VERIFY_SSL': '{{verify_ssl}}',
            'TOWER_OAUTH_TOKEN': '{{oauth_token}}',
            'CONTROLLER_HOST': '{{host}}',
            'CONTROLLER_USERNAME': '{{username}}',
            'CONTROLLER_PASSWORD': '{{password}}',
            'CONTROLLER_VERIFY_SSL': '{{verify_ssl}}',
            'CONTROLLER_OAUTH_TOKEN': '{{oauth_token}}',
        }
    },
)

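Both the legacy TOWER_* and the new CONTROLLER_* variables are injected side by side, so plugins and collections written against either naming keep working through the rename. A sketch of the consumer side (variable names per the injector block above and its surrounding context; the plugin code itself is illustrative):

    # hypothetical consumer resolving either naming convention
    import os

    host = os.environ.get('CONTROLLER_HOST') or os.environ.get('TOWER_HOST')
    token = os.environ.get('CONTROLLER_OAUTH_TOKEN') or os.environ.get('TOWER_OAUTH_TOKEN')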
@@ -15,7 +15,9 @@ from django.utils.encoding import force_text

from awx.api.versioning import reverse
from awx.main import consumers
from awx.main.managers import DeferJobCreatedManager
from awx.main.fields import JSONField
from awx.main.constants import MINIMAL_EVENTS
from awx.main.models.base import CreatedModifiedModel
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore

@@ -56,9 +58,6 @@ def create_host_status_counts(event_data):
    return dict(host_status_counts)


MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])


def emit_event_detail(event):
    if settings.UI_LIVE_UPDATES_ENABLED is False and event.event not in MINIMAL_EVENTS:
        return
@@ -271,6 +270,10 @@ class BasePlaybookEvent(CreatedModifiedModel):
        null=True,
        default=None,
        editable=False,
    )
    modified = models.DateTimeField(
        default=None,
        editable=False,
        db_index=True,
    )

@@ -365,14 +368,24 @@ class BasePlaybookEvent(CreatedModifiedModel):

            # find parent links and propagate changed=T and failed=T
            changed = (
                job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()
                job.get_event_queryset()
                .filter(changed=True)
                .exclude(parent_uuid=None)
                .only('parent_uuid')
                .values_list('parent_uuid', flat=True)
                .distinct()
            )  # noqa
            failed = (
                job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()
                job.get_event_queryset()
                .filter(failed=True)
                .exclude(parent_uuid=None)
                .only('parent_uuid')
                .values_list('parent_uuid', flat=True)
                .distinct()
            )  # noqa

            JobEvent.objects.filter(job_id=self.job_id, uuid__in=changed).update(changed=True)
            JobEvent.objects.filter(job_id=self.job_id, uuid__in=failed).update(failed=True)
            job.get_event_queryset().filter(uuid__in=changed).update(changed=True)
            job.get_event_queryset().filter(uuid__in=failed).update(failed=True)

            # send success/failure notifications when we've finished handling the playbook_on_stats event
            from awx.main.tasks import handle_success_and_failure_notifications  # circular import
@@ -423,6 +436,16 @@ class BasePlaybookEvent(CreatedModifiedModel):
        except (KeyError, ValueError):
            kwargs.pop('created', None)

        # same as above, for job_created
        # TODO: if this approach, identical to above, works, can convert to for loop
        try:
            if not isinstance(kwargs['job_created'], datetime.datetime):
                kwargs['job_created'] = parse_datetime(kwargs['job_created'])
            if not kwargs['job_created'].tzinfo:
                kwargs['job_created'] = kwargs['job_created'].replace(tzinfo=utc)
        except (KeyError, ValueError):
            kwargs.pop('job_created', None)

        host_map = kwargs.pop('host_map', {})

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
@@ -430,6 +453,11 @@ class BasePlaybookEvent(CreatedModifiedModel):
        event = cls(**kwargs)
        if workflow_job_id:
            setattr(event, 'workflow_job_id', workflow_job_id)
        # shouldn't job_created _always_ be present?
        # if it's not, how could we save the event to the db?
        job_created = kwargs.pop('job_created', None)
        if job_created:
            setattr(event, 'job_created', job_created)
        setattr(event, 'host_map', host_map)
        event._update_from_event_data()
        return event
@@ -444,25 +472,28 @@ class JobEvent(BasePlaybookEvent):
    An event/message logged from the callback when running a job.
    """

    VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id']
    VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']

    objects = DeferJobCreatedManager()

    class Meta:
        app_label = 'main'
        ordering = ('pk',)
        index_together = [
            ('job', 'event'),
            ('job', 'uuid'),
            ('job', 'start_line'),
            ('job', 'end_line'),
            ('job', 'parent_uuid'),
            ('job', 'job_created', 'event'),
            ('job', 'job_created', 'uuid'),
            ('job', 'job_created', 'parent_uuid'),
            ('job', 'job_created', 'counter'),
        ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    job = models.ForeignKey(
        'Job',
        related_name='job_events',
        on_delete=models.CASCADE,
        null=True,
        on_delete=models.DO_NOTHING,
        editable=False,
        db_index=False,
    )
    host = models.ForeignKey(
        'Host',
@@ -482,6 +513,7 @@ class JobEvent(BasePlaybookEvent):
        default='',
        editable=False,
    )
    job_created = models.DateTimeField(null=True, editable=False)

    def get_absolute_url(self, request=None):
        return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
@@ -518,7 +550,7 @@ class JobEvent(BasePlaybookEvent):
        summaries = dict()
        updated_hosts_list = list()
        for host in hostnames:
            updated_hosts_list.append(host)
            updated_hosts_list.append(host.lower())
            host_id = self.host_map.get(host, None)
            if host_id not in existing_host_ids:
                host_id = None
@@ -561,33 +593,52 @@ class JobEvent(BasePlaybookEvent):
        return self.job.verbosity


class UnpartitionedJobEvent(JobEvent):
    class Meta:
        proxy = True


UnpartitionedJobEvent._meta.db_table = '_unpartitioned_' + JobEvent._meta.db_table  # noqa

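The proxy-plus-db_table override above is what keeps old, not-yet-migrated rows queryable: a proxy model adds no columns, so pointing its _meta.db_table at the renamed legacy table reuses all of JobEvent's fields and methods against _unpartitioned_main_jobevent. Usage is then ordinary ORM code (the job id below is illustrative):

    # hypothetical read of legacy events for one job via the proxy model
    legacy = UnpartitionedJobEvent.objects.filter(job__id=42).order_by('counter')
    for ev in legacy:
        print(ev.counter, ev.event)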
class ProjectUpdateEvent(BasePlaybookEvent):

    VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id']
    VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id', 'job_created']

    objects = DeferJobCreatedManager()

    class Meta:
        app_label = 'main'
        ordering = ('pk',)
        index_together = [
            ('project_update', 'event'),
            ('project_update', 'uuid'),
            ('project_update', 'start_line'),
            ('project_update', 'end_line'),
            ('project_update', 'job_created', 'event'),
            ('project_update', 'job_created', 'uuid'),
            ('project_update', 'job_created', 'counter'),
        ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    project_update = models.ForeignKey(
        'ProjectUpdate',
        related_name='project_update_events',
        on_delete=models.CASCADE,
        on_delete=models.DO_NOTHING,
        editable=False,
        db_index=False,
    )
    job_created = models.DateTimeField(null=True, editable=False)

    @property
    def host_name(self):
        return 'localhost'


class UnpartitionedProjectUpdateEvent(ProjectUpdateEvent):
    class Meta:
        proxy = True


UnpartitionedProjectUpdateEvent._meta.db_table = '_unpartitioned_' + ProjectUpdateEvent._meta.db_table  # noqa


class BaseCommandEvent(CreatedModifiedModel):
    """
    An event/message logged from a command for each host.
@@ -627,6 +678,16 @@ class BaseCommandEvent(CreatedModifiedModel):
        default=0,
        editable=False,
    )
    created = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
    )
    modified = models.DateTimeField(
        default=None,
        editable=False,
        db_index=True,
    )

    def __str__(self):
        return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
@@ -681,16 +742,17 @@ class BaseCommandEvent(CreatedModifiedModel):

class AdHocCommandEvent(BaseCommandEvent):

    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id']
    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id', 'job_created']

    objects = DeferJobCreatedManager()

    class Meta:
        app_label = 'main'
        ordering = ('-pk',)
        index_together = [
            ('ad_hoc_command', 'event'),
            ('ad_hoc_command', 'uuid'),
            ('ad_hoc_command', 'start_line'),
            ('ad_hoc_command', 'end_line'),
            ('ad_hoc_command', 'job_created', 'event'),
            ('ad_hoc_command', 'job_created', 'uuid'),
            ('ad_hoc_command', 'job_created', 'counter'),
        ]

    EVENT_TYPES = [
@@ -737,8 +799,9 @@ class AdHocCommandEvent(BaseCommandEvent):
    ad_hoc_command = models.ForeignKey(
        'AdHocCommand',
        related_name='ad_hoc_command_events',
        on_delete=models.CASCADE,
        on_delete=models.DO_NOTHING,
        editable=False,
        db_index=False,
    )
    host = models.ForeignKey(
        'Host',
@@ -753,6 +816,7 @@ class AdHocCommandEvent(BaseCommandEvent):
        default='',
        editable=False,
    )
    job_created = models.DateTimeField(null=True, editable=False)

    def get_absolute_url(self, request=None):
        return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
@@ -768,26 +832,37 @@ class AdHocCommandEvent(BaseCommandEvent):
        analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=self)))


class UnpartitionedAdHocCommandEvent(AdHocCommandEvent):
    class Meta:
        proxy = True


UnpartitionedAdHocCommandEvent._meta.db_table = '_unpartitioned_' + AdHocCommandEvent._meta.db_table  # noqa


class InventoryUpdateEvent(BaseCommandEvent):

    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id']
    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id', 'job_created']

    objects = DeferJobCreatedManager()

    class Meta:
        app_label = 'main'
        ordering = ('-pk',)
        index_together = [
            ('inventory_update', 'uuid'),
            ('inventory_update', 'start_line'),
            ('inventory_update', 'end_line'),
            ('inventory_update', 'job_created', 'uuid'),
            ('inventory_update', 'job_created', 'counter'),
        ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    inventory_update = models.ForeignKey(
        'InventoryUpdate',
        related_name='inventory_update_events',
        on_delete=models.CASCADE,
        on_delete=models.DO_NOTHING,
        editable=False,
        db_index=False,
    )
    job_created = models.DateTimeField(null=True, editable=False)

    @property
    def event(self):
@@ -802,26 +877,37 @@ class InventoryUpdateEvent(BaseCommandEvent):
        return False


class UnpartitionedInventoryUpdateEvent(InventoryUpdateEvent):
    class Meta:
        proxy = True


UnpartitionedInventoryUpdateEvent._meta.db_table = '_unpartitioned_' + InventoryUpdateEvent._meta.db_table  # noqa


class SystemJobEvent(BaseCommandEvent):

    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id']
    VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id', 'job_created']

    objects = DeferJobCreatedManager()

    class Meta:
        app_label = 'main'
        ordering = ('-pk',)
        index_together = [
            ('system_job', 'uuid'),
            ('system_job', 'start_line'),
            ('system_job', 'end_line'),
            ('system_job', 'job_created', 'uuid'),
            ('system_job', 'job_created', 'counter'),
        ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    system_job = models.ForeignKey(
        'SystemJob',
        related_name='system_job_events',
        on_delete=models.CASCADE,
        on_delete=models.DO_NOTHING,
        editable=False,
        db_index=False,
    )
    job_created = models.DateTimeField(null=True, editable=False)

    @property
    def event(self):
@@ -834,3 +920,11 @@ class SystemJobEvent(BaseCommandEvent):
    @property
    def changed(self):
        return False


class UnpartitionedSystemJobEvent(SystemJobEvent):
    class Meta:
        proxy = True


UnpartitionedSystemJobEvent._meta.db_table = '_unpartitioned_' + SystemJobEvent._meta.db_table  # noqa

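DeferJobCreatedManager is attached to every partitioned event model above but is defined elsewhere (awx.main.managers, not shown in this diff). A plausible minimal sketch, assuming its only job is to defer loading of the job_created partition key — rows preserved from the pre-partition tables have no such column to read:

    from django.db import models

    class DeferJobCreatedManager(models.Manager):
        def get_queryset(self):
            # Defer the partition-key column so default queries stay valid
            # against both partitioned and legacy (_unpartitioned_) tables.
            return super(DeferJobCreatedManager, self).get_queryset().defer('job_created')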
@@ -3,6 +3,7 @@ from django.utils.translation import ugettext_lazy as _

from awx.api.versioning import reverse
from awx.main.models.base import CommonModel
from awx.main.validators import validate_container_image_name


__all__ = ['ExecutionEnvironment']
@@ -31,8 +32,9 @@ class ExecutionEnvironment(CommonModel):
        max_length=1024,
        verbose_name=_('image location'),
        help_text=_("The full image location, including the container registry, image name, and version tag."),
        validators=[validate_container_image_name],
    )
    managed_by_tower = models.BooleanField(default=False, editable=False)
    managed = models.BooleanField(default=False, editable=False)
    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',

@@ -130,12 +130,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
        return self.modified < ref_time - timedelta(seconds=grace_period)

    def refresh_capacity(self):
        if settings.IS_K8S:
            self.capacity = self.cpu = self.memory = self.cpu_capacity = self.mem_capacity = 0  # noqa
            self.version = awx_application_version
            self.save(update_fields=['capacity', 'version', 'modified', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity'])
            return

        cpu = get_cpu_capacity()
        mem = get_mem_capacity()
        if self.enabled:

@@ -35,7 +35,7 @@ from awx.main.fields import (
)
from awx.main.managers import HostManager
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
from awx.main.models.events import InventoryUpdateEvent
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
from awx.main.models.mixins import (
    ResourceMixin,
@@ -51,6 +51,7 @@ from awx.main.models.credential.injectors import _openstack_data
from awx.main.utils import _inventory_updates
from awx.main.utils.safe_yaml import sanitize_jinja
from awx.main.utils.execution_environments import to_container_path
from awx.main.utils.licensing import server_product_name


__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
@@ -164,15 +165,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
            'admin_role',
        ]
    )
    insights_credential = models.ForeignKey(
        'Credential',
        related_name='insights_inventories',
        help_text=_('Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.'),
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        default=None,
    )
    pending_deletion = models.BooleanField(
        default=False,
        editable=False,
@@ -314,7 +306,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        for host in hosts:
            data['_meta']['hostvars'][host.name] = host.variables_dict
            if towervars:
                tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(), remote_tower_id=host.id)
                tower_dict = dict(
                    remote_tower_enabled=str(host.enabled).lower(),
                    remote_tower_id=host.id,
                    remote_host_enabled=str(host.enabled).lower(),
                    remote_host_id=host.id,
                )
                data['_meta']['hostvars'][host.name].update(tower_dict)

        return data
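The expanded tower_dict above keeps the legacy remote_tower_* keys while adding remote_host_* duplicates, so either spelling works for consumers. For illustration, a host named web1 with enabled=True and id=42 would pick up hostvars like:

    {
        'remote_tower_enabled': 'true',  # legacy key, preserved
        'remote_tower_id': 42,
        'remote_host_enabled': 'true',   # new key, same value
        'remote_host_id': 42,
    }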
@@ -367,13 +364,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        group_pks = self.groups.values_list('pk', flat=True)
        return self.groups.exclude(parents__pk__in=group_pks).distinct()

    def clean_insights_credential(self):
        if self.kind == 'smart' and self.insights_credential:
            raise ValidationError(_("Assignment not allowed for Smart Inventory"))
        if self.insights_credential and self.insights_credential.credential_type.kind != 'insights':
            raise ValidationError(_("Credential kind must be 'insights'."))
        return self.insights_credential

    @transaction.atomic
    def schedule_deletion(self, user_id=None):
        from awx.main.tasks import delete_inventory
@@ -503,13 +493,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
        null=True,
        help_text=_('The date and time ansible_facts was last modified.'),
    )
    insights_system_id = models.TextField(
        blank=True,
        default=None,
        null=True,
        db_index=True,
        help_text=_('Red Hat Insights host unique identifier.'),
    )

    objects = HostManager()

@@ -827,7 +810,8 @@ class InventorySourceOptions(BaseModel):
        ('satellite6', _('Red Hat Satellite 6')),
        ('openstack', _('OpenStack')),
        ('rhv', _('Red Hat Virtualization')),
        ('tower', _('Ansible Tower')),
        ('controller', _('Red Hat Ansible Automation Platform')),
        ('insights', _('Red Hat Insights')),
    ]

    # From the options of the Django management base command
@@ -1230,6 +1214,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
    def is_container_group_task(self):
        return bool(self.instance_group and self.instance_group.is_container_group)

    @property
    def can_run_containerized(self):
        return True

    def _get_parent_field_name(self):
        return 'inventory_source'

@@ -1265,6 +1253,8 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,

    @property
    def event_class(self):
        if self.has_unpartitioned_events:
            return UnpartitionedInventoryUpdateEvent
        return InventoryUpdateEvent

    @property
@@ -1306,16 +1296,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
            return self.global_instance_groups
        return selected_groups

    @property
    def ansible_virtualenv_path(self):
        if self.inventory_source and self.inventory_source.custom_virtualenv:
            return self.inventory_source.custom_virtualenv
        if self.inventory_source and self.inventory_source.source_project:
            project = self.inventory_source.source_project
            if project and project.custom_virtualenv:
                return project.custom_virtualenv
        return settings.ANSIBLE_VENV_PATH

    def cancel(self, job_explanation=None, is_chain=False):
        res = super(InventoryUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
        if res:
@@ -1350,6 +1330,7 @@ class PluginFileInjector(object):
    namespace = None
    collection = None
    collection_migration = '2.9'  # Starting with this version, we use collections
    use_fqcn = False  # plugin: name versus plugin: namespace.collection.name

    # TODO: delete this method and update unit tests
    @classmethod
@@ -1376,7 +1357,12 @@ class PluginFileInjector(object):
        Note that a plugin value of '' should still be overridden.
        '''
        if self.plugin_name is not None:
            source_vars['plugin'] = self.plugin_name
            if hasattr(self, 'downstream_namespace') and server_product_name() != 'AWX':
                source_vars['plugin'] = f'{self.downstream_namespace}.{self.downstream_collection}.{self.plugin_name}'
            elif self.use_fqcn:
                source_vars['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
            else:
                source_vars['plugin'] = self.plugin_name
        return source_vars

    def build_env(self, inventory_update, env, private_data_dir, private_data_files):
@@ -1387,7 +1373,7 @@ class PluginFileInjector(object):
        return env

    def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
        """By default, we will apply the standard managed_by_tower injectors"""
        """By default, we will apply the standard managed injectors"""
        injected_env = {}
        credential = inventory_update.get_cloud_credential()
        # some sources may have no credential, specifically ec2
@@ -1406,7 +1392,7 @@ class PluginFileInjector(object):
        args = []
        credential.credential_type.inject_credential(credential, injected_env, safe_env, args, private_data_dir)
        # NOTE: safe_env is handled externally to injector class by build_safe_env static method
        # that means that managed_by_tower injectors must only inject detectable env keys
        # that means that managed injectors must only inject detectable env keys
        # enforcement of this is accomplished by tests
        return injected_env

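With the branching in get_plugin_vars above, the plugin name an injector writes depends on use_fqcn and on whether the product reports itself as AWX. Illustrative outcomes for the satellite6 injector defined further below (the values follow from its class attributes):

    # downstream product (server_product_name() != 'AWX'), downstream_* attributes set:
    #     source_vars['plugin'] == 'redhat.satellite.foreman'
    # upstream AWX with use_fqcn = True:
    #     source_vars['plugin'] == 'theforeman.foreman.foreman'
    # neither condition (use_fqcn = False, no downstream attributes):
    #     source_vars['plugin'] == 'foreman'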
@@ -1524,12 +1510,17 @@ class rhv(PluginFileInjector):
    initial_version = '2.9'
    namespace = 'ovirt'
    collection = 'ovirt'
    downstream_namespace = 'redhat'
    downstream_collection = 'rhv'


class satellite6(PluginFileInjector):
    plugin_name = 'foreman'
    namespace = 'theforeman'
    collection = 'foreman'
    downstream_namespace = 'redhat'
    downstream_collection = 'satellite'
    use_fqcn = True

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        # this assumes that this is merged
@@ -1542,18 +1533,24 @@ class satellite6(PluginFileInjector):
        ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
        return ret

    def inventory_as_dict(self, inventory_update, private_data_dir):
        ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
        # this inventory plugin requires the fully qualified inventory plugin name
        ret['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
        return ret


class tower(PluginFileInjector):
    plugin_name = 'tower'
class controller(PluginFileInjector):
    plugin_name = 'tower'  # TODO: relying on routing for now, update after EEs pick up revised collection
    base_injector = 'template'
    namespace = 'awx'
    collection = 'awx'
    downstream_namespace = 'ansible'
    downstream_collection = 'controller'


class insights(PluginFileInjector):
    plugin_name = 'insights'
    base_injector = 'template'
    namespace = 'redhatinsights'
    collection = 'insights'
    downstream_namespace = 'redhat'
    downstream_collection = 'insights'
    use_fqcn = True


for cls in PluginFileInjector.__subclasses__():

@@ -37,7 +37,7 @@ from awx.main.models.base import (
    VERBOSITY_CHOICES,
    VarsDictProperty,
)
from awx.main.models.events import JobEvent, SystemJobEvent
from awx.main.models.events import JobEvent, UnpartitionedJobEvent, UnpartitionedSystemJobEvent, SystemJobEvent
from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
from awx.main.models.notifications import (
    NotificationTemplate,
@@ -600,20 +600,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
    def get_ui_url(self):
        return urljoin(settings.TOWER_URL_BASE, "/#/jobs/playbook/{}".format(self.pk))

    @property
    def ansible_virtualenv_path(self):
        # the order here enforces precedence (it matters)
        for virtualenv in (
            self.job_template.custom_virtualenv if self.job_template else None,
            self.project.custom_virtualenv,
            self.organization.custom_virtualenv if self.organization else None,
        ):
            if virtualenv:
                return virtualenv
        return settings.ANSIBLE_VENV_PATH

    @property
    def event_class(self):
        if self.has_unpartitioned_events:
            return UnpartitionedJobEvent
        return JobEvent

    def copy_unified_job(self, **new_prompts):
@@ -855,23 +845,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                continue
            host.ansible_facts = ansible_facts
            host.ansible_facts_modified = now()
            ansible_local = ansible_facts.get('ansible_local', {}).get('insights', {})
            ansible_facts = ansible_facts.get('insights', {})
            ansible_local_system_id = ansible_local.get('system_id', None) if isinstance(ansible_local, dict) else None
            ansible_facts_system_id = ansible_facts.get('system_id', None) if isinstance(ansible_facts, dict) else None
            if ansible_local_system_id:
                print("Setting local {}".format(ansible_local_system_id))
                logger.debug(
                    "Insights system_id {} found for host <{}, {}> in"
                    " ansible local facts".format(ansible_local_system_id, host.inventory.id, host.name)
                )
                host.insights_system_id = ansible_local_system_id
            elif ansible_facts_system_id:
                logger.debug(
                    "Insights system_id {} found for host <{}, {}> in"
                    " insights facts".format(ansible_local_system_id, host.inventory.id, host.name)
                )
                host.insights_system_id = ansible_facts_system_id
            host.save()
            system_tracking_logger.info(
                'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
@@ -1259,17 +1232,21 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

    @property
    def event_class(self):
        if self.has_unpartitioned_events:
            return UnpartitionedSystemJobEvent
        return SystemJobEvent

    @property
    def can_run_on_control_plane(self):
        return True

    @property
    def task_impact(self):
        if settings.IS_K8S:
            return 0
        return 5

    @property
    def preferred_instance_groups(self):
        return self.global_instance_groups
        return self.control_plane_instance_group

    '''
    JobNotificationMixin

@@ -464,15 +464,18 @@ class ExecutionEnvironmentMixin(models.Model):

    def resolve_execution_environment(self):
        """
        Return the execution environment that should be used when creating a new job.
        Return the execution environment that should be used when executing a job.
        """
        if self.execution_environment is not None:
            return self.execution_environment
        template = getattr(self, 'unified_job_template', None)
        if template is not None and template.execution_environment is not None:
            return template.execution_environment
        if getattr(self, 'project_id', None) and self.project.default_environment is not None:
            return self.project.default_environment
        if getattr(self, 'organization', None) and self.organization.default_environment is not None:
        if getattr(self, 'organization_id', None) and self.organization.default_environment is not None:
            return self.organization.default_environment
        if getattr(self, 'inventory', None) and self.inventory.organization is not None:
        if getattr(self, 'inventory_id', None) and self.inventory.organization is not None:
            if self.inventory.organization.default_environment is not None:
                return self.inventory.organization.default_environment

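The guards above define a strict precedence for picking an execution environment; a compact restatement of the chain (a sketch of the logic, not additional code in the change):

    # Precedence, highest first:
    #   1. the job's own execution_environment
    #   2. the unified job template's execution_environment
    #   3. the project's default_environment
    #   4. the organization's default_environment
    #   5. the inventory's organization's default_environment
    # The organization_id / inventory_id guards (the fix in this hunk) test the
    # raw foreign-key column, which avoids fetching the related row — and any
    # related-object exception — when the key is unset.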
@@ -117,7 +117,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
    def create_default_galaxy_credential(self):
        from awx.main.models import Credential

        public_galaxy_credential = Credential.objects.filter(managed_by_tower=True, name='Ansible Galaxy').first()
        public_galaxy_credential = Credential.objects.filter(managed=True, name='Ansible Galaxy').first()
        if public_galaxy_credential not in self.galaxy_credentials.all():
            self.galaxy_credentials.add(public_galaxy_credential)


@@ -19,7 +19,7 @@ from django.utils.timezone import now, make_aware, get_default_timezone
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import PROJECT_UPDATE_JOB_TYPE_CHOICES, PERM_INVENTORY_DEPLOY
from awx.main.models.events import ProjectUpdateEvent
from awx.main.models.events import ProjectUpdateEvent, UnpartitionedProjectUpdateEvent
from awx.main.models.notifications import (
    NotificationTemplate,
    JobNotificationMixin,
@@ -32,7 +32,7 @@ from awx.main.models.jobs import Job
from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin, RelatedJobsMixin
from awx.main.utils import update_scm_url, polymorphic
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
from awx.main.utils.execution_environments import get_default_execution_environment
from awx.main.utils.execution_environments import get_control_plane_execution_environment
from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import (
    ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
@@ -185,11 +185,11 @@ class ProjectOptions(models.Model):

    def resolve_execution_environment(self):
        """
        Project updates, themselves, will use the default execution environment.
        Project updates, themselves, will use the control plane execution environment.
        Jobs using the project can use the default_environment, but the project updates
        are not flexible enough to allow customizing the image they use.
        """
        return get_default_execution_environment()
        return get_control_plane_execution_environment()

    def get_project_path(self, check_if_exists=True):
        local_path = os.path.basename(self.local_path)
@@ -553,14 +553,18 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
        websocket_data.update(dict(project_id=self.project.id))
        return websocket_data

    @property
    def can_run_on_control_plane(self):
        return True

    @property
    def event_class(self):
        if self.has_unpartitioned_events:
            return UnpartitionedProjectUpdateEvent
        return ProjectUpdateEvent

    @property
    def task_impact(self):
        if settings.IS_K8S:
            return 0
        return 0 if self.job_type == 'run' else 1

    @property
@@ -621,6 +625,8 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
        organization_groups = []
        template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
        selected_groups = template_groups + organization_groups
        if not any([not group.is_container_group for group in selected_groups]):
            selected_groups = selected_groups + list(self.control_plane_instance_group)
        if not selected_groups:
            return self.global_instance_groups
        return selected_groups

@@ -49,6 +49,7 @@ from awx.main.utils import (
    getattr_dne,
    polymorphic,
    schedule_task_manager,
    get_event_partition_epoch,
)
from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
from awx.main.redact import UriCleaner, REPLACE_STR
@@ -366,8 +367,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
        for fd, val in eager_fields.items():
            setattr(unified_job, fd, val)

        unified_job.execution_environment = self.resolve_execution_environment()

        # NOTE: slice workflow jobs _get_parent_field_name method
        # is not correct until this is set
        if not parent_field_name:
@@ -718,6 +717,9 @@ class UnifiedJob(
        editable=False,
        help_text=_("The version of Ansible Core installed in the execution environment."),
    )
    work_unit_id = models.CharField(
        max_length=255, blank=True, default=None, editable=False, null=True, help_text=_("The Receptor work unit ID associated with this job.")
    )

    def get_absolute_url(self, request=None):
        RealClass = self.get_real_instance_class()
@@ -737,6 +739,13 @@ class UnifiedJob(
    def _get_task_class(cls):
        raise NotImplementedError  # Implement in subclasses.

    @property
    def can_run_on_control_plane(self):
        if settings.IS_K8S:
            return False

        return True

    @property
    def can_run_containerized(self):
        return False
@@ -992,8 +1001,18 @@ class UnifiedJob(
        'main_systemjob': 'system_job_id',
    }[tablename]

    @property
    def has_unpartitioned_events(self):
        applied = get_event_partition_epoch()
        return applied and self.created and self.created < applied

    def get_event_queryset(self):
        return self.event_class.objects.filter(**{self.event_parent_key: self.id})
        kwargs = {
            self.event_parent_key: self.id,
        }
        if not self.has_unpartitioned_events:
            kwargs['job_created'] = self.created
        return self.event_class.objects.filter(**kwargs)

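The new get_event_queryset adds the job_created partition key whenever the job postdates the partition migration; a sketch of the two resulting query shapes (hypothetical job with pk 42):

    # Job created after the partition epoch (the normal case going forward):
    #     JobEvent.objects.filter(job_id=42, job_created=job.created)
    #     -> PostgreSQL can prune to the single partition holding these events
    # Job created before the epoch (has_unpartitioned_events is truthy):
    #     UnpartitionedJobEvent.objects.filter(job_id=42)
    #     -> served from the preserved _unpartitioned_main_jobevent table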
    @property
    def event_processing_finished(self):
@@ -1079,13 +1098,15 @@ class UnifiedJob(
        # .write() calls on the fly to maintain this interface
        _write = fd.write
        fd.write = lambda s: _write(smart_text(s))
        tbl = self._meta.db_table + 'event'
        created_by_cond = ''
        if self.has_unpartitioned_events:
            tbl = f'_unpartitioned_{tbl}'
        else:
            created_by_cond = f"job_created='{self.created.isoformat()}' AND "

        cursor.copy_expert(
            "copy (select stdout from {} where {}={} and stdout != '' order by start_line) to stdout".format(
                self._meta.db_table + 'event', self.event_parent_key, self.id
            ),
            fd,
        )
        sql = f"copy (select stdout from {tbl} where {created_by_cond}{self.event_parent_key}={self.id} and stdout != '' order by start_line) to stdout"  # nosql
        cursor.copy_expert(sql, fd)

        if hasattr(fd, 'name'):
            # If we're dealing with a physical file, use `sed` to clean
@@ -1404,14 +1425,26 @@ class UnifiedJob(
            return []
        return list(self.unified_job_template.instance_groups.all())

    @property
    def control_plane_instance_group(self):
        from awx.main.models.ha import InstanceGroup

        control_plane_instance_group = InstanceGroup.objects.filter(name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)

        return list(control_plane_instance_group)

    @property
    def global_instance_groups(self):
        from awx.main.models.ha import InstanceGroup

        default_instance_group = InstanceGroup.objects.filter(name='tower')
        if default_instance_group.exists():
            return [default_instance_group.first()]
        return []
        default_instance_group_names = [settings.DEFAULT_EXECUTION_QUEUE_NAME]

        if not settings.IS_K8S:
            default_instance_group_names.append(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)

        default_instance_groups = InstanceGroup.objects.filter(name__in=default_instance_group_names)

        return list(default_instance_groups)

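Assuming the stock AWX settings values (DEFAULT_EXECUTION_QUEUE_NAME = 'default' and DEFAULT_CONTROL_PLANE_QUEUE_NAME = 'controlplane' — an assumption, since the settings are not shown in this diff), the rewritten global_instance_groups behaves like:

    # traditional install (settings.IS_K8S is False):
    #     job.global_instance_groups  ->  [<InstanceGroup: default>, <InstanceGroup: controlplane>]
    # Kubernetes install (settings.IS_K8S is True):
    #     job.global_instance_groups  ->  [<InstanceGroup: default>]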
    def awx_meta_vars(self):
        """

@@ -258,6 +258,10 @@ class WorkflowJobNode(WorkflowNodeBase):
        models.Index(fields=['identifier']),
    ]

    @property
    def event_processing_finished(self):
        return True

    def get_absolute_url(self, request=None):
        return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)

@@ -591,6 +595,9 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
    def _get_related_jobs(self):
        return WorkflowJob.objects.filter(workflow_job_template=self)

    def resolve_execution_environment(self):
        return None  # EEs are not meaningful for workflows


class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin, WebhookMixin):
    class Meta:
@@ -620,6 +627,10 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
    def workflow_nodes(self):
        return self.workflow_job_nodes

    @property
    def event_processing_finished(self):
        return True

    def _get_parent_field_name(self):
        if self.job_template_id:
            # This is a workflow job which is a container for slice jobs

@@ -96,7 +96,7 @@ class PodManager(object):
        error_msg = _('Invalid openshift or k8s cluster credential')
        if e.status == 403:
            error_msg = _(
                'Failed to create secret for container group {} because the needed service account roles are needed. Add get, create and delete roles for secret resources for your cluster credential.'.format(
                'Failed to create secret for container group {} because additional service account role rules are needed. Add get, create and delete role rules for secret resources for your cluster credential.'.format(
                    job.instance_group.name
                )
            )
@@ -113,7 +113,7 @@ class PodManager(object):
        error_msg = _('Invalid openshift or k8s cluster credential')
        if e.status == 403:
            error_msg = _(
                'Failed to delete secret for container group {} because the needed service account roles are needed. Add create and delete roles for secret resources for your cluster credential.'.format(
                'Failed to delete secret for container group {} because additional service account role rules are needed. Add create and delete role rules for secret resources for your cluster credential.'.format(
                    job.instance_group.name
                )
            )

@@ -35,6 +35,7 @@ from awx.main.models import (
from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.main.utils.pglock import advisory_lock
from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
from awx.main.utils.common import create_partition
from awx.main.signals import disable_activity_stream
from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.utils import decrypt_field
@@ -301,6 +302,8 @@ class TaskManager:

        def post_commit():
            if task.status != 'failed' and type(task) is not WorkflowJob:
                # Before task is dispatched, ensure that job_event partitions exist
                create_partition(task.event_class._meta.db_table, start=task.created)
                task_cls = task._get_task_class()
                task_cls.apply_async(
                    [task.pk],
@@ -471,6 +474,7 @@ class TaskManager:
                tasks_to_update_job_explanation.append(task)
                continue
            preferred_instance_groups = task.preferred_instance_groups

            found_acceptable_queue = False
            if isinstance(task, WorkflowJob):
                if task.unified_job_template_id in running_workflow_templates:
@@ -481,6 +485,7 @@ class TaskManager:
                running_workflow_templates.add(task.unified_job_template_id)
                self.start_task(task, None, task.get_jobs_fail_chain(), None)
                continue

            for rampart_group in preferred_instance_groups:
                if task.can_run_containerized and rampart_group.is_container_group:
                    self.graph[rampart_group.name]['graph'].add_job(task)
@@ -488,12 +493,12 @@ class TaskManager:
                    found_acceptable_queue = True
                    break

                if not task.can_run_on_control_plane:
                    logger.debug("Skipping group {}, task cannot run on control plane".format(rampart_group.name))
                    continue

                remaining_capacity = self.get_remaining_capacity(rampart_group.name)
                if (
                    task.task_impact > 0
                    and not rampart_group.is_container_group  # project updates have a cost of zero
                    and self.get_remaining_capacity(rampart_group.name) <= 0
                ):
                if task.task_impact > 0 and self.get_remaining_capacity(rampart_group.name) <= 0:
                    logger.debug("Skipping group {}, remaining_capacity {} <= 0".format(rampart_group.name, remaining_capacity))
                    continue

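create_partition (imported from awx.main.utils.common at the top of this hunk, and called again in the task code further below) runs before dispatch so the event table already has a partition covering the job's creation time. Its implementation is outside this diff; a hedged sketch of what such a helper might execute, assuming hourly range partitions — the names and granularity here are illustrative only:

    from datetime import timedelta
    from django.db import connection

    def create_partition_sketch(tblname, start):
        # Hypothetical stand-in for awx.main.utils.common.create_partition.
        start = start.replace(minute=0, second=0, microsecond=0)
        end = start + timedelta(hours=1)
        label = start.strftime('%Y%m%d_%H')
        with connection.cursor() as cursor:
            cursor.execute(
                f"CREATE TABLE IF NOT EXISTS {tblname}_{label} "
                f"PARTITION OF {tblname} "
                f"FOR VALUES FROM ('{start.isoformat()}') TO ('{end.isoformat()}');"
            )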
@@ -31,6 +31,7 @@ from crum.signals import current_user_getter
# AWX
from awx.main.models import (
    ActivityStream,
    ExecutionEnvironment,
    Group,
    Host,
    InstanceGroup,
@@ -623,6 +624,12 @@ def deny_orphaned_approvals(sender, instance, **kwargs):
            approval.deny()


@receiver(pre_delete, sender=ExecutionEnvironment)
def remove_default_ee(sender, instance, **kwargs):
    if instance.id == getattr(settings.DEFAULT_EXECUTION_ENVIRONMENT, 'id', None):
        settings.DEFAULT_EXECUTION_ENVIRONMENT = None


@receiver(post_save, sender=Session)
def save_user_session_membership(sender, **kwargs):
    session = kwargs.get('instance', None)

@@ -4,7 +4,7 @@
# All Rights Reserved.

# Python
from collections import OrderedDict, namedtuple
from collections import OrderedDict, namedtuple, deque
import errno
import functools
import importlib
@@ -32,7 +32,7 @@ import sys

# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
@@ -57,7 +57,7 @@ from receptorctl.socket_interface import ReceptorControl

# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
@@ -87,7 +87,7 @@ from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
from awx.main.utils.common import (
    update_scm_url,
    ignore_inventory_computed_fields,
    ignore_inventory_group_removal,
@@ -97,8 +97,9 @@ from awx.main.utils import (
    deepmerge,
    parse_yaml_or_json,
    cleanup_new_process,
    create_partition,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
@@ -472,6 +473,33 @@ def cluster_node_heartbeat():
            logger.exception('Error marking {} as lost'.format(other_inst.hostname))


@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
    """
    When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
    in a specific receptor directory. This directory on disk is a random 8 character string, e.g. qLL2JFNT
    This is also called the work unit ID in receptor, and is used in various receptor commands,
    e.g. "work results qLL2JFNT"
    After an AWX job executes, the receptor work unit directory is cleaned up by
    issuing the work release command. In some cases the release process might fail, or
    if AWX crashes during a job's execution, the work release command is never issued to begin with.
    As such, this periodic task will obtain a list of all receptor work units, and find which ones
    belong to AWX jobs that are in a completed state (status is canceled, error, or succeeded).
    This task will call "work release" on each of these work units to clean up the files on disk.
    """
    if not settings.RECEPTOR_RELEASE_WORK:
        return
    logger.debug("Checking for unreleased receptor work units")
    receptor_ctl = get_receptor_ctl()
    receptor_work_list = receptor_ctl.simple_command("work list")

    unit_ids = [id for id in receptor_work_list]
    jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
    for job in jobs_with_unreleased_receptor_units:
        logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
        receptor_ctl.simple_command(f"work release {job.work_unit_id}")


@task(queue=get_local_queuename)
def awx_k8s_reaper():
    if not settings.RECEPTOR_RELEASE_WORK:
@@ -682,48 +710,6 @@ def update_host_smart_inventory_memberships():
        smart_inventory.update_computed_fields()


@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
    #
    # NOTE: this function is not actually in use anymore,
    # but has been intentionally kept for historical purposes,
    # and to serve as an illustration if we ever need to perform
    # bulk modification/migration of event data in the future.
    #
    if 'event' not in tblname:
        return
    with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
        if acquired is False:
            return
        chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE

        def _remaining():
            try:
                cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
                return cursor.fetchone()[0]
            except ProgrammingError:
                # the table is gone (migration is unnecessary)
                return None

        with connection.cursor() as cursor:
            total_rows = _remaining()
            while total_rows:
                with transaction.atomic():
                    cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
                    last_insert_pk = cursor.fetchone()
                    if last_insert_pk is None:
                        # this means that the SELECT from the old table was
                        # empty, and there was nothing to insert (so we're done)
                        break
                    last_insert_pk = last_insert_pk[0]
                    cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
                    logger.warn(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')

            if _remaining() is None:
                cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
                logger.warn(f'{tblname} primary key migration to bigint has finished')


@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    # Delete inventory as user
@@ -771,6 +757,10 @@ def with_path_cleanup(f):
    return _wrapped


def get_receptor_ctl():
    return ReceptorControl('/var/run/receptor/receptor.sock')


class BaseTask(object):
    model = None
    event_model = None
@@ -781,6 +771,8 @@ class BaseTask(object):
        self.parent_workflow_job_id = None
        self.host_map = {}
        self.guid = GuidMiddleware.get_guid()
        self.job_created = None
        self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)

    def update_model(self, pk, _attempt=0, **updates):
        """Reload the model instance from the database and update the
@@ -1158,6 +1150,7 @@ class BaseTask(object):
        event_data.pop('parent_uuid', None)
        if self.parent_workflow_job_id:
            event_data['workflow_job_id'] = self.parent_workflow_job_id
        event_data['job_created'] = self.job_created
        if self.host_map:
            host = event_data.get('event_data', {}).get('host', '').strip()
            if host:
@@ -1191,6 +1184,37 @@ class BaseTask(object):
        if 'event_data' in event_data:
            event_data['event_data']['guid'] = self.guid

        # To prevent overwhelming the broadcast queue, skip some websocket messages
        if self.recent_event_timings:
            cpu_time = time.time()
            first_window_time = self.recent_event_timings[0]
            last_window_time = self.recent_event_timings[-1]

            if event_data.get('event') in MINIMAL_EVENTS:
                should_emit = True  # always send some types like playbook_on_stats
            elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
                should_emit = False  # exclude events with no output
            else:
                should_emit = any(
                    [
                        # if the 30th most recent websocket message was sent over 1 second ago
                        cpu_time - first_window_time > 1.0,
                        # if the very last websocket message came in over 1/30 seconds ago
                        self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
                        # if the queue is not yet full
                        len(self.recent_event_timings) != self.recent_event_timings.maxlen,
                    ]
                )

            if should_emit:
                self.recent_event_timings.append(cpu_time)
            else:
                event_data.setdefault('event_data', {})
                event_data['skip_websocket_message'] = True

        elif self.recent_event_timings.maxlen:
            self.recent_event_timings.append(time.time())

        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)
        self.event_ct += 1
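Read together, the three any() conditions above implement a sliding-window rate cap. Assuming MAX_WEBSOCKET_EVENT_RATE is 30 (the constant is defined outside this diff), the deque stores the send times of the last 30 emitted events, and a new event is emitted when any of the following holds:

    # cpu_time - first_window_time > 1.0
    #     the 30th-most-recent emit was over a second ago, so the recent
    #     rate is already under 30 events/sec
    # maxlen * (cpu_time - last_window_time) > 1.0
    #     the most recent emit was more than 1/30 sec ago, so emitting now
    #     still averages out to at most 30 events/sec
    # len(window) != maxlen
    #     fewer than 30 events have ever been emitted; nothing to throttle yet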
@@ -1244,11 +1268,17 @@ class BaseTask(object):
            for k, v in self.safe_env.items():
                if k in job_env:
                    job_env[k] = v
            self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
            from awx.main.signals import disable_activity_stream  # Circular import

            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
        elif status_data['status'] == 'error':
            result_traceback = status_data.get('result_traceback', None)
            if result_traceback:
                self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
                from awx.main.signals import disable_activity_stream  # Circular import

                with disable_activity_stream():
                    self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)

    @with_path_cleanup
    def run(self, pk, **kwargs):
@@ -1283,6 +1313,8 @@ class BaseTask(object):
        if self.instance.spawned_by_workflow:
            self.parent_workflow_job_id = self.instance.get_workflow_job().id

        self.job_created = str(self.instance.created)

        try:
            self.instance.send_notification_templates("running")
            private_data_dir = self.build_private_data_dir(self.instance)
@@ -1300,10 +1332,6 @@ class BaseTask(object):
        if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
            raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)

        # store a record of the venv used at runtime
        if hasattr(self.instance, 'custom_virtualenv'):
            self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))

        # Fetch "cached" fact data from prior runs and put on the disk
        # where ansible expects to find it
        if getattr(self.instance, 'use_fact_cache', False):
@@ -1380,8 +1408,8 @@ class BaseTask(object):
            )
        else:
            receptor_job = AWXReceptorJob(self, params)
            self.unit_id = receptor_job.unit_id
            res = receptor_job.run()
            self.unit_id = receptor_job.unit_id

            if not res:
                return
@@ -1770,6 +1798,7 @@ class RunJob(BaseTask):
            if 'update_' not in sync_metafields['job_tags']:
                sync_metafields['scm_revision'] = job_revision
            local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
            create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
            # save the associated job before calling run() so that a
            # cancel() call on the job can cancel the project update
            job = self.update_model(job.pk, project_update=local_project_sync)
@@ -2049,17 +2078,24 @@ class RunProjectUpdate(BaseTask):
            if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
                logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
                continue

            if settings.IS_K8S:
                instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
            else:
                instance_group = project_update.instance_group

            local_inv_update = inv_src.create_inventory_update(
                _eager_fields=dict(
                    launch_type='scm',
                    status='running',
                    instance_group=project_update.instance_group,
                    instance_group=instance_group,
                    execution_node=project_update.execution_node,
                    source_project_update=project_update,
                    celery_task_id=project_update.celery_task_id,
                )
            )
            try:
                create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
                inv_update_class().run(local_inv_update.id)
            except Exception:
                logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
@@ -2140,8 +2176,6 @@ class RunProjectUpdate(BaseTask):
        if not os.path.exists(settings.PROJECTS_ROOT):
            os.mkdir(settings.PROJECTS_ROOT)
        project_path = instance.project.get_project_path(check_if_exists=False)
        if not os.path.exists(project_path):
            os.makedirs(project_path)  # used as container mount

        self.acquire_lock(instance)

@@ -2154,6 +2188,9 @@ class RunProjectUpdate(BaseTask):
        else:
            self.original_branch = git_repo.active_branch

        if not os.path.exists(project_path):
            os.makedirs(project_path)  # used as container mount

        stage_path = os.path.join(instance.get_cache_path(), 'stage')
        if os.path.exists(stage_path):
            logger.warning('{0} unexpectedly existed before update'.format(stage_path))
@@ -2391,6 +2428,12 @@ class RunInventoryUpdate(BaseTask):
                paths = [config_values[config_setting]] + paths
            paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
            env[env_key] = os.pathsep.join(paths)
        if 'ANSIBLE_COLLECTIONS_PATHS' in env:
            paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
        else:
            paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
        paths.append('/usr/share/automation-controller/collections')
        env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)

        return env

@@ -2883,7 +2926,7 @@ class TransmitterThread(threading.Thread):


class AWXReceptorJob:
    def __init__(self, task=None, runner_params=None):
    def __init__(self, task, runner_params=None):
        self.task = task
        self.runner_params = runner_params
        self.unit_id = None
@@ -2894,7 +2937,7 @@ class AWXReceptorJob:

    def run(self):
        # We establish a connection to the Receptor socket
        receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
        receptor_ctl = get_receptor_ctl()

        try:
            return self._run_internal(receptor_ctl)
@@ -2916,6 +2959,7 @@ class AWXReceptorJob:
        # in the right side of our socketpair for reading.
        result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
        self.unit_id = result['unitid']
        self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])

        sockin.close()
        sockout.close()
@@ -2960,7 +3004,8 @@ class AWXReceptorJob:
        if state_name == 'Succeeded':
            return res

        raise RuntimeError(detail)
        if self.task.instance.result_traceback is None:
            raise RuntimeError(detail)

        return res

@@ -3030,18 +3075,11 @@ class AWXReceptorJob:
        result = namedtuple('result', ['status', 'rc'])
        return result('canceled', 1)

        if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
            self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
            self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)

        time.sleep(1)

    @property
    def pod_definition(self):
        if self.task:
            ee = self.task.instance.resolve_execution_environment()
        else:
            ee = get_default_execution_environment()
        ee = self.task.instance.execution_environment

        default_pod_spec = get_default_pod_spec()


@@ -3,7 +3,7 @@ import pytest
from unittest import mock
from contextlib import contextmanager

from awx.main.models import Credential
from awx.main.models import Credential, UnifiedJob
from awx.main.tests.factories import (
    create_organization,
    create_job_template,
@@ -81,7 +81,7 @@ def instance_group_factory():

@pytest.fixture
def default_instance_group(instance_factory, instance_group_factory):
    return create_instance_group("tower", instances=[create_instance("hostA")])
    return create_instance_group("default", instances=[create_instance("hostA")])


@pytest.fixture
@@ -149,3 +149,29 @@ def mock_external_credential_input_sources():
    # test it explicitly.
    with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
        yield _fixture


@pytest.fixture(scope='session', autouse=True)
def mock_has_unpartitioned_events():
    # has_unpartitioned_events determines if there are any events still
    # left in the old, unpartitioned job events table. In order to work,
    # this method looks up when the partition migration occurred. When
    # Django's unit tests run, however, there will be no record of the migration.
    # We mock this out to circumvent the migration query.
    with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
        yield _fixture


@pytest.fixture(scope='session', autouse=True)
def mock_get_event_queryset_no_job_created():
    """
    SQLite friendly since partitions aren't supported. Do not add the faked job_created field
    to the filter. If we do, it will result in an SQL query for the job_created field. That
    field does not actually exist in a non-partition scenario.
    """

    def event_qs(self):
        kwargs = {self.event_parent_key: self.id}
        return self.event_class.objects.filter(**kwargs)

    with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
        yield _fixture

File diff suppressed because one or more lines are too long
@@ -1,14 +0,0 @@
import json
import os


dir_path = os.path.dirname(os.path.realpath(__file__))

with open(os.path.join(dir_path, 'insights_hosts.json')) as data_file:
    TEST_INSIGHTS_HOSTS = json.load(data_file)

with open(os.path.join(dir_path, 'insights.json')) as data_file:
    TEST_INSIGHTS_PLANS = json.load(data_file)

with open(os.path.join(dir_path, 'insights_remediations.json')) as data_file:
    TEST_INSIGHTS_REMEDIATIONS = json.load(data_file)['data']
@@ -1,13 +0,0 @@
{
    "total": 1,
    "count": 1,
    "page": 1,
    "per_page": 50,
    "results": [
        {
            "id": "11111111-1111-1111-1111-111111111111",
            "insights_id": "22222222-2222-2222-2222-222222222222",
            "updated": "2019-03-19T21:59:09.213151-04:00"
        }
    ]
}
@@ -1,33 +0,0 @@
{
    "data": [
        {
            "id": "9197ba55-0abc-4028-9bbe-269e530f8bd5",
            "name": "Fix Critical CVEs",
            "created_by": {
                "username": "jharting@redhat.com",
                "first_name": "Jozef",
                "last_name": "Hartinger"
            },
            "created_at": "2018-12-05T08:19:36.641Z",
            "updated_by": {
                "username": "jharting@redhat.com",
                "first_name": "Jozef",
                "last_name": "Hartinger"
            },
            "updated_at": "2018-12-05T08:19:36.641Z",
            "issue_count": 0,
            "system_count": 0,
            "needs_reboot": true
        }
    ],
    "meta": {
        "count": 0,
        "total": 0
    },
    "links": {
        "first": null,
        "last": null,
        "next": null,
        "previous": null
    }
}
awx/main/tests/data/inventory/plugins/controller/env.json (new file)
@@ -0,0 +1,13 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_PASSWORD": "fooo",
    "TOWER_USERNAME": "fooo",
    "TOWER_OAUTH_TOKEN": "",
    "TOWER_VERIFY_SSL": "False",
    "CONTROLLER_HOST": "https://foo.invalid",
    "CONTROLLER_PASSWORD": "fooo",
    "CONTROLLER_USERNAME": "fooo",
    "CONTROLLER_OAUTH_TOKEN": "",
    "CONTROLLER_VERIFY_SSL": "False"
}
awx/main/tests/data/inventory/plugins/insights/env.json (new file)
@@ -0,0 +1,5 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "INSIGHTS_USER": "fooo",
    "INSIGHTS_PASSWORD": "fooo"
}
@@ -1,8 +0,0 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_PASSWORD": "fooo",
    "TOWER_USERNAME": "fooo",
    "TOWER_OAUTH_TOKEN": "",
    "TOWER_VERIFY_SSL": "False"
}
@@ -36,7 +36,7 @@ def mk_instance(persisted=True, hostname='instance.example.org'):
     return Instance.objects.get_or_create(uuid=settings.SYSTEM_UUID, hostname=hostname)[0]


-def mk_instance_group(name='tower', instance=None, minimum=0, percentage=0):
+def mk_instance_group(name='default', instance=None, minimum=0, percentage=0):
     ig, status = InstanceGroup.objects.get_or_create(name=name, policy_instance_minimum=minimum, policy_instance_percentage=percentage)
     if instance is not None:
         if type(instance) == list:
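The mk_instance_group factory above leans on Django's get_or_create, which makes repeated fixture calls with the same name idempotent. A database-free sketch of that contract (illustrative only, not AWX code):

def get_or_create(registry, name, **defaults):
    # Return (obj, created): reuse an existing entry or create a new one.
    if name in registry:
        return registry[name], False
    registry[name] = {'name': name, **defaults}
    return registry[name], True


groups = {}
ig, created = get_or_create(groups, 'default', policy_instance_minimum=0)
assert created is True
same_ig, created = get_or_create(groups, 'default', policy_instance_minimum=0)
assert same_ig is ig and created is False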
@@ -16,6 +16,65 @@ def app_post_migration(sender, app_config, **kwargs):
     if 'result_stdout_text' not in cols:
         cur.execute('ALTER TABLE main_unifiedjob ADD COLUMN result_stdout_text TEXT')

+    # we also need to make sure that the `_unpartitioned_<event>` tables are present.
+    # these tables represent old job event tables that were renamed / preserved during a
+    # migration which introduces partitioned event tables
+    # https://github.com/ansible/awx/issues/9039
+    for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
+        table_entries = cur.execute(f'SELECT count(*) from sqlite_master WHERE tbl_name="_unpartitioned_{tblname}";').fetchone()[0]
+        if table_entries > 0:
+            continue
+        if tblname == 'main_adhoccommandevent':
+            unique_columns = """host_name character varying(1024) NOT NULL,
+                                event character varying(100) NOT NULL,
+                                failed boolean NOT NULL,
+                                changed boolean NOT NULL,
+                                host_id integer,
+                                ad_hoc_command_id integer NOT NULL
+                             """
+        elif tblname == 'main_inventoryupdateevent':
+            unique_columns = "inventory_update_id integer NOT NULL"
+        elif tblname == 'main_jobevent':
+            unique_columns = """event character varying(100) NOT NULL,
+                                failed boolean NOT NULL,
+                                changed boolean NOT NULL,
+                                host_name character varying(1024) NOT NULL,
+                                play character varying(1024) NOT NULL,
+                                role character varying(1024) NOT NULL,
+                                task character varying(1024) NOT NULL,
+                                host_id integer,
+                                job_id integer NOT NULL,
+                                playbook character varying(1024) NOT NULL
+                             """
+        elif tblname == 'main_projectupdateevent':
+            unique_columns = """event character varying(100) NOT NULL,
+                                failed boolean NOT NULL,
+                                changed boolean NOT NULL,
+                                playbook character varying(1024) NOT NULL,
+                                play character varying(1024) NOT NULL,
+                                role character varying(1024) NOT NULL,
+                                task character varying(1024) NOT NULL,
+                                project_update_id integer NOT NULL
+                             """
+        elif tblname == 'main_systemjobevent':
+            unique_columns = "system_job_id integer NOT NULL"
+
+        cur.execute(
+            f"""CREATE TABLE _unpartitioned_{tblname} (
+            id bigint NOT NULL,
+            created timestamp with time zone NOT NULL,
+            modified timestamp with time zone NOT NULL,
+            event_data text NOT NULL,
+            counter integer NOT NULL,
+            end_line integer NOT NULL,
+            start_line integer NOT NULL,
+            stdout text NOT NULL,
+            uuid character varying(1024) NOT NULL,
+            verbosity integer NOT NULL,
+            {unique_columns});
+            """
+        )
+

 if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
     post_migrate.connect(app_post_migration, sender=apps.get_app_config('main'))
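The hunk above probes sqlite_master to decide whether a stand-in _unpartitioned_* table still needs to be created. A self-contained sketch of the same existence check against an in-memory SQLite database (the lone id column stands in for the full schema above):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
tblname = 'main_jobevent'
# sqlite_master lists every table in the database; a zero count means ours is missing.
count = cur.execute('SELECT count(*) FROM sqlite_master WHERE tbl_name = ?', (f'_unpartitioned_{tblname}',)).fetchone()[0]
if count == 0:
    cur.execute(f'CREATE TABLE _unpartitioned_{tblname} (id bigint NOT NULL)')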
@@ -12,7 +12,6 @@ def test_empty():
         "active_sessions": 0,
         "active_host_count": 0,
         "credential": 0,
-        "custom_virtualenvs": 0,  # dev env ansible3
         "host": 0,
         "inventory": 0,
         "inventories": {"normal": 0, "smart": 0},
@@ -21,7 +21,6 @@ EXPECTED_VALUES = {
     'awx_sessions_total': 0.0,
     'awx_sessions_total': 0.0,
     'awx_sessions_total': 0.0,
-    'awx_custom_virtualenvs_total': 0.0,
     'awx_running_jobs_total': 0.0,
     'awx_instance_capacity': 100.0,
     'awx_instance_consumed_capacity': 0.0,
@@ -5,7 +5,7 @@ import pytest

 from django.utils.encoding import smart_str

-from awx.main.models import AdHocCommand, Credential, CredentialType, Job, JobTemplate, Inventory, InventorySource, Project, WorkflowJobNode
+from awx.main.models import AdHocCommand, Credential, CredentialType, Job, JobTemplate, InventorySource, Project, WorkflowJobNode
 from awx.main.utils import decrypt_field
 from awx.api.versioning import reverse

@@ -652,6 +652,31 @@ def test_satellite6_create_ok(post, organization, admin):
     assert decrypt_field(cred, 'password') == 'some_password'


+#
+# RH Insights Credentials
+#
+@pytest.mark.django_db
+def test_insights_create_ok(post, organization, admin):
+    params = {
+        'credential_type': 1,
+        'name': 'Best credential ever',
+        'inputs': {
+            'username': 'some_username',
+            'password': 'some_password',
+        },
+    }
+    sat6 = CredentialType.defaults['insights']()
+    sat6.save()
+    params['organization'] = organization.id
+    response = post(reverse('api:credential_list'), params, admin)
+    assert response.status_code == 201
+
+    assert Credential.objects.count() == 1
+    cred = Credential.objects.all()[:1].get()
+    assert cred.inputs['username'] == 'some_username'
+    assert decrypt_field(cred, 'password') == 'some_password'
+
+
 #
 # AWS Credentials
 #
@@ -832,7 +857,6 @@ def test_field_removal(put, organization, admin, credentialtype_ssh):
     'relation, related_obj',
     [
         ['ad_hoc_commands', AdHocCommand()],
-        ['insights_inventories', Inventory()],
         ['unifiedjobs', Job()],
         ['unifiedjobtemplates', JobTemplate()],
         ['unifiedjobtemplates', InventorySource(source='ec2')],
@@ -75,7 +75,7 @@ def test_update_as_unauthorized_xfail(patch, delete):


 @pytest.mark.django_db
-def test_update_managed_by_tower_xfail(patch, delete, admin):
+def test_update_managed_xfail(patch, delete, admin):
     ssh = CredentialType.defaults['ssh']()
     ssh.save()
     url = reverse('api:credential_type_detail', kwargs={'pk': ssh.pk})
@@ -161,19 +161,19 @@ def test_create_as_admin(get, post, admin):
     assert response.data['results'][0]['name'] == 'Custom Credential Type'
     assert response.data['results'][0]['inputs'] == {}
     assert response.data['results'][0]['injectors'] == {}
-    assert response.data['results'][0]['managed_by_tower'] is False
+    assert response.data['results'][0]['managed'] is False


 @pytest.mark.django_db
-def test_create_managed_by_tower_readonly(get, post, admin):
+def test_create_managed_readonly(get, post, admin):
     response = post(
-        reverse('api:credential_type_list'), {'kind': 'cloud', 'name': 'Custom Credential Type', 'inputs': {}, 'injectors': {}, 'managed_by_tower': True}, admin
+        reverse('api:credential_type_list'), {'kind': 'cloud', 'name': 'Custom Credential Type', 'inputs': {}, 'injectors': {}, 'managed': True}, admin
     )
     assert response.status_code == 201

     response = get(reverse('api:credential_type_list'), admin)
     assert response.data['count'] == 1
-    assert response.data['results'][0]['managed_by_tower'] is False
+    assert response.data['results'][0]['managed'] is False


 @pytest.mark.django_db
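POSTing managed=True yet reading managed=False back, as the test above asserts, is the classic behavior of a read-only serializer field: client input for it is silently discarded. A minimal Django REST Framework sketch of that pattern (an illustrative serializer, assuming DRF is installed; not the actual AWX serializer):

from django.conf import settings

settings.configure()  # minimal standalone config; a real project configures Django properly

from rest_framework import serializers


class CredentialTypeSketchSerializer(serializers.Serializer):
    name = serializers.CharField()
    # read_only: the server controls this value; client input can never set it.
    managed = serializers.BooleanField(read_only=True)


s = CredentialTypeSketchSerializer(data={'name': 'Custom Credential Type', 'managed': True})
s.is_valid(raise_exception=True)
assert 'managed' not in s.validated_data  # the read-only input was silently dropped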
@@ -16,7 +16,7 @@ def test_job_events_sublist_truncation(get, organization_factory, job_template_f
     objs = organization_factory("org", superusers=['admin'])
     jt = job_template_factory("jt", organization=objs.organization, inventory='test_inv', project='test_proj').job_template
     job = jt.create_unified_job()
-    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
+    JobEvent.create_from_data(job_id=job.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025, job_created=job.created).save()

     url = reverse('api:job_job_events_list', kwargs={'pk': job.pk})
     if not truncate:
@@ -38,7 +38,7 @@ def test_ad_hoc_events_sublist_truncation(get, organization_factory, job_templat
     objs = organization_factory("org", superusers=['admin'])
     adhoc = AdHocCommand()
     adhoc.save()
-    AdHocCommandEvent.create_from_data(ad_hoc_command_id=adhoc.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025).save()
+    AdHocCommandEvent.create_from_data(ad_hoc_command_id=adhoc.pk, uuid='abc123', event='runner_on_start', stdout='a' * 1025, job_created=adhoc.created).save()

     url = reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': adhoc.pk})
     if not truncate:
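The job_created argument threaded through these event constructors reflects the move to partitioned event tables: each event row now carries its parent job's creation time so it can be routed to the correct partition. A hedged sketch of deriving an hourly partition label from that timestamp (the actual AWX partitioning scheme may differ):

from datetime import datetime


def partition_label(tblname: str, job_created: datetime) -> str:
    # One partition per hour of job creation, e.g. main_jobevent_20210301_14.
    return f"{tblname}_{job_created.strftime('%Y%m%d_%H')}"


assert partition_label('main_jobevent', datetime(2021, 3, 1, 14, 30)) == 'main_jobevent_20210301_14'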
@@ -1,145 +0,0 @@
-from collections import namedtuple
-
-import pytest
-import requests
-
-from awx.api.versioning import reverse
-
-
-@pytest.mark.django_db
-class TestHostInsights:
-    def test_insights_bad_host(self, get, hosts, user, mocker):
-        mocker.patch.object(requests.Session, 'get')
-
-        host = hosts(host_count=1)[0]
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'] == 'This host is not recognized as an Insights host.'
-        assert response.status_code == 404
-
-    def test_insights_host_missing_from_insights(self, get, hosts, insights_credential, user, mocker):
-        class Response:
-            status_code = 200
-            content = "{'results': []}"
-
-            def json(self):
-                return {'results': []}
-
-        mocker.patch.object(requests.Session, 'get', return_value=Response())
-
-        host = hosts(host_count=1)[0]
-        host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
-        host.inventory.insights_credential = insights_credential
-        host.inventory.save()
-        host.save()
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'] == ('Could not translate Insights system ID 123e4567-e89b-12d3-a456-426655440000' ' into an Insights platform ID.')
-        assert response.status_code == 404
-
-    def test_insights_no_credential(self, get, hosts, user, mocker):
-        mocker.patch.object(requests.Session, 'get')
-
-        host = hosts(host_count=1)[0]
-        host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
-        host.save()
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'] == 'The Insights Credential for "test-inv" was not found.'
-        assert response.status_code == 404
-
-    @pytest.mark.parametrize(
-        "status_code, exception, error, message",
-        [
-            (
-                502,
-                requests.exceptions.SSLError,
-                'SSLError while trying to connect to https://myexample.com/whocares/me/',
-                None,
-            ),
-            (
-                504,
-                requests.exceptions.Timeout,
-                'Request to https://myexample.com/whocares/me/ timed out.',
-                None,
-            ),
-            (502, requests.exceptions.RequestException, 'booo!', 'Unknown exception booo! while trying to GET https://myexample.com/whocares/me/'),
-        ],
-    )
-    def test_insights_exception(self, get, hosts, insights_credential, user, mocker, status_code, exception, error, message):
-        mocker.patch.object(requests.Session, 'get', side_effect=exception(error))
-
-        host = hosts(host_count=1)[0]
-        host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
-        host.inventory.insights_credential = insights_credential
-        host.inventory.save()
-        host.save()
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'] == message or error
-        assert response.status_code == status_code
-
-    def test_insights_unauthorized(self, get, hosts, insights_credential, user, mocker):
-        Response = namedtuple('Response', 'status_code content')
-        mocker.patch.object(requests.Session, 'get', return_value=Response(401, 'mock 401 err msg'))
-
-        host = hosts(host_count=1)[0]
-        host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
-        host.inventory.insights_credential = insights_credential
-        host.inventory.save()
-        host.save()
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'] == ("Unauthorized access. Please check your Insights Credential username and password.")
-        assert response.status_code == 502
-
-    def test_insights_bad_status(self, get, hosts, insights_credential, user, mocker):
-        Response = namedtuple('Response', 'status_code content')
-        mocker.patch.object(requests.Session, 'get', return_value=Response(500, 'mock 500 err msg'))
-
-        host = hosts(host_count=1)[0]
-        host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
-        host.inventory.insights_credential = insights_credential
-        host.inventory.save()
-        host.save()
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'].startswith("Failed to access the Insights API at URL")
-        assert "Server responded with 500 status code and message mock 500 err msg" in response.data['error']
-        assert response.status_code == 502
-
-    def test_insights_bad_json(self, get, hosts, insights_credential, user, mocker):
-        class Response:
-            status_code = 200
-            content = 'booo!'
-
-            def json(self):
-                raise ValueError("we do not care what this is")
-
-        mocker.patch.object(requests.Session, 'get', return_value=Response())
-
-        host = hosts(host_count=1)[0]
-        host.insights_system_id = '123e4567-e89b-12d3-a456-426655440000'
-        host.inventory.insights_credential = insights_credential
-        host.inventory.save()
-        host.save()
-
-        url = reverse('api:host_insights', kwargs={'pk': host.pk})
-        response = get(url, user('admin', True))
-
-        assert response.data['error'].startswith("Expected JSON response from Insights at URL")
-        assert 'insights_id=123e4567-e89b-12d3-a456-426655440000' in response.data['error']
-        assert response.data['error'].endswith("but instead got booo!")
-        assert response.status_code == 502
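Every test in the file deleted above stubs the outbound Insights HTTP call by patching requests.Session.get. A standalone sketch of the same technique with unittest.mock in place of the pytest-mock mocker fixture (assumes only that the requests package is installed):

from unittest import mock

import requests

fake_response = mock.Mock(status_code=200)
fake_response.json.return_value = {'results': []}

with mock.patch.object(requests.Session, 'get', return_value=fake_response):
    # No real network I/O happens; the patched method hands back our stand-in.
    resp = requests.Session().get('https://example.invalid/whocares/me/')
    assert resp.status_code == 200
    assert resp.json() == {'results': []}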
@@ -13,7 +13,7 @@ from awx.main.utils import camelcase_to_underscore

 @pytest.fixture
 def tower_instance_group():
-    ig = InstanceGroup(name='tower')
+    ig = InstanceGroup(name='default')
     ig.save()
     return ig

@@ -105,7 +105,9 @@ def test_delete_instance_group_jobs_running(delete, instance_group_jobs_running,


 @pytest.mark.django_db
-def test_delete_rename_tower_instance_group_prevented(delete, options, tower_instance_group, instance_group, user, patch, execution_environment):
+def test_delete_rename_tower_instance_group_prevented(
+    delete, options, tower_instance_group, instance_group, user, patch, control_plane_execution_environment, default_job_execution_environment
+):
     url = reverse("api:instance_group_detail", kwargs={'pk': tower_instance_group.pk})
     super_user = user('bob', True)

@@ -117,8 +119,8 @@ def test_delete_rename_tower_instance_group_prevented(delete, options, tower_ins
     assert 'GET' in resp.data['actions']
     assert 'PUT' in resp.data['actions']

-    # Rename 'tower' instance group denied
-    patch(url, {'name': 'tower_prime'}, super_user, expect=400)
+    # Rename 'default' instance group denied
+    patch(url, {'name': 'default_prime'}, super_user, expect=400)

     # Rename, other instance group OK
     url = reverse("api:instance_group_detail", kwargs={'pk': instance_group.pk})
@@ -592,23 +592,3 @@ class TestControlledBySCM:
             rando,
             expect=403,
         )
-
-
-@pytest.mark.django_db
-class TestInsightsCredential:
-    def test_insights_credential(self, patch, insights_inventory, admin_user, insights_credential):
-        patch(insights_inventory.get_absolute_url(), {'insights_credential': insights_credential.id}, admin_user, expect=200)
-
-    def test_insights_credential_protection(self, post, patch, insights_inventory, alice, insights_credential):
-        insights_inventory.organization.admin_role.members.add(alice)
-        insights_inventory.admin_role.members.add(alice)
-        post(
-            reverse('api:inventory_list'),
-            {"name": "test", "organization": insights_inventory.organization.id, "insights_credential": insights_credential.id},
-            alice,
-            expect=403,
-        )
-        patch(insights_inventory.get_absolute_url(), {'insights_credential': insights_credential.id}, alice, expect=403)
-
-    def test_non_insights_credential(self, patch, insights_inventory, admin_user, scm_credential):
-        patch(insights_inventory.get_absolute_url(), {'insights_credential': scm_credential.id}, admin_user, expect=400)
@@ -4,6 +4,7 @@ from unittest.mock import patch
 from urllib.parse import urlencode

 from awx.main.models.inventory import Group, Host
+from awx.main.models.ad_hoc_commands import AdHocCommand
 from awx.api.pagination import Pagination
 from awx.api.versioning import reverse

@@ -61,3 +62,46 @@ def test_pagination_cap_page_size(get, admin, inventory):

     assert jdata['previous'] == host_list_url({'page': '1', 'page_size': '5'})
     assert jdata['next'] == host_list_url({'page': '3', 'page_size': '5'})
+
+
+class TestUnifiedJobEventPagination:
+    @pytest.fixture
+    def ad_hoc_command(self, ad_hoc_command_factory):
+        return ad_hoc_command_factory()
+
+    def _test_unified_job(self, get, admin, template, job_attribute, list_endpoint):
+        if isinstance(template, AdHocCommand):
+            job = template
+        else:
+            job = template.create_unified_job()
+        kwargs = {job_attribute: job.pk}
+        for i in range(20):
+            job.event_class.create_from_data(**kwargs).save()
+
+        url = reverse(f'api:{list_endpoint}', kwargs={'pk': job.pk}) + '?limit=7'
+        resp = get(url, user=admin, expect=200)
+
+        assert 'count' not in resp.data
+        assert 'next' not in resp.data
+        assert 'previous' not in resp.data
+        assert len(resp.data['results']) == 7
+
+    @pytest.mark.django_db
+    def test_job(self, get, admin, job_template):
+        self._test_unified_job(get, admin, job_template, 'job_id', 'job_job_events_list')
+
+    @pytest.mark.django_db
+    def test_project_update(self, get, admin, project):
+        self._test_unified_job(get, admin, project, 'project_update_id', 'project_update_events_list')
+
+    @pytest.mark.django_db
+    def test_inventory_update(self, get, admin, inventory_source):
+        self._test_unified_job(get, admin, inventory_source, 'inventory_update_id', 'inventory_update_events_list')
+
+    @pytest.mark.django_db
+    def test_system_job(self, get, admin, system_job_template):
+        self._test_unified_job(get, admin, system_job_template, 'system_job_id', 'system_job_events_list')
+
+    @pytest.mark.django_db
+    def test_adhoc_command(self, get, admin, ad_hoc_command):
+        self._test_unified_job(get, admin, ad_hoc_command, 'ad_hoc_command_id', 'ad_hoc_command_ad_hoc_command_events_list')
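The new tests assert that event sublists fetched with ?limit=N come back as a bare results list with no count/next/previous bookkeeping, which spares the API a COUNT(*) over very large event tables. A minimal sketch of that limit-only pagination contract (illustrative, not the actual AWX paginator):

from typing import Any, Dict, List


def paginate_limit_only(rows: List[Any], limit: int = 25, max_limit: int = 200) -> Dict[str, List[Any]]:
    # Clamp the requested limit and slice; deliberately no count/next/previous.
    limit = max(1, min(limit, max_limit))
    return {'results': rows[:limit]}


page = paginate_limit_only(list(range(20)), limit=7)
assert len(page['results']) == 7 and 'count' not in page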
@@ -3,6 +3,7 @@
 import base64
 import json
 import re
+from datetime import datetime

 from django.conf import settings
 from django.utils.encoding import smart_str
@@ -26,16 +27,22 @@ from awx.main.models import (
 )


-def _mk_project_update():
+def _mk_project_update(created=None):
+    kwargs = {}
+    if created:
+        kwargs['created'] = created
     project = Project()
     project.save()
-    return ProjectUpdate(project=project)
+    return ProjectUpdate(project=project, **kwargs)


-def _mk_inventory_update():
+def _mk_inventory_update(created=None):
+    kwargs = {}
+    if created:
+        kwargs['created'] = created
     source = InventorySource(source='ec2')
     source.save()
-    iu = InventoryUpdate(inventory_source=source, source='e2')
+    iu = InventoryUpdate(inventory_source=source, source='e2', **kwargs)
     return iu

@@ -139,10 +146,11 @@ def test_stdout_line_range(sqlite_copy_expert, Parent, Child, relation, view, ge

 @pytest.mark.django_db
 def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):
-    job = SystemJob()
+    created = datetime.utcnow()
+    job = SystemJob(created=created)
     job.save()
     for i in range(3):
-        SystemJobEvent(system_job=job, stdout='Testing {}\n'.format(i), start_line=i).save()
+        SystemJobEvent(system_job=job, stdout='Testing {}\n'.format(i), start_line=i, job_created=created).save()
     url = reverse('api:system_job_detail', kwargs={'pk': job.pk})
     response = get(url, user=admin, expect=200)
     assert smart_str(response.data['result_stdout']).splitlines() == ['Testing %d' % i for i in range(3)]
@@ -150,11 +158,12 @@ def test_text_stdout_from_system_job_events(sqlite_copy_expert, get, admin):

 @pytest.mark.django_db
 def test_text_stdout_with_max_stdout(sqlite_copy_expert, get, admin):
-    job = SystemJob()
+    created = datetime.utcnow()
+    job = SystemJob(created=created)
     job.save()
     total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
     large_stdout = 'X' * total_bytes
-    SystemJobEvent(system_job=job, stdout=large_stdout, start_line=0).save()
+    SystemJobEvent(system_job=job, stdout=large_stdout, start_line=0, job_created=created).save()
     url = reverse('api:system_job_detail', kwargs={'pk': job.pk})
     response = get(url, user=admin, expect=200)
     assert response.data['result_stdout'] == (
@@ -176,11 +185,12 @@ def test_text_stdout_with_max_stdout(sqlite_copy_expert, get, admin):
 @pytest.mark.parametrize('fmt', ['txt', 'ansi'])
 @mock.patch('awx.main.redact.UriCleaner.SENSITIVE_URI_PATTERN', mock.Mock(**{'search.return_value': None}))  # really slow for large strings
 def test_max_bytes_display(sqlite_copy_expert, Parent, Child, relation, view, fmt, get, admin):
-    job = Parent()
+    created = datetime.utcnow()
+    job = Parent(created=created)
     job.save()
     total_bytes = settings.STDOUT_MAX_BYTES_DISPLAY + 1
     large_stdout = 'X' * total_bytes
-    Child(**{relation: job, 'stdout': large_stdout, 'start_line': 0}).save()
+    Child(**{relation: job, 'stdout': large_stdout, 'start_line': 0, 'job_created': created}).save()
     url = reverse(view, kwargs={'pk': job.pk})

     response = get(url + '?format={}'.format(fmt), user=admin, expect=200)
@@ -257,10 +267,11 @@ def test_text_with_unicode_stdout(sqlite_copy_expert, Parent, Child, relation, v

 @pytest.mark.django_db
 def test_unicode_with_base64_ansi(sqlite_copy_expert, get, admin):
-    job = Job()
+    created = datetime.utcnow()
+    job = Job(created=created)
     job.save()
     for i in range(3):
-        JobEvent(job=job, stdout='オ{}\n'.format(i), start_line=i).save()
+        JobEvent(job=job, stdout='オ{}\n'.format(i), start_line=i, job_created=created).save()
     url = reverse('api:job_stdout', kwargs={'pk': job.pk}) + '?format=json&content_encoding=base64'

     response = get(url, user=admin, expect=200)
Some files were not shown because too many files have changed in this diff.