Mirror of https://github.com/ansible/awx.git
synced 2026-02-11 14:44:44 -03:30
Compare commits
788 Commits
Commit range: c09cad3e6d through 26154d22d3 (788 commits, listed by abbreviated SHA only; the author, message, and date columns are empty in this mirror).
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 2 changes

@@ -5,7 +5,7 @@ about: Create a report to help us improve
 ---
 <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

-- http://webchat.freenode.net/?channels=ansible-awx
+- http://web.libera.chat/?channels=#ansible-awx
 - https://groups.google.com/forum/#!forum/awx-project

 We have to limit this because of limited volunteer time to respond to issues! -->
.github/ISSUE_TEMPLATE/feature_request.md (vendored): 2 changes

@@ -5,7 +5,7 @@ about: Suggest an idea for this project
 ---
 <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

-- http://webchat.freenode.net/?channels=ansible-awx
+- http://web.libera.chat/?channels=#ansible-awx
 - https://groups.google.com/forum/#!forum/awx-project

 We have to limit this because of limited volunteer time to respond to issues! -->
.github/dependabot.yml (vendored): 7 changes

@@ -1,7 +0,0 @@
----
-version: 2
-updates:
-  - package-ecosystem: "pip"
-    directory: "/requirements"
-    schedule:
-      interval: "monthly"
CHANGELOG.md: 38 changes

@@ -1,6 +1,42 @@
 # Changelog

-This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.
+# 19.2.2 (June 28, 2021)
+
+- Fixed bug where symlinks pointing to directories were not preserved (https://github.com/ansible/ansible-runner/pull/736)
+- Various bugfixes found during testing (https://github.com/ansible/awx/pull/10532)
+
+# 19.2.1 (June 17, 2021)
+
+- There are now 2 default Instance Groups: 'controlplane' and 'default' (https://github.com/ansible/awx/pull/10324)
+- Removed deprecated modules: `tower_send`, `tower_receive`, `tower_workflow_template` (https://github.com/ansible/awx/pull/9980)
+- Improved UI performance when a large amount of events are being emitted by jobs (https://github.com/ansible/awx/pull/10053)
+- Settings UI Revert All button now issues a DELETE instead of PATCHing all fields (https://github.com/ansible/awx/pull/10376)
+- Fixed a bug with the schedule date/time picker in Firefox (https://github.com/ansible/awx/pull/10291)
+- UI now preselects the system default Galaxy credential when creating a new organization (https://github.com/ansible/awx/pull/10395)
+- Added favicon (https://github.com/ansible/awx/pull/10388)
+- Removed `not` option from smart inventory host filter search as it's not supported by the API (https://github.com/ansible/awx/pull/10380)
+- Added button to allow user to refetch project revision after project sync has finished (https://github.com/ansible/awx/pull/10334)
+- Fixed bug where extraneous CONFIG requests were made on logout (https://github.com/ansible/awx/pull/10379)
+- Fixed bug where users were unable to cancel inventory syncs (https://github.com/ansible/awx/pull/10346)
+- Added missing dashboard graph filters (https://github.com/ansible/awx/pull/10349)
+- Added support for typing in to single select lookup form fields (https://github.com/ansible/awx/pull/10257)
+- Fixed various bugs related to user sessions (https://github.com/ansible/awx/pull/9908)
+- Fixed bug where sorting in modals would close the modal (https://github.com/ansible/awx/pull/10215)
+- Added support for Red Hat Insights as an inventory source (https://github.com/ansible/awx/pull/8650)
+- Fixed bugs when selecting items in a list then sorting/paginating (https://github.com/ansible/awx/pull/10329)
+
+# 19.2.0 (June 1, 2021)
+- Fixed race condition that would sometimes cause jobs to error out at the very end of an otherwise successful run (https://github.com/ansible/receptor/pull/328)
+- Fixes bug where users were unable to click on text next to checkboxes in modals (https://github.com/ansible/awx/pull/10279)
+- Have the project update playbook warn if role/collection syncing is disabled. (https://github.com/ansible/awx/pull/10068)
+- Move irc references to point to irc.libera.chat (https://github.com/ansible/awx/pull/10295)
+- Fixes bug where activity stream changes were displaying as [object object] (https://github.com/ansible/awx/pull/10267)
+- Update awxkit to enable export of Galaxy credentials associated to organizations (https://github.com/ansible/awx/pull/10271)
+- Bump receptor and receptorctl versions to 1.0.0a2 (https://github.com/ansible/awx/pull/10261)
+- Add the ability to disable local authentication (https://github.com/ansible/awx/pull/10102)
+- Show error if no Execution Environment is found on project sync/job run (https://github.com/ansible/awx/pull/10183)
+- Allow for editing and deleting managed_by_tower EEs from API/UI (https://github.com/ansible/awx/pull/10173)
+

 # 19.1.0 (May 1, 2021)

@@ -2,7 +2,7 @@

 Hi there! We're excited to have you as a contributor.

-Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on webchat.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
+Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.libera.chat, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).

 ## Table of contents

@@ -28,7 +28,7 @@ Have questions about this document or anything not covered here? Come chat with
 - You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
 - Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
 - If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt
-- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on webchat.freenode.net, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
+- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.libera.chat, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
 - We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)

 ## Setting up your development environment

@@ -114,7 +114,7 @@ Fixing bugs, adding translations, and updating the documentation are always appr

 **NOTE**

-> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on webchat.freenode.net, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
+> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).

 **NOTE**

@@ -136,7 +136,7 @@ Here are a few things you can do to help the visibility of your change, and incr
 * Make the smallest change possible
 * Write good commit messages. See [How to write a Git commit message](https://chris.beams.io/posts/git-commit/).

-It's generally a good idea to discuss features with us first by engaging us in the `#ansible-awx` channel on webchat.freenode.net, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
+It's generally a good idea to discuss features with us first by engaging us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).

 We like to keep our commit history clean, and will require resubmission of pull requests that contain merge commits. Use `git pull --rebase`, rather than
 `git pull`, and `git rebase`, rather than `git merge`.
INSTALL.md: 108 changes

@@ -3,12 +3,6 @@ Table of Contents

 * [Installing AWX](#installing-awx)
 * [The AWX Operator](#the-awx-operator)
-* [Quickstart with minikube](#quickstart-with-minikube)
-* [Starting minikube](#starting-minikube)
-* [Deploying the AWX Operator](#deploying-the-awx-operator)
-* [Verifying the Operator Deployment](#verifying-the-operator-deployment)
-* [Deploy AWX](#deploy-awx)
-* [Accessing AWX](#accessing-awx)
 * [Installing the AWX CLI](#installing-the-awx-cli)
 * [Building the CLI Documentation](#building-the-cli-documentation)

@@ -22,110 +16,10 @@ If you're attempting to migrate an older Docker-based AWX installation, see: [Mi

 ## The AWX Operator

-Starting in version 18.0, the [AWX Operator](https://github.com/ansible/awx-operator) is the preferred way to install AWX.
+Starting in version 18.0, the [AWX Operator](https://github.com/ansible/awx-operator) is the preferred way to install AWX. Please refer to the [AWX Operator](https://github.com/ansible/awx-operator) documentation.

 AWX can also alternatively be installed and [run in Docker](./tools/docker-compose/README.md), but this install path is only recommended for development/test-oriented deployments, and has no official published release.

-### Quickstart with minikube
-
-If you don't have an existing OpenShift or Kubernetes cluster, minikube is a fast and easy way to get up and running.
-
-To install minikube, follow the steps in their [documentation](https://minikube.sigs.k8s.io/docs/start/).
-
-:warning: NOTE |
---- |
-If you're about to install minikube or have already installed it, please be sure you're using [Minikube v1.18.1](https://github.com/kubernetes/minikube/releases/tag/v1.18.1). There's a [bug](https://github.com/ansible/awx-operator/issues/205) right now that will not allow you to run it using Minikube v1.19.x.
-#### Starting minikube
-
-Once you have installed minikube, run the following command to start it. You may wish to customize these options.
-
-```
-$ minikube start --cpus=4 --memory=8g --addons=ingress
-```
-
-#### Deploying the AWX Operator
-
-For a comprehensive overview of features, see [README.md](https://github.com/ansible/awx-operator/blob/devel/README.md) in the awx-operator repo. The following steps are the bare minimum to get AWX up and running.
-
-Start by going to https://github.com/ansible/awx-operator/releases and making note of the latest release. Replace `<tag>` in the URL below with the version you are deploying:
-
-```
-$ minikube kubectl -- apply -f https://raw.githubusercontent.com/ansible/awx-operator/<tag>/deploy/awx-operator.yaml
-```
-
-##### Verifying the Operator Deployment
-
-After a few seconds, the operator should be up and running. Verify it by running the following command:
-
-```
-$ minikube kubectl get pods
-NAME READY STATUS RESTARTS AGE
-awx-operator-7c78bfbfd-xb6th 1/1 Running 0 11s
-```
-
-#### Deploy AWX
-
-Once the Operator is running, you can now deploy AWX by creating a simple YAML file:
-
-```
-$ cat myawx.yml
----
-apiVersion: awx.ansible.com/v1beta1
-kind: AWX
-metadata:
-  name: awx
-spec:
-  tower_ingress_type: Ingress
-```
-
-> If a custom AWX image is needed, see [these docs](./docs/build_awx_image.md) on how to build and use it.
-
-And then creating the AWX object in the Kubernetes API:
-
-```
-$ minikube kubectl apply -- -f myawx.yml
-awx.awx.ansible.com/awx created
-```
-
-After creating the AWX object in the Kubernetes API, the operator will begin running its reconciliation loop.
-
-To see what's going on, you can tail the logs of the operator pod (note that your pod name will be different):
-
-```
-$ minikube kubectl logs -- -f awx-operator-7c78bfbfd-xb6th
-```
-
-After a few seconds, you will see the database and application pods show up. On a fresh system, it may take a few minutes for the container images to download.
-
-```
-$ minikube kubectl get pods
-NAME READY STATUS RESTARTS AGE
-awx-5ffbfd489c-bvtvf 3/3 Running 0 2m54s
-awx-operator-7c78bfbfd-xb6th 1/1 Running 0 6m42s
-awx-postgres-0 1/1 Running 0 2m58s
-```
-
-##### Accessing AWX
-
-To access the AWX UI, you'll need to grab the service url from minikube:
-
-```
-$ minikube service awx-service --url
-http://192.168.59.2:31868
-```
-
-On fresh installs, you will see the "AWX is currently upgrading." page until database migrations finish.
-
-Once you are redirected to the login screen, you can now log in by obtaining the generated admin password (note: do not copy the trailing `%`):
-
-```
-$ minikube kubectl -- get secret awx-admin-password -o jsonpath='{.data.password}' | base64 --decode
-b6ChwVmqEiAsil2KSpH4xGaZPeZvWnWj%
-```
-
-Now you can log in at the URL above with the username "admin" and the password above. Happy Automating!
-
-
 # Installing the AWX CLI

 `awx` is the official command-line client for AWX. It:
@@ -5,7 +5,7 @@
 Use the GitHub [issue tracker](https://github.com/ansible/awx/issues) for filing bugs. In order to save time, and help us respond to issues quickly, make sure to fill out as much of the issue template
 as possible. Version information, and an accurate reproducing scenario are critical to helping us identify the problem.

-Please don't use the issue tracker as a way to ask how to do something. Instead, use the [mailing list](https://groups.google.com/forum/#!forum/awx-project) , and the `#ansible-awx` channel on irc.freenode.net to get help.
+Please don't use the issue tracker as a way to ask how to do something. Instead, use the [mailing list](https://groups.google.com/forum/#!forum/awx-project) , and the `#ansible-awx` channel on irc.libera.chat to get help.

 Before opening a new issue, please use the issue search feature to see if what you're experiencing has already been reported. If you have any extra detail to provide, please comment. Otherwise, rather than posting a "me too" comment, please consider giving it a ["thumbs up"](https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comment) to give us an indication of the severity of the problem.

Makefile: 29 changes

@@ -13,7 +13,6 @@ MANAGEMENT_COMMAND ?= awx-manage
 IMAGE_REPOSITORY_AUTH ?=
 IMAGE_REPOSITORY_BASE ?= https://gcr.io
 VERSION := $(shell cat VERSION)
-PYCURL_SSL_LIBRARY ?= openssl

 # NOTE: This defaults the container image version to the branch that's active
 COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -28,7 +27,7 @@ DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)

 # Python packages to install only from source (not from binary wheels)
 # Comma separated list
-SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio,pycurl
+SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
 # These should be upgraded in the AWX and Ansible venv before attempting
 # to install the actual requirements
 VENV_BOOTSTRAP ?= pip==19.3.1 setuptools==41.6.0 wheel==0.36.2
@@ -174,7 +173,7 @@ init:
 . $(VENV_BASE)/awx/bin/activate; \
 fi; \
 $(MANAGEMENT_COMMAND) provision_instance --hostname=$(COMPOSE_HOST); \
-$(MANAGEMENT_COMMAND) register_queue --queuename=tower --instance_percent=100;
+$(MANAGEMENT_COMMAND) register_queue --queuename=controlplane --instance_percent=100;

 # Refresh development environment after pulling new code.
 refresh: clean requirements_dev version_file develop migrate
@@ -272,7 +271,9 @@ black: reports
 @(set -o pipefail && $@ $(BLACK_ARGS) awx awxkit awx_collection | tee reports/$@.report)

 .git/hooks/pre-commit:
-@echo "[ -z \$$AWX_IGNORE_BLACK ] && (black --check \`git diff --cached --name-only --diff-filter=AM | grep -E '\.py$\'\` || (echo 'To fix this, run \`make black\` to auto-format your code prior to commit, or set AWX_IGNORE_BLACK=1' && exit 1))" > .git/hooks/pre-commit
+@echo "if [ -x pre-commit.sh ]; then" > .git/hooks/pre-commit
+@echo " ./pre-commit.sh;" >> .git/hooks/pre-commit
+@echo "fi" >> .git/hooks/pre-commit
 @chmod +x .git/hooks/pre-commit

 genschema: reports
@@ -287,6 +288,11 @@ swagger: reports

 check: black

+api-lint:
+BLACK_ARGS="--check" make black
+flake8 awx
+yamllint -s .
+
 awx-link:
 [ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
 cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link
@@ -314,7 +320,7 @@ test_collection:
 if [ "$(VENV_BASE)" ]; then \
 . $(VENV_BASE)/awx/bin/activate; \
 fi && \
-pip install ansible && \
+pip install ansible-core && \
 py.test $(COLLECTION_TEST_DIRS) -v
 # The python path needs to be modified so that the tests can find Ansible within the container
 # First we will use anything expility set as PYTHONPATH
@@ -387,7 +393,7 @@ clean-ui:
 rm -rf $(UI_BUILD_FLAG_FILE)

 awx/ui_next/node_modules:
-$(NPM_BIN) --prefix awx/ui_next --loglevel warn install
+NODE_OPTIONS=--max-old-space-size=4096 $(NPM_BIN) --prefix awx/ui_next --loglevel warn ci

 $(UI_BUILD_FLAG_FILE):
 $(NPM_BIN) --prefix awx/ui_next --loglevel warn run compile-strings
@@ -469,7 +475,7 @@ docker-compose-sources: .git/hooks/pre-commit
 -e cluster_node_count=$(CLUSTER_NODE_COUNT)

 docker-compose: docker-auth awx/projects docker-compose-sources
-docker-compose -f tools/docker-compose/_sources/docker-compose.yml up $(COMPOSE_UP_OPTS)
+docker-compose -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_UP_OPTS) up

 docker-compose-credential-plugins: docker-auth awx/projects docker-compose-sources
 echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
@@ -550,10 +556,13 @@ awx-kube-dev-build: Dockerfile.kube-dev
 # Translation TASKS
 # --------------------------------------

-# generate UI .pot
+# generate UI .pot file, an empty template of strings yet to be translated
 pot: $(UI_BUILD_FLAG_FILE)
-$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings
-$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template
+$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean
+
+# generate UI .po files for each locale (will update translated strings for `en`)
+po: $(UI_BUILD_FLAG_FILE)
+$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean

 # generate API django .pot .po
 LANG = "en-us"
@@ -1,5 +1,5 @@
 [](https://ansible.softwarefactory-project.io/zuul/status) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
-[](https://webchat.freenode.net/#ansible-awx)
+[](https://libera.chat)

 <img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

@@ -20,7 +20,7 @@ Contributing
 - All code submissions are made through pull requests against the `devel` branch.
 - All contributors must use git commit --signoff for any commit to be merged and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md)
 - Take care to make sure no merge commits are in the submission, and use `git rebase` vs. `git merge` for this reason.
-- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on webchat.freenode.net and talk about what you would like to do or add first. This not only helps everyone know what's going on, but it also helps save time and effort if the community decides some changes are needed.
+- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on web.libera.chat and talk about what you would like to do or add first. This not only helps everyone know what's going on, but it also helps save time and effort if the community decides some changes are needed.

 Reporting Issues
 ----------------
@@ -37,5 +37,5 @@ Get Involved

 We welcome your feedback and ideas. Here's how to reach us with feedback and questions:

-- Join the `#ansible-awx` channel on webchat.freenode.net
+- Join the `#ansible-awx` channel on irc.libera.chat
 - Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
@@ -34,6 +34,7 @@ else:
 from django.db.backends.base import schema
 from django.db.models import indexes
 from django.db.backends.utils import names_digest
+from django.db import connection


 if HAS_DJANGO is True:
@@ -149,6 +150,12 @@ def manage():
 from django.conf import settings
 from django.core.management import execute_from_command_line

+# enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
+if not MODE == 'development':
+    if (connection.pg_version // 10000) < 12:
+        sys.stderr.write("Postgres version 12 is required\n")
+        sys.exit(1)
+
 if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover
 sys.stdout.write('%s\n' % __version__)
 # If running as a user without permission to read settings, display an
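The check added to `manage()` above divides `connection.pg_version` by 10000 because psycopg2/libpq report the server version as a single integer (for example, 120007 means PostgreSQL 12.7). A minimal standalone sketch of the same arithmetic, using a made-up version number rather than a live database connection:

```python
# Standalone sketch of the version gate shown in the hunk above;
# 120007 is a hypothetical PostgreSQL 12.7 server, not output from AWX.
import sys


def require_postgres_12(pg_version: int) -> None:
    # libpq encodes 12.7 as 120007, so integer division by 10000 yields the major version.
    if (pg_version // 10000) < 12:
        sys.stderr.write("Postgres version 12 is required\n")
        sys.exit(1)


require_postgres_12(120007)  # passes: 120007 // 10000 == 12
```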
@@ -1,8 +1,12 @@
 # Django
+from django.conf import settings
 from django.utils.translation import ugettext_lazy as _

+# Django REST Framework
+from rest_framework import serializers
+
 # AWX
-from awx.conf import fields, register
+from awx.conf import fields, register, register_validate
 from awx.api.fields import OAuth2ProviderField
 from oauth2_provider.settings import oauth2_settings

@@ -27,6 +31,17 @@ register(
 category=_('Authentication'),
 category_slug='authentication',
 )
+register(
+    'DISABLE_LOCAL_AUTH',
+    field_class=fields.BooleanField,
+    label=_('Disable the built-in authentication system'),
+    help_text=_(
+        "Controls whether users are prevented from using the built-in authentication system. "
+        "You probably want to do this if you are using an LDAP or SAML integration."
+    ),
+    category=_('Authentication'),
+    category_slug='authentication',
+)
 register(
 'AUTH_BASIC_ENABLED',
 field_class=fields.BooleanField,
@@ -81,3 +96,23 @@ register(
 category=_('Authentication'),
 category_slug='authentication',
 )
+
+
+def authentication_validate(serializer, attrs):
+    remote_auth_settings = [
+        'AUTH_LDAP_SERVER_URI',
+        'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
+        'SOCIAL_AUTH_GITHUB_KEY',
+        'SOCIAL_AUTH_GITHUB_ORG_KEY',
+        'SOCIAL_AUTH_GITHUB_TEAM_KEY',
+        'SOCIAL_AUTH_SAML_ENABLED_IDPS',
+        'RADIUS_SERVER',
+        'TACACSPLUS_HOST',
+    ]
+    if attrs.get('DISABLE_LOCAL_AUTH', False):
+        if not any(getattr(settings, s, None) for s in remote_auth_settings):
+            raise serializers.ValidationError(_("There are no remote authentication systems configured."))
+    return attrs
+
+
+register_validate('authentication', authentication_validate)
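The new `DISABLE_LOCAL_AUTH` setting is registered under the `authentication` category, and `authentication_validate` rejects turning it on when no remote authentication system (LDAP, SAML, social auth, RADIUS, TACACS+) is configured. A rough client-side sketch of exercising it, assuming AWX's usual `/api/v2/settings/authentication/` endpoint, a placeholder host and token, and the `requests` library; none of these values come from this diff:

```python
# Hypothetical sketch; endpoint path, host, and token are assumptions.
import requests

AWX_URL = "https://awx.example.com"          # placeholder host
HEADERS = {"Authorization": "Bearer TOKEN"}  # placeholder OAuth2 token

# Attempt to disable local (built-in) authentication.
resp = requests.patch(
    f"{AWX_URL}/api/v2/settings/authentication/",
    json={"DISABLE_LOCAL_AUTH": True},
    headers=HEADERS,
)

# If no LDAP/SAML/social/RADIUS/TACACS+ settings are configured, the
# authentication_validate hook above should reject the change with HTTP 400.
print(resp.status_code, resp.json())
```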
@@ -133,7 +133,7 @@ class FieldLookupBackend(BaseFilterBackend):
 Filter using field lookups provided via query string parameters.
 """

-RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate')
+RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')

 SUPPORTED_LOOKUPS = (
 'exact',
@@ -39,6 +39,7 @@ from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credenti
 from awx.main.access import access_registry
 from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
 from awx.main.utils.db import get_all_field_names
+from awx.main.utils.licensing import server_product_name
 from awx.main.views import ApiErrorView
 from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
 from awx.api.versioning import URLPathVersioning
@@ -184,9 +185,6 @@ class APIView(views.APIView):
 """
 Log warning for 400 requests. Add header with elapsed time.
 """
-from awx.main.utils import get_licenser
-from awx.main.utils.licensing import OpenLicense
-
 #
 # If the URL was rewritten, and we get a 404, we should entirely
 # replace the view in the request context with an ApiErrorView()
@@ -226,7 +224,7 @@ class APIView(views.APIView):
 response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
 time_started = getattr(self, 'time_started', None)
 response['X-API-Product-Version'] = get_awx_version()
-response['X-API-Product-Name'] = 'AWX' if isinstance(get_licenser(), OpenLicense) else 'Red Hat Ansible Tower'
+response['X-API-Product-Name'] = server_product_name()

 response['X-API-Node'] = settings.CLUSTER_HOST_ID
 if time_started:
@@ -24,7 +24,7 @@ from rest_framework.request import clone_request
 from awx.api.fields import ChoiceNullField
 from awx.main.fields import JSONField, ImplicitRoleField
 from awx.main.models import NotificationTemplate
-from awx.main.tasks import AWXReceptorJob
+from awx.main.utils.execution_environments import get_default_pod_spec

 # Polymorphic
 from polymorphic.models import PolymorphicModel
@@ -211,7 +211,7 @@ class Metadata(metadata.SimpleMetadata):
 continue

 if field == "pod_spec_override":
-meta['default'] = AWXReceptorJob().pod_definition
+meta['default'] = get_default_pod_spec()

 # Add type choices if available from the serializer.
 if field == 'type' and hasattr(serializer, 'get_type_choices'):
@@ -1,12 +1,16 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.

+from collections import OrderedDict
+
 # Django REST Framework
 from django.conf import settings
 from django.core.paginator import Paginator as DjangoPaginator
 from rest_framework import pagination
 from rest_framework.response import Response
 from rest_framework.utils.urls import replace_query_param
+from rest_framework.settings import api_settings
+from django.utils.translation import gettext_lazy as _


 class DisabledPaginator(DjangoPaginator):
@@ -65,3 +69,65 @@ class Pagination(pagination.PageNumberPagination):
         if self.count_disabled:
             return Response({'results': data})
         return super(Pagination, self).get_paginated_response(data)
+
+
+class LimitPagination(pagination.BasePagination):
+    default_limit = api_settings.PAGE_SIZE
+    limit_query_param = 'limit'
+    limit_query_description = _('Number of results to return per page.')
+    max_page_size = settings.MAX_PAGE_SIZE
+
+    def paginate_queryset(self, queryset, request, view=None):
+        self.limit = self.get_limit(request)
+        self.request = request
+
+        return list(queryset[0 : self.limit])
+
+    def get_paginated_response(self, data):
+        return Response(OrderedDict([('results', data)]))
+
+    def get_paginated_response_schema(self, schema):
+        return {
+            'type': 'object',
+            'properties': {
+                'results': schema,
+            },
+        }
+
+    def get_limit(self, request):
+        try:
+            return pagination._positive_int(request.query_params[self.limit_query_param], strict=True)
+        except (KeyError, ValueError):
+            pass
+
+        return self.default_limit
+
+
+class UnifiedJobEventPagination(Pagination):
+    """
+    By default, use Pagination for all operations.
+    If `limit` query parameter specified use LimitPagination
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.use_limit_paginator = False
+        self.limit_pagination = LimitPagination()
+        return super().__init__(*args, **kwargs)
+
+    def paginate_queryset(self, queryset, request, view=None):
+        if 'limit' in request.query_params:
+            self.use_limit_paginator = True
+
+        if self.use_limit_paginator:
+            return self.limit_pagination.paginate_queryset(queryset, request, view=view)
+        return super().paginate_queryset(queryset, request, view=view)
+
+    def get_paginated_response(self, data):
+        if self.use_limit_paginator:
+            return self.limit_pagination.get_paginated_response(data)
+        return super().get_paginated_response(data)
+
+    def get_paginated_response_schema(self, schema):
+        if self.use_limit_paginator:
+            return self.limit_pagination.get_paginated_response_schema(schema)
+        return super().get_paginated_response_schema(schema)
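A note on the pagination classes added above: `UnifiedJobEventPagination` behaves like the regular `Pagination` class until a request supplies `limit`, at which point it delegates to `LimitPagination` and returns a bare `{"results": [...]}` payload with no count or next/previous links. The sketch below shows a DRF view opting in, modeled on the `pagination_class = UnifiedJobEventPagination` assignments made in the view hunks later in this diff; the view name and queryset are hypothetical, not part of the change.

```python
# Sketch only (not part of the diff): a DRF list view using the new paginator.
# With ?limit=25 the response is {"results": [...]} and the count query is skipped;
# without ?limit the usual paged response (count/next/previous) is returned.
from rest_framework.generics import ListAPIView

from awx.api import serializers
from awx.api.pagination import UnifiedJobEventPagination
from awx.main import models


class ExampleJobEventList(ListAPIView):  # hypothetical view name
    serializer_class = serializers.JobEventSerializer
    pagination_class = UnifiedJobEventPagination

    def get_queryset(self):
        # A stable ordering matters when clients read events with ?limit=N
        return models.JobEvent.objects.order_by('start_line')
```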
@@ -4,6 +4,8 @@
 # Python
 import logging

+from django.conf import settings
+
 # Django REST Framework
 from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
 from rest_framework import permissions
@@ -245,7 +247,7 @@ class IsSuperUser(permissions.BasePermission):

 class InstanceGroupTowerPermission(ModelAccessPermission):
     def has_object_permission(self, request, view, obj):
-        if request.method == 'DELETE' and obj.name == "tower":
+        if request.method == 'DELETE' and obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
             return False
         return super(InstanceGroupTowerPermission, self).has_object_permission(request, view, obj)

@@ -144,13 +144,12 @@ SUMMARIZABLE_FK_FIELDS = {
        'inventory_sources_with_failures',
        'organization_id',
        'kind',
-        'insights_credential_id',
    ),
    'host': DEFAULT_SUMMARY_FIELDS,
    'group': DEFAULT_SUMMARY_FIELDS,
    'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
    'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
-    'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
+    'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
    'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
    'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
    'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
@@ -171,7 +170,6 @@ SUMMARIZABLE_FK_FIELDS = {
    'role': ('id', 'role_field'),
    'notification_template': DEFAULT_SUMMARY_FIELDS,
    'instance_group': ('id', 'name', 'is_container_group'),
-    'insights_credential': DEFAULT_SUMMARY_FIELDS,
    'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
    'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
    'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
@@ -724,6 +722,20 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
        else:
            return super(UnifiedJobTemplateSerializer, self).to_representation(obj)

+    def get_summary_fields(self, obj):
+        summary_fields = super().get_summary_fields(obj)
+
+        if self.is_detail_view:
+            resolved_ee = obj.resolve_execution_environment()
+            if resolved_ee is not None:
+                summary_fields['resolved_environment'] = {
+                    field: getattr(resolved_ee, field, None)
+                    for field in SUMMARIZABLE_FK_FIELDS['execution_environment']
+                    if getattr(resolved_ee, field, None) is not None
+                }
+
+        return summary_fields
+

 class UnifiedJobSerializer(BaseSerializer):
    show_capabilities = ['start', 'delete']
@@ -754,6 +766,7 @@ class UnifiedJobSerializer(BaseSerializer):
            'result_traceback',
            'event_processing_finished',
            'launched_by',
+            'work_unit_id',
        )

        extra_kwargs = {
@@ -1396,11 +1409,11 @@ class ProjectOptionsSerializer(BaseSerializer):

 class ExecutionEnvironmentSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete', 'copy']
-    managed_by_tower = serializers.ReadOnlyField()
+    managed = serializers.ReadOnlyField()

    class Meta:
        model = ExecutionEnvironment
-        fields = ('*', 'organization', 'image', 'managed_by_tower', 'credential', 'pull')
+        fields = ('*', 'organization', 'image', 'managed', 'credential', 'pull')

    def get_related(self, obj):
        res = super(ExecutionEnvironmentSerializer, self).get_related(obj)
@@ -1646,7 +1659,6 @@ class InventorySerializer(BaseSerializerWithVariables):
            'has_inventory_sources',
            'total_inventory_sources',
            'inventory_sources_with_failures',
-            'insights_credential',
            'pending_deletion',
        )

@@ -1671,8 +1683,6 @@ class InventorySerializer(BaseSerializerWithVariables):
                copy=self.reverse('api:inventory_copy', kwargs={'pk': obj.pk}),
            )
        )
-        if obj.insights_credential:
-            res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
        if obj.organization:
            res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
        return res
@@ -1740,10 +1750,9 @@ class HostSerializer(BaseSerializerWithVariables):
            'has_inventory_sources',
            'last_job',
            'last_job_host_summary',
-            'insights_system_id',
            'ansible_facts_modified',
        )
-        read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified')
+        read_only_fields = ('last_job', 'last_job_host_summary', 'ansible_facts_modified')

    def build_relational_field(self, field_name, relation_info):
        field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
@@ -1767,7 +1776,6 @@ class HostSerializer(BaseSerializerWithVariables):
                smart_inventories=self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
                ad_hoc_commands=self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
                ad_hoc_command_events=self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
-                insights=self.reverse('api:host_insights', kwargs={'pk': obj.pk}),
                ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
            )
        )
@@ -2207,6 +2215,7 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
            'org_host_limit_error',
            'source_project_update',
            'custom_virtualenv',
+            'instance_group',
            '-controller_node',
        )

@@ -2472,14 +2481,14 @@ class ResourceAccessListElementSerializer(UserSerializer):

 class CredentialTypeSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete']
-    managed_by_tower = serializers.ReadOnlyField()
+    managed = serializers.ReadOnlyField()

    class Meta:
        model = CredentialType
-        fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs', 'injectors')
+        fields = ('*', 'kind', 'namespace', 'name', 'managed', 'inputs', 'injectors')

    def validate(self, attrs):
-        if self.instance and self.instance.managed_by_tower:
+        if self.instance and self.instance.managed:
            raise PermissionDenied(detail=_("Modifications not allowed for managed credential types"))

        old_inputs = {}
@@ -2511,8 +2520,8 @@ class CredentialTypeSerializer(BaseSerializer):
    def to_representation(self, data):
        value = super(CredentialTypeSerializer, self).to_representation(data)

-        # translate labels and help_text for credential fields "managed by Tower"
-        if value.get('managed_by_tower'):
+        # translate labels and help_text for credential fields "managed"
+        if value.get('managed'):
            value['name'] = _(value['name'])
            for field in value.get('inputs', {}).get('fields', []):
                field['label'] = _(field['label'])
@@ -2531,11 +2540,11 @@ class CredentialTypeSerializer(BaseSerializer):
 class CredentialSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete', 'copy', 'use']
    capabilities_prefetch = ['admin', 'use']
-    managed_by_tower = serializers.ReadOnlyField()
+    managed = serializers.ReadOnlyField()

    class Meta:
        model = Credential
-        fields = ('*', 'organization', 'credential_type', 'managed_by_tower', 'inputs', 'kind', 'cloud', 'kubernetes')
+        fields = ('*', 'organization', 'credential_type', 'managed', 'inputs', 'kind', 'cloud', 'kubernetes')
        extra_kwargs = {'credential_type': {'label': _('Credential Type')}}

    def to_representation(self, data):
@@ -2602,7 +2611,7 @@ class CredentialSerializer(BaseSerializer):
        return summary_dict

    def validate(self, attrs):
-        if self.instance and self.instance.managed_by_tower:
+        if self.instance and self.instance.managed:
            raise PermissionDenied(detail=_("Modifications not allowed for managed credentials"))
        return super(CredentialSerializer, self).validate(attrs)

@@ -2614,7 +2623,7 @@ class CredentialSerializer(BaseSerializer):
        return ret

    def validate_organization(self, org):
-        if self.instance and self.instance.credential_type.kind == 'galaxy' and org is None:
+        if self.instance and (not self.instance.managed) and self.instance.credential_type.kind == 'galaxy' and org is None:
            raise serializers.ValidationError(_("Galaxy credentials must be owned by an Organization."))
        return org

@@ -2622,7 +2631,6 @@ class CredentialSerializer(BaseSerializer):
        if self.instance and credential_type.pk != self.instance.credential_type.pk:
            for related_objects in (
                'ad_hoc_commands',
-                'insights_inventories',
                'unifiedjobs',
                'unifiedjobtemplates',
                'projects',
@@ -3030,7 +3038,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
        res = super(JobSerializer, self).get_related(obj)
        res.update(
            dict(
-                job_events=self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
+                job_events=self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),  # TODO: consider adding job_created
                job_host_summaries=self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
                activity_stream=self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
                notifications=self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
@@ -3097,8 +3105,8 @@ class JobDetailSerializer(JobSerializer):
        fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')

    def get_playbook_counts(self, obj):
-        task_count = obj.job_events.filter(event='playbook_on_task_start').count()
-        play_count = obj.job_events.filter(event='playbook_on_play_start').count()
+        task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
+        play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()

        data = {'play_count': play_count, 'task_count': task_count}

@@ -3106,7 +3114,7 @@ class JobDetailSerializer(JobSerializer):

    def get_host_status_counts(self, obj):
        try:
-            counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
+            counts = obj.get_event_queryset().only('event_data').get(event='playbook_on_stats').get_host_status_counts()
        except JobEvent.DoesNotExist:
            counts = {}

@@ -3413,6 +3421,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
            'ask_limit_on_launch',
            'webhook_service',
            'webhook_credential',
+            '-execution_environment',
        )

    def get_related(self, obj):
@@ -3439,6 +3448,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
            survey_spec=self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
            copy=self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
        )
+        res.pop('execution_environment', None)  # EEs aren't meaningful for workflows
        if obj.organization:
            res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
        if obj.webhook_credential_id:
@@ -3490,6 +3500,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
            'allow_simultaneous',
            'job_template',
            'is_sliced_job',
+            '-execution_environment',
            '-execution_node',
            '-event_processing_finished',
            '-controller_node',
@@ -3503,6 +3514,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):

    def get_related(self, obj):
        res = super(WorkflowJobSerializer, self).get_related(obj)
+        res.pop('execution_environment', None)  # EEs aren't meaningful for workflows
        if obj.workflow_job_template:
            res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
        res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
@@ -3527,7 +3539,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):

 class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
    class Meta:
-        fields = ('*', '-execution_node', '-controller_node')
+        fields = ('*', '-execution_environment', '-execution_node', '-controller_node')


 class WorkflowJobCancelSerializer(WorkflowJobSerializer):
@@ -4177,7 +4189,7 @@ class JobLaunchSerializer(BaseSerializer):
            elif field_name == 'credentials':
                for cred in obj.credentials.all():
                    cred_dict = dict(id=cred.id, name=cred.name, credential_type=cred.credential_type.pk, passwords_needed=cred.passwords_needed)
-                    if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
+                    if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
                        cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
                    defaults_dict.setdefault(field_name, []).append(cred_dict)
            else:
@@ -4374,7 +4386,7 @@ class NotificationTemplateSerializer(BaseSerializer):
        return res

    def _recent_notifications(self, obj):
-        return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
+        return [{'id': x.id, 'status': x.status, 'created': x.created, 'error': x.error} for x in obj.notifications.all().order_by('-created')[:5]]

    def get_summary_fields(self, obj):
        d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
@@ -4904,8 +4916,12 @@ class InstanceGroupSerializer(BaseSerializer):
        return value

    def validate_name(self, value):
-        if self.instance and self.instance.name == 'tower' and value != 'tower':
-            raise serializers.ValidationError(_('tower instance group name may not be changed.'))
+        if self.instance and self.instance.name == settings.DEFAULT_EXECUTION_QUEUE_NAME and value != settings.DEFAULT_EXECUTION_QUEUE_NAME:
+            raise serializers.ValidationError(_('%s instance group name may not be changed.' % settings.DEFAULT_EXECUTION_QUEUE_NAME))
+
+        if self.instance and self.instance.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME and value != settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
+            raise serializers.ValidationError(_('%s instance group name may not be changed.' % settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME))
+
        return value

    def validate_credential(self, value):
@@ -4972,7 +4988,7 @@ class ActivityStreamSerializer(BaseSerializer):
            ('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
            ('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
            ('o_auth2_application', ('id', 'name', 'description')),
-            ('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
+            ('credential_type', ('id', 'name', 'description', 'kind', 'managed')),
            ('ad_hoc_command', ('id', 'name', 'status', 'limit')),
            ('workflow_approval', ('id', 'name', 'unified_job_id')),
        ]
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}
awx/api/templates/api/inventory_update_events_list.md  (1 line, Normal file)
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}
awx/api/templates/api/job_job_events_list.md  (21 lines, Normal file)
@@ -0,0 +1,21 @@
+{% include "api/sub_list_api_view.md" %}
+{% ifmeth GET %}
+## Special limit feature for event list views
+
+Use the `limit` query string parameter to opt out of the pagination keys.
+Doing this can improve response times for jobs that produce a large volume
+of outputs.
+
+    ?limit=25
+
+This will set the page size to 25 and the `previous` and `next` keys will be
+omitted from the response data. The data structure will look like this.
+
+    {
+        "results": [
+            ...
+        ]
+    }
+
+
+{% endifmeth %}
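To make the template above concrete, here is a hedged client-side illustration of the `limit` feature. The host, job id, and token are placeholders, and the `/api/v2/jobs/<id>/job_events/` route is assumed to be the endpoint this template documents:

```python
# Illustration only: fetching job events with the ?limit query parameter
# described in the template above.  Host, job id and token are placeholders.
import requests

resp = requests.get(
    'https://awx.example.com/api/v2/jobs/42/job_events/',
    params={'limit': 25},
    headers={'Authorization': 'Bearer <token>'},
)
resp.raise_for_status()
events = resp.json()['results']  # no 'count', 'next' or 'previous' keys in this mode
```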
@@ -1,25 +0,0 @@
-Make a GET request to retrieve the list of aggregated play data associated with a job
-
-## Filtering
-
-This endpoints supports a limited filtering subset:
-
-    ?event_id__in=1,2,3
-
-Will show only the given ids.
-
-    ?event_id__gt=1
-
-Will show ids greater than the given one.
-
-    ?event_id__lt=3
-
-Will show ids less than the given one.
-
-    ?failed=true
-
-Will show only failed plays. Alternatively `false` may be used.
-
-    ?play__icontains=test
-
-Will filter plays matching the substring `test`
@@ -1,27 +0,0 @@
-Make a GET request to retrieve the list of aggregated task data associated with the play given by event_id.
-
-`event_id` is a required query parameter and must match the job event id of the parent play in order to receive the list of tasks associated with the play
-
-## Filtering
-
-This endpoints supports a limited filtering subset:
-
-    ?event_id__in=1,2,3
-
-Will show only the given task ids under the play given by `event_id`.
-
-    ?event_id__gt=1
-
-Will show ids greater than the given one.
-
-    ?event_id__lt=3
-
-Will show ids less than the given one.
-
-    ?failed=true
-
-Will show only failed plays. Alternatively `false` may be used.
-
-    ?task__icontains=test
-
-Will filter tasks matching the substring `test`
awx/api/templates/api/project_update_events_list.md  (1 line, Normal file)
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}
awx/api/templates/api/system_job_events_list.md  (1 line, Normal file)
@@ -0,0 +1 @@
+{% include "api/job_job_events_list.md" %}
@@ -16,7 +16,6 @@ from awx.api.views import (
    HostSmartInventoriesList,
    HostAdHocCommandsList,
    HostAdHocCommandEventsList,
-    HostInsights,
 )


@@ -33,7 +32,6 @@ urls = [
    url(r'^(?P<pk>[0-9]+)/smart_inventories/$', HostSmartInventoriesList.as_view(), name='host_smart_inventories_list'),
    url(r'^(?P<pk>[0-9]+)/ad_hoc_commands/$', HostAdHocCommandsList.as_view(), name='host_ad_hoc_commands_list'),
    url(r'^(?P<pk>[0-9]+)/ad_hoc_command_events/$', HostAdHocCommandEventsList.as_view(), name='host_ad_hoc_command_events_list'),
-    url(r'^(?P<pk>[0-9]+)/insights/$', HostInsights.as_view(), name='host_insights'),
 ]

 __all__ = ['urls']
@@ -3,14 +3,11 @@

 from django.conf.urls import url

-from awx.api.views import JobEventList, JobEventDetail, JobEventChildrenList, JobEventHostsList
+from awx.api.views import JobEventDetail, JobEventChildrenList


 urls = [
-    url(r'^$', JobEventList.as_view(), name='job_event_list'),
    url(r'^(?P<pk>[0-9]+)/$', JobEventDetail.as_view(), name='job_event_detail'),
    url(r'^(?P<pk>[0-9]+)/children/$', JobEventChildrenList.as_view(), name='job_event_children_list'),
-    url(r'^(?P<pk>[0-9]+)/hosts/$', JobEventHostsList.as_view(), name='job_event_hosts_list'),
 ]

 __all__ = ['urls']
@@ -21,7 +21,7 @@ from urllib3.exceptions import ConnectTimeoutError
 from django.conf import settings
 from django.core.exceptions import FieldError, ObjectDoesNotExist
 from django.db.models import Q, Sum
-from django.db import IntegrityError, transaction, connection
+from django.db import IntegrityError, ProgrammingError, transaction, connection
 from django.shortcuts import get_object_or_404
 from django.utils.safestring import mark_safe
 from django.utils.timezone import now
@@ -90,17 +90,14 @@ from awx.main import models
 from awx.main.utils import (
    camelcase_to_underscore,
    extract_ansible_vars,
-    get_awx_http_client_headers,
    get_object_or_400,
    getattrd,
    get_pk_from_dict,
    schedule_task_manager,
    ignore_inventory_computed_fields,
-    set_environ,
 )
 from awx.main.utils.encryption import encrypt_value
 from awx.main.utils.filters import SmartFilter
-from awx.main.utils.insights import filter_insights_api_response
 from awx.main.redact import UriCleaner
 from awx.api.permissions import (
    JobTemplateCallbackPermission,
@@ -172,11 +169,21 @@ from awx.api.views.root import ( # noqa
    ApiV2AttachView,
 )
 from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver  # noqa
+from awx.api.pagination import UnifiedJobEventPagination


 logger = logging.getLogger('awx.api.views')


+def unpartitioned_event_horizon(cls):
+    with connection.cursor() as cursor:
+        try:
+            cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
+            return cursor.fetchone()[0] or -1
+        except ProgrammingError:
+            return 0
+
+
 def api_exception_handler(exc, context):
    """
    Override default API exception handler to catch IntegrityError exceptions.
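The `unpartitioned_event_horizon()` helper added above reports the highest id left in the legacy `_unpartitioned_*` table for an event model (0 if that table no longer exists, -1 if it is empty), so callers can tell whether a given event id predates the move to partitioned event tables. Below is a small sketch of the selection logic it enables, following the `is_partitioned`/`model` property pattern used by the event detail views later in this diff; the function name is hypothetical and `unpartitioned_event_horizon` is assumed to be in scope from the same module:

```python
# Sketch (not part of the diff): routing an event primary key to the right model,
# mirroring the is_partitioned/model properties added to the event views below.
from awx.main import models


def event_model_for_pk(pk):
    # ids above the horizon live in the new partitioned table; anything at or
    # below it still lives in the old _unpartitioned_* table.
    if int(pk) > unpartitioned_event_horizon(models.JobEvent):
        return models.JobEvent
    return models.UnpartitionedJobEvent
```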
@@ -698,6 +705,21 @@ class ExecutionEnvironmentDetail(RetrieveUpdateDestroyAPIView):
    serializer_class = serializers.ExecutionEnvironmentSerializer
    swagger_topic = "Execution Environments"

+    def update(self, request, *args, **kwargs):
+        instance = self.get_object()
+        fields_to_check = ['name', 'description', 'organization', 'image', 'credential']
+        if instance.managed and request.user.can_access(models.ExecutionEnvironment, 'change', instance):
+            for field in fields_to_check:
+                if kwargs.get('partial') and field not in request.data:
+                    continue
+                left = getattr(instance, field, None)
+                if hasattr(left, 'id'):
+                    left = left.id
+                right = request.data.get(field)
+                if left != right:
+                    raise PermissionDenied(_("Only the 'pull' field can be edited for managed execution environments."))
+        return super().update(request, *args, **kwargs)
+

 class ExecutionEnvironmentJobTemplateList(SubListAPIView):

@@ -880,11 +902,17 @@ class ProjectUpdateEventsList(SubListAPIView):
    relationship = 'project_update_events'
    name = _('Project Update Events List')
    search_fields = ('stdout',)
+    pagination_class = UnifiedJobEventPagination

    def finalize_response(self, request, response, *args, **kwargs):
        response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
        return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)

+    def get_queryset(self):
+        pu = self.get_parent_object()
+        self.check_parent_access(pu)
+        return pu.get_event_queryset()
+

 class SystemJobEventsList(SubListAPIView):

@@ -894,11 +922,17 @@ class SystemJobEventsList(SubListAPIView):
    relationship = 'system_job_events'
    name = _('System Job Events List')
    search_fields = ('stdout',)
+    pagination_class = UnifiedJobEventPagination

    def finalize_response(self, request, response, *args, **kwargs):
        response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
        return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)

+    def get_queryset(self):
+        job = self.get_parent_object()
+        self.check_parent_access(job)
+        return job.get_event_queryset()
+

 class ProjectUpdateCancel(RetrieveAPIView):

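The `ExecutionEnvironmentDetail.update()` override added above restricts edits on managed execution environments to the `pull` field. A hedged client-side illustration follows; the host, token, EE id, and image tag are placeholders, and `'always'` is assumed to be a valid `pull` choice:

```python
# Illustration only: editing a *managed* execution environment after this change.
import requests

ee_url = 'https://awx.example.com/api/v2/execution_environments/1/'  # placeholder
headers = {'Authorization': 'Bearer <token>'}

# Changing only 'pull' is allowed.
requests.patch(ee_url, json={'pull': 'always'}, headers=headers)

# Changing any of name/description/organization/image/credential is rejected (HTTP 403).
requests.patch(ee_url, json={'image': 'quay.io/example/other:latest'}, headers=headers)
```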
@@ -1276,7 +1310,7 @@ class CredentialTypeDetail(RetrieveUpdateDestroyAPIView):

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
-        if instance.managed_by_tower:
+        if instance.managed:
            raise PermissionDenied(detail=_("Deletion not allowed for managed credential types"))
        if instance.credentials.exists():
            raise PermissionDenied(detail=_("Credential types that are in use cannot be deleted"))
@@ -1391,7 +1425,7 @@ class CredentialDetail(RetrieveUpdateDestroyAPIView):

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
-        if instance.managed_by_tower:
+        if instance.managed:
            raise PermissionDenied(detail=_("Deletion not allowed for managed credentials"))
        return super(CredentialDetail, self).destroy(request, *args, **kwargs)

@@ -1667,106 +1701,6 @@ class GatewayTimeout(APIException):
    default_code = 'gateway_timeout'


-class HostInsights(GenericAPIView):
-
-    model = models.Host
-    serializer_class = serializers.EmptySerializer
-
-    def _call_insights_api(self, url, session, headers):
-        try:
-            with set_environ(**settings.AWX_TASK_ENV):
-                res = session.get(url, headers=headers, timeout=120)
-        except requests.exceptions.SSLError:
-            raise BadGateway(_('SSLError while trying to connect to {}').format(url))
-        except requests.exceptions.Timeout:
-            raise GatewayTimeout(_('Request to {} timed out.').format(url))
-        except requests.exceptions.RequestException as e:
-            raise BadGateway(_('Unknown exception {} while trying to GET {}').format(e, url))
-
-        if res.status_code == 401:
-            raise BadGateway(_('Unauthorized access. Please check your Insights Credential username and password.'))
-        elif res.status_code != 200:
-            raise BadGateway(
-                _('Failed to access the Insights API at URL {}.' ' Server responded with {} status code and message {}').format(
-                    url, res.status_code, res.content
-                )
-            )
-
-        try:
-            return res.json()
-        except ValueError:
-            raise BadGateway(_('Expected JSON response from Insights at URL {}' ' but instead got {}').format(url, res.content))
-
-    def _get_session(self, username, password):
-        session = requests.Session()
-        session.auth = requests.auth.HTTPBasicAuth(username, password)
-
-        return session
-
-    def _get_platform_info(self, host, session, headers):
-        url = '{}/api/inventory/v1/hosts?insights_id={}'.format(settings.INSIGHTS_URL_BASE, host.insights_system_id)
-        res = self._call_insights_api(url, session, headers)
-        try:
-            res['results'][0]['id']
-        except (IndexError, KeyError):
-            raise NotFound(_('Could not translate Insights system ID {}' ' into an Insights platform ID.').format(host.insights_system_id))
-
-        return res['results'][0]
-
-    def _get_reports(self, platform_id, session, headers):
-        url = '{}/api/insights/v1/system/{}/reports/'.format(settings.INSIGHTS_URL_BASE, platform_id)
-
-        return self._call_insights_api(url, session, headers)
-
-    def _get_remediations(self, platform_id, session, headers):
-        url = '{}/api/remediations/v1/remediations?system={}'.format(settings.INSIGHTS_URL_BASE, platform_id)
-
-        remediations = []
-
-        # Iterate over all of the pages of content.
-        while url:
-            data = self._call_insights_api(url, session, headers)
-            remediations.extend(data['data'])
-
-            url = data['links']['next']  # Will be `None` if this is the last page.
-
-        return remediations
-
-    def _get_insights(self, host, session, headers):
-        platform_info = self._get_platform_info(host, session, headers)
-        platform_id = platform_info['id']
-        reports = self._get_reports(platform_id, session, headers)
-        remediations = self._get_remediations(platform_id, session, headers)
-
-        return {'insights_content': filter_insights_api_response(platform_info, reports, remediations)}
-
-    def get(self, request, *args, **kwargs):
-        host = self.get_object()
-        cred = None
-
-        if host.insights_system_id is None:
-            return Response(dict(error=_('This host is not recognized as an Insights host.')), status=status.HTTP_404_NOT_FOUND)
-
-        if host.inventory and host.inventory.insights_credential:
-            cred = host.inventory.insights_credential
-        else:
-            return Response(dict(error=_('The Insights Credential for "{}" was not found.').format(host.inventory.name)), status=status.HTTP_404_NOT_FOUND)
-
-        username = cred.get_input('username', default='')
-        password = cred.get_input('password', default='')
-        session = self._get_session(username, password)
-        headers = get_awx_http_client_headers()
-
-        data = self._get_insights(host, session, headers)
-        return Response(data, status=status.HTTP_200_OK)
-
-    def handle_exception(self, exc):
-        # Continue supporting the slightly different way we have handled error responses on this view.
-        response = super().handle_exception(exc)
-        response.data['error'] = response.data.pop('detail')
-        return response
-
-
 class GroupList(ListCreateAPIView):

    model = models.Group
@@ -3604,7 +3538,7 @@ class JobRelaunch(RetrieveAPIView):
                    status=status.HTTP_400_BAD_REQUEST,
                )
            host_qs = obj.retry_qs(retry_hosts)
-            if not obj.job_events.filter(event='playbook_on_stats').exists():
+            if not obj.get_event_queryset().filter(event='playbook_on_stats').exists():
                return Response(
                    {'hosts': _('Cannot retry on {status_value} hosts, playbook stats not available.').format(status_value=retry_hosts)},
                    status=status.HTTP_400_BAD_REQUEST,
@@ -3731,18 +3665,22 @@ class JobHostSummaryDetail(RetrieveAPIView):
    serializer_class = serializers.JobHostSummarySerializer


-class JobEventList(NoTruncateMixin, ListAPIView):
-
-    model = models.JobEvent
-    serializer_class = serializers.JobEventSerializer
-    search_fields = ('stdout',)
-
-
 class JobEventDetail(RetrieveAPIView):

-    model = models.JobEvent
    serializer_class = serializers.JobEventSerializer

+    @property
+    def is_partitioned(self):
+        if 'pk' not in self.kwargs:
+            return True
+        return int(self.kwargs['pk']) > unpartitioned_event_horizon(models.JobEvent)
+
+    @property
+    def model(self):
+        if self.is_partitioned:
+            return models.JobEvent
+        return models.UnpartitionedJobEvent
+
    def get_serializer_context(self):
        context = super().get_serializer_context()
        context.update(no_truncate=True)
@@ -3751,33 +3689,31 @@ class JobEventDetail(RetrieveAPIView):

 class JobEventChildrenList(NoTruncateMixin, SubListAPIView):

-    model = models.JobEvent
    serializer_class = serializers.JobEventSerializer
-    parent_model = models.JobEvent
    relationship = 'children'
    name = _('Job Event Children List')
    search_fields = ('stdout',)

-    def get_queryset(self):
-        parent_event = self.get_parent_object()
-        self.check_parent_access(parent_event)
-        qs = self.request.user.get_queryset(self.model).filter(parent_uuid=parent_event.uuid)
-        return qs
+    @property
+    def is_partitioned(self):
+        if 'pk' not in self.kwargs:
+            return True
+        return int(self.kwargs['pk']) > unpartitioned_event_horizon(models.JobEvent)
+
+    @property
+    def model(self):
+        if self.is_partitioned:
+            return models.JobEvent
+        return models.UnpartitionedJobEvent

-
-class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
-
-    model = models.Host
-    serializer_class = serializers.HostSerializer
-    parent_model = models.JobEvent
-    relationship = 'hosts'
-    name = _('Job Event Hosts List')
+    @property
+    def parent_model(self):
+        return self.model

    def get_queryset(self):
        parent_event = self.get_parent_object()
        self.check_parent_access(parent_event)
-        qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
-        return qs
+        return parent_event.job.get_event_queryset().filter(parent_uuid=parent_event.uuid)


 class BaseJobEventsList(NoTruncateMixin, SubListAPIView):
@@ -3813,12 +3749,12 @@ class GroupJobEventsList(BaseJobEventsList):
 class JobJobEventsList(BaseJobEventsList):

    parent_model = models.Job
+    pagination_class = UnifiedJobEventPagination

    def get_queryset(self):
        job = self.get_parent_object()
        self.check_parent_access(job)
-        qs = job.job_events.select_related('host').order_by('start_line')
-        return qs.all()
+        return job.get_event_queryset().select_related('host').order_by('start_line')


 class AdHocCommandList(ListCreateAPIView):
@@ -3976,6 +3912,11 @@ class AdHocCommandEventList(NoTruncateMixin, ListAPIView):
    serializer_class = serializers.AdHocCommandEventSerializer
    search_fields = ('stdout',)

+    def get_queryset(self):
+        adhoc = self.get_parent_object()
+        self.check_parent_access(adhoc)
+        return adhoc.get_event_queryset()
+

 class AdHocCommandEventDetail(RetrieveAPIView):

@@ -3996,12 +3937,21 @@ class BaseAdHocCommandEventsList(NoTruncateMixin, SubListAPIView):
    relationship = 'ad_hoc_command_events'
    name = _('Ad Hoc Command Events List')
    search_fields = ('stdout',)
+    pagination_class = UnifiedJobEventPagination

+    def get_queryset(self):
+        parent = self.get_parent_object()
+        self.check_parent_access(parent)
+        return parent.get_event_queryset()
+

 class HostAdHocCommandEventsList(BaseAdHocCommandEventsList):

    parent_model = models.Host

+    def get_queryset(self):
+        return super(BaseAdHocCommandEventsList, self).get_queryset()
+

 # class GroupJobEventsList(BaseJobEventsList):
 #    parent_model = Group
@@ -38,6 +38,9 @@ from awx.api.serializers import (
 )
 from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin

+from awx.api.pagination import UnifiedJobEventPagination
+
+
 logger = logging.getLogger('awx.api.views.organization')


@@ -49,6 +52,12 @@ class InventoryUpdateEventsList(SubListAPIView):
    relationship = 'inventory_update_events'
    name = _('Inventory Update Events List')
    search_fields = ('stdout',)
+    pagination_class = UnifiedJobEventPagination
+
+    def get_queryset(self):
+        iu = self.get_parent_object()
+        self.check_parent_access(iu)
+        return iu.get_event_queryset()

    def finalize_response(self, request, response, *args, **kwargs):
        response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
@@ -52,6 +52,11 @@ class UnifiedJobDeletionMixin(object):
            else:
                # if it has been > 1 minute, events are probably lost
                logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
+
+        # Manually cascade delete events if unpartitioned job
+        if obj.has_unpartitioned_events:
+            obj.get_event_queryset().delete()
+
        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

@@ -24,12 +24,13 @@ from awx.api.generics import APIView
 from awx.conf.registry import settings_registry
 from awx.main.analytics import all_collectors
 from awx.main.ha import is_ha_environment
-from awx.main.utils import get_awx_version, get_custom_venv_choices, to_python_boolean
+from awx.main.utils import get_awx_version, get_custom_venv_choices
 from awx.main.utils.licensing import validate_entitlement_manifest
 from awx.api.versioning import reverse, drf_reverse
 from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
 from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
 from awx.main.utils import set_environ
+from awx.main.utils.licensing import get_licenser

 logger = logging.getLogger('awx.api.views.root')

@@ -106,7 +107,6 @@ class ApiVersionRootView(APIView):
        data['hosts'] = reverse('api:host_list', request=request)
        data['job_templates'] = reverse('api:job_template_list', request=request)
        data['jobs'] = reverse('api:job_list', request=request)
-        data['job_events'] = reverse('api:job_event_list', request=request)
        data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
        data['system_job_templates'] = reverse('api:system_job_template_list', request=request)
        data['system_jobs'] = reverse('api:system_job_list', request=request)
@@ -174,8 +174,6 @@ class ApiV2SubscriptionView(APIView):
            self.permission_denied(request)  # Raises PermissionDenied exception.

    def post(self, request):
-        from awx.main.utils.common import get_licenser
-
        data = request.data.copy()
        if data.get('subscriptions_password') == '$encrypted$':
            data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
@@ -223,7 +221,6 @@ class ApiV2AttachView(APIView):
        user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
        pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
        if pool_id and user and pw:
-            from awx.main.utils.common import get_licenser

            data = request.data.copy()
            try:
@@ -265,8 +262,6 @@ class ApiV2ConfigView(APIView):
    def get(self, request, format=None):
        '''Return various sitewide configuration settings'''

-        from awx.main.utils.common import get_licenser
-
        license_data = get_licenser().validate()

        if not license_data.get('valid_key', False):
@@ -302,7 +297,9 @@ class ApiV2ConfigView(APIView):
        ):
            data.update(
                dict(
-                    project_base_dir=settings.PROJECTS_ROOT, project_local_paths=Project.get_local_path_choices(), custom_virtualenvs=get_custom_venv_choices()
+                    project_base_dir=settings.PROJECTS_ROOT,
+                    project_local_paths=Project.get_local_path_choices(),
+                    custom_virtualenvs=get_custom_venv_choices(),
                )
            )
        elif JobTemplate.accessible_objects(request.user, 'admin_role').exists():
@@ -313,24 +310,12 @@ class ApiV2ConfigView(APIView):
    def post(self, request):
        if not isinstance(request.data, dict):
            return Response({"error": _("Invalid subscription data")}, status=status.HTTP_400_BAD_REQUEST)
-        if "eula_accepted" not in request.data:
-            return Response({"error": _("Missing 'eula_accepted' property")}, status=status.HTTP_400_BAD_REQUEST)
-        try:
-            eula_accepted = to_python_boolean(request.data["eula_accepted"])
-        except ValueError:
-            return Response({"error": _("'eula_accepted' value is invalid")}, status=status.HTTP_400_BAD_REQUEST)
-
-        if not eula_accepted:
-            return Response({"error": _("'eula_accepted' must be True")}, status=status.HTTP_400_BAD_REQUEST)
-        request.data.pop("eula_accepted")
        try:
            data_actual = json.dumps(request.data)
        except Exception:
            logger.info(smart_text(u"Invalid JSON submitted for license."), extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)

-        from awx.main.utils.common import get_licenser
-
        license_data = json.loads(data_actual)
        if 'license_key' in license_data:
            return Response({"error": _('Legacy license submitted. A subscription manifest is now required.')}, status=status.HTTP_400_BAD_REQUEST)
@@ -23,6 +23,7 @@ import cachetools
|
|||||||
# AWX
|
# AWX
|
||||||
from awx.main.utils import encrypt_field, decrypt_field
|
from awx.main.utils import encrypt_field, decrypt_field
|
||||||
from awx.conf import settings_registry
|
from awx.conf import settings_registry
|
||||||
|
from awx.conf.fields import PrimaryKeyRelatedField
|
||||||
from awx.conf.models import Setting
|
from awx.conf.models import Setting
|
||||||
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field
|
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field
|
||||||
|
|
||||||
@@ -420,9 +421,9 @@ class SettingsWrapper(UserSettingsHolder):
|
|||||||
raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
|
raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
data = field.to_representation(value)
|
data = None if value is None and isinstance(field, PrimaryKeyRelatedField) else field.to_representation(value)
|
||||||
setting_value = field.run_validation(data)
|
setting_value = field.run_validation(data)
|
||||||
db_value = field.to_representation(setting_value)
|
db_value = None if setting_value is None and isinstance(field, PrimaryKeyRelatedField) else field.to_representation(setting_value)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception('Unable to assign value "%r" to setting "%s".', value, name, exc_info=True)
|
logger.exception('Unable to assign value "%r" to setting "%s".', value, name, exc_info=True)
|
||||||
raise e
|
raise e
|
||||||
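The guard added above exists because DRF's related fields implement to_representation() as a lookup of value.pk, which fails when the stored setting is empty. A minimal sketch of the pattern, with rest_framework's base class standing in for awx.conf.fields.PrimaryKeyRelatedField and placeholder names:

```python
# Minimal sketch of the None-guard used in SettingsWrapper above.
# Assumptions: DRF installed; "field" and "value" are placeholders, and DRF's
# PrimaryKeyRelatedField stands in for the awx.conf.fields subclass.
from rest_framework.relations import PrimaryKeyRelatedField

def to_setting_representation(field, value):
    # Related fields resolve to value.pk, which raises on None, so None is
    # passed straight through instead of being handed to to_representation().
    if value is None and isinstance(field, PrimaryKeyRelatedField):
        return None
    return field.to_representation(value)
```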
|
|||||||
@@ -3,9 +3,9 @@ import logging
|
|||||||
|
|
||||||
# Django
|
# Django
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
|
from django.core.cache import cache
|
||||||
from django.core.signals import setting_changed
|
from django.core.signals import setting_changed
|
||||||
from django.db.models.signals import post_save, pre_delete, post_delete
|
from django.db.models.signals import post_save, pre_delete, post_delete
|
||||||
from django.core.cache import cache
|
|
||||||
from django.dispatch import receiver
|
from django.dispatch import receiver
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
@@ -25,7 +25,7 @@ def handle_setting_change(key, for_delete=False):
|
|||||||
# Note: Doesn't handle multiple levels of dependencies!
|
# Note: Doesn't handle multiple levels of dependencies!
|
||||||
setting_keys.append(dependent_key)
|
setting_keys.append(dependent_key)
|
||||||
# NOTE: This block is probably duplicated.
|
# NOTE: This block is probably duplicated.
|
||||||
cache_keys = set([Setting.get_cache_key(k) for k in setting_keys])
|
cache_keys = {Setting.get_cache_key(k) for k in setting_keys}
|
||||||
cache.delete_many(cache_keys)
|
cache.delete_many(cache_keys)
|
||||||
|
|
||||||
# Send setting_changed signal with new value for each setting.
|
# Send setting_changed signal with new value for each setting.
|
||||||
@@ -58,3 +58,18 @@ def on_post_delete_setting(sender, **kwargs):
|
|||||||
key = getattr(instance, '_saved_key_', None)
|
key = getattr(instance, '_saved_key_', None)
|
||||||
if key:
|
if key:
|
||||||
handle_setting_change(key, True)
|
handle_setting_change(key, True)
|
||||||
|
|
||||||
|
|
||||||
|
@receiver(setting_changed)
|
||||||
|
def disable_local_auth(**kwargs):
|
||||||
|
if (kwargs['setting'], kwargs['value']) == ('DISABLE_LOCAL_AUTH', True):
|
||||||
|
from django.contrib.auth.models import User
|
||||||
|
from oauth2_provider.models import RefreshToken
|
||||||
|
from awx.main.models.oauth import OAuth2AccessToken
|
||||||
|
from awx.main.management.commands.revoke_oauth2_tokens import revoke_tokens
|
||||||
|
|
||||||
|
logger.warning("Triggering token invalidation for local users.")
|
||||||
|
|
||||||
|
qs = User.objects.filter(profile__ldap_dn='', enterprise_auth__isnull=True, social_auth__isnull=True)
|
||||||
|
revoke_tokens(RefreshToken.objects.filter(revoked=None, user__in=qs))
|
||||||
|
revoke_tokens(OAuth2AccessToken.objects.filter(user__in=qs))
|
||||||
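For context, the receiver above is driven by Django's setting_changed signal, which handle_setting_change() re-sends with the setting name and new value whenever a setting is written. An illustrative (not AWX) send that would satisfy the check in the handler:

```python
# Illustration only: how the signal that triggers disable_local_auth() is delivered.
from django.core.signals import setting_changed

setting_changed.send(sender=None, setting='DISABLE_LOCAL_AUTH', value=True, enter=False)
```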
|
|||||||
@@ -45,6 +45,7 @@ from awx.main.models import (
|
|||||||
InventoryUpdateEvent,
|
InventoryUpdateEvent,
|
||||||
Job,
|
Job,
|
||||||
JobEvent,
|
JobEvent,
|
||||||
|
UnpartitionedJobEvent,
|
||||||
JobHostSummary,
|
JobHostSummary,
|
||||||
JobLaunchConfig,
|
JobLaunchConfig,
|
||||||
JobTemplate,
|
JobTemplate,
|
||||||
@@ -464,7 +465,7 @@ class BaseAccess(object):
|
|||||||
if display_method == 'schedule':
|
if display_method == 'schedule':
|
||||||
user_capabilities['schedule'] = user_capabilities['start']
|
user_capabilities['schedule'] = user_capabilities['start']
|
||||||
continue
|
continue
|
||||||
elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource)):
|
elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CredentialInputSource, ExecutionEnvironment)):
|
||||||
user_capabilities['delete'] = user_capabilities['edit']
|
user_capabilities['delete'] = user_capabilities['edit']
|
||||||
continue
|
continue
|
||||||
elif display_method == 'copy' and isinstance(obj, (Group, Host)):
|
elif display_method == 'copy' and isinstance(obj, (Group, Host)):
|
||||||
@@ -866,13 +867,11 @@ class InventoryAccess(BaseAccess):
|
|||||||
# If no data is specified, just checking for generic add permission?
|
# If no data is specified, just checking for generic add permission?
|
||||||
if not data:
|
if not data:
|
||||||
return Organization.accessible_objects(self.user, 'inventory_admin_role').exists()
|
return Organization.accessible_objects(self.user, 'inventory_admin_role').exists()
|
||||||
return self.check_related('organization', Organization, data, role_field='inventory_admin_role') and self.check_related(
|
return self.check_related('organization', Organization, data, role_field='inventory_admin_role')
|
||||||
'insights_credential', Credential, data, role_field='use_role'
|
|
||||||
)
|
|
||||||
|
|
||||||
@check_superuser
|
@check_superuser
|
||||||
def can_change(self, obj, data):
|
def can_change(self, obj, data):
|
||||||
return self.can_admin(obj, data) and self.check_related('insights_credential', Credential, data, obj=obj, role_field='use_role')
|
return self.can_admin(obj, data)
|
||||||
|
|
||||||
@check_superuser
|
@check_superuser
|
||||||
def can_admin(self, obj, data):
|
def can_admin(self, obj, data):
|
||||||
@@ -1037,7 +1036,7 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
|
|||||||
|
|
||||||
def can_add(self, data):
|
def can_add(self, data):
|
||||||
if not data or 'inventory' not in data:
|
if not data or 'inventory' not in data:
|
||||||
return Organization.accessible_objects(self.user, 'admin_role').exists()
|
return Inventory.accessible_objects(self.user, 'admin_role').exists()
|
||||||
|
|
||||||
if not self.check_related('source_project', Project, data, role_field='use_role'):
|
if not self.check_related('source_project', Project, data, role_field='use_role'):
|
||||||
return False
|
return False
|
||||||
@@ -1120,7 +1119,7 @@ class CredentialTypeAccess(BaseAccess):
|
|||||||
I can create when:
|
I can create when:
|
||||||
- I'm a superuser:
|
- I'm a superuser:
|
||||||
I can change when:
|
I can change when:
|
||||||
- I'm a superuser and the type is not "managed by Tower"
|
- I'm a superuser and the type is not "managed"
|
||||||
"""
|
"""
|
||||||
|
|
||||||
model = CredentialType
|
model = CredentialType
|
||||||
@@ -1206,7 +1205,7 @@ class CredentialAccess(BaseAccess):
|
|||||||
def get_user_capabilities(self, obj, **kwargs):
|
def get_user_capabilities(self, obj, **kwargs):
|
||||||
user_capabilities = super(CredentialAccess, self).get_user_capabilities(obj, **kwargs)
|
user_capabilities = super(CredentialAccess, self).get_user_capabilities(obj, **kwargs)
|
||||||
user_capabilities['use'] = self.can_use(obj)
|
user_capabilities['use'] = self.can_use(obj)
|
||||||
if getattr(obj, 'managed_by_tower', False) is True:
|
if getattr(obj, 'managed', False) is True:
|
||||||
user_capabilities['edit'] = user_capabilities['delete'] = False
|
user_capabilities['edit'] = user_capabilities['delete'] = False
|
||||||
return user_capabilities
|
return user_capabilities
|
||||||
|
|
||||||
@@ -1356,11 +1355,8 @@ class ExecutionEnvironmentAccess(BaseAccess):
|
|||||||
return Organization.accessible_objects(self.user, 'execution_environment_admin_role').exists()
|
return Organization.accessible_objects(self.user, 'execution_environment_admin_role').exists()
|
||||||
return self.check_related('organization', Organization, data, mandatory=True, role_field='execution_environment_admin_role')
|
return self.check_related('organization', Organization, data, mandatory=True, role_field='execution_environment_admin_role')
|
||||||
|
|
||||||
|
@check_superuser
|
||||||
def can_change(self, obj, data):
|
def can_change(self, obj, data):
|
||||||
if obj.managed_by_tower:
|
|
||||||
raise PermissionDenied
|
|
||||||
if self.user.is_superuser:
|
|
||||||
return True
|
|
||||||
if obj and obj.organization_id is None:
|
if obj and obj.organization_id is None:
|
||||||
raise PermissionDenied
|
raise PermissionDenied
|
||||||
if self.user not in obj.organization.execution_environment_admin_role:
|
if self.user not in obj.organization.execution_environment_admin_role:
|
||||||
@@ -1372,6 +1368,8 @@ class ExecutionEnvironmentAccess(BaseAccess):
|
|||||||
return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')
|
return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')
|
||||||
|
|
||||||
def can_delete(self, obj):
|
def can_delete(self, obj):
|
||||||
|
if obj.managed:
|
||||||
|
raise PermissionDenied
|
||||||
return self.can_change(obj, None)
|
return self.can_change(obj, None)
|
||||||
|
|
||||||
|
|
||||||
@@ -2355,6 +2353,11 @@ class JobEventAccess(BaseAccess):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
class UnpartitionedJobEventAccess(JobEventAccess):
|
||||||
|
|
||||||
|
model = UnpartitionedJobEvent
|
||||||
|
|
||||||
|
|
||||||
class ProjectUpdateEventAccess(BaseAccess):
|
class ProjectUpdateEventAccess(BaseAccess):
|
||||||
"""
|
"""
|
||||||
I can see project update event records whenever I can access the project update
|
I can see project update event records whenever I can access the project update
|
||||||
@@ -2898,3 +2901,4 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
|
|||||||
|
|
||||||
for cls in BaseAccess.__subclasses__():
|
for cls in BaseAccess.__subclasses__():
|
||||||
access_registry[cls.model] = cls
|
access_registry[cls.model] = cls
|
||||||
|
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import platform
|
|||||||
import distro
|
import distro
|
||||||
|
|
||||||
from django.db import connection
|
from django.db import connection
|
||||||
from django.db.models import Count, Max, Min
|
from django.db.models import Count
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.contrib.sessions.models import Session
|
from django.contrib.sessions.models import Session
|
||||||
from django.utils.timezone import now, timedelta
|
from django.utils.timezone import now, timedelta
|
||||||
@@ -15,7 +15,7 @@ from django.utils.translation import ugettext_lazy as _
|
|||||||
from psycopg2.errors import UntranslatableCharacter
|
from psycopg2.errors import UntranslatableCharacter
|
||||||
|
|
||||||
from awx.conf.license import get_license
|
from awx.conf.license import get_license
|
||||||
from awx.main.utils import get_awx_version, get_custom_venv_choices, camelcase_to_underscore, datetime_hook
|
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
|
||||||
from awx.main import models
|
from awx.main import models
|
||||||
from awx.main.analytics import register
|
from awx.main.analytics import register
|
||||||
|
|
||||||
@@ -58,7 +58,10 @@ def four_hour_slicing(key, since, until, last_gather):
|
|||||||
horizon = until - timedelta(weeks=4)
|
horizon = until - timedelta(weeks=4)
|
||||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||||
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
|
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
|
||||||
last_entry = max(last_entries.get(key) or last_gather, horizon)
|
try:
|
||||||
|
last_entry = max(last_entries.get(key) or last_gather, horizon)
|
||||||
|
except TypeError: # last_entries has a stale non-datetime entry for this collector
|
||||||
|
last_entry = max(last_gather, horizon)
|
||||||
|
|
||||||
start, end = last_entry, None
|
start, end = last_entry, None
|
||||||
while start < until:
|
while start < until:
|
||||||
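The slicing generator above walks from the computed last_entry up to until in bounded windows so expensive collectors can be gathered incrementally. A simplified sketch of that shape (assumed behavior, not the exact collector code):

```python
# Simplified sketch of a four-hour slicing generator (assumption: windows are
# capped at 4 hours, matching the collector's name; the details above are elided).
from datetime import datetime, timedelta, timezone

def four_hour_windows(last_entry, until):
    start = last_entry
    while start < until:
        end = min(start + timedelta(hours=4), until)
        yield start, end
        start = end

list(four_hour_windows(datetime(2021, 3, 18, 0, tzinfo=timezone.utc),
                       datetime(2021, 3, 18, 9, tzinfo=timezone.utc)))
# three windows: 00:00-04:00, 04:00-08:00, 08:00-09:00
```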
@@ -67,7 +70,7 @@ def four_hour_slicing(key, since, until, last_gather):
|
|||||||
start = end
|
start = end
|
||||||
|
|
||||||
|
|
||||||
def events_slicing(key, since, until, last_gather):
|
def _identify_lower(key, since, until, last_gather):
|
||||||
from awx.conf.models import Setting
|
from awx.conf.models import Setting
|
||||||
|
|
||||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||||
@@ -77,16 +80,8 @@ def events_slicing(key, since, until, last_gather):
|
|||||||
lower = since or last_gather
|
lower = since or last_gather
|
||||||
if not since and last_entries.get(key):
|
if not since and last_entries.get(key):
|
||||||
lower = horizon
|
lower = horizon
|
||||||
pk_values = models.JobEvent.objects.filter(created__gte=lower, created__lte=until).aggregate(Min('pk'), Max('pk'))
|
|
||||||
|
|
||||||
previous_pk = pk_values['pk__min'] - 1 if pk_values['pk__min'] is not None else 0
|
return lower, last_entries
|
||||||
if not since and last_entries.get(key):
|
|
||||||
previous_pk = max(last_entries[key], previous_pk)
|
|
||||||
final_pk = pk_values['pk__max'] or 0
|
|
||||||
|
|
||||||
step = 100000
|
|
||||||
for start in range(previous_pk, final_pk + 1, step):
|
|
||||||
yield (start, min(start + step, final_pk))
|
|
||||||
|
|
||||||
|
|
||||||
@register('config', '1.3', description=_('General platform configuration.'))
|
@register('config', '1.3', description=_('General platform configuration.'))
|
||||||
@@ -120,7 +115,7 @@ def config(since, **kwargs):
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@register('counts', '1.0', description=_('Counts of objects such as organizations, inventories, and projects'))
|
@register('counts', '1.1', description=_('Counts of objects such as organizations, inventories, and projects'))
|
||||||
def counts(since, **kwargs):
|
def counts(since, **kwargs):
|
||||||
counts = {}
|
counts = {}
|
||||||
for cls in (
|
for cls in (
|
||||||
@@ -138,9 +133,6 @@ def counts(since, **kwargs):
|
|||||||
):
|
):
|
||||||
counts[camelcase_to_underscore(cls.__name__)] = cls.objects.count()
|
counts[camelcase_to_underscore(cls.__name__)] = cls.objects.count()
|
||||||
|
|
||||||
venvs = get_custom_venv_choices()
|
|
||||||
counts['custom_virtualenvs'] = len([v for v in venvs if os.path.basename(v.rstrip('/')) != 'ansible'])
|
|
||||||
|
|
||||||
inv_counts = dict(models.Inventory.objects.order_by().values_list('kind').annotate(Count('kind')))
|
inv_counts = dict(models.Inventory.objects.order_by().values_list('kind').annotate(Count('kind')))
|
||||||
inv_counts['normal'] = inv_counts.get('', 0)
|
inv_counts['normal'] = inv_counts.get('', 0)
|
||||||
inv_counts.pop('', None)
|
inv_counts.pop('', None)
|
||||||
@@ -183,12 +175,12 @@ def org_counts(since, **kwargs):
|
|||||||
def cred_type_counts(since, **kwargs):
|
def cred_type_counts(since, **kwargs):
|
||||||
counts = {}
|
counts = {}
|
||||||
for cred_type in models.CredentialType.objects.annotate(num_credentials=Count('credentials', distinct=True)).values(
|
for cred_type in models.CredentialType.objects.annotate(num_credentials=Count('credentials', distinct=True)).values(
|
||||||
'name', 'id', 'managed_by_tower', 'num_credentials'
|
'name', 'id', 'managed', 'num_credentials'
|
||||||
):
|
):
|
||||||
counts[cred_type['id']] = {
|
counts[cred_type['id']] = {
|
||||||
'name': cred_type['name'],
|
'name': cred_type['name'],
|
||||||
'credential_count': cred_type['num_credentials'],
|
'credential_count': cred_type['num_credentials'],
|
||||||
'managed_by_tower': cred_type['managed_by_tower'],
|
'managed': cred_type['managed'],
|
||||||
}
|
}
|
||||||
return counts
|
return counts
|
||||||
|
|
||||||
@@ -335,39 +327,49 @@ def _copy_table(table, query, path):
|
|||||||
return file.file_list()
|
return file.file_list()
|
||||||
|
|
||||||
|
|
||||||
@register('events_table', '1.2', format='csv', description=_('Automation task records'), expensive=events_slicing)
|
def _events_table(since, full_path, until, tbl, where_column, project_job_created=False, **kwargs):
|
||||||
def events_table(since, full_path, until, **kwargs):
|
|
||||||
def query(event_data):
|
def query(event_data):
|
||||||
return f'''COPY (SELECT main_jobevent.id,
|
query = f'''COPY (SELECT {tbl}.id,
|
||||||
main_jobevent.created,
|
{tbl}.created,
|
||||||
main_jobevent.modified,
|
{tbl}.modified,
|
||||||
main_jobevent.uuid,
|
{tbl + '.job_created' if project_job_created else 'NULL'} as job_created,
|
||||||
main_jobevent.parent_uuid,
|
{tbl}.uuid,
|
||||||
main_jobevent.event,
|
{tbl}.parent_uuid,
|
||||||
{event_data}->'task_action' AS task_action,
|
{tbl}.event,
|
||||||
(CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
|
task_action,
|
||||||
main_jobevent.failed,
|
(CASE WHEN event = 'playbook_on_stats' THEN event_data END) as playbook_on_stats,
|
||||||
main_jobevent.changed,
|
{tbl}.failed,
|
||||||
main_jobevent.playbook,
|
{tbl}.changed,
|
||||||
main_jobevent.play,
|
{tbl}.playbook,
|
||||||
main_jobevent.task,
|
{tbl}.play,
|
||||||
main_jobevent.role,
|
{tbl}.task,
|
||||||
main_jobevent.job_id,
|
{tbl}.role,
|
||||||
main_jobevent.host_id,
|
{tbl}.job_id,
|
||||||
main_jobevent.host_name,
|
{tbl}.host_id,
|
||||||
CAST({event_data}->>'start' AS TIMESTAMP WITH TIME ZONE) AS start,
|
{tbl}.host_name,
|
||||||
CAST({event_data}->>'end' AS TIMESTAMP WITH TIME ZONE) AS end,
|
CAST(x.start AS TIMESTAMP WITH TIME ZONE) AS start,
|
||||||
{event_data}->'duration' AS duration,
|
CAST(x.end AS TIMESTAMP WITH TIME ZONE) AS end,
|
||||||
{event_data}->'res'->'warnings' AS warnings,
|
x.duration AS duration,
|
||||||
{event_data}->'res'->'deprecations' AS deprecations
|
x.res->'warnings' AS warnings,
|
||||||
FROM main_jobevent
|
x.res->'deprecations' AS deprecations
|
||||||
WHERE (main_jobevent.id > {since} AND main_jobevent.id <= {until})
|
FROM {tbl}, json_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "start" text, "end" text)
|
||||||
ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''
|
WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
|
||||||
|
return query
|
||||||
|
|
||||||
try:
|
try:
|
||||||
return _copy_table(table='events', query=query("main_jobevent.event_data::json"), path=full_path)
|
return _copy_table(table='events', query=query(f"{tbl}.event_data::json"), path=full_path)
|
||||||
except UntranslatableCharacter:
|
except UntranslatableCharacter:
|
||||||
return _copy_table(table='events', query=query("replace(main_jobevent.event_data::text, '\\u0000', '')::json"), path=full_path)
|
return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::json"), path=full_path)
|
||||||
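The rewritten query leans on PostgreSQL's json_to_record() to project event_data keys into typed columns in one place instead of repeated ->/->> lookups. A standalone illustration of that SQL pattern (the literal JSON, column list, and connection string are placeholders, not the AWX schema):

```python
# Standalone illustration of the json_to_record() pattern (placeholder DSN and data).
import psycopg2

SQL = """
SELECT x.task_action,
       x.duration,
       x.res->'warnings' AS warnings
FROM json_to_record('{"task_action": "setup", "duration": "0.5", "res": {"warnings": []}}'::json)
     AS x("res" json, "duration" text, "task_action" text)
"""

with psycopg2.connect("dbname=example") as conn, conn.cursor() as cur:
    cur.execute(SQL)
    print(cur.fetchone())  # keys missing from the JSON would simply come back as NULL
```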
|
|
||||||
|
|
||||||
|
@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||||
|
def events_table_unpartitioned(since, full_path, until, **kwargs):
|
||||||
|
return _events_table(since, full_path, until, '_unpartitioned_main_jobevent', 'created', **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
@register('events_table', '1.3', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
|
||||||
|
def events_table_partitioned_modified(since, full_path, until, **kwargs):
|
||||||
|
return _events_table(since, full_path, until, 'main_jobevent', 'modified', project_job_created=True, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
@register('unified_jobs_table', '1.2', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
@register('unified_jobs_table', '1.2', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
|
||||||
|
|||||||
@@ -177,7 +177,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
|||||||
|
|
||||||
if collection_type != 'dry-run':
|
if collection_type != 'dry-run':
|
||||||
if not settings.INSIGHTS_TRACKING_STATE:
|
if not settings.INSIGHTS_TRACKING_STATE:
|
||||||
logger.log(log_level, "Automation Analytics not enabled. Use --dry-run to gather locally without sending.")
|
logger.log(log_level, "Insights for Ansible Automation Platform not enabled. Use --dry-run to gather locally without sending.")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
if not (settings.AUTOMATION_ANALYTICS_URL and settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD):
|
if not (settings.AUTOMATION_ANALYTICS_URL and settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD):
|
||||||
@@ -270,7 +270,8 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
|||||||
if not files:
|
if not files:
|
||||||
if collection_type != 'dry-run':
|
if collection_type != 'dry-run':
|
||||||
with disable_activity_stream():
|
with disable_activity_stream():
|
||||||
last_entries[key] = max(last_entries[key], end) if last_entries.get(key) else end
|
entry = last_entries.get(key)
|
||||||
|
last_entries[key] = max(entry, end) if entry and type(entry) == type(end) else end
|
||||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
@@ -293,7 +294,8 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
|||||||
|
|
||||||
if slice_succeeded and collection_type != 'dry-run':
|
if slice_succeeded and collection_type != 'dry-run':
|
||||||
with disable_activity_stream():
|
with disable_activity_stream():
|
||||||
last_entries[key] = max(last_entries[key], end) if last_entries.get(key) else end
|
entry = last_entries.get(key)
|
||||||
|
last_entries[key] = max(entry, end) if entry and type(entry) == type(end) else end
|
||||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||||
except Exception:
|
except Exception:
|
||||||
succeeded = False
|
succeeded = False
|
||||||
@@ -330,10 +332,10 @@ def ship(path):
|
|||||||
Ship gathered metrics to the Insights API
|
Ship gathered metrics to the Insights API
|
||||||
"""
|
"""
|
||||||
if not path:
|
if not path:
|
||||||
logger.error('Automation Analytics TAR not found')
|
logger.error('Insights for Ansible Automation Platform TAR not found')
|
||||||
return False
|
return False
|
||||||
if not os.path.exists(path):
|
if not os.path.exists(path):
|
||||||
logger.error('Automation Analytics TAR {} not found'.format(path))
|
logger.error('Insights for Ansible Automation Platform TAR {} not found'.format(path))
|
||||||
return False
|
return False
|
||||||
if "Error:" in str(path):
|
if "Error:" in str(path):
|
||||||
return False
|
return False
|
||||||
|
|||||||
@@ -39,7 +39,6 @@ def metrics():
|
|||||||
],
|
],
|
||||||
registry=REGISTRY,
|
registry=REGISTRY,
|
||||||
)
|
)
|
||||||
CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
|
|
||||||
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
|
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the system', registry=REGISTRY)
|
||||||
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the system', registry=REGISTRY)
|
PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the system', registry=REGISTRY)
|
||||||
STATUS = Gauge(
|
STATUS = Gauge(
|
||||||
@@ -159,7 +158,6 @@ def metrics():
|
|||||||
HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])
|
HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])
|
||||||
|
|
||||||
SCHEDULE_COUNT.set(current_counts['schedule'])
|
SCHEDULE_COUNT.set(current_counts['schedule'])
|
||||||
CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])
|
|
||||||
|
|
||||||
USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
|
USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
|
||||||
USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
|
USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
|
||||||
|
|||||||
awx/main/backends.py (new file, 14 lines)
@@ -0,0 +1,14 @@
|
|||||||
|
import logging
|
||||||
|
|
||||||
|
from django.conf import settings
|
||||||
|
from django.contrib.auth.backends import ModelBackend
|
||||||
|
|
||||||
|
logger = logging.getLogger('awx.main.backends')
|
||||||
|
|
||||||
|
|
||||||
|
class AWXModelBackend(ModelBackend):
|
||||||
|
def authenticate(self, request, **kwargs):
|
||||||
|
if settings.DISABLE_LOCAL_AUTH:
|
||||||
|
logger.warning(f"User '{kwargs['username']}' attempted login through the disabled local authentication system.")
|
||||||
|
return
|
||||||
|
return super().authenticate(request, **kwargs)
|
||||||
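The new backend short-circuits password authentication entirely when DISABLE_LOCAL_AUTH is set. Wiring it up would follow the usual Django pattern; the settings snippet below is illustrative, since the AWX settings module is not part of this hunk:

```python
# Illustrative settings wiring (assumption: standard Django AUTHENTICATION_BACKENDS).
AUTHENTICATION_BACKENDS = [
    'awx.main.backends.AWXModelBackend',  # returns None (no login) when DISABLE_LOCAL_AUTH is True
]
```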
@@ -36,7 +36,7 @@ register(
|
|||||||
'ORG_ADMINS_CAN_SEE_ALL_USERS',
|
'ORG_ADMINS_CAN_SEE_ALL_USERS',
|
||||||
field_class=fields.BooleanField,
|
field_class=fields.BooleanField,
|
||||||
label=_('All Users Visible to Organization Admins'),
|
label=_('All Users Visible to Organization Admins'),
|
||||||
help_text=_('Controls whether any Organization Admin can view all users and teams, ' 'even those not associated with their Organization.'),
|
help_text=_('Controls whether any Organization Admin can view all users and teams, even those not associated with their Organization.'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -59,7 +59,7 @@ register(
|
|||||||
schemes=('http', 'https'),
|
schemes=('http', 'https'),
|
||||||
allow_plain_hostname=True, # Allow hostname only without TLD.
|
allow_plain_hostname=True, # Allow hostname only without TLD.
|
||||||
label=_('Base URL of the service'),
|
label=_('Base URL of the service'),
|
||||||
help_text=_('This setting is used by services like notifications to render ' 'a valid url to the service.'),
|
help_text=_('This setting is used by services like notifications to render a valid url to the service.'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -94,13 +94,12 @@ register(
|
|||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
register(
|
register(
|
||||||
'LICENSE',
|
'LICENSE',
|
||||||
field_class=fields.DictField,
|
field_class=fields.DictField,
|
||||||
default=lambda: {},
|
default=lambda: {},
|
||||||
label=_('License'),
|
label=_('License'),
|
||||||
help_text=_('The license controls which features and functionality are ' 'enabled. Use /api/v2/config/ to update or change ' 'the license.'),
|
help_text=_('The license controls which features and functionality are enabled. Use /api/v2/config/ to update or change the license.'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -113,7 +112,7 @@ register(
|
|||||||
encrypted=False,
|
encrypted=False,
|
||||||
read_only=False,
|
read_only=False,
|
||||||
label=_('Red Hat customer username'),
|
label=_('Red Hat customer username'),
|
||||||
help_text=_('This username is used to send data to Automation Analytics'),
|
help_text=_('This username is used to send data to Insights for Ansible Automation Platform'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -126,7 +125,7 @@ register(
|
|||||||
encrypted=True,
|
encrypted=True,
|
||||||
read_only=False,
|
read_only=False,
|
||||||
label=_('Red Hat customer password'),
|
label=_('Red Hat customer password'),
|
||||||
help_text=_('This password is used to send data to Automation Analytics'),
|
help_text=_('This password is used to send data to Insights for Ansible Automation Platform'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -163,8 +162,8 @@ register(
|
|||||||
default='https://example.com',
|
default='https://example.com',
|
||||||
schemes=('http', 'https'),
|
schemes=('http', 'https'),
|
||||||
allow_plain_hostname=True, # Allow hostname only without TLD.
|
allow_plain_hostname=True, # Allow hostname only without TLD.
|
||||||
label=_('Automation Analytics upload URL'),
|
label=_('Insights for Ansible Automation Platform upload URL'),
|
||||||
help_text=_('This setting is used to configure data collection for the Automation Analytics dashboard'),
|
help_text=_('This setting is used to configure the upload URL for data collection for Red Hat Insights.'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -178,6 +177,24 @@ register(
|
|||||||
read_only=True,
|
read_only=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
register(
|
||||||
|
'DEFAULT_CONTROL_PLANE_QUEUE_NAME',
|
||||||
|
field_class=fields.CharField,
|
||||||
|
label=_('The instance group where control plane tasks run'),
|
||||||
|
category=_('System'),
|
||||||
|
category_slug='system',
|
||||||
|
read_only=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
register(
|
||||||
|
'DEFAULT_EXECUTION_QUEUE_NAME',
|
||||||
|
field_class=fields.CharField,
|
||||||
|
label=_('The instance group where user jobs run (currently only on non-VM installs)'),
|
||||||
|
category=_('System'),
|
||||||
|
category_slug='system',
|
||||||
|
read_only=True,
|
||||||
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
'DEFAULT_EXECUTION_ENVIRONMENT',
|
'DEFAULT_EXECUTION_ENVIRONMENT',
|
||||||
field_class=fields.PrimaryKeyRelatedField,
|
field_class=fields.PrimaryKeyRelatedField,
|
||||||
@@ -194,7 +211,7 @@ register(
|
|||||||
'CUSTOM_VENV_PATHS',
|
'CUSTOM_VENV_PATHS',
|
||||||
field_class=fields.StringListPathField,
|
field_class=fields.StringListPathField,
|
||||||
label=_('Custom virtual environment paths'),
|
label=_('Custom virtual environment paths'),
|
||||||
help_text=_('Paths where Tower will look for custom virtual environments ' '(in addition to /var/lib/awx/venv/). Enter one path per line.'),
|
help_text=_('Paths where Tower will look for custom virtual environments (in addition to /var/lib/awx/venv/). Enter one path per line.'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
default=[],
|
default=[],
|
||||||
@@ -265,7 +282,7 @@ register(
|
|||||||
'INSIGHTS_TRACKING_STATE',
|
'INSIGHTS_TRACKING_STATE',
|
||||||
field_class=fields.BooleanField,
|
field_class=fields.BooleanField,
|
||||||
default=False,
|
default=False,
|
||||||
label=_('Gather data for Automation Analytics'),
|
label=_('Gather data for Insights for Ansible Automation Platform'),
|
||||||
help_text=_('Enables the service to gather data on automation and send it to Red Hat Insights.'),
|
help_text=_('Enables the service to gather data on automation and send it to Red Hat Insights.'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
@@ -318,7 +335,7 @@ register(
|
|||||||
field_class=fields.BooleanField,
|
field_class=fields.BooleanField,
|
||||||
default=False,
|
default=False,
|
||||||
label=_('Ignore Ansible Galaxy SSL Certificate Verification'),
|
label=_('Ignore Ansible Galaxy SSL Certificate Verification'),
|
||||||
help_text=_('If set to true, certificate validation will not be done when ' 'installing content from any Galaxy server.'),
|
help_text=_('If set to true, certificate validation will not be done when installing content from any Galaxy server.'),
|
||||||
category=_('Jobs'),
|
category=_('Jobs'),
|
||||||
category_slug='jobs',
|
category_slug='jobs',
|
||||||
)
|
)
|
||||||
@@ -345,6 +362,17 @@ register(
|
|||||||
category_slug='jobs',
|
category_slug='jobs',
|
||||||
)
|
)
|
||||||
|
|
||||||
|
register(
|
||||||
|
'MAX_WEBSOCKET_EVENT_RATE',
|
||||||
|
field_class=fields.IntegerField,
|
||||||
|
min_value=0,
|
||||||
|
default=30,
|
||||||
|
label=_('Job Event Maximum Websocket Messages Per Second'),
|
||||||
|
help_text=_('Maximum number of messages to update the UI live job output with per second. Value of 0 means no limit.'),
|
||||||
|
category=_('Jobs'),
|
||||||
|
category_slug='jobs',
|
||||||
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
'SCHEDULE_MAX_JOBS',
|
'SCHEDULE_MAX_JOBS',
|
||||||
field_class=fields.IntegerField,
|
field_class=fields.IntegerField,
|
||||||
@@ -433,7 +461,7 @@ register(
|
|||||||
allow_null=False,
|
allow_null=False,
|
||||||
default=200,
|
default=200,
|
||||||
label=_('Maximum number of forks per job'),
|
label=_('Maximum number of forks per job'),
|
||||||
help_text=_('Saving a Job Template with more than this number of forks will result in an error. ' 'When set to 0, no limit is applied.'),
|
help_text=_('Saving a Job Template with more than this number of forks will result in an error. When set to 0, no limit is applied.'),
|
||||||
category=_('Jobs'),
|
category=_('Jobs'),
|
||||||
category_slug='jobs',
|
category_slug='jobs',
|
||||||
)
|
)
|
||||||
@@ -454,7 +482,7 @@ register(
|
|||||||
allow_null=True,
|
allow_null=True,
|
||||||
default=None,
|
default=None,
|
||||||
label=_('Logging Aggregator Port'),
|
label=_('Logging Aggregator Port'),
|
||||||
help_text=_('Port on Logging Aggregator to send logs to (if required and not' ' provided in Logging Aggregator).'),
|
help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'),
|
||||||
category=_('Logging'),
|
category=_('Logging'),
|
||||||
category_slug='logging',
|
category_slug='logging',
|
||||||
required=False,
|
required=False,
|
||||||
@@ -561,7 +589,7 @@ register(
|
|||||||
field_class=fields.IntegerField,
|
field_class=fields.IntegerField,
|
||||||
default=5,
|
default=5,
|
||||||
label=_('TCP Connection Timeout'),
|
label=_('TCP Connection Timeout'),
|
||||||
help_text=_('Number of seconds for a TCP connection to external log ' 'aggregator to timeout. Applies to HTTPS and TCP log ' 'aggregator protocols.'),
|
help_text=_('Number of seconds for a TCP connection to external log aggregator to timeout. Applies to HTTPS and TCP log aggregator protocols.'),
|
||||||
category=_('Logging'),
|
category=_('Logging'),
|
||||||
category_slug='logging',
|
category_slug='logging',
|
||||||
unit=_('seconds'),
|
unit=_('seconds'),
|
||||||
@@ -627,7 +655,7 @@ register(
|
|||||||
field_class=fields.BooleanField,
|
field_class=fields.BooleanField,
|
||||||
default=False,
|
default=False,
|
||||||
label=_('Enable rsyslogd debugging'),
|
label=_('Enable rsyslogd debugging'),
|
||||||
help_text=_('Enabled high verbosity debugging for rsyslogd. ' 'Useful for debugging connection issues for external log aggregation.'),
|
help_text=_('Enabled high verbosity debugging for rsyslogd. Useful for debugging connection issues for external log aggregation.'),
|
||||||
category=_('Logging'),
|
category=_('Logging'),
|
||||||
category_slug='logging',
|
category_slug='logging',
|
||||||
)
|
)
|
||||||
@@ -636,7 +664,7 @@ register(
|
|||||||
register(
|
register(
|
||||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||||
field_class=fields.DateTimeField,
|
field_class=fields.DateTimeField,
|
||||||
label=_('Last gather date for Automation Analytics.'),
|
label=_('Last gather date for Insights for Ansible Automation Platform.'),
|
||||||
allow_null=True,
|
allow_null=True,
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
@@ -644,7 +672,7 @@ register(
|
|||||||
register(
|
register(
|
||||||
'AUTOMATION_ANALYTICS_LAST_ENTRIES',
|
'AUTOMATION_ANALYTICS_LAST_ENTRIES',
|
||||||
field_class=fields.CharField,
|
field_class=fields.CharField,
|
||||||
label=_('Last gathered entries for expensive Automation Analytics collectors.'),
|
label=_('Last gathered entries for expensive collectors for Insights for Ansible Automation Platform.'),
|
||||||
default='',
|
default='',
|
||||||
allow_blank=True,
|
allow_blank=True,
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
@@ -655,7 +683,7 @@ register(
|
|||||||
register(
|
register(
|
||||||
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
||||||
field_class=fields.IntegerField,
|
field_class=fields.IntegerField,
|
||||||
label=_('Automation Analytics Gather Interval'),
|
label=_('Insights for Ansible Automation Platform Gather Interval'),
|
||||||
help_text=_('Interval (in seconds) between data gathering.'),
|
help_text=_('Interval (in seconds) between data gathering.'),
|
||||||
default=14400, # every 4 hours
|
default=14400, # every 4 hours
|
||||||
min_value=1800, # every 30 minutes
|
min_value=1800, # every 30 minutes
|
||||||
@@ -664,6 +692,15 @@ register(
|
|||||||
unit=_('seconds'),
|
unit=_('seconds'),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
register(
|
||||||
|
'IS_K8S',
|
||||||
|
field_class=fields.BooleanField,
|
||||||
|
read_only=True,
|
||||||
|
category=_('System'),
|
||||||
|
category_slug='system',
|
||||||
|
help_text=_('Indicates whether the instance is part of a kubernetes-based deployment.'),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def logging_validate(serializer, attrs):
|
def logging_validate(serializer, attrs):
|
||||||
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
|
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ __all__ = [
|
|||||||
'STANDARD_INVENTORY_UPDATE_ENV',
|
'STANDARD_INVENTORY_UPDATE_ENV',
|
||||||
]
|
]
|
||||||
|
|
||||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
|
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
|
||||||
PRIVILEGE_ESCALATION_METHODS = [
|
PRIVILEGE_ESCALATION_METHODS = [
|
||||||
('sudo', _('Sudo')),
|
('sudo', _('Sudo')),
|
||||||
('su', _('Su')),
|
('su', _('Su')),
|
||||||
@@ -41,6 +41,7 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
|||||||
}
|
}
|
||||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||||
ACTIVE_STATES = CAN_CANCEL
|
ACTIVE_STATES = CAN_CANCEL
|
||||||
|
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
|
||||||
CENSOR_VALUE = '************'
|
CENSOR_VALUE = '************'
|
||||||
ENV_BLOCKLIST = frozenset(
|
ENV_BLOCKLIST = frozenset(
|
||||||
(
|
(
|
||||||
|
|||||||
awx/main/credential_plugins/dsv.py (new file, 56 lines)
@@ -0,0 +1,56 @@
|
|||||||
|
from .plugin import CredentialPlugin
|
||||||
|
|
||||||
|
from django.conf import settings
|
||||||
|
from django.utils.translation import ugettext_lazy as _
|
||||||
|
from thycotic.secrets.vault import SecretsVault
|
||||||
|
|
||||||
|
|
||||||
|
dsv_inputs = {
|
||||||
|
'fields': [
|
||||||
|
{
|
||||||
|
'id': 'tenant',
|
||||||
|
'label': _('Tenant'),
|
||||||
|
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
|
||||||
|
'type': 'string',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'id': 'tld',
|
||||||
|
'label': _('Top-level Domain (TLD)'),
|
||||||
|
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
|
||||||
|
'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
|
||||||
|
'default': 'com',
|
||||||
|
},
|
||||||
|
{'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
|
||||||
|
{
|
||||||
|
'id': 'client_secret',
|
||||||
|
'label': _('Client Secret'),
|
||||||
|
'type': 'string',
|
||||||
|
'secret': True,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'metadata': [
|
||||||
|
{
|
||||||
|
'id': 'path',
|
||||||
|
'label': _('Secret Path'),
|
||||||
|
'type': 'string',
|
||||||
|
'help_text': _('The secret path e.g. /test/secret1'),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'required': ['tenant', 'client_id', 'client_secret', 'path'],
|
||||||
|
}
|
||||||
|
|
||||||
|
if settings.DEBUG:
|
||||||
|
dsv_inputs['fields'].append(
|
||||||
|
{
|
||||||
|
'id': 'url_template',
|
||||||
|
'label': _('URL template'),
|
||||||
|
'type': 'string',
|
||||||
|
'default': 'https://{}.secretsvaultcloud.{}/v1',
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
dsv_plugin = CredentialPlugin(
|
||||||
|
'Thycotic DevOps Secrets Vault',
|
||||||
|
dsv_inputs,
|
||||||
|
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
|
||||||
|
)
|
||||||
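The plugin's third argument is the lookup backend: a callable that receives the merged field and metadata values, filters them down to the declared field ids, and passes the rest to the Thycotic SDK. A rough direct invocation, with placeholder credentials (requires the thycotic package imported above):

```python
# Rough illustration of exercising the backend directly; every value is a placeholder.
secret = dsv_plugin.backend(
    tenant='ex',
    tld='com',
    client_id='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
    client_secret='REPLACE_ME',
    path='/test/secret1',  # metadata field: passed to get_secret(), not to SecretsVault()
)
print(secret)
```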
@@ -63,7 +63,15 @@ base_inputs = {
|
|||||||
'id': 'secret_path',
|
'id': 'secret_path',
|
||||||
'label': _('Path to Secret'),
|
'label': _('Path to Secret'),
|
||||||
'type': 'string',
|
'type': 'string',
|
||||||
'help_text': _('The path to the secret stored in the secret backend e.g, /some/secret/'),
|
'help_text': _(
|
||||||
|
(
|
||||||
|
'The path to the secret stored in the secret backend e.g, /some/secret/. It is recommended'
|
||||||
|
' that you use the secret backend field to identify the storage backend and to use this field'
|
||||||
|
' for locating a specific secret within that store. However, if you prefer to fully identify'
|
||||||
|
' both the secret backend and one of its secrets using only this field, join their locations'
|
||||||
|
' into a single path without any additional separators, e.g, /location/of/backend/some/secret.'
|
||||||
|
)
|
||||||
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'id': 'auth_path',
|
'id': 'auth_path',
|
||||||
|
|||||||
@@ -142,7 +142,8 @@ class CallbackBrokerWorker(BaseWorker):
|
|||||||
logger.exception('Database Error Saving Job Event')
|
logger.exception('Database Error Saving Job Event')
|
||||||
duration_to_save = time.perf_counter() - duration_to_save
|
duration_to_save = time.perf_counter() - duration_to_save
|
||||||
for e in events:
|
for e in events:
|
||||||
emit_event_detail(e)
|
if not getattr(e, '_skip_websocket_message', False):
|
||||||
|
emit_event_detail(e)
|
||||||
self.buff = {}
|
self.buff = {}
|
||||||
self.last_flush = time.time()
|
self.last_flush = time.time()
|
||||||
# only update metrics if we saved events
|
# only update metrics if we saved events
|
||||||
@@ -207,7 +208,13 @@ class CallbackBrokerWorker(BaseWorker):
|
|||||||
GuidMiddleware.set_guid('')
|
GuidMiddleware.set_guid('')
|
||||||
return
|
return
|
||||||
|
|
||||||
|
skip_websocket_message = body.pop('skip_websocket_message', False)
|
||||||
|
|
||||||
event = cls.create_from_data(**body)
|
event = cls.create_from_data(**body)
|
||||||
|
|
||||||
|
if skip_websocket_message:
|
||||||
|
event._skip_websocket_message = True
|
||||||
|
|
||||||
self.buff.setdefault(cls, []).append(event)
|
self.buff.setdefault(cls, []).append(event)
|
||||||
|
|
||||||
retries = 0
|
retries = 0
|
||||||
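The flag travels inside the event payload itself: a producer only has to add one key, the worker pops it before create_from_data() and stashes it on the saved event so the websocket broadcast is skipped at flush time. A hypothetical producer-side payload (the real emitting call site is not in this hunk):

```python
# Hypothetical payload shape; key names mirror the handler above.
body = {
    'event': 'verbose',
    'stdout': 'saved to the database, but never pushed to the live UI',
    'job_id': 42,
    'skip_websocket_message': True,
}
```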
|
|||||||
@@ -642,7 +642,7 @@ class CredentialInputField(JSONSchemaField):
|
|||||||
|
|
||||||
# `ssh_key_unlock` requirements are very specific and can't be
|
# `ssh_key_unlock` requirements are very specific and can't be
|
||||||
# represented without complicated JSON schema
|
# represented without complicated JSON schema
|
||||||
if model_instance.credential_type.managed_by_tower is True and 'ssh_key_unlock' in defined_fields:
|
if model_instance.credential_type.managed is True and 'ssh_key_unlock' in defined_fields:
|
||||||
|
|
||||||
# in order to properly test the necessity of `ssh_key_unlock`, we
|
# in order to properly test the necessity of `ssh_key_unlock`, we
|
||||||
# need to know the real value of `ssh_key_data`; for a payload like:
|
# need to know the real value of `ssh_key_data`; for a payload like:
|
||||||
@@ -711,7 +711,7 @@ class CredentialTypeInputField(JSONSchemaField):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def validate(self, value, model_instance):
|
def validate(self, value, model_instance):
|
||||||
if isinstance(value, dict) and 'dependencies' in value and not model_instance.managed_by_tower:
|
if isinstance(value, dict) and 'dependencies' in value and not model_instance.managed:
|
||||||
raise django_exceptions.ValidationError(
|
raise django_exceptions.ValidationError(
|
||||||
_("'dependencies' is not supported for custom credentials."),
|
_("'dependencies' is not supported for custom credentials."),
|
||||||
code='invalid',
|
code='invalid',
|
||||||
|
|||||||
@@ -1,13 +1,5 @@
|
|||||||
from django.db import connections
|
|
||||||
from django.db.backends.sqlite3.base import DatabaseWrapper
|
|
||||||
from django.core.management.commands.makemigrations import Command as MakeMigrations
|
from django.core.management.commands.makemigrations import Command as MakeMigrations
|
||||||
|
|
||||||
|
|
||||||
class Command(MakeMigrations):
|
class Command(MakeMigrations):
|
||||||
def execute(self, *args, **options):
|
pass
|
||||||
settings = connections['default'].settings_dict.copy()
|
|
||||||
settings['ENGINE'] = 'sqlite3'
|
|
||||||
if 'application_name' in settings['OPTIONS']:
|
|
||||||
del settings['OPTIONS']['application_name']
|
|
||||||
connections['default'] = DatabaseWrapper(settings)
|
|
||||||
return MakeMigrations().execute(*args, **options)
|
|
||||||
|
|||||||
@@ -4,11 +4,13 @@
|
|||||||
# Python
|
# Python
|
||||||
import datetime
|
import datetime
|
||||||
import logging
|
import logging
|
||||||
|
import pytz
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
# Django
|
# Django
|
||||||
from django.core.management.base import BaseCommand, CommandError
|
from django.core.management.base import BaseCommand, CommandError
|
||||||
from django.db import transaction
|
from django.db import transaction, connection
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
@@ -18,6 +20,132 @@ from awx.main.signals import disable_activity_stream, disable_computed_fields
|
|||||||
from awx.main.utils.deletion import AWXCollector, pre_delete
|
from awx.main.utils.deletion import AWXCollector, pre_delete
|
||||||
|
|
||||||
|
|
||||||
|
def unified_job_class_to_event_table_name(job_class):
|
||||||
|
return f'main_{job_class().event_class.__name__.lower()}'
|
||||||
|
|
||||||
|
|
||||||
|
def partition_table_name(job_class, dt):
|
||||||
|
suffix = dt.replace(microsecond=0, second=0, minute=0).strftime('%Y%m%d_%H')
|
||||||
|
|
||||||
|
event_tbl_name = unified_job_class_to_event_table_name(job_class)
|
||||||
|
event_tbl_name += f'_{suffix}'
|
||||||
|
return event_tbl_name
|
||||||
|
|
||||||
|
|
||||||
|
def partition_name_dt(part_name):
|
||||||
|
"""
|
||||||
|
part_name examples:
|
||||||
|
main_jobevent_20210318_09
|
||||||
|
main_projectupdateevent_20210318_11
|
||||||
|
main_inventoryupdateevent_20210318_03
|
||||||
|
"""
|
||||||
|
if '_unpartitioned' in part_name:
|
||||||
|
return None
|
||||||
|
p = re.compile('([a-z]+)_([a-z]+)_([0-9]+)_([0-9][0-9])')
|
||||||
|
m = p.match(part_name)
|
||||||
|
if not m:
|
||||||
|
return m
|
||||||
|
dt_str = f"{m.group(3)}_{m.group(4)}"
|
||||||
|
dt = datetime.datetime.strptime(dt_str, '%Y%m%d_%H').replace(tzinfo=pytz.UTC)
|
||||||
|
return dt
|
||||||
|
|
||||||
|
|
||||||
|
def dt_to_partition_name(tbl_name, dt):
|
||||||
|
return f"{tbl_name}_{dt.strftime('%Y%m%d_%H')}"
|
||||||
|
|
||||||
|
|
||||||
|
class DeleteMeta:
|
||||||
|
def __init__(self, logger, job_class, cutoff, dry_run):
|
||||||
|
self.logger = logger
|
||||||
|
self.job_class = job_class
|
||||||
|
self.cutoff = cutoff
|
||||||
|
self.dry_run = dry_run
|
||||||
|
|
||||||
|
self.jobs_qs = None # Set in by find_jobs_to_delete()
|
||||||
|
|
||||||
|
self.parts_no_drop = set() # Set in identify_excluded_partitions()
|
||||||
|
self.parts_to_drop = set() # Set in find_partitions_to_drop()
|
||||||
|
self.jobs_pk_list = [] # Set in find_jobs_to_delete()
|
||||||
|
self.jobs_to_delete_count = 0 # Set in find_jobs_to_delete()
|
||||||
|
self.jobs_no_delete_count = 0 # Set in find_jobs_to_delete()
|
||||||
|
|
||||||
|
def find_jobs_to_delete(self):
|
||||||
|
self.jobs_qs = self.job_class.objects.filter(created__lt=self.cutoff).values_list('pk', 'status', 'created')
|
||||||
|
for pk, status, created in self.jobs_qs:
|
||||||
|
if status not in ['pending', 'waiting', 'running']:
|
||||||
|
self.jobs_to_delete_count += 1
|
||||||
|
self.jobs_pk_list.append(pk)
|
||||||
|
self.jobs_no_delete_count = (
|
||||||
|
self.job_class.objects.filter(created__gte=self.cutoff) | self.job_class.objects.filter(status__in=['pending', 'waiting', 'running'])
|
||||||
|
).count()
|
||||||
|
|
||||||
|
def identify_excluded_partitions(self):
|
||||||
|
|
||||||
|
part_drop = {}
|
||||||
|
|
||||||
|
for pk, status, created in self.jobs_qs:
|
||||||
|
|
||||||
|
part_key = partition_table_name(self.job_class, created)
|
||||||
|
if status in ['pending', 'waiting', 'running']:
|
||||||
|
part_drop[part_key] = False
|
||||||
|
else:
|
||||||
|
part_drop.setdefault(part_key, True)
|
||||||
|
|
||||||
|
# Note that parts_no_drop _may_ contain the names of partitions that don't exist
|
||||||
|
# This can happen when the cleanup of _unpartitioned_* logic leaves behind jobs with status pending, waiting, running. The find_jobs_to_delete() will
|
||||||
|
# pick these jobs up.
|
||||||
|
self.parts_no_drop = set([k for k, v in part_drop.items() if v is False])
|
||||||
|
|
||||||
|
def delete_jobs(self):
|
||||||
|
if not self.dry_run:
|
||||||
|
self.job_class.objects.filter(pk__in=self.jobs_pk_list).delete()
|
||||||
|
|
||||||
|
def find_partitions_to_drop(self):
|
||||||
|
tbl_name = unified_job_class_to_event_table_name(self.job_class)
|
||||||
|
|
||||||
|
with connection.cursor() as cursor:
|
||||||
|
query = "SELECT inhrelid::regclass::text AS child FROM pg_catalog.pg_inherits"
|
||||||
|
query += f" WHERE inhparent = 'public.{tbl_name}'::regclass"
|
||||||
|
query += f" AND TO_TIMESTAMP(LTRIM(inhrelid::regclass::text, '{tbl_name}_'), 'YYYYMMDD_HH24') < '{self.cutoff}'"
|
||||||
|
query += " ORDER BY inhrelid::regclass::text"
|
||||||
|
|
||||||
|
cursor.execute(query)
|
||||||
|
partitions_from_db = [r[0] for r in cursor.fetchall()]
|
||||||
|
|
||||||
|
partitions_dt = [partition_name_dt(p) for p in partitions_from_db if not None]
|
||||||
|
partitions_dt = [p for p in partitions_dt if not None]
|
||||||
|
|
||||||
|
# convert datetime partition back to string partition
|
||||||
|
partitions_maybe_drop = set([dt_to_partition_name(tbl_name, dt) for dt in partitions_dt])
|
||||||
|
|
||||||
|
# Do not drop partition if there is a job that will not be deleted pointing at it
|
||||||
|
self.parts_to_drop = partitions_maybe_drop - self.parts_no_drop
|
||||||
|
|
||||||
|
def drop_partitions(self):
|
||||||
|
if len(self.parts_to_drop) > 0:
|
||||||
|
parts_to_drop = list(self.parts_to_drop)
|
||||||
|
parts_to_drop.sort() # sort it to make reading it easier for humans
|
||||||
|
parts_to_drop_str = ','.join(parts_to_drop)
|
||||||
|
if self.dry_run:
|
||||||
|
self.logger.debug(f"Would drop event partition(s) {parts_to_drop_str}")
|
||||||
|
else:
|
||||||
|
self.logger.debug(f"Dropping event partition(s) {parts_to_drop_str}")
|
||||||
|
|
||||||
|
if not self.dry_run:
|
||||||
|
with connection.cursor() as cursor:
|
||||||
|
cursor.execute(f"DROP TABLE {parts_to_drop_str}")
|
||||||
|
else:
|
||||||
|
self.logger.debug("No event partitions to drop")
|
||||||
|
|
||||||
|
def delete(self):
|
||||||
|
self.find_jobs_to_delete()
|
||||||
|
self.identify_excluded_partitions()
|
||||||
|
self.find_partitions_to_drop()
|
||||||
|
self.drop_partitions()
|
||||||
|
self.delete_jobs()
|
||||||
|
return (self.jobs_no_delete_count, self.jobs_to_delete_count)
|
||||||
|
|
||||||
|
|
||||||
class Command(BaseCommand):
    """
    Management command to cleanup old jobs and project updates.
@@ -36,6 +164,43 @@ class Command(BaseCommand):
        parser.add_argument('--notifications', dest='only_notifications', action='store_true', default=False, help='Remove notifications')
        parser.add_argument('--workflow-jobs', default=False, action='store_true', dest='only_workflow_jobs', help='Remove workflow jobs')

    def cleanup(self, job_class):
        delete_meta = DeleteMeta(self.logger, job_class, self.cutoff, self.dry_run)
        skipped, deleted = delete_meta.delete()
        return (skipped, deleted)

    def cleanup_jobs_partition(self):
        return self.cleanup(Job)

    def cleanup_ad_hoc_commands_partition(self):
        return self.cleanup(AdHocCommand)

    def cleanup_project_updates_partition(self):
        return self.cleanup(ProjectUpdate)

    def cleanup_inventory_updates_partition(self):
        return self.cleanup(InventoryUpdate)

    def cleanup_management_jobs_partition(self):
        return self.cleanup(SystemJob)

    def cleanup_workflow_jobs_partition(self):
        # workflow jobs have no event partitions, so only the job rows are removed
        delete_meta = DeleteMeta(self.logger, WorkflowJob, self.cutoff, self.dry_run)
        delete_meta.find_jobs_to_delete()
        delete_meta.delete_jobs()
        return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)

    def _cascade_delete_job_events(self, model, pk_list):
        if len(pk_list) > 0:
            with connection.cursor() as cursor:
                tblname = unified_job_class_to_event_table_name(model)
                pk_list_csv = ','.join(map(str, pk_list))
                rel_name = model().event_parent_key
                cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
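To make the raw-SQL path above concrete, here is a small illustrative sketch (not part of the diff) of the statement _cascade_delete_job_events builds for a Job cleanup; the 'job_id' column is assumed to be what Job().event_parent_key returns:

# illustrative only -- mirrors the f-string built in _cascade_delete_job_events above
pk_list = [101, 102, 103]
tblname = 'main_jobevent'   # assumed result of unified_job_class_to_event_table_name(Job)
rel_name = 'job_id'         # assumed value of Job().event_parent_key
sql = f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({','.join(map(str, pk_list))})"
# -> DELETE FROM _unpartitioned_main_jobevent WHERE job_id IN (101,102,103)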
    def cleanup_jobs(self):
        skipped, deleted = 0, 0
@@ -45,12 +210,14 @@
            # get queryset for available jobs to remove
            qs = Job.objects.filter(created__lt=self.cutoff).exclude(status__in=['pending', 'waiting', 'running'])
            # get pk list for the first N (batch_size) objects
-           pk_list = qs[0:batch_size].values_list('pk')
+           pk_list = qs[0:batch_size].values_list('pk', flat=True)
            # You cannot delete queries with sql LIMIT set, so we must
            # create a new query from this pk_list
            qs_batch = Job.objects.filter(pk__in=pk_list)
            just_deleted = 0
            if not self.dry_run:
                self._cascade_delete_job_events(Job, pk_list)

                del_query = pre_delete(qs_batch)
                collector = AWXCollector(del_query.db)
                collector.collect(del_query)
@@ -71,6 +238,7 @@
    def cleanup_ad_hoc_commands(self):
        skipped, deleted = 0, 0
        ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for ad_hoc_command in ad_hoc_commands.iterator():
            ad_hoc_command_display = '"%s" (%d events)' % (str(ad_hoc_command), ad_hoc_command.ad_hoc_command_events.count())
            if ad_hoc_command.status in ('pending', 'waiting', 'running'):
@@ -81,15 +249,20 @@
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, ad_hoc_command_display)
            if not self.dry_run:
                pk_list.append(ad_hoc_command.pk)
                ad_hoc_command.delete()
            deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(AdHocCommand, pk_list)

        skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

    def cleanup_project_updates(self):
        skipped, deleted = 0, 0
        project_updates = ProjectUpdate.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for pu in project_updates.iterator():
            pu_display = '"%s" (type %s)' % (str(pu), str(pu.launch_type))
            if pu.status in ('pending', 'waiting', 'running'):
@@ -104,15 +277,20 @@
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, pu_display)
            if not self.dry_run:
                pk_list.append(pu.pk)
                pu.delete()
            deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(ProjectUpdate, pk_list)

        skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

    def cleanup_inventory_updates(self):
        skipped, deleted = 0, 0
        inventory_updates = InventoryUpdate.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for iu in inventory_updates.iterator():
            iu_display = '"%s" (source %s)' % (str(iu), str(iu.source))
            if iu.status in ('pending', 'waiting', 'running'):
@@ -127,15 +305,20 @@
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, iu_display)
            if not self.dry_run:
                pk_list.append(iu.pk)
                iu.delete()
            deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(InventoryUpdate, pk_list)

        skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted

    def cleanup_management_jobs(self):
        skipped, deleted = 0, 0
        system_jobs = SystemJob.objects.filter(created__lt=self.cutoff)
        pk_list = []
        for sj in system_jobs.iterator():
            sj_display = '"%s" (type %s)' % (str(sj), str(sj.job_type))
            if sj.status in ('pending', 'waiting', 'running'):
@@ -146,9 +329,13 @@
            action_text = 'would delete' if self.dry_run else 'deleting'
            self.logger.info('%s %s', action_text, sj_display)
            if not self.dry_run:
                pk_list.append(sj.pk)
                sj.delete()
            deleted += 1

        if not self.dry_run:
            self._cascade_delete_job_events(SystemJob, pk_list)

        skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
        return skipped, deleted
@@ -222,6 +409,13 @@
        for m in model_names:
            if m in models_to_cleanup:
                skipped, deleted = getattr(self, 'cleanup_%s' % m)()

                func = getattr(self, 'cleanup_%s_partition' % m, None)
                if func:
                    skipped_partition, deleted_partition = func()
                    skipped += skipped_partition
                    deleted += deleted_partition

                if self.dry_run:
                    self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
                else:
@@ -2,9 +2,8 @@
# All Rights Reserved

from django.core.management.base import BaseCommand
-from django.conf import settings
from crum import impersonate
-from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate, ExecutionEnvironment
+from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
from awx.main.signals import disable_computed_fields
@@ -45,7 +44,7 @@ class Command(BaseCommand):
        public_galaxy_credential = Credential(
            name='Ansible Galaxy',
-            managed_by_tower=True,
+            managed=True,
            credential_type=CredentialType.objects.get(kind='galaxy'),
            inputs={'url': 'https://galaxy.ansible.com/'},
        )
@@ -68,13 +67,6 @@ class Command(BaseCommand):
            print('Demo Credential, Inventory, and Job Template added.')
            changed = True

-        for ee in reversed(settings.DEFAULT_EXECUTION_ENVIRONMENTS):
-            _, created = ExecutionEnvironment.objects.get_or_create(name=ee['name'], defaults={'image': ee['image'], 'managed_by_tower': True})
-            if created:
-                changed = True
-                print('Default Execution Environment(s) registered.')

        if changed:
            print('(changed: True)')
        else:
59  awx/main/management/commands/custom_venv_associations.py  Normal file
@@ -0,0 +1,59 @@
# Copyright (c) 2021 Ansible, Inc.
# All Rights Reserved

from django.core.management.base import BaseCommand
from awx.main.utils.common import get_custom_venv_choices
from awx.main.models import Organization, InventorySource, JobTemplate, Project
import yaml


class Command(BaseCommand):
    """Returns the Organizations, Job Templates, Projects, and Inventory Sources associated with a custom virtual environment path"""

    def add_arguments(self, parser):
        parser.add_argument(
            'path',
            type=str,
            nargs=1,
            default='',
            help='run this with a path to a virtual environment as an argument to see the associated Job Templates, Organizations, Projects, and Inventory Sources.',
        )
        parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')

    def handle(self, *args, **options):
        # look up organizations and unified job templates (which include JTs, workflows, and inventory updates)
        super(Command, self).__init__()
        results = {}
        path = options.get('path')
        if path:
            all_venvs = get_custom_venv_choices()
            if path[0] in all_venvs:  # verify this is a valid path
                path = path[0]
                orgs = [{"name": org.name, "id": org.id} for org in Organization.objects.filter(custom_virtualenv=path)]
                jts = [{"name": jt.name, "id": jt.id} for jt in JobTemplate.objects.filter(custom_virtualenv=path)]
                proj = [{"name": proj.name, "id": proj.id} for proj in Project.objects.filter(custom_virtualenv=path)]
                invsrc = [{"name": inv.name, "id": inv.id} for inv in InventorySource.objects.filter(custom_virtualenv=path)]
                results["organizations"] = orgs
                results["job_templates"] = jts
                results["projects"] = proj
                results["inventory_sources"] = invsrc
                if not options.get('q'):
                    msg = [
                        '# Virtual Environments Associations:',
                        yaml.dump(results),
                        '- To list all (now deprecated) custom virtual environments run:',
                        'awx-manage list_custom_venvs',
                        '',
                        '- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
                        'awx-manage export_custom_venv /path/to/venv',
                        '',
                        '- Run these commands with `-q` to remove tool tips.',
                        '',
                    ]
                    print('\n'.join(msg))
                else:
                    print(yaml.dump(results))

            else:
                print('\n', '# Incorrect path, verify your path is from the following list:')
                print('\n'.join(all_venvs), '\n')
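For illustration only (not part of the diff): the -q output of this command is simply yaml.dump of the results dict, so for a venv tied to one organization and one job template it would look roughly like the following; the names and ids are made up:

# hypothetical data, showing the shape of the -q output (yaml.dump(results))
import yaml

results = {
    "organizations": [{"name": "Default", "id": 1}],
    "job_templates": [{"name": "Demo Job Template", "id": 7}],
    "projects": [],
    "inventory_sources": [],
}
print(yaml.dump(results))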
@@ -31,6 +31,7 @@ class Command(BaseCommand):
        for session in sessions:
            user_id = session.get_decoded().get('_auth_user_id')
            if (user is None) or (user_id and user.id == int(user_id)):
                # The Session model instance doesn't have .flush(), we need a SessionStore instance.
                session = import_module(settings.SESSION_ENGINE).SessionStore(session.session_key)
                # Log out the session, but without the need for a request object.
                session.flush()
48  awx/main/management/commands/export_custom_venv.py  Normal file
@@ -0,0 +1,48 @@
# Copyright (c) 2021 Ansible, Inc.
# All Rights Reserved

from awx.main.utils.common import get_custom_venv_pip_freeze, get_custom_venv_choices
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Returns the pip freeze from the path passed in the argument"""

    def add_arguments(self, parser):
        parser.add_argument(
            'path',
            type=str,
            nargs=1,
            default='',
            help='run this with a path to a virtual environment as an argument to see the pip freeze data',
        )
        parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')

    def handle(self, *args, **options):
        super(Command, self).__init__()
        if options.get('path'):
            path = options.get('path')
            all_venvs = get_custom_venv_choices()
            if path[0] in all_venvs:
                pip_data = get_custom_venv_pip_freeze(options.get('path')[0])
                if pip_data:
                    if not options.get('q'):
                        msg = [
                            '# Virtual environment contents:',
                            pip_data,
                            '- To list all (now deprecated) custom virtual environments run:',
                            'awx-manage list_custom_venvs',
                            '',
                            '- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
                            'awx-manage custom_venv_associations /path/to/venv',
                            '',
                            '- Run these commands with `-q` to remove tool tips.',
                            '',
                        ]
                        print('\n'.join(msg))
                    else:
                        print(pip_data)

            else:
                print('\n', '# Incorrect path, verify your path is from the following list:')
                print('\n'.join(all_venvs))
54  awx/main/management/commands/host_metric.py  Normal file
@@ -0,0 +1,54 @@
from django.core.management.base import BaseCommand
import datetime
from django.core.serializers.json import DjangoJSONEncoder
from awx.main.models.inventory import HostMetric
import json


class Command(BaseCommand):

    help = 'This is for offline licensing usage'

    def add_arguments(self, parser):
        parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
        parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
        parser.add_argument('--json', action='store_true', help='Select output as JSON')

    def handle(self, *args, **options):
        since = options.get('since')
        until = options.get('until')

        if since is None and until is None:
            print("No Arguments received")
            return None

        if since is not None and since.tzinfo is None:
            since = since.replace(tzinfo=datetime.timezone.utc)

        if until is not None and until.tzinfo is None:
            until = until.replace(tzinfo=datetime.timezone.utc)

        filter_kwargs = {}
        if since is not None:
            filter_kwargs['last_automation__gte'] = since
        if until is not None:
            filter_kwargs['last_automation__lte'] = until

        result = HostMetric.objects.filter(**filter_kwargs)

        # if --json flag is set, output the result in json format
        if options['json']:
            list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
            json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
            print(json_result)

        # --json flag is not set, output in plain text
        else:
            print(f"Total Number of hosts automated: {len(result)}")
            for item in result:
                print(
                    "Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
                        hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
                    )
                )
        return
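A quick sketch (not from the diff) of the timezone normalization the handler applies: naive ISO dates from --since/--until are interpreted as UTC before being used in the last_automation range filter.

import datetime

since = datetime.datetime.fromisoformat('2021-05-01')   # what argparse produces for --since 2021-05-01
if since.tzinfo is None:
    since = since.replace(tzinfo=datetime.timezone.utc)  # same normalization as the command
print(since)  # 2021-05-01 00:00:00+00:00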
@@ -10,7 +10,6 @@ import subprocess
import sys
import time
import traceback
-import shutil

# Django
from django.conf import settings
@@ -37,20 +36,20 @@ from awx.main.utils.pglock import advisory_lock
logger = logging.getLogger('awx.main.commands.inventory_import')

LICENSE_EXPIRED_MESSAGE = '''\
-License expired.
-See http://www.ansible.com/renew for license extension information.'''
+Subscription expired.
+Contact us (https://www.redhat.com/contact) for subscription extension information.'''

LICENSE_NON_EXISTANT_MESSAGE = '''\
-No license.
-See http://www.ansible.com/renew for license information.'''
+No subscription.
+Contact us (https://www.redhat.com/contact) for subscription information.'''

LICENSE_MESSAGE = '''\
-Number of licensed instances exceeded, would bring available instances to %(new_count)d, system is licensed for %(instance_count)d.
-See http://www.ansible.com/renew for license extension information.'''
+%(new_count)d instances have been automated, system is subscribed for %(instance_count)d.
+Contact us (https://www.redhat.com/contact) for upgrade information.'''

DEMO_LICENSE_MESSAGE = '''\
-Demo mode free license count exceeded, would bring available instances to %(new_count)d, demo mode allows %(instance_count)d.
-See http://www.ansible.com/renew for licensing information.'''
+Demo mode free subscription count exceeded. Current automated instances are %(new_count)d, demo mode allows %(instance_count)d.
+Contact us (https://www.redhat.com/contact) for subscription information.'''


def functioning_dir(path):
@@ -67,24 +66,9 @@ class AnsibleInventoryLoader(object):
    /usr/bin/ansible/ansible-inventory -i hosts --list
    """

-    def __init__(self, source, venv_path=None, verbosity=0):
+    def __init__(self, source, verbosity=0):
        self.source = source
        self.verbosity = verbosity
-        if venv_path:
-            self.venv_path = venv_path
-        else:
-            self.venv_path = settings.ANSIBLE_VENV_PATH
-
-    def get_path_to_ansible_inventory(self):
-        venv_exe = os.path.join(self.venv_path, 'bin', 'ansible-inventory')
-        if os.path.exists(venv_exe):
-            return venv_exe
-        elif os.path.exists(os.path.join(self.venv_path, 'bin', 'ansible')):
-            # if bin/ansible exists but bin/ansible-inventory doesn't, it's
-            # probably a really old version of ansible that doesn't support
-            # ansible-inventory
-            raise RuntimeError("{} does not exist (please upgrade to ansible >= 2.4)".format(venv_exe))
-        return shutil.which('ansible-inventory')

    def get_base_args(self):
        bargs = ['podman', 'run', '--user=root', '--quiet']
@@ -143,7 +127,6 @@ class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('--inventory-name', dest='inventory_name', type=str, default=None, metavar='n', help='name of inventory to sync')
        parser.add_argument('--inventory-id', dest='inventory_id', type=int, default=None, metavar='i', help='id of inventory to sync')
-        parser.add_argument('--venv', dest='venv', type=str, default=None, help='absolute path to the AWX custom virtualenv to use')
        parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False, help='overwrite the destination hosts and groups')
        parser.add_argument('--overwrite-vars', dest='overwrite_vars', action='store_true', default=False, help='overwrite (rather than merge) variables')
        parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False, help='DEPRECATED legacy option, has no effect')
@@ -773,29 +756,22 @@ class Command(BaseCommand):
        instance_count = license_info.get('instance_count', 0)
        free_instances = license_info.get('free_instances', 0)
        time_remaining = license_info.get('time_remaining', 0)
        automated_count = license_info.get('automated_instances', 0)
        hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
-        new_count = Host.objects.active_count()
        if time_remaining <= 0:
            if hard_error:
                logger.error(LICENSE_EXPIRED_MESSAGE)
-                raise PermissionDenied("License has expired!")
+                raise PermissionDenied("Subscription has expired!")
            else:
                logger.warning(LICENSE_EXPIRED_MESSAGE)
-        # special check for tower-type inventory sources
-        # but only if running the plugin
-        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
-        if self.inventory_source.source == 'tower' and any(f in self.inventory_source.source_path for f in TOWER_SOURCE_FILES):
-            # only if this is the 2nd call to license check, we cannot compare before running plugin
-            if hasattr(self, 'all_group'):
-                self.remote_tower_license_compare(local_license_type)
        if free_instances < 0:
            d = {
-                'new_count': new_count,
+                'new_count': automated_count,
                'instance_count': instance_count,
            }
            if hard_error:
                logger.error(LICENSE_MESSAGE % d)
-                raise PermissionDenied('License count exceeded!')
+                raise PermissionDenied('Subscription count exceeded!')
            else:
                logger.warning(LICENSE_MESSAGE % d)

@@ -836,7 +812,6 @@ class Command(BaseCommand):
            raise CommandError('--source is required')
        verbosity = int(options.get('verbosity', 1))
        self.set_logging_level(verbosity)
-        venv_path = options.get('venv', None)

        # Load inventory object based on name or ID.
        if inventory_id:
@@ -866,7 +841,7 @@ class Command(BaseCommand):
            _eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
        )

-        data = AnsibleInventoryLoader(source=source, venv_path=venv_path, verbosity=verbosity).load()
+        data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()

        logger.debug('Finished loading from source: %s', source)
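For clarity (editor's example, not part of the diff), the reworded LICENSE_MESSAGE is still %-formatted with the same two keys, now fed by automated_instances rather than Host.objects.active_count():

LICENSE_MESSAGE = '''\
%(new_count)d instances have been automated, system is subscribed for %(instance_count)d.
Contact us (https://www.redhat.com/contact) for upgrade information.'''

d = {'new_count': 25, 'instance_count': 10}  # sample numbers only
print(LICENSE_MESSAGE % d)
# 25 instances have been automated, system is subscribed for 10.
# Contact us (https://www.redhat.com/contact) for upgrade information.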
|||||||
43
awx/main/management/commands/list_custom_venvs.py
Normal file
43
awx/main/management/commands/list_custom_venvs.py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
# Copyright (c) 2021 Ansible, Inc.
|
||||||
|
# All Rights Reserved
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from awx.main.utils.common import get_custom_venv_choices
|
||||||
|
from django.core.management.base import BaseCommand
|
||||||
|
from django.conf import settings
|
||||||
|
|
||||||
|
|
||||||
|
class Command(BaseCommand):
|
||||||
|
"""Returns a list of custom venv paths from the path passed in the argument"""
|
||||||
|
|
||||||
|
def add_arguments(self, parser):
|
||||||
|
parser.add_argument('-q', action='store_true', help='run with -q to output only the results of the query.')
|
||||||
|
|
||||||
|
def handle(self, *args, **options):
|
||||||
|
super(Command, self).__init__()
|
||||||
|
venvs = get_custom_venv_choices()
|
||||||
|
if venvs:
|
||||||
|
if not options.get('q'):
|
||||||
|
msg = [
|
||||||
|
'# Discovered Virtual Environments:',
|
||||||
|
'\n'.join(venvs),
|
||||||
|
'',
|
||||||
|
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
|
||||||
|
'awx-manage export_custom_venv /path/to/venv',
|
||||||
|
'',
|
||||||
|
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
|
||||||
|
'awx-manage custom_venv_associations /path/to/venv',
|
||||||
|
'',
|
||||||
|
'- Run these commands with `-q` to remove tool tips.',
|
||||||
|
'',
|
||||||
|
]
|
||||||
|
print('\n'.join(msg))
|
||||||
|
else:
|
||||||
|
print('\n'.join(venvs), '\n')
|
||||||
|
else:
|
||||||
|
msg = ["No custom virtual environments detected in:", settings.BASE_VENV_PATH]
|
||||||
|
|
||||||
|
for path in settings.CUSTOM_VENV_PATHS:
|
||||||
|
msg.append(path)
|
||||||
|
|
||||||
|
print('\n'.join(msg), file=sys.stderr)
|
||||||
@@ -0,0 +1,135 @@
|
|||||||
|
# Copyright (c) 2015 Ansible, Inc.
|
||||||
|
# All Rights Reserved
|
||||||
|
import sys
|
||||||
|
from distutils.util import strtobool
|
||||||
|
from argparse import RawTextHelpFormatter
|
||||||
|
|
||||||
|
from django.core.management.base import BaseCommand
|
||||||
|
from django.conf import settings
|
||||||
|
from awx.main.models import CredentialType, Credential, ExecutionEnvironment
|
||||||
|
|
||||||
|
|
||||||
|
class Command(BaseCommand):
|
||||||
|
"""Create default execution environments, intended for new installs"""
|
||||||
|
|
||||||
|
help = """
|
||||||
|
Creates or updates the execution environments set in settings.DEFAULT_EXECUTION_ENVIRONMENTS if they are not yet created.
|
||||||
|
Optionally provide authentication details to create or update a container registry credential that will be set on all of these default execution environments.
|
||||||
|
Note that settings.DEFAULT_EXECUTION_ENVIRONMENTS is an ordered list; the first entry in the list will be used for project updates.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Preserves newlines in the help text
|
||||||
|
def create_parser(self, *args, **kwargs):
|
||||||
|
parser = super(Command, self).create_parser(*args, **kwargs)
|
||||||
|
parser.formatter_class = RawTextHelpFormatter
|
||||||
|
return parser
|
||||||
|
|
||||||
|
def add_arguments(self, parser):
|
||||||
|
parser.add_argument(
|
||||||
|
"--registry-url",
|
||||||
|
type=str,
|
||||||
|
default="",
|
||||||
|
help="URL for the container registry",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--registry-username",
|
||||||
|
type=str,
|
||||||
|
default="",
|
||||||
|
help="username for the container registry",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--registry-password",
|
||||||
|
type=str,
|
||||||
|
default="",
|
||||||
|
help="Password or token for CLI authentication with the container registry",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--verify-ssl",
|
||||||
|
type=lambda x: bool(strtobool(str(x))),
|
||||||
|
default=True,
|
||||||
|
help="Verify SSL when authenticating with the container registry",
|
||||||
|
)
|
||||||
|
|
||||||
|
def handle(self, *args, **options):
|
||||||
|
changed = False
|
||||||
|
registry_cred = None
|
||||||
|
|
||||||
|
if options.get("registry_username"):
|
||||||
|
if not options.get("registry_password"):
|
||||||
|
sys.stderr.write("Registry password must be provided when providing registry username\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if not options.get("registry_url"):
|
||||||
|
sys.stderr.write("Registry url must be provided when providing registry username\n")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
registry_cred_type = CredentialType.objects.filter(kind="registry")
|
||||||
|
if not registry_cred_type.exists():
|
||||||
|
sys.stderr.write("No registry credential type found")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
inputs = {
|
||||||
|
"host": options.get("registry_url"),
|
||||||
|
"password": options.get("registry_password"),
|
||||||
|
"username": options.get("registry_username"),
|
||||||
|
"verify_ssl": options.get("verify_ssl"),
|
||||||
|
}
|
||||||
|
registry_cred, cred_created = Credential.objects.get_or_create(
|
||||||
|
name="Default Execution Environment Registry Credential",
|
||||||
|
managed=True,
|
||||||
|
credential_type=registry_cred_type[0],
|
||||||
|
defaults={'inputs': inputs},
|
||||||
|
)
|
||||||
|
|
||||||
|
if cred_created:
|
||||||
|
changed = True
|
||||||
|
print("'Default Execution Environment Credential' registered.")
|
||||||
|
|
||||||
|
for key, value in inputs.items():
|
||||||
|
if not registry_cred.inputs.get(key) or registry_cred.get_input(key) != value:
|
||||||
|
registry_cred.inputs[key] = value
|
||||||
|
changed = True
|
||||||
|
|
||||||
|
if changed:
|
||||||
|
registry_cred.save()
|
||||||
|
print("'Default Execution Environment Credential' updated.")
|
||||||
|
|
||||||
|
# Create default globally available Execution Environments
|
||||||
|
for ee in reversed(settings.GLOBAL_JOB_EXECUTION_ENVIRONMENTS):
|
||||||
|
_this_ee, ee_created = ExecutionEnvironment.objects.get_or_create(name=ee["name"], defaults={'image': ee["image"], 'credential': registry_cred})
|
||||||
|
if ee_created:
|
||||||
|
changed = True
|
||||||
|
print(f"'{ee['name']}' Default Execution Environment registered.")
|
||||||
|
else:
|
||||||
|
if _this_ee.image != ee["image"]:
|
||||||
|
_this_ee.image = ee["image"]
|
||||||
|
changed = True
|
||||||
|
if _this_ee.credential != registry_cred:
|
||||||
|
_this_ee.credential = registry_cred
|
||||||
|
changed = True
|
||||||
|
if changed:
|
||||||
|
_this_ee.save()
|
||||||
|
print(f"'{ee['name']}' Default Execution Environment updated.")
|
||||||
|
|
||||||
|
# Create the control plane execution environment that is used for project updates and system jobs
|
||||||
|
ee = settings.CONTROL_PLANE_EXECUTION_ENVIRONMENT
|
||||||
|
_this_ee, cp_created = ExecutionEnvironment.objects.get_or_create(
|
||||||
|
name="Control Plane Execution Environment", defaults={'image': ee, 'managed': True, 'credential': registry_cred}
|
||||||
|
)
|
||||||
|
if cp_created:
|
||||||
|
changed = True
|
||||||
|
print("Control Plane Execution Environment registered.")
|
||||||
|
else:
|
||||||
|
if _this_ee.image != ee:
|
||||||
|
_this_ee.image = ee
|
||||||
|
changed = True
|
||||||
|
if _this_ee.credential != registry_cred:
|
||||||
|
_this_ee.credential = registry_cred
|
||||||
|
changed = True
|
||||||
|
if changed:
|
||||||
|
_this_ee.save()
|
||||||
|
|
||||||
|
if changed:
|
||||||
|
print("(changed: True)")
|
||||||
|
else:
|
||||||
|
print("(changed: False)")
|
||||||
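A side note on the get_or_create pattern used throughout the command above (sketch, not from the diff): defaults= only applies when the row is created, which is why the command follows up with explicit image and credential comparisons for execution environments that already exist. The name and image below are hypothetical; registry_cred is the credential built earlier in the command.

from awx.main.models import ExecutionEnvironment

ee, created = ExecutionEnvironment.objects.get_or_create(
    name="AWX EE (latest)",  # hypothetical name
    defaults={"image": "quay.io/example/ee:latest", "credential": registry_cred},
)
if not created and ee.image != "quay.io/example/ee:latest":
    ee.image = "quay.io/example/ee:latest"  # defaults are ignored for rows that already exist
    ee.save()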
@@ -11,11 +11,16 @@ from django.conf import settings
|
|||||||
from awx.main.utils.filters import SmartFilter
|
from awx.main.utils.filters import SmartFilter
|
||||||
from awx.main.utils.pglock import advisory_lock
|
from awx.main.utils.pglock import advisory_lock
|
||||||
|
|
||||||
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager']
|
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager', 'DeferJobCreatedManager']
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.managers')
|
logger = logging.getLogger('awx.main.managers')
|
||||||
|
|
||||||
|
|
||||||
|
class DeferJobCreatedManager(models.Manager):
|
||||||
|
def get_queryset(self):
|
||||||
|
return super(DeferJobCreatedManager, self).get_queryset().defer('job_created')
|
||||||
|
|
||||||
|
|
||||||
class HostManager(models.Manager):
|
class HostManager(models.Manager):
|
||||||
"""Custom manager class for Hosts model."""
|
"""Custom manager class for Hosts model."""
|
||||||
|
|
||||||
@@ -28,7 +33,7 @@ class HostManager(models.Manager):
|
|||||||
- Only consider results that are unique
|
- Only consider results that are unique
|
||||||
- Return the count of this query
|
- Return the count of this query
|
||||||
"""
|
"""
|
||||||
return self.order_by().exclude(inventory_sources__source='tower').values('name').distinct().count()
|
return self.order_by().exclude(inventory_sources__source='controller').values('name').distinct().count()
|
||||||
|
|
||||||
def org_active_count(self, org_id):
|
def org_active_count(self, org_id):
|
||||||
"""Return count of active, unique hosts used by an organization.
|
"""Return count of active, unique hosts used by an organization.
|
||||||
@@ -40,7 +45,7 @@ class HostManager(models.Manager):
|
|||||||
- Only consider results that are unique
|
- Only consider results that are unique
|
||||||
- Return the count of this query
|
- Return the count of this query
|
||||||
"""
|
"""
|
||||||
return self.order_by().exclude(inventory_sources__source='tower').filter(inventory__organization=org_id).values('name').distinct().count()
|
return self.order_by().exclude(inventory_sources__source='controller').filter(inventory__organization=org_id).values('name').distinct().count()
|
||||||
|
|
||||||
def get_queryset(self):
|
def get_queryset(self):
|
||||||
"""When the parent instance of the host query set has a `kind=smart` and a `host_filter`
|
"""When the parent instance of the host query set has a `kind=smart` and a `host_filter`
|
||||||
@@ -141,8 +146,8 @@ class InstanceManager(models.Manager):
|
|||||||
|
|
||||||
pod_ip = os.environ.get('MY_POD_IP')
|
pod_ip = os.environ.get('MY_POD_IP')
|
||||||
registered = self.register(ip_address=pod_ip)
|
registered = self.register(ip_address=pod_ip)
|
||||||
is_container_group = settings.IS_K8S
|
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||||
RegisterQueue('tower', 100, 0, [], is_container_group).register()
|
RegisterQueue(settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True).register()
|
||||||
return registered
|
return registered
|
||||||
else:
|
else:
|
||||||
return (False, self.me())
|
return (False, self.me())
|
||||||
@@ -151,10 +156,6 @@ class InstanceManager(models.Manager):
|
|||||||
"""Return count of active Tower nodes for licensing."""
|
"""Return count of active Tower nodes for licensing."""
|
||||||
return self.all().count()
|
return self.all().count()
|
||||||
|
|
||||||
def my_role(self):
|
|
||||||
# NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
|
|
||||||
return "tower"
|
|
||||||
|
|
||||||
|
|
||||||
class InstanceGroupManager(models.Manager):
|
class InstanceGroupManager(models.Manager):
|
||||||
"""A custom manager class for the Instance model.
|
"""A custom manager class for the Instance model.
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import time
|
|||||||
import urllib.parse
|
import urllib.parse
|
||||||
|
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
|
from django.contrib.auth import logout
|
||||||
from django.contrib.auth.models import User
|
from django.contrib.auth.models import User
|
||||||
from django.db.migrations.executor import MigrationExecutor
|
from django.db.migrations.executor import MigrationExecutor
|
||||||
from django.db import connection
|
from django.db import connection
|
||||||
@@ -45,7 +46,7 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
|
|||||||
response['X-API-Total-Time'] = '%0.3fs' % total_time
|
response['X-API-Total-Time'] = '%0.3fs' % total_time
|
||||||
if settings.AWX_REQUEST_PROFILE:
|
if settings.AWX_REQUEST_PROFILE:
|
||||||
response['X-API-Profile-File'] = self.prof.stop()
|
response['X-API-Profile-File'] = self.prof.stop()
|
||||||
perf_logger.info(
|
perf_logger.debug(
|
||||||
f'request: {request}, response_time: {response["X-API-Total-Time"]}',
|
f'request: {request}, response_time: {response["X-API-Total-Time"]}',
|
||||||
extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"])),
|
extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"])),
|
||||||
)
|
)
|
||||||
@@ -71,6 +72,21 @@ class SessionTimeoutMiddleware(MiddlewareMixin):
|
|||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
|
class DisableLocalAuthMiddleware(MiddlewareMixin):
|
||||||
|
"""
|
||||||
|
Respects the presence of the DISABLE_LOCAL_AUTH setting and forces
|
||||||
|
local-only users to logout when they make a request.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def process_request(self, request):
|
||||||
|
if settings.DISABLE_LOCAL_AUTH:
|
||||||
|
user = request.user
|
||||||
|
if not user.pk:
|
||||||
|
return
|
||||||
|
if not (user.profile.ldap_dn or user.social_auth.exists() or user.enterprise_auth.exists()):
|
||||||
|
logout(request)
|
||||||
|
|
||||||
|
|
||||||
def _customize_graph():
|
def _customize_graph():
|
||||||
from awx.main.models import Instance, Schedule, UnifiedJobTemplate
|
from awx.main.models import Instance, Schedule, UnifiedJobTemplate
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ from awx.main.utils.common import set_current_apps
|
|||||||
|
|
||||||
def migrate_to_static_inputs(apps, schema_editor):
|
def migrate_to_static_inputs(apps, schema_editor):
|
||||||
set_current_apps(apps)
|
set_current_apps(apps)
|
||||||
CredentialType.setup_tower_managed_defaults()
|
CredentialType.setup_tower_managed_defaults(apps)
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ from awx.main.utils.common import set_current_apps
|
|||||||
|
|
||||||
def setup_tower_managed_defaults(apps, schema_editor):
|
def setup_tower_managed_defaults(apps, schema_editor):
|
||||||
set_current_apps(apps)
|
set_current_apps(apps)
|
||||||
CredentialType.setup_tower_managed_defaults()
|
CredentialType.setup_tower_managed_defaults(apps)
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ from awx.main.utils.common import set_current_apps
|
|||||||
|
|
||||||
def setup_tower_managed_defaults(apps, schema_editor):
|
def setup_tower_managed_defaults(apps, schema_editor):
|
||||||
set_current_apps(apps)
|
set_current_apps(apps)
|
||||||
CredentialType.setup_tower_managed_defaults()
|
CredentialType.setup_tower_managed_defaults(apps)
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ from awx.main.utils.common import set_current_apps
|
|||||||
|
|
||||||
def create_new_credential_types(apps, schema_editor):
|
def create_new_credential_types(apps, schema_editor):
|
||||||
set_current_apps(apps)
|
set_current_apps(apps)
|
||||||
CredentialType.setup_tower_managed_defaults()
|
CredentialType.setup_tower_managed_defaults(apps)
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ from awx.main.models import CredentialType
|
|||||||
|
|
||||||
|
|
||||||
def update_cyberark_aim_name(apps, schema_editor):
|
def update_cyberark_aim_name(apps, schema_editor):
|
||||||
CredentialType.setup_tower_managed_defaults()
|
CredentialType.setup_tower_managed_defaults(apps)
|
||||||
aim_types = apps.get_model('main', 'CredentialType').objects.filter(namespace='aim').order_by('id')
|
aim_types = apps.get_model('main', 'CredentialType').objects.filter(namespace='aim').order_by('id')
|
||||||
|
|
||||||
if aim_types.count() == 2:
|
if aim_types.count() == 2:
|
||||||
|
|||||||
@@ -10,15 +10,6 @@ def migrate_event_data(apps, schema_editor):
|
|||||||
# that have a bigint primary key (because the old usage of an integer
|
# that have a bigint primary key (because the old usage of an integer
|
||||||
# numeric isn't enough, as its range is about 2.1B, see:
|
# numeric isn't enough, as its range is about 2.1B, see:
|
||||||
# https://www.postgresql.org/docs/9.1/datatype-numeric.html)
|
# https://www.postgresql.org/docs/9.1/datatype-numeric.html)
|
||||||
|
|
||||||
# unfortunately, we can't do this with a simple ALTER TABLE, because
|
|
||||||
# for tables with hundreds of millions or billions of rows, the ALTER TABLE
|
|
||||||
# can take *hours* on modest hardware.
|
|
||||||
#
|
|
||||||
# the approach in this migration means that post-migration, event data will
|
|
||||||
# *not* immediately show up, but will be repopulated over time progressively
|
|
||||||
# the trade-off here is not having to wait hours for the full data migration
|
|
||||||
# before you can start and run AWX again (including new playbook runs)
|
|
||||||
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
||||||
with connection.cursor() as cursor:
|
with connection.cursor() as cursor:
|
||||||
# rename the current event table
|
# rename the current event table
|
||||||
@@ -35,30 +26,7 @@ def migrate_event_data(apps, schema_editor):
|
|||||||
cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
|
cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
|
||||||
cursor.execute(f'ALTER TABLE "{tblname}" ALTER COLUMN "id" ' f"SET DEFAULT nextval('{tblname}_id_seq');")
|
cursor.execute(f'ALTER TABLE "{tblname}" ALTER COLUMN "id" ' f"SET DEFAULT nextval('{tblname}_id_seq');")
|
||||||
cursor.execute(f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);")
|
cursor.execute(f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);")
|
||||||
|
cursor.execute(f'DROP TABLE _old_{tblname};')
|
||||||
# replace the BTREE index on main_jobevent.job_id with
|
|
||||||
# a BRIN index to drastically improve per-UJ lookup performance
|
|
||||||
# see: https://info.crunchydata.com/blog/postgresql-brin-indexes-big-data-performance-with-minimal-storage
|
|
||||||
if tblname == 'main_jobevent':
|
|
||||||
cursor.execute("SELECT indexname FROM pg_indexes WHERE tablename='main_jobevent' AND indexdef LIKE '%USING btree (job_id)';")
|
|
||||||
old_index = cursor.fetchone()[0]
|
|
||||||
cursor.execute(f'DROP INDEX {old_index}')
|
|
||||||
cursor.execute('CREATE INDEX main_jobevent_job_id_brin_idx ON main_jobevent USING brin (job_id);')
|
|
||||||
|
|
||||||
# remove all of the indexes and constraints from the old table
|
|
||||||
# (they just slow down the data migration)
|
|
||||||
cursor.execute(f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename='_old_{tblname}' AND indexname != '{tblname}_pkey';")
|
|
||||||
indexes = cursor.fetchall()
|
|
||||||
|
|
||||||
cursor.execute(
|
|
||||||
f"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = '_old_{tblname}'::regclass AND conname != '{tblname}_pkey';"
|
|
||||||
)
|
|
||||||
constraints = cursor.fetchall()
|
|
||||||
|
|
||||||
for indexname, indexdef in indexes:
|
|
||||||
cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
|
|
||||||
for conname, contype, condef in constraints:
|
|
||||||
cursor.execute(f'ALTER TABLE _old_{tblname} DROP CONSTRAINT IF EXISTS {conname}')
|
|
||||||
|
|
||||||
|
|
||||||
class FakeAlterField(migrations.AlterField):
|
class FakeAlterField(migrations.AlterField):
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ class Migration(migrations.Migration):
|
|||||||
field=models.CharField(
|
field=models.CharField(
|
||||||
choices=[
|
choices=[
|
||||||
('always', 'Always pull container before running.'),
|
('always', 'Always pull container before running.'),
|
||||||
('missing', 'No pull option has been selected.'),
|
('missing', 'Only pull the image if not present before running.'),
|
||||||
('never', 'Never pull container before running.'),
|
('never', 'Never pull container before running.'),
|
||||||
],
|
],
|
||||||
blank=True,
|
blank=True,
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ from awx.main.utils.common import set_current_apps
|
|||||||
|
|
||||||
def setup_tower_managed_defaults(apps, schema_editor):
|
def setup_tower_managed_defaults(apps, schema_editor):
|
||||||
set_current_apps(apps)
|
set_current_apps(apps)
|
||||||
CredentialType.setup_tower_managed_defaults()
|
CredentialType.setup_tower_managed_defaults(apps)
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
|
|||||||
@@ -1,15 +1,33 @@
|
|||||||
# Generated by Django 2.2.16 on 2021-04-21 15:02
|
# Generated by Django 2.2.16 on 2021-04-21 15:02
|
||||||
|
|
||||||
from django.db import migrations, models
|
from django.db import migrations, models, transaction
|
||||||
|
|
||||||
|
|
||||||
|
def remove_iso_instances(apps, schema_editor):
|
||||||
|
Instance = apps.get_model('main', 'Instance')
|
||||||
|
with transaction.atomic():
|
||||||
|
Instance.objects.filter(rampart_groups__controller__isnull=False).delete()
|
||||||
|
|
||||||
|
|
||||||
|
def remove_iso_groups(apps, schema_editor):
|
||||||
|
InstanceGroup = apps.get_model('main', 'InstanceGroup')
|
||||||
|
UnifiedJob = apps.get_model('main', 'UnifiedJob')
|
||||||
|
with transaction.atomic():
|
||||||
|
for ig in InstanceGroup.objects.filter(controller__isnull=False):
|
||||||
|
UnifiedJob.objects.filter(instance_group=ig).update(instance_group=None)
|
||||||
|
ig.delete()
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
|
atomic = False
|
||||||
|
|
||||||
dependencies = [
|
dependencies = [
|
||||||
('main', '0138_custom_inventory_scripts_removal'),
|
('main', '0138_custom_inventory_scripts_removal'),
|
||||||
]
|
]
|
||||||
|
|
||||||
operations = [
|
operations = [
|
||||||
|
migrations.RunPython(remove_iso_instances),
|
||||||
|
migrations.RunPython(remove_iso_groups),
|
||||||
migrations.RemoveField(
|
migrations.RemoveField(
|
||||||
model_name='instance',
|
model_name='instance',
|
||||||
name='last_isolated_check',
|
name='last_isolated_check',
|
||||||
|
|||||||
19
awx/main/migrations/0141_remove_isolated_instances.py
Normal file
19
awx/main/migrations/0141_remove_isolated_instances.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# Generated by Django 2.2.16 on 2021-05-11 19:38
|
||||||
|
|
||||||
|
from django.db import migrations
|
||||||
|
|
||||||
|
|
||||||
|
def forwards(apps, schema_editor):
|
||||||
|
Instance = apps.get_model('main', 'Instance')
|
||||||
|
Instance.objects.filter(version__startswith='ansible-runner-').delete()
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0140_rename'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.RunPython(forwards),
|
||||||
|
]
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
# Generated by Django 2.2.16 on 2021-05-12 20:08
|
||||||
|
|
||||||
|
from django.db import migrations, models
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0141_remove_isolated_instances'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='executionenvironment',
|
||||||
|
name='image',
|
||||||
|
field=models.CharField(
|
||||||
|
help_text='The full image location, including the container registry, image name, and version tag.',
|
||||||
|
max_length=1024,
|
||||||
|
verbose_name='image location',
|
||||||
|
),
|
||||||
|
),
|
||||||
|
]
|
||||||
21
awx/main/migrations/0143_hostmetric.py
Normal file
21
awx/main/migrations/0143_hostmetric.py
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# Generated by Django 2.2.16 on 2021-05-18 18:08
|
||||||
|
|
||||||
|
from django.db import migrations, models
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0142_update_ee_image_field_description'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.CreateModel(
|
||||||
|
name='HostMetric',
|
||||||
|
fields=[
|
||||||
|
('hostname', models.CharField(max_length=512, primary_key=True, serialize=False)),
|
||||||
|
('first_automation', models.DateTimeField(auto_now_add=True, db_index=True, help_text='When the host was first automated against')),
|
||||||
|
('last_automation', models.DateTimeField(db_index=True, help_text='When the host was last automated against')),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
]
|
||||||
268
awx/main/migrations/0144_event_partitions.py
Normal file
268
awx/main/migrations/0144_event_partitions.py
Normal file
@@ -0,0 +1,268 @@
|
|||||||
|
from django.db import migrations, models, connection
|
||||||
|
|
||||||
|
|
||||||
|
def migrate_event_data(apps, schema_editor):
|
||||||
|
# see: https://github.com/ansible/awx/issues/9039
|
||||||
|
#
|
||||||
|
# the goal of this function is -- for each job event table -- to:
|
||||||
|
# - create a parent partition table
|
||||||
|
# - .. with a single partition
|
||||||
|
# - .. that includes all existing job events
|
||||||
|
#
|
||||||
|
# the new main_jobevent_parent table should have a new
|
||||||
|
# denormalized column, job_created, this is used as a
|
||||||
|
# basis for partitioning job event rows
|
||||||
|
#
|
||||||
|
# The initial partition will be a unique case. After
|
||||||
|
# the migration is completed, awx should create
|
||||||
|
# new partitions on an hourly basis, as needed.
|
||||||
|
# All events for a given job should be placed in
|
||||||
|
# a partition based on the job's _created time_.
|
||||||
|
|
||||||
|
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
||||||
|
with connection.cursor() as cursor:
|
||||||
|
# mark existing table as _unpartitioned_*
|
||||||
|
# we will drop this table after its data
|
||||||
|
# has been moved over
|
||||||
|
cursor.execute(f'ALTER TABLE {tblname} RENAME TO _unpartitioned_{tblname}')
|
||||||
|
|
||||||
|
# create a copy of the table that we will use as a reference for schema
|
||||||
|
# otherwise, the schema changes we would make on the old jobevents table
|
||||||
|
# (namely, dropping the primary key constraint) would cause the migration
|
||||||
|
# to suffer a serious performance degradation
|
||||||
|
cursor.execute(f'CREATE TABLE tmp_{tblname} ' f'(LIKE _unpartitioned_{tblname} INCLUDING ALL)')
|
||||||
|
|
||||||
|
# drop primary key constraint; in a partioned table
|
||||||
|
# constraints must include the partition key itself
|
||||||
|
# TODO: do more generic search for pkey constraints
|
||||||
|
# instead of hardcoding this one that applies to main_jobevent
|
||||||
|
cursor.execute(f'ALTER TABLE tmp_{tblname} DROP CONSTRAINT tmp_{tblname}_pkey')
|
||||||
|
|
||||||
|
# create parent table
|
||||||
|
cursor.execute(
|
||||||
|
f'CREATE TABLE {tblname} '
|
||||||
|
f'(LIKE tmp_{tblname} INCLUDING ALL, job_created TIMESTAMP WITH TIME ZONE NOT NULL) '
|
||||||
|
f'PARTITION BY RANGE(job_created);'
|
||||||
|
)
|
||||||
|
|
||||||
|
cursor.execute(f'DROP TABLE tmp_{tblname}')
|
||||||
|
|
||||||
|
# recreate primary key constraint
|
||||||
|
cursor.execute(f'ALTER TABLE ONLY {tblname} ' f'ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')
|
||||||
|
|
||||||
|
with connection.cursor() as cursor:
|
||||||
|
"""
|
||||||
|
Big int migration introduced the brin index main_jobevent_job_id_brin_idx index. For upgardes, we drop the index, new installs do nothing.
|
||||||
|
I have seen the second index in my dev environment. I can not find where in the code it was created. Drop it just in case
|
||||||
|
"""
|
||||||
|
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_brin_idx')
|
||||||
|
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')
|
||||||
|
|
||||||
|
|
||||||
|
class FakeAddField(migrations.AddField):
|
||||||
|
def database_forwards(self, *args):
|
||||||
|
# this is intentionally left blank, because we're
|
||||||
|
# going to accomplish the migration with some custom raw SQL
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0143_hostmetric'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.RunPython(migrate_event_data),
|
||||||
|
FakeAddField(
|
||||||
|
model_name='jobevent',
|
||||||
|
name='job_created',
|
||||||
|
field=models.DateTimeField(null=True, editable=False),
|
||||||
|
),
|
||||||
|
FakeAddField(
|
||||||
|
model_name='inventoryupdateevent',
|
||||||
|
name='job_created',
|
||||||
|
field=models.DateTimeField(null=True, editable=False),
|
||||||
|
),
|
||||||
|
FakeAddField(
|
||||||
|
model_name='projectupdateevent',
|
||||||
|
name='job_created',
|
||||||
|
field=models.DateTimeField(null=True, editable=False),
|
||||||
|
),
|
||||||
|
FakeAddField(
|
||||||
|
model_name='adhoccommandevent',
|
||||||
|
name='job_created',
|
||||||
|
field=models.DateTimeField(null=True, editable=False),
|
||||||
|
),
|
||||||
|
FakeAddField(
|
||||||
|
model_name='systemjobevent',
|
||||||
|
name='job_created',
|
||||||
|
field=models.DateTimeField(null=True, editable=False),
|
||||||
|
),
|
||||||
|
migrations.CreateModel(
|
||||||
|
name='UnpartitionedAdHocCommandEvent',
|
||||||
|
fields=[],
|
||||||
|
options={
|
||||||
|
'proxy': True,
|
||||||
|
'indexes': [],
|
||||||
|
'constraints': [],
|
||||||
|
},
|
||||||
|
bases=('main.adhoccommandevent',),
|
||||||
|
),
|
||||||
|
migrations.CreateModel(
|
||||||
|
name='UnpartitionedInventoryUpdateEvent',
|
||||||
|
fields=[],
|
||||||
|
options={
|
||||||
|
'proxy': True,
|
||||||
|
'indexes': [],
|
||||||
|
'constraints': [],
|
||||||
|
},
|
||||||
|
bases=('main.inventoryupdateevent',),
|
||||||
|
),
|
||||||
|
migrations.CreateModel(
|
||||||
|
name='UnpartitionedJobEvent',
|
||||||
|
fields=[],
|
||||||
|
options={
|
||||||
|
'proxy': True,
|
||||||
|
'indexes': [],
|
||||||
|
'constraints': [],
|
||||||
|
},
|
||||||
|
bases=('main.jobevent',),
|
||||||
|
),
|
||||||
|
migrations.CreateModel(
|
||||||
|
name='UnpartitionedProjectUpdateEvent',
|
||||||
|
fields=[],
|
||||||
|
options={
|
||||||
|
'proxy': True,
|
||||||
|
'indexes': [],
|
||||||
|
'constraints': [],
|
||||||
|
},
|
||||||
|
bases=('main.projectupdateevent',),
|
||||||
|
),
|
||||||
|
migrations.CreateModel(
|
||||||
|
name='UnpartitionedSystemJobEvent',
|
||||||
|
fields=[],
|
||||||
|
options={
|
||||||
|
'proxy': True,
|
||||||
|
'indexes': [],
|
||||||
|
'constraints': [],
|
||||||
|
},
|
||||||
|
bases=('main.systemjobevent',),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='adhoccommandevent',
|
||||||
|
name='ad_hoc_command',
|
||||||
|
field=models.ForeignKey(
|
||||||
|
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='ad_hoc_command_events', to='main.AdHocCommand'
|
||||||
|
),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='adhoccommandevent',
|
||||||
|
name='created',
|
||||||
|
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='adhoccommandevent',
|
||||||
|
name='modified',
|
||||||
|
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='inventoryupdateevent',
|
||||||
|
name='created',
|
||||||
|
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='inventoryupdateevent',
|
||||||
|
name='inventory_update',
|
||||||
|
field=models.ForeignKey(
|
||||||
|
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='inventory_update_events', to='main.InventoryUpdate'
|
||||||
|
),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='inventoryupdateevent',
|
||||||
|
name='modified',
|
||||||
|
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='jobevent',
|
||||||
|
name='created',
|
||||||
|
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='jobevent',
|
||||||
|
name='job',
|
||||||
|
field=models.ForeignKey(db_index=False, editable=False, null=True, on_delete=models.deletion.DO_NOTHING, related_name='job_events', to='main.Job'),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='jobevent',
|
||||||
|
name='modified',
|
||||||
|
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='projectupdateevent',
|
||||||
|
name='created',
|
||||||
|
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='projectupdateevent',
|
||||||
|
name='modified',
|
||||||
|
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='projectupdateevent',
|
||||||
|
name='project_update',
|
||||||
|
field=models.ForeignKey(
|
||||||
|
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='project_update_events', to='main.ProjectUpdate'
|
||||||
|
),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='systemjobevent',
|
||||||
|
name='created',
|
||||||
|
field=models.DateTimeField(default=None, editable=False, null=True),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='systemjobevent',
|
||||||
|
name='modified',
|
||||||
|
field=models.DateTimeField(db_index=True, default=None, editable=False),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='systemjobevent',
|
||||||
|
name='system_job',
|
||||||
|
field=models.ForeignKey(
|
||||||
|
db_index=False, editable=False, on_delete=models.deletion.DO_NOTHING, related_name='system_job_events', to='main.SystemJob'
|
||||||
|
),
|
||||||
|
),
|
||||||
|
migrations.AlterIndexTogether(
|
||||||
|
name='adhoccommandevent',
|
||||||
|
index_together={
|
||||||
|
('ad_hoc_command', 'job_created', 'event'),
|
||||||
|
('ad_hoc_command', 'job_created', 'counter'),
|
||||||
|
('ad_hoc_command', 'job_created', 'uuid'),
|
||||||
|
},
|
||||||
|
),
|
||||||
|
migrations.AlterIndexTogether(
|
||||||
|
name='inventoryupdateevent',
|
||||||
|
index_together={('inventory_update', 'job_created', 'counter'), ('inventory_update', 'job_created', 'uuid')},
|
||||||
|
),
|
||||||
|
migrations.AlterIndexTogether(
|
||||||
|
name='jobevent',
|
||||||
|
index_together={
|
||||||
|
('job', 'job_created', 'counter'),
|
||||||
|
('job', 'job_created', 'uuid'),
|
||||||
|
('job', 'job_created', 'event'),
|
||||||
|
('job', 'job_created', 'parent_uuid'),
|
||||||
|
},
|
||||||
|
),
|
||||||
|
migrations.AlterIndexTogether(
|
||||||
|
name='projectupdateevent',
|
||||||
|
index_together={
|
||||||
|
('project_update', 'job_created', 'uuid'),
|
||||||
|
('project_update', 'job_created', 'event'),
|
||||||
|
('project_update', 'job_created', 'counter'),
|
||||||
|
},
|
||||||
|
),
|
||||||
|
migrations.AlterIndexTogether(
|
||||||
|
name='systemjobevent',
|
||||||
|
index_together={('system_job', 'job_created', 'uuid'), ('system_job', 'job_created', 'counter')},
|
||||||
|
),
|
||||||
|
]
|
||||||
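Note: the comments above describe the scheme this migration sets up: one parent table per event type, RANGE-partitioned on the denormalized job_created column, with new partitions expected to be created roughly hourly after the cut-over. Purely as an illustration (the function name and partition naming below are assumptions, not code from this changeset), attaching one hourly partition might look like:

# Hypothetical sketch: create a one-hour partition of main_jobevent.
from datetime import timedelta

from django.db import connection
from django.utils.timezone import now


def create_hourly_partition(tblname='main_jobevent'):
    # Truncate the current time to the hour and cover [start, start + 1h).
    start = now().replace(minute=0, second=0, microsecond=0)
    end = start + timedelta(hours=1)
    partition = f"{tblname}_{start.strftime('%Y%m%d_%H')}"
    with connection.cursor() as cursor:
        cursor.execute(
            f'CREATE TABLE IF NOT EXISTS {partition} PARTITION OF {tblname} '
            f"FOR VALUES FROM ('{start.isoformat()}') TO ('{end.isoformat()}')"
        )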
awx/main/migrations/0145_deregister_managed_ee_objs.py (new file, 21 lines)
@@ -0,0 +1,21 @@
# Generated by Django 2.2.16 on 2021-06-07 19:36

from django.db import migrations


def forwards(apps, schema_editor):
    ExecutionEnvironment = apps.get_model('main', 'ExecutionEnvironment')
    for row in ExecutionEnvironment.objects.filter(managed_by_tower=True):
        row.managed_by_tower = False
        row.save(update_fields=['managed_by_tower'])


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0144_event_partitions'),
    ]

    operations = [
        migrations.RunPython(forwards),
    ]
awx/main/migrations/0146_add_insights_inventory.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# Generated by Django 2.2.16 on 2021-06-08 18:59

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0145_deregister_managed_ee_objs'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='host',
            name='insights_system_id',
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('tower', 'Ansible Tower'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('tower', 'Ansible Tower'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
    ]
awx/main/migrations/0147_validate_ee_image_field.py (new file, 24 lines)
@@ -0,0 +1,24 @@
# Generated by Django 2.2.16 on 2021-06-15 02:49

import awx.main.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0146_add_insights_inventory'),
    ]

    operations = [
        migrations.AlterField(
            model_name='executionenvironment',
            name='image',
            field=models.CharField(
                help_text='The full image location, including the container registry, image name, and version tag.',
                max_length=1024,
                validators=[awx.main.validators.validate_container_image_name],
                verbose_name='image location',
            ),
        ),
    ]
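Note: this migration only re-declares the field so that new writes go through awx.main.validators.validate_container_image_name; Django validators run on model/form validation, so existing rows are not re-checked at the database level. The validator's implementation is not shown in this diff; purely as an illustration of the general shape such a check could take (this is NOT the actual awx.main.validators code):

# Illustrative only: a minimal container-image-reference check.
import re

from django.core.exceptions import ValidationError


def validate_container_image_name_sketch(value):
    # Reject empty values and whitespace; allow registry/name[:tag] or @digest forms.
    if not value or not re.match(r'^[\w][\w./:@-]*$', value):
        raise ValidationError('%s is not a valid container image reference' % value)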
awx/main/migrations/0148_unifiedjob_receptor_unit_id.py (new file, 20 lines)
@@ -0,0 +1,20 @@
# Generated by Django 2.2.16 on 2021-06-11 04:50

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0147_validate_ee_image_field'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='work_unit_id',
            field=models.CharField(
                blank=True, default=None, editable=False, help_text='The Receptor work unit ID associated with this job.', max_length=255, null=True
            ),
        ),
    ]
awx/main/migrations/0149_remove_inventory_insights_credential.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# Generated by Django 2.2.16 on 2021-06-16 21:00

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0148_unifiedjob_receptor_unit_id'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inventory',
            name='insights_credential',
        ),
    ]
awx/main/migrations/0150_rename_inv_sources_inv_updates.py (new file, 113 lines)
@@ -0,0 +1,113 @@
# Generated by Django 2.2.16 on 2021-06-17 13:12
import logging

from django.db import migrations, models

from awx.main.models.credential import ManagedCredentialType, CredentialType as ModernCredentialType


logger = logging.getLogger(__name__)


def forwards(apps, schema_editor):
    InventoryUpdate = apps.get_model('main', 'InventoryUpdate')
    InventorySource = apps.get_model('main', 'InventorySource')

    r = InventoryUpdate.objects.filter(source='tower').update(source='controller')
    if r:
        logger.warn(f'Renamed {r} tower inventory updates to controller')
    r = InventorySource.objects.filter(source='tower').update(source='controller')
    if r:
        logger.warn(f'Renamed {r} tower inventory sources to controller')

    CredentialType = apps.get_model('main', 'CredentialType')

    tower_type = CredentialType.objects.filter(managed_by_tower=True, namespace='tower').first()
    if tower_type is not None:
        controller_type = CredentialType.objects.filter(managed_by_tower=True, namespace='controller', kind='cloud').first()
        if controller_type:
            # this gets created by prior migrations in upgrade scenarios
            controller_type.delete()

        registry_type = ManagedCredentialType.registry.get('controller')
        if not registry_type:
            raise RuntimeError('Expected to find controller credential, this may need to be edited in the future!')
        logger.warn('Renaming the Ansible Tower credential type for existing install')
        tower_type.name = registry_type.name  # sensitive to translations
        tower_type.namespace = 'controller'  # if not done, will error setup_tower_managed_defaults
        tower_type.save(update_fields=['name', 'namespace'])

    ModernCredentialType.setup_tower_managed_defaults(apps)


def backwards(apps, schema_editor):
    InventoryUpdate = apps.get_model('main', 'InventoryUpdate')
    InventorySource = apps.get_model('main', 'InventorySource')

    r = InventoryUpdate.objects.filter(source='controller').update(source='tower')
    if r:
        logger.warn(f'Renamed {r} controller inventory updates to tower')
    r = InventorySource.objects.filter(source='controller').update(source='tower')
    if r:
        logger.warn(f'Renamed {r} controller inventory sources to tower')

    CredentialType = apps.get_model('main', 'CredentialType')

    tower_type = CredentialType.objects.filter(managed_by_tower=True, namespace='controller', kind='cloud').first()
    if tower_type is not None and not CredentialType.objects.filter(managed_by_tower=True, namespace='tower').exists():
        logger.info('Renaming the controller credential type back')
        tower_type.namespace = 'tower'
        tower_type.name = 'Ansible Tower'
        tower_type.save(update_fields=['namespace', 'name'])


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0149_remove_inventory_insights_credential'),
    ]
    operations = [
        migrations.RunPython(migrations.RunPython.noop, backwards),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]
awx/main/migrations/0151_rename_managed_by_tower.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# Generated by Django 2.2.16 on 2021-06-17 18:32

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0150_rename_inv_sources_inv_updates'),
    ]

    operations = [
        migrations.RenameField(
            model_name='credential',
            old_name='managed_by_tower',
            new_name='managed',
        ),
        migrations.RenameField(
            model_name='credentialtype',
            old_name='managed_by_tower',
            new_name='managed',
        ),
        migrations.RenameField(
            model_name='executionenvironment',
            old_name='managed_by_tower',
            new_name='managed',
        ),
    ]
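Note: because this rename lands part-way through the migration chain, data migrations that can run either before or after it cannot assume which field name exists on the historical model. The hunks that follow handle this in two ways: a try/except around the old keyword in the galaxy-settings migration, and field introspection inside CredentialType.setup_tower_managed_defaults. A condensed sketch of the introspection approach (the helper name below is illustrative, not code from this changeset):

# Illustrative helper: pick the right keyword for a historical model.
def managed_kwarg(model_cls, value=True):
    field_names = [f.name for f in model_cls._meta.get_fields()]
    # Schemas from before 0151 still call the flag managed_by_tower.
    key = 'managed' if 'managed' in field_names else 'managed_by_tower'
    return {key: value}

# e.g. Credential(name='Ansible Galaxy', **managed_kwarg(Credential), ...)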
@@ -19,7 +19,7 @@ def migrate_galaxy_settings(apps, schema_editor):
|
|||||||
# nothing to migrate
|
# nothing to migrate
|
||||||
return
|
return
|
||||||
set_current_apps(apps)
|
set_current_apps(apps)
|
||||||
ModernCredentialType.setup_tower_managed_defaults()
|
ModernCredentialType.setup_tower_managed_defaults(apps)
|
||||||
CredentialType = apps.get_model('main', 'CredentialType')
|
CredentialType = apps.get_model('main', 'CredentialType')
|
||||||
Credential = apps.get_model('main', 'Credential')
|
Credential = apps.get_model('main', 'Credential')
|
||||||
Setting = apps.get_model('conf', 'Setting')
|
Setting = apps.get_model('conf', 'Setting')
|
||||||
@@ -34,10 +34,21 @@ def migrate_galaxy_settings(apps, schema_editor):
|
|||||||
if public_galaxy_setting and public_galaxy_setting.value is False:
|
if public_galaxy_setting and public_galaxy_setting.value is False:
|
||||||
# ...UNLESS this behavior was explicitly disabled via this setting
|
# ...UNLESS this behavior was explicitly disabled via this setting
|
||||||
public_galaxy_enabled = False
|
public_galaxy_enabled = False
|
||||||
|
try:
|
||||||
public_galaxy_credential = Credential(
|
# Needed for old migrations
|
||||||
created=now(), modified=now(), name='Ansible Galaxy', managed_by_tower=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'}
|
public_galaxy_credential = Credential(
|
||||||
)
|
created=now(),
|
||||||
|
modified=now(),
|
||||||
|
name='Ansible Galaxy',
|
||||||
|
managed_by_tower=True,
|
||||||
|
credential_type=galaxy_type,
|
||||||
|
inputs={'url': 'https://galaxy.ansible.com/'},
|
||||||
|
)
|
||||||
|
except:
|
||||||
|
# Needed for new migrations, tests
|
||||||
|
public_galaxy_credential = Credential(
|
||||||
|
created=now(), modified=now(), name='Ansible Galaxy', managed=True, credential_type=galaxy_type, inputs={'url': 'https://galaxy.ansible.com/'}
|
||||||
|
)
|
||||||
public_galaxy_credential.save()
|
public_galaxy_credential.save()
|
||||||
|
|
||||||
for org in Organization.objects.all():
|
for org in Organization.objects.all():
|
||||||
|
|||||||
@@ -3,7 +3,6 @@
|
|||||||
|
|
||||||
# Django
|
# Django
|
||||||
from django.conf import settings # noqa
|
from django.conf import settings # noqa
|
||||||
from django.db import connection
|
|
||||||
from django.db.models.signals import pre_delete # noqa
|
from django.db.models.signals import pre_delete # noqa
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
@@ -12,7 +11,16 @@ from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutM
|
|||||||
from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership # noqa
|
from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership # noqa
|
||||||
from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env # noqa
|
from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env # noqa
|
||||||
from awx.main.models.projects import Project, ProjectUpdate # noqa
|
from awx.main.models.projects import Project, ProjectUpdate # noqa
|
||||||
from awx.main.models.inventory import Group, Host, Inventory, InventorySource, InventoryUpdate, SmartInventoryMembership # noqa
|
from awx.main.models.inventory import ( # noqa
|
||||||
|
CustomInventoryScript,
|
||||||
|
Group,
|
||||||
|
Host,
|
||||||
|
HostMetric,
|
||||||
|
Inventory,
|
||||||
|
InventorySource,
|
||||||
|
InventoryUpdate,
|
||||||
|
SmartInventoryMembership,
|
||||||
|
)
|
||||||
from awx.main.models.jobs import ( # noqa
|
from awx.main.models.jobs import ( # noqa
|
||||||
Job,
|
Job,
|
||||||
JobHostSummary,
|
JobHostSummary,
|
||||||
@@ -27,6 +35,11 @@ from awx.main.models.events import ( # noqa
|
|||||||
JobEvent,
|
JobEvent,
|
||||||
ProjectUpdateEvent,
|
ProjectUpdateEvent,
|
||||||
SystemJobEvent,
|
SystemJobEvent,
|
||||||
|
UnpartitionedAdHocCommandEvent,
|
||||||
|
UnpartitionedInventoryUpdateEvent,
|
||||||
|
UnpartitionedJobEvent,
|
||||||
|
UnpartitionedProjectUpdateEvent,
|
||||||
|
UnpartitionedSystemJobEvent,
|
||||||
)
|
)
|
||||||
from awx.main.models.ad_hoc_commands import AdHocCommand # noqa
|
from awx.main.models.ad_hoc_commands import AdHocCommand # noqa
|
||||||
from awx.main.models.schedules import Schedule # noqa
|
from awx.main.models.schedules import Schedule # noqa
|
||||||
@@ -83,27 +96,6 @@ User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
|||||||
User.add_to_class('accessible_objects', user_accessible_objects)
|
User.add_to_class('accessible_objects', user_accessible_objects)
|
||||||
|
|
||||||
|
|
||||||
def enforce_bigint_pk_migration():
|
|
||||||
#
|
|
||||||
# NOTE: this function is not actually in use anymore,
|
|
||||||
# but has been intentionally kept for historical purposes,
|
|
||||||
# and to serve as an illustration if we ever need to perform
|
|
||||||
# bulk modification/migration of event data in the future.
|
|
||||||
#
|
|
||||||
# see: https://github.com/ansible/awx/issues/6010
|
|
||||||
# look at all the event tables and verify that they have been fully migrated
|
|
||||||
# from the *old* int primary key table to the replacement bigint table
|
|
||||||
# if not, attempt to migrate them in the background
|
|
||||||
#
|
|
||||||
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
|
||||||
with connection.cursor() as cursor:
|
|
||||||
cursor.execute('SELECT 1 FROM information_schema.tables WHERE table_name=%s', (f'_old_{tblname}',))
|
|
||||||
if bool(cursor.rowcount):
|
|
||||||
from awx.main.tasks import migrate_legacy_event_data
|
|
||||||
|
|
||||||
migrate_legacy_event_data.apply_async([tblname])
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup_created_modified_by(sender, **kwargs):
|
def cleanup_created_modified_by(sender, **kwargs):
|
||||||
# work around a bug in django-polymorphic that doesn't properly
|
# work around a bug in django-polymorphic that doesn't properly
|
||||||
# handle cascades for reverse foreign keys on the polymorphic base model
|
# handle cascades for reverse foreign keys on the polymorphic base model
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ from django.core.exceptions import ValidationError
|
|||||||
# AWX
|
# AWX
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
|
from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
|
||||||
from awx.main.models.events import AdHocCommandEvent
|
from awx.main.models.events import AdHocCommandEvent, UnpartitionedAdHocCommandEvent
|
||||||
from awx.main.models.unified_jobs import UnifiedJob
|
from awx.main.models.unified_jobs import UnifiedJob
|
||||||
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
|
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
|
||||||
|
|
||||||
@@ -127,6 +127,8 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def event_class(self):
|
def event_class(self):
|
||||||
|
if self.has_unpartitioned_events:
|
||||||
|
return UnpartitionedAdHocCommandEvent
|
||||||
return AdHocCommandEvent
|
return AdHocCommandEvent
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@@ -215,9 +217,6 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
|||||||
self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
|
self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
|
||||||
if 'name' not in update_fields:
|
if 'name' not in update_fields:
|
||||||
update_fields.append('name')
|
update_fields.append('name')
|
||||||
if not self.execution_environment_id:
|
|
||||||
self.execution_environment = self.resolve_execution_environment()
|
|
||||||
update_fields.append('execution_environment')
|
|
||||||
super(AdHocCommand, self).save(*args, **kwargs)
|
super(AdHocCommand, self).save(*args, **kwargs)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ from django.utils.translation import ugettext_lazy as _, ugettext_noop
|
|||||||
from django.core.exceptions import ValidationError
|
from django.core.exceptions import ValidationError
|
||||||
from django.utils.encoding import force_text
|
from django.utils.encoding import force_text
|
||||||
from django.utils.functional import cached_property
|
from django.utils.functional import cached_property
|
||||||
|
from django.utils.timezone import now
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
@@ -31,6 +32,7 @@ from awx.main.fields import (
|
|||||||
)
|
)
|
||||||
from awx.main.utils import decrypt_field, classproperty
|
from awx.main.utils import decrypt_field, classproperty
|
||||||
from awx.main.utils.safe_yaml import safe_dump
|
from awx.main.utils.safe_yaml import safe_dump
|
||||||
|
from awx.main.utils.execution_environments import to_container_path
|
||||||
from awx.main.validators import validate_ssh_private_key
|
from awx.main.validators import validate_ssh_private_key
|
||||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, PrimordialModel
|
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, PrimordialModel
|
||||||
from awx.main.models.mixins import ResourceMixin
|
from awx.main.models.mixins import ResourceMixin
|
||||||
@@ -91,7 +93,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
|
|||||||
on_delete=models.CASCADE,
|
on_delete=models.CASCADE,
|
||||||
help_text=_('Specify the type of credential you want to create. Refer ' 'to the documentation for details on each type.'),
|
help_text=_('Specify the type of credential you want to create. Refer ' 'to the documentation for details on each type.'),
|
||||||
)
|
)
|
||||||
managed_by_tower = models.BooleanField(default=False, editable=False)
|
managed = models.BooleanField(default=False, editable=False)
|
||||||
organization = models.ForeignKey(
|
organization = models.ForeignKey(
|
||||||
'Organization',
|
'Organization',
|
||||||
null=True,
|
null=True,
|
||||||
@@ -340,7 +342,7 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
)
|
)
|
||||||
|
|
||||||
kind = models.CharField(max_length=32, choices=KIND_CHOICES)
|
kind = models.CharField(max_length=32, choices=KIND_CHOICES)
|
||||||
managed_by_tower = models.BooleanField(default=False, editable=False)
|
managed = models.BooleanField(default=False, editable=False)
|
||||||
namespace = models.CharField(max_length=1024, null=True, default=None, editable=False)
|
namespace = models.CharField(max_length=1024, null=True, default=None, editable=False)
|
||||||
inputs = CredentialTypeInputField(
|
inputs = CredentialTypeInputField(
|
||||||
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
|
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
|
||||||
@@ -354,7 +356,7 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
@classmethod
|
@classmethod
|
||||||
def from_db(cls, db, field_names, values):
|
def from_db(cls, db, field_names, values):
|
||||||
instance = super(CredentialType, cls).from_db(db, field_names, values)
|
instance = super(CredentialType, cls).from_db(db, field_names, values)
|
||||||
if instance.managed_by_tower and instance.namespace:
|
if instance.managed and instance.namespace:
|
||||||
native = ManagedCredentialType.registry[instance.namespace]
|
native = ManagedCredentialType.registry[instance.namespace]
|
||||||
instance.inputs = native.inputs
|
instance.inputs = native.inputs
|
||||||
instance.injectors = native.injectors
|
instance.injectors = native.injectors
|
||||||
@@ -394,9 +396,13 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
return dict((k, functools.partial(v.create)) for k, v in ManagedCredentialType.registry.items())
|
return dict((k, functools.partial(v.create)) for k, v in ManagedCredentialType.registry.items())
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def setup_tower_managed_defaults(cls):
|
def setup_tower_managed_defaults(cls, apps=None):
|
||||||
|
if apps is not None:
|
||||||
|
ct_class = apps.get_model('main', 'CredentialType')
|
||||||
|
else:
|
||||||
|
ct_class = CredentialType
|
||||||
for default in ManagedCredentialType.registry.values():
|
for default in ManagedCredentialType.registry.values():
|
||||||
existing = CredentialType.objects.filter(name=default.name, kind=default.kind).first()
|
existing = ct_class.objects.filter(name=default.name, kind=default.kind).first()
|
||||||
if existing is not None:
|
if existing is not None:
|
||||||
existing.namespace = default.namespace
|
existing.namespace = default.namespace
|
||||||
existing.inputs = {}
|
existing.inputs = {}
|
||||||
@@ -404,7 +410,11 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
existing.save()
|
existing.save()
|
||||||
continue
|
continue
|
||||||
logger.debug(_("adding %s credential type" % default.name))
|
logger.debug(_("adding %s credential type" % default.name))
|
||||||
created = default.create()
|
params = default.get_creation_params()
|
||||||
|
if 'managed' not in [f.name for f in ct_class._meta.get_fields()]:
|
||||||
|
params['managed_by_tower'] = params.pop('managed')
|
||||||
|
params['created'] = params['modified'] = now() # CreatedModifiedModel service
|
||||||
|
created = ct_class(**params)
|
||||||
created.inputs = created.injectors = {}
|
created.inputs = created.injectors = {}
|
||||||
created.save()
|
created.save()
|
||||||
|
|
||||||
@@ -438,7 +448,7 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
files)
|
files)
|
||||||
"""
|
"""
|
||||||
if not self.injectors:
|
if not self.injectors:
|
||||||
if self.managed_by_tower and credential.credential_type.namespace in dir(builtin_injectors):
|
if self.managed and credential.credential_type.namespace in dir(builtin_injectors):
|
||||||
injected_env = {}
|
injected_env = {}
|
||||||
getattr(builtin_injectors, credential.credential_type.namespace)(credential, injected_env, private_data_dir)
|
getattr(builtin_injectors, credential.credential_type.namespace)(credential, injected_env, private_data_dir)
|
||||||
env.update(injected_env)
|
env.update(injected_env)
|
||||||
@@ -493,12 +503,11 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
|
|
||||||
for file_label, file_tmpl in file_tmpls.items():
|
for file_label, file_tmpl in file_tmpls.items():
|
||||||
data = sandbox_env.from_string(file_tmpl).render(**namespace)
|
data = sandbox_env.from_string(file_tmpl).render(**namespace)
|
||||||
_, path = tempfile.mkstemp(dir=private_data_dir)
|
_, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
|
||||||
with open(path, 'w') as f:
|
with open(path, 'w') as f:
|
||||||
f.write(data)
|
f.write(data)
|
||||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||||
# FIXME: develop some better means of referencing paths inside containers
|
container_path = to_container_path(path, private_data_dir)
|
||||||
container_path = os.path.join('/runner', os.path.basename(path))
|
|
||||||
|
|
||||||
# determine if filename indicates single file or many
|
# determine if filename indicates single file or many
|
||||||
if file_label.find('.') == -1:
|
if file_label.find('.') == -1:
|
||||||
@@ -526,7 +535,7 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
extra_vars[var_name] = sandbox_env.from_string(tmpl).render(**namespace)
|
extra_vars[var_name] = sandbox_env.from_string(tmpl).render(**namespace)
|
||||||
|
|
||||||
def build_extra_vars_file(vars, private_dir):
|
def build_extra_vars_file(vars, private_dir):
|
||||||
handle, path = tempfile.mkstemp(dir=private_dir)
|
handle, path = tempfile.mkstemp(dir=os.path.join(private_dir, 'env'))
|
||||||
f = os.fdopen(handle, 'w')
|
f = os.fdopen(handle, 'w')
|
||||||
f.write(safe_dump(vars))
|
f.write(safe_dump(vars))
|
||||||
f.close()
|
f.close()
|
||||||
@@ -535,8 +544,7 @@ class CredentialType(CommonModelNameNotUnique):
|
|||||||
|
|
||||||
if extra_vars:
|
if extra_vars:
|
||||||
path = build_extra_vars_file(extra_vars, private_data_dir)
|
path = build_extra_vars_file(extra_vars, private_data_dir)
|
||||||
# FIXME: develop some better means of referencing paths inside containers
|
container_path = to_container_path(path, private_data_dir)
|
||||||
container_path = os.path.join('/runner', os.path.basename(path))
|
|
||||||
args.extend(['-e', '@%s' % container_path])
|
args.extend(['-e', '@%s' % container_path])
|
||||||
|
|
||||||
|
|
||||||
@@ -557,16 +565,19 @@ class ManagedCredentialType(SimpleNamespace):
|
|||||||
)
|
)
|
||||||
ManagedCredentialType.registry[namespace] = self
|
ManagedCredentialType.registry[namespace] = self
|
||||||
|
|
||||||
def create(self):
|
def get_creation_params(self):
|
||||||
return CredentialType(
|
return dict(
|
||||||
namespace=self.namespace,
|
namespace=self.namespace,
|
||||||
kind=self.kind,
|
kind=self.kind,
|
||||||
name=self.name,
|
name=self.name,
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs=self.inputs,
|
inputs=self.inputs,
|
||||||
injectors=self.injectors,
|
injectors=self.injectors,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def create(self):
|
||||||
|
return CredentialType(**self.get_creation_params())
|
||||||
|
|
||||||
|
|
||||||
ManagedCredentialType(
|
ManagedCredentialType(
|
||||||
namespace='ssh',
|
namespace='ssh',
|
||||||
@@ -607,7 +618,7 @@ ManagedCredentialType(
|
|||||||
namespace='scm',
|
namespace='scm',
|
||||||
kind='scm',
|
kind='scm',
|
||||||
name=ugettext_noop('Source Control'),
|
name=ugettext_noop('Source Control'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
||||||
@@ -622,7 +633,7 @@ ManagedCredentialType(
|
|||||||
namespace='vault',
|
namespace='vault',
|
||||||
kind='vault',
|
kind='vault',
|
||||||
name=ugettext_noop('Vault'),
|
name=ugettext_noop('Vault'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'vault_password', 'label': ugettext_noop('Vault Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
|
{'id': 'vault_password', 'label': ugettext_noop('Vault Password'), 'type': 'string', 'secret': True, 'ask_at_runtime': True},
|
||||||
@@ -648,7 +659,7 @@ ManagedCredentialType(
|
|||||||
namespace='net',
|
namespace='net',
|
||||||
kind='net',
|
kind='net',
|
||||||
name=ugettext_noop('Network'),
|
name=ugettext_noop('Network'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
||||||
@@ -688,7 +699,7 @@ ManagedCredentialType(
|
|||||||
namespace='aws',
|
namespace='aws',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('Amazon Web Services'),
|
name=ugettext_noop('Amazon Web Services'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'username', 'label': ugettext_noop('Access Key'), 'type': 'string'},
|
{'id': 'username', 'label': ugettext_noop('Access Key'), 'type': 'string'},
|
||||||
@@ -719,7 +730,7 @@ ManagedCredentialType(
|
|||||||
namespace='openstack',
|
namespace='openstack',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('OpenStack'),
|
name=ugettext_noop('OpenStack'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
||||||
@@ -777,7 +788,7 @@ ManagedCredentialType(
|
|||||||
namespace='vmware',
|
namespace='vmware',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('VMware vCenter'),
|
name=ugettext_noop('VMware vCenter'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
@@ -802,7 +813,7 @@ ManagedCredentialType(
|
|||||||
namespace='satellite6',
|
namespace='satellite6',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('Red Hat Satellite 6'),
|
name=ugettext_noop('Red Hat Satellite 6'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
@@ -827,7 +838,7 @@ ManagedCredentialType(
|
|||||||
namespace='gce',
|
namespace='gce',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('Google Compute Engine'),
|
name=ugettext_noop('Google Compute Engine'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
@@ -865,7 +876,7 @@ ManagedCredentialType(
|
|||||||
namespace='azure_rm',
|
namespace='azure_rm',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('Microsoft Azure Resource Manager'),
|
name=ugettext_noop('Microsoft Azure Resource Manager'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
@@ -904,7 +915,7 @@ ManagedCredentialType(
|
|||||||
namespace='github_token',
|
namespace='github_token',
|
||||||
kind='token',
|
kind='token',
|
||||||
name=ugettext_noop('GitHub Personal Access Token'),
|
name=ugettext_noop('GitHub Personal Access Token'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
@@ -923,7 +934,7 @@ ManagedCredentialType(
|
|||||||
namespace='gitlab_token',
|
namespace='gitlab_token',
|
||||||
kind='token',
|
kind='token',
|
||||||
name=ugettext_noop('GitLab Personal Access Token'),
|
name=ugettext_noop('GitLab Personal Access Token'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
@@ -942,7 +953,7 @@ ManagedCredentialType(
|
|||||||
namespace='insights',
|
namespace='insights',
|
||||||
kind='insights',
|
kind='insights',
|
||||||
name=ugettext_noop('Insights'),
|
name=ugettext_noop('Insights'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
{'id': 'username', 'label': ugettext_noop('Username'), 'type': 'string'},
|
||||||
@@ -955,6 +966,10 @@ ManagedCredentialType(
|
|||||||
"scm_username": "{{username}}",
|
"scm_username": "{{username}}",
|
||||||
"scm_password": "{{password}}",
|
"scm_password": "{{password}}",
|
||||||
},
|
},
|
||||||
|
'env': {
|
||||||
|
'INSIGHTS_USER': '{{username}}',
|
||||||
|
'INSIGHTS_PASSWORD': '{{password}}',
|
||||||
|
},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -962,7 +977,7 @@ ManagedCredentialType(
|
|||||||
namespace='rhv',
|
namespace='rhv',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('Red Hat Virtualization'),
|
name=ugettext_noop('Red Hat Virtualization'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{'id': 'host', 'label': ugettext_noop('Host (Authentication URL)'), 'type': 'string', 'help_text': ugettext_noop('The host to authenticate with.')},
|
{'id': 'host', 'label': ugettext_noop('Host (Authentication URL)'), 'type': 'string', 'help_text': ugettext_noop('The host to authenticate with.')},
|
||||||
@@ -1003,23 +1018,25 @@ ManagedCredentialType(
|
|||||||
)
|
)
|
||||||
|
|
||||||
ManagedCredentialType(
|
ManagedCredentialType(
|
||||||
namespace='tower',
|
namespace='controller',
|
||||||
kind='cloud',
|
kind='cloud',
|
||||||
name=ugettext_noop('Ansible Tower'),
|
name=ugettext_noop('Red Hat Ansible Automation Platform'),
|
||||||
managed_by_tower=True,
|
managed=True,
|
||||||
inputs={
|
inputs={
|
||||||
'fields': [
|
'fields': [
|
||||||
{
|
{
|
||||||
'id': 'host',
|
'id': 'host',
|
||||||
'label': ugettext_noop('Ansible Tower Hostname'),
|
'label': ugettext_noop('Red Hat Ansible Automation Platform'),
|
||||||
'type': 'string',
|
'type': 'string',
|
||||||
'help_text': ugettext_noop('The Ansible Tower base URL to authenticate with.'),
|
'help_text': ugettext_noop('Red Hat Ansible Automation Platform base URL to authenticate with.'),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'id': 'username',
|
'id': 'username',
|
||||||
'label': ugettext_noop('Username'),
|
'label': ugettext_noop('Username'),
|
||||||
'type': 'string',
|
'type': 'string',
|
||||||
'help_text': ugettext_noop('The Ansible Tower user to authenticate as.' 'This should not be set if an OAuth token is being used.'),
|
'help_text': ugettext_noop(
|
||||||
|
'Red Hat Ansible Automation Platform username id to authenticate as.' 'This should not be set if an OAuth token is being used.'
|
||||||
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'id': 'password',
|
'id': 'password',
|
||||||
@@ -1045,6 +1062,11 @@ ManagedCredentialType(
|
|||||||
'TOWER_PASSWORD': '{{password}}',
|
'TOWER_PASSWORD': '{{password}}',
|
||||||
'TOWER_VERIFY_SSL': '{{verify_ssl}}',
|
'TOWER_VERIFY_SSL': '{{verify_ssl}}',
|
||||||
'TOWER_OAUTH_TOKEN': '{{oauth_token}}',
|
'TOWER_OAUTH_TOKEN': '{{oauth_token}}',
|
||||||
|
'CONTROLLER_HOST': '{{host}}',
|
||||||
|
'CONTROLLER_USERNAME': '{{username}}',
|
||||||
|
'CONTROLLER_PASSWORD': '{{password}}',
|
||||||
|
'CONTROLLER_VERIFY_SSL': '{{verify_ssl}}',
|
||||||
|
'CONTROLLER_OAUTH_TOKEN': '{{oauth_token}}',
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ import tempfile
|
|||||||
|
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
|
|
||||||
|
from awx.main.utils.execution_environments import to_container_path
|
||||||
|
|
||||||
|
|
||||||
def aws(cred, env, private_data_dir):
|
def aws(cred, env, private_data_dir):
|
||||||
env['AWS_ACCESS_KEY_ID'] = cred.get_input('username', default='')
|
env['AWS_ACCESS_KEY_ID'] = cred.get_input('username', default='')
|
||||||
@@ -25,13 +27,14 @@ def gce(cred, env, private_data_dir):
|
|||||||
env['GCE_PROJECT'] = project
|
env['GCE_PROJECT'] = project
|
||||||
json_cred['token_uri'] = 'https://oauth2.googleapis.com/token'
|
json_cred['token_uri'] = 'https://oauth2.googleapis.com/token'
|
||||||
|
|
||||||
handle, path = tempfile.mkstemp(dir=private_data_dir)
|
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
|
||||||
f = os.fdopen(handle, 'w')
|
f = os.fdopen(handle, 'w')
|
||||||
json.dump(json_cred, f, indent=2)
|
json.dump(json_cred, f, indent=2)
|
||||||
f.close()
|
f.close()
|
||||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||||
env['GCE_CREDENTIALS_FILE_PATH'] = os.path.join('/runner', os.path.basename(path))
|
container_path = to_container_path(path, private_data_dir)
|
||||||
env['GCP_SERVICE_ACCOUNT_FILE'] = os.path.join('/runner', os.path.basename(path))
|
env['GCE_CREDENTIALS_FILE_PATH'] = container_path
|
||||||
|
env['GCP_SERVICE_ACCOUNT_FILE'] = container_path
|
||||||
|
|
||||||
# Handle env variables for new module types.
|
# Handle env variables for new module types.
|
||||||
# This includes gcp_compute inventory plugin and
|
# This includes gcp_compute inventory plugin and
|
||||||
@@ -96,14 +99,13 @@ def _openstack_data(cred):
|
|||||||
|
|
||||||
|
|
||||||
def openstack(cred, env, private_data_dir):
|
def openstack(cred, env, private_data_dir):
|
||||||
handle, path = tempfile.mkstemp(dir=private_data_dir)
|
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
|
||||||
f = os.fdopen(handle, 'w')
|
f = os.fdopen(handle, 'w')
|
||||||
openstack_data = _openstack_data(cred)
|
openstack_data = _openstack_data(cred)
|
||||||
yaml.safe_dump(openstack_data, f, default_flow_style=False, allow_unicode=True)
|
yaml.safe_dump(openstack_data, f, default_flow_style=False, allow_unicode=True)
|
||||||
f.close()
|
f.close()
|
||||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||||
# TODO: constant for container base path
|
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(path, private_data_dir)
|
||||||
env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(path))
|
|
||||||
|
|
||||||
|
|
||||||
def kubernetes_bearer_token(cred, env, private_data_dir):
|
def kubernetes_bearer_token(cred, env, private_data_dir):
|
||||||
@@ -111,10 +113,10 @@ def kubernetes_bearer_token(cred, env, private_data_dir):
|
|||||||
env['K8S_AUTH_API_KEY'] = cred.get_input('bearer_token', default='')
|
env['K8S_AUTH_API_KEY'] = cred.get_input('bearer_token', default='')
|
||||||
if cred.get_input('verify_ssl') and 'ssl_ca_cert' in cred.inputs:
|
if cred.get_input('verify_ssl') and 'ssl_ca_cert' in cred.inputs:
|
||||||
env['K8S_AUTH_VERIFY_SSL'] = 'True'
|
env['K8S_AUTH_VERIFY_SSL'] = 'True'
|
||||||
handle, path = tempfile.mkstemp(dir=private_data_dir)
|
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
|
||||||
with os.fdopen(handle, 'w') as f:
|
with os.fdopen(handle, 'w') as f:
|
||||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||||
f.write(cred.get_input('ssl_ca_cert'))
|
f.write(cred.get_input('ssl_ca_cert'))
|
||||||
env['K8S_AUTH_SSL_CA_CERT'] = os.path.join('/runner', os.path.basename(path))
|
env['K8S_AUTH_SSL_CA_CERT'] = to_container_path(path, private_data_dir)
|
||||||
else:
|
else:
|
||||||
env['K8S_AUTH_VERIFY_SSL'] = 'False'
|
env['K8S_AUTH_VERIFY_SSL'] = 'False'
|
||||||
|
|||||||
@@ -15,7 +15,9 @@ from django.utils.encoding import force_text
|
|||||||
|
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
from awx.main import consumers
|
from awx.main import consumers
|
||||||
|
from awx.main.managers import DeferJobCreatedManager
|
||||||
from awx.main.fields import JSONField
|
from awx.main.fields import JSONField
|
||||||
|
from awx.main.constants import MINIMAL_EVENTS
|
||||||
from awx.main.models.base import CreatedModifiedModel
|
from awx.main.models.base import CreatedModifiedModel
|
||||||
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
||||||
|
|
||||||
@@ -56,9 +58,6 @@ def create_host_status_counts(event_data):
|
|||||||
return dict(host_status_counts)
|
return dict(host_status_counts)
|
||||||
|
|
||||||
|
|
||||||
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
|
|
||||||
|
|
||||||
|
|
||||||
def emit_event_detail(event):
|
def emit_event_detail(event):
|
||||||
if settings.UI_LIVE_UPDATES_ENABLED is False and event.event not in MINIMAL_EVENTS:
|
if settings.UI_LIVE_UPDATES_ENABLED is False and event.event not in MINIMAL_EVENTS:
|
||||||
return
|
return
|
||||||
@@ -271,6 +270,10 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
|||||||
null=True,
|
null=True,
|
||||||
default=None,
|
default=None,
|
||||||
editable=False,
|
editable=False,
|
||||||
|
)
|
||||||
|
modified = models.DateTimeField(
|
||||||
|
default=None,
|
||||||
|
editable=False,
|
||||||
db_index=True,
|
db_index=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -365,14 +368,24 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
|||||||
|
|
||||||
# find parent links and progagate changed=T and failed=T
|
# find parent links and progagate changed=T and failed=T
|
||||||
changed = (
|
changed = (
|
||||||
job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()
|
job.get_event_queryset()
|
||||||
|
.filter(changed=True)
|
||||||
|
.exclude(parent_uuid=None)
|
||||||
|
.only('parent_uuid')
|
||||||
|
.values_list('parent_uuid', flat=True)
|
||||||
|
.distinct()
|
||||||
) # noqa
|
) # noqa
|
||||||
failed = (
|
failed = (
|
||||||
job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct()
|
job.get_event_queryset()
|
||||||
|
.filter(failed=True)
|
||||||
|
.exclude(parent_uuid=None)
|
||||||
|
.only('parent_uuid')
|
||||||
|
.values_list('parent_uuid', flat=True)
|
||||||
|
.distinct()
|
||||||
) # noqa
|
) # noqa
|
||||||
|
|
||||||
JobEvent.objects.filter(job_id=self.job_id, uuid__in=changed).update(changed=True)
|
job.get_event_queryset().filter(uuid__in=changed).update(changed=True)
|
||||||
JobEvent.objects.filter(job_id=self.job_id, uuid__in=failed).update(failed=True)
|
job.get_event_queryset().filter(uuid__in=failed).update(failed=True)
|
||||||
|
|
||||||
# send success/failure notifications when we've finished handling the playbook_on_stats event
|
# send success/failure notifications when we've finished handling the playbook_on_stats event
|
||||||
from awx.main.tasks import handle_success_and_failure_notifications # circular import
|
from awx.main.tasks import handle_success_and_failure_notifications # circular import
|
||||||
@@ -423,6 +436,16 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
|||||||
except (KeyError, ValueError):
|
except (KeyError, ValueError):
|
||||||
kwargs.pop('created', None)
|
kwargs.pop('created', None)
|
||||||
|
|
||||||
|
# same as above, for job_created
|
||||||
|
# TODO: if this approach, identical to above, works, can convert to for loop
|
||||||
|
try:
|
||||||
|
if not isinstance(kwargs['job_created'], datetime.datetime):
|
||||||
|
kwargs['job_created'] = parse_datetime(kwargs['job_created'])
|
||||||
|
if not kwargs['job_created'].tzinfo:
|
||||||
|
kwargs['job_created'] = kwargs['job_created'].replace(tzinfo=utc)
|
||||||
|
except (KeyError, ValueError):
|
||||||
|
kwargs.pop('job_created', None)
|
||||||
|
|
||||||
host_map = kwargs.pop('host_map', {})
|
host_map = kwargs.pop('host_map', {})
|
||||||
|
|
||||||
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
||||||
@@ -430,6 +453,11 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
|||||||
event = cls(**kwargs)
|
event = cls(**kwargs)
|
||||||
if workflow_job_id:
|
if workflow_job_id:
|
||||||
setattr(event, 'workflow_job_id', workflow_job_id)
|
setattr(event, 'workflow_job_id', workflow_job_id)
|
||||||
|
# shouldn't job_created _always_ be present?
|
||||||
|
# if it's not, how could we save the event to the db?
|
||||||
|
job_created = kwargs.pop('job_created', None)
|
||||||
|
if job_created:
|
||||||
|
setattr(event, 'job_created', job_created)
|
||||||
setattr(event, 'host_map', host_map)
|
setattr(event, 'host_map', host_map)
|
||||||
event._update_from_event_data()
|
event._update_from_event_data()
|
||||||
return event
|
return event
|
||||||
@@ -444,25 +472,28 @@ class JobEvent(BasePlaybookEvent):
 An event/message logged from the callback when running a job.
 """

-VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id']
+VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']

+objects = DeferJobCreatedManager()

 class Meta:
 app_label = 'main'
 ordering = ('pk',)
 index_together = [
-('job', 'event'),
-('job', 'uuid'),
-('job', 'start_line'),
-('job', 'end_line'),
-('job', 'parent_uuid'),
+('job', 'job_created', 'event'),
+('job', 'job_created', 'uuid'),
+('job', 'job_created', 'parent_uuid'),
+('job', 'job_created', 'counter'),
 ]

 id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
 job = models.ForeignKey(
 'Job',
 related_name='job_events',
-on_delete=models.CASCADE,
+null=True,
+on_delete=models.DO_NOTHING,
 editable=False,
+db_index=False,
 )
 host = models.ForeignKey(
 'Host',
@@ -482,6 +513,7 @@ class JobEvent(BasePlaybookEvent):
 default='',
 editable=False,
 )
+job_created = models.DateTimeField(null=True, editable=False)

 def get_absolute_url(self, request=None):
 return reverse('api:job_event_detail', kwargs={'pk': self.pk}, request=request)
@@ -510,12 +542,15 @@ class JobEvent(BasePlaybookEvent):
 job = self.job

 from awx.main.models import Host, JobHostSummary # circular import
+from awx.main.models import Host, JobHostSummary, HostMetric

-all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id')
+all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
 existing_host_ids = set(h.id for h in all_hosts)

 summaries = dict()
+updated_hosts_list = list()
 for host in hostnames:
+updated_hosts_list.append(host.lower())
 host_id = self.host_map.get(host, None)
 if host_id not in existing_host_ids:
 host_id = None
@@ -546,38 +581,64 @@ class JobEvent(BasePlaybookEvent):

 Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)

+# bulk-create
+current_time = now()
+HostMetric.objects.bulk_create(
+[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
+)
+HostMetric.objects.filter(hostname__in=updated_hosts_list).update(last_automation=current_time)

 @property
 def job_verbosity(self):
 return self.job.verbosity


+class UnpartitionedJobEvent(JobEvent):
+class Meta:
+proxy = True


+UnpartitionedJobEvent._meta.db_table = '_unpartitioned_' + JobEvent._meta.db_table # noqa


 class ProjectUpdateEvent(BasePlaybookEvent):

-VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id']
+VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['project_update_id', 'workflow_job_id', 'job_created']

+objects = DeferJobCreatedManager()

 class Meta:
 app_label = 'main'
 ordering = ('pk',)
 index_together = [
-('project_update', 'event'),
-('project_update', 'uuid'),
-('project_update', 'start_line'),
-('project_update', 'end_line'),
+('project_update', 'job_created', 'event'),
+('project_update', 'job_created', 'uuid'),
+('project_update', 'job_created', 'counter'),
 ]

 id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
 project_update = models.ForeignKey(
 'ProjectUpdate',
 related_name='project_update_events',
-on_delete=models.CASCADE,
+on_delete=models.DO_NOTHING,
 editable=False,
+db_index=False,
 )
+job_created = models.DateTimeField(null=True, editable=False)

 @property
 def host_name(self):
 return 'localhost'


+class UnpartitionedProjectUpdateEvent(ProjectUpdateEvent):
+class Meta:
+proxy = True


+UnpartitionedProjectUpdateEvent._meta.db_table = '_unpartitioned_' + ProjectUpdateEvent._meta.db_table # noqa


 class BaseCommandEvent(CreatedModifiedModel):
 """
 An event/message logged from a command for each host.
@@ -617,6 +678,16 @@ class BaseCommandEvent(CreatedModifiedModel):
 default=0,
 editable=False,
 )
+created = models.DateTimeField(
+null=True,
+default=None,
+editable=False,
+)
+modified = models.DateTimeField(
+default=None,
+editable=False,
+db_index=True,
+)

 def __str__(self):
 return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
@@ -671,16 +742,17 @@ class BaseCommandEvent(CreatedModifiedModel):

 class AdHocCommandEvent(BaseCommandEvent):

-VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id']
+VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id', 'job_created']

+objects = DeferJobCreatedManager()

 class Meta:
 app_label = 'main'
 ordering = ('-pk',)
 index_together = [
-('ad_hoc_command', 'event'),
-('ad_hoc_command', 'uuid'),
-('ad_hoc_command', 'start_line'),
-('ad_hoc_command', 'end_line'),
+('ad_hoc_command', 'job_created', 'event'),
+('ad_hoc_command', 'job_created', 'uuid'),
+('ad_hoc_command', 'job_created', 'counter'),
 ]

 EVENT_TYPES = [
@@ -727,8 +799,9 @@ class AdHocCommandEvent(BaseCommandEvent):
 ad_hoc_command = models.ForeignKey(
 'AdHocCommand',
 related_name='ad_hoc_command_events',
-on_delete=models.CASCADE,
+on_delete=models.DO_NOTHING,
 editable=False,
+db_index=False,
 )
 host = models.ForeignKey(
 'Host',
@@ -743,6 +816,7 @@ class AdHocCommandEvent(BaseCommandEvent):
 default='',
 editable=False,
 )
+job_created = models.DateTimeField(null=True, editable=False)

 def get_absolute_url(self, request=None):
 return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
@@ -758,26 +832,37 @@ class AdHocCommandEvent(BaseCommandEvent):
 analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=self)))


+class UnpartitionedAdHocCommandEvent(AdHocCommandEvent):
+class Meta:
+proxy = True


+UnpartitionedAdHocCommandEvent._meta.db_table = '_unpartitioned_' + AdHocCommandEvent._meta.db_table # noqa


 class InventoryUpdateEvent(BaseCommandEvent):

-VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id']
+VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['inventory_update_id', 'workflow_job_id', 'job_created']

+objects = DeferJobCreatedManager()

 class Meta:
 app_label = 'main'
 ordering = ('-pk',)
 index_together = [
-('inventory_update', 'uuid'),
-('inventory_update', 'start_line'),
-('inventory_update', 'end_line'),
+('inventory_update', 'job_created', 'uuid'),
+('inventory_update', 'job_created', 'counter'),
 ]

 id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
 inventory_update = models.ForeignKey(
 'InventoryUpdate',
 related_name='inventory_update_events',
-on_delete=models.CASCADE,
+on_delete=models.DO_NOTHING,
 editable=False,
+db_index=False,
 )
+job_created = models.DateTimeField(null=True, editable=False)

 @property
 def event(self):
@@ -792,26 +877,37 @@ class InventoryUpdateEvent(BaseCommandEvent):
 return False


+class UnpartitionedInventoryUpdateEvent(InventoryUpdateEvent):
+class Meta:
+proxy = True


+UnpartitionedInventoryUpdateEvent._meta.db_table = '_unpartitioned_' + InventoryUpdateEvent._meta.db_table # noqa


 class SystemJobEvent(BaseCommandEvent):

-VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id']
+VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['system_job_id', 'job_created']

+objects = DeferJobCreatedManager()

 class Meta:
 app_label = 'main'
 ordering = ('-pk',)
 index_together = [
-('system_job', 'uuid'),
-('system_job', 'start_line'),
-('system_job', 'end_line'),
+('system_job', 'job_created', 'uuid'),
+('system_job', 'job_created', 'counter'),
 ]

 id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
 system_job = models.ForeignKey(
 'SystemJob',
 related_name='system_job_events',
-on_delete=models.CASCADE,
+on_delete=models.DO_NOTHING,
 editable=False,
+db_index=False,
 )
+job_created = models.DateTimeField(null=True, editable=False)

 @property
 def event(self):
@@ -824,3 +920,11 @@ class SystemJobEvent(BaseCommandEvent):
 @property
 def changed(self):
 return False


+class UnpartitionedSystemJobEvent(SystemJobEvent):
+class Meta:
+proxy = True


+UnpartitionedSystemJobEvent._meta.db_table = '_unpartitioned_' + SystemJobEvent._meta.db_table # noqa
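The `DeferJobCreatedManager` attached to each event model above is referenced but not defined in these hunks. A minimal sketch of what such a manager would presumably do — this is an assumption for illustration, not the repository's actual implementation — is to defer loading of the new `job_created` partition key on ordinary reads:

```python
from django.db import models


class DeferJobCreatedManager(models.Manager):
    # Hypothetical sketch: skip fetching the job_created partition column
    # on every event query; callers that need it can .defer(None) or refetch.
    def get_queryset(self):
        return super().get_queryset().defer('job_created')
```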
@@ -3,6 +3,7 @@ from django.utils.translation import ugettext_lazy as _

 from awx.api.versioning import reverse
 from awx.main.models.base import CommonModel
+from awx.main.validators import validate_container_image_name


 __all__ = ['ExecutionEnvironment']
@@ -14,7 +15,7 @@ class ExecutionEnvironment(CommonModel):

 PULL_CHOICES = [
 ('always', _("Always pull container before running.")),
-('missing', _("No pull option has been selected.")),
+('missing', _("Only pull the image if not present before running.")),
 ('never', _("Never pull container before running.")),
 ]

@@ -30,9 +31,10 @@ class ExecutionEnvironment(CommonModel):
 image = models.CharField(
 max_length=1024,
 verbose_name=_('image location'),
-help_text=_("The registry location where the container is stored."),
+help_text=_("The full image location, including the container registry, image name, and version tag."),
+validators=[validate_container_image_name],
 )
-managed_by_tower = models.BooleanField(default=False, editable=False)
+managed = models.BooleanField(default=False, editable=False)
 credential = models.ForeignKey(
 'Credential',
 related_name='%(class)ss',
@@ -130,12 +130,6 @@ class Instance(HasPolicyEditsMixin, BaseModel):
 return self.modified < ref_time - timedelta(seconds=grace_period)

 def refresh_capacity(self):
-if settings.IS_K8S:
-self.capacity = self.cpu = self.memory = self.cpu_capacity = self.mem_capacity = 0 # noqa
-self.version = awx_application_version
-self.save(update_fields=['capacity', 'version', 'modified', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity'])
-return
-
 cpu = get_cpu_capacity()
 mem = get_mem_capacity()
 if self.enabled:
@@ -35,7 +35,7 @@ from awx.main.fields import (
 )
 from awx.main.managers import HostManager
 from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
-from awx.main.models.events import InventoryUpdateEvent
+from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
 from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
 from awx.main.models.mixins import (
 ResourceMixin,
@@ -50,6 +50,8 @@ from awx.main.models.notifications import (
 from awx.main.models.credential.injectors import _openstack_data
 from awx.main.utils import _inventory_updates
 from awx.main.utils.safe_yaml import sanitize_jinja
+from awx.main.utils.execution_environments import to_container_path
+from awx.main.utils.licensing import server_product_name


 __all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
@@ -163,15 +165,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
 'admin_role',
 ]
 )
-insights_credential = models.ForeignKey(
-'Credential',
-related_name='insights_inventories',
-help_text=_('Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.'),
-on_delete=models.SET_NULL,
-blank=True,
-null=True,
-default=None,
-)
 pending_deletion = models.BooleanField(
 default=False,
 editable=False,
@@ -313,7 +306,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
 for host in hosts:
 data['_meta']['hostvars'][host.name] = host.variables_dict
 if towervars:
-tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(), remote_tower_id=host.id)
+tower_dict = dict(
+remote_tower_enabled=str(host.enabled).lower(),
+remote_tower_id=host.id,
+remote_host_enabled=str(host.enabled).lower(),
+remote_host_id=host.id,
+)
 data['_meta']['hostvars'][host.name].update(tower_dict)

 return data
@@ -366,13 +364,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
 group_pks = self.groups.values_list('pk', flat=True)
 return self.groups.exclude(parents__pk__in=group_pks).distinct()

-def clean_insights_credential(self):
-if self.kind == 'smart' and self.insights_credential:
-raise ValidationError(_("Assignment not allowed for Smart Inventory"))
-if self.insights_credential and self.insights_credential.credential_type.kind != 'insights':
-raise ValidationError(_("Credential kind must be 'insights'."))
-return self.insights_credential
-
 @transaction.atomic
 def schedule_deletion(self, user_id=None):
 from awx.main.tasks import delete_inventory
@@ -502,13 +493,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
 null=True,
 help_text=_('The date and time ansible_facts was last modified.'),
 )
-insights_system_id = models.TextField(
-blank=True,
-default=None,
-null=True,
-db_index=True,
-help_text=_('Red Hat Insights host unique identifier.'),
-)

 objects = HostManager()

@@ -803,6 +787,12 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
 return UnifiedJob.objects.non_polymorphic().filter(Q(job__inventory=self.inventory) | Q(inventoryupdate__inventory_source__groups=self))


+class HostMetric(models.Model):
+hostname = models.CharField(primary_key=True, max_length=512)
+first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against'))
+last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against'))


 class InventorySourceOptions(BaseModel):
 """
 Common fields for InventorySource and InventoryUpdate.
@@ -820,7 +810,8 @@ class InventorySourceOptions(BaseModel):
 ('satellite6', _('Red Hat Satellite 6')),
 ('openstack', _('OpenStack')),
 ('rhv', _('Red Hat Virtualization')),
-('tower', _('Ansible Tower')),
+('controller', _('Red Hat Ansible Automation Platform')),
+('insights', _('Red Hat Insights')),
 ]

 # From the options of the Django management base command
@@ -1223,6 +1214,10 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
 def is_container_group_task(self):
 return bool(self.instance_group and self.instance_group.is_container_group)

+@property
+def can_run_containerized(self):
+return True

 def _get_parent_field_name(self):
 return 'inventory_source'

@@ -1258,6 +1253,8 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,

 @property
 def event_class(self):
+if self.has_unpartitioned_events:
+return UnpartitionedInventoryUpdateEvent
 return InventoryUpdateEvent

 @property
@@ -1299,16 +1296,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
 return self.global_instance_groups
 return selected_groups

-@property
-def ansible_virtualenv_path(self):
-if self.inventory_source and self.inventory_source.custom_virtualenv:
-return self.inventory_source.custom_virtualenv
-if self.inventory_source and self.inventory_source.source_project:
-project = self.inventory_source.source_project
-if project and project.custom_virtualenv:
-return project.custom_virtualenv
-return settings.ANSIBLE_VENV_PATH
-
 def cancel(self, job_explanation=None, is_chain=False):
 res = super(InventoryUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
 if res:
@@ -1343,6 +1330,7 @@ class PluginFileInjector(object):
 namespace = None
 collection = None
 collection_migration = '2.9' # Starting with this version, we use collections
+use_fqcn = False # plugin: name versus plugin: namespace.collection.name

 # TODO: delete this method and update unit tests
 @classmethod
@@ -1369,7 +1357,12 @@ class PluginFileInjector(object):
 Note that a plugin value of '' should still be overridden.
 '''
 if self.plugin_name is not None:
-source_vars['plugin'] = self.plugin_name
+if hasattr(self, 'downstream_namespace') and server_product_name() != 'AWX':
+source_vars['plugin'] = f'{self.downstream_namespace}.{self.downstream_collection}.{self.plugin_name}'
+elif self.use_fqcn:
+source_vars['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
+else:
+source_vars['plugin'] = self.plugin_name
 return source_vars

 def build_env(self, inventory_update, env, private_data_dir, private_data_files):
@@ -1380,7 +1373,7 @@ class PluginFileInjector(object):
 return env

 def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
-"""By default, we will apply the standard managed_by_tower injectors"""
+"""By default, we will apply the standard managed injectors"""
 injected_env = {}
 credential = inventory_update.get_cloud_credential()
 # some sources may have no credential, specifically ec2
@@ -1399,7 +1392,7 @@ class PluginFileInjector(object):
 args = []
 credential.credential_type.inject_credential(credential, injected_env, safe_env, args, private_data_dir)
 # NOTE: safe_env is handled externally to injector class by build_safe_env static method
-# that means that managed_by_tower injectors must only inject detectable env keys
+# that means that managed injectors must only inject detectable env keys
 # enforcement of this is accomplished by tests
 return injected_env

@@ -1505,7 +1498,7 @@ class openstack(PluginFileInjector):
 env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
 credential = inventory_update.get_cloud_credential()
 cred_data = private_data_files['credentials']
-env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(cred_data[credential]))
+env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_data[credential], private_data_dir)
 return env


@@ -1517,12 +1510,17 @@ class rhv(PluginFileInjector):
 initial_version = '2.9'
 namespace = 'ovirt'
 collection = 'ovirt'
+downstream_namespace = 'redhat'
+downstream_collection = 'rhv'


 class satellite6(PluginFileInjector):
 plugin_name = 'foreman'
 namespace = 'theforeman'
 collection = 'foreman'
+downstream_namespace = 'redhat'
+downstream_collection = 'satellite'
+use_fqcn = True

 def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
 # this assumes that this is merged
@@ -1535,18 +1533,24 @@ class satellite6(PluginFileInjector):
 ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
 return ret

-def inventory_as_dict(self, inventory_update, private_data_dir):
-ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
-# this inventory plugin requires the fully qualified inventory plugin name
-ret['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
-return ret

-class tower(PluginFileInjector):
-plugin_name = 'tower'
+class controller(PluginFileInjector):
+plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection
 base_injector = 'template'
 namespace = 'awx'
 collection = 'awx'
+downstream_namespace = 'ansible'
+downstream_collection = 'controller'


+class insights(PluginFileInjector):
+plugin_name = 'insights'
+base_injector = 'template'
+namespace = 'redhatinsights'
+collection = 'insights'
+downstream_namespace = 'redhat'
+downstream_collection = 'insights'
+use_fqcn = True


 for cls in PluginFileInjector.__subclasses__():
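The `use_fqcn` / `downstream_namespace` branching added to `PluginFileInjector` above can be summarized as a small standalone function. This is only an illustrative sketch of the logic in the hunk; `injector` stands in for any injector subclass and `product_name` is an assumed parameter:

```python
def resolve_plugin_name(injector, product_name='AWX'):
    # Downstream (non-AWX) builds redirect to the redhat.* collections;
    # otherwise the fully qualified name is used only when the injector
    # opts in via use_fqcn, mirroring the branch added in the diff.
    if hasattr(injector, 'downstream_namespace') and product_name != 'AWX':
        return f'{injector.downstream_namespace}.{injector.downstream_collection}.{injector.plugin_name}'
    if injector.use_fqcn:
        return f'{injector.namespace}.{injector.collection}.{injector.plugin_name}'
    return injector.plugin_name
```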
@@ -37,7 +37,7 @@ from awx.main.models.base import (
 VERBOSITY_CHOICES,
 VarsDictProperty,
 )
-from awx.main.models.events import JobEvent, SystemJobEvent
+from awx.main.models.events import JobEvent, UnpartitionedJobEvent, UnpartitionedSystemJobEvent, SystemJobEvent
 from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
 from awx.main.models.notifications import (
 NotificationTemplate,
@@ -600,20 +600,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
 def get_ui_url(self):
 return urljoin(settings.TOWER_URL_BASE, "/#/jobs/playbook/{}".format(self.pk))

-@property
-def ansible_virtualenv_path(self):
-# the order here enforces precedence (it matters)
-for virtualenv in (
-self.job_template.custom_virtualenv if self.job_template else None,
-self.project.custom_virtualenv,
-self.organization.custom_virtualenv if self.organization else None,
-):
-if virtualenv:
-return virtualenv
-return settings.ANSIBLE_VENV_PATH
-
 @property
 def event_class(self):
+if self.has_unpartitioned_events:
+return UnpartitionedJobEvent
 return JobEvent

 def copy_unified_job(self, **new_prompts):
@@ -855,23 +845,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
 continue
 host.ansible_facts = ansible_facts
 host.ansible_facts_modified = now()
-ansible_local = ansible_facts.get('ansible_local', {}).get('insights', {})
-ansible_facts = ansible_facts.get('insights', {})
-ansible_local_system_id = ansible_local.get('system_id', None) if isinstance(ansible_local, dict) else None
-ansible_facts_system_id = ansible_facts.get('system_id', None) if isinstance(ansible_facts, dict) else None
-if ansible_local_system_id:
-print("Setting local {}".format(ansible_local_system_id))
-logger.debug(
-"Insights system_id {} found for host <{}, {}> in"
-" ansible local facts".format(ansible_local_system_id, host.inventory.id, host.name)
-)
-host.insights_system_id = ansible_local_system_id
-elif ansible_facts_system_id:
-logger.debug(
-"Insights system_id {} found for host <{}, {}> in"
-" insights facts".format(ansible_local_system_id, host.inventory.id, host.name)
-)
-host.insights_system_id = ansible_facts_system_id
 host.save()
 system_tracking_logger.info(
 'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
@@ -1259,17 +1232,21 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

 @property
 def event_class(self):
+if self.has_unpartitioned_events:
+return UnpartitionedSystemJobEvent
 return SystemJobEvent

+@property
+def can_run_on_control_plane(self):
+return True

 @property
 def task_impact(self):
-if settings.IS_K8S:
-return 0
 return 5

 @property
 def preferred_instance_groups(self):
-return self.global_instance_groups
+return self.control_plane_instance_group

 '''
 JobNotificationMixin
@@ -464,15 +464,18 @@ class ExecutionEnvironmentMixin(models.Model):

 def resolve_execution_environment(self):
 """
-Return the execution environment that should be used when creating a new job.
+Return the execution environment that should be used when executing a job.
 """
 if self.execution_environment is not None:
 return self.execution_environment
+template = getattr(self, 'unified_job_template', None)
+if template is not None and template.execution_environment is not None:
+return template.execution_environment
 if getattr(self, 'project_id', None) and self.project.default_environment is not None:
 return self.project.default_environment
-if getattr(self, 'organization', None) and self.organization.default_environment is not None:
+if getattr(self, 'organization_id', None) and self.organization.default_environment is not None:
 return self.organization.default_environment
-if getattr(self, 'inventory', None) and self.inventory.organization is not None:
+if getattr(self, 'inventory_id', None) and self.inventory.organization is not None:
 if self.inventory.organization.default_environment is not None:
 return self.inventory.organization.default_environment

@@ -117,7 +117,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
 def create_default_galaxy_credential(self):
 from awx.main.models import Credential

-public_galaxy_credential = Credential.objects.filter(managed_by_tower=True, name='Ansible Galaxy').first()
+public_galaxy_credential = Credential.objects.filter(managed=True, name='Ansible Galaxy').first()
 if public_galaxy_credential not in self.galaxy_credentials.all():
 self.galaxy_credentials.add(public_galaxy_credential)

@@ -19,7 +19,7 @@ from django.utils.timezone import now, make_aware, get_default_timezone
 # AWX
 from awx.api.versioning import reverse
 from awx.main.models.base import PROJECT_UPDATE_JOB_TYPE_CHOICES, PERM_INVENTORY_DEPLOY
-from awx.main.models.events import ProjectUpdateEvent
+from awx.main.models.events import ProjectUpdateEvent, UnpartitionedProjectUpdateEvent
 from awx.main.models.notifications import (
 NotificationTemplate,
 JobNotificationMixin,
@@ -32,6 +32,7 @@ from awx.main.models.jobs import Job
 from awx.main.models.mixins import ResourceMixin, TaskManagerProjectUpdateMixin, CustomVirtualEnvMixin, RelatedJobsMixin
 from awx.main.utils import update_scm_url, polymorphic
 from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
+from awx.main.utils.execution_environments import get_control_plane_execution_environment
 from awx.main.fields import ImplicitRoleField
 from awx.main.models.rbac import (
 ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
@@ -184,11 +185,11 @@ class ProjectOptions(models.Model):

 def resolve_execution_environment(self):
 """
-Project updates, themselves, will use the default execution environment.
+Project updates, themselves, will use the control plane execution environment.
 Jobs using the project can use the default_environment, but the project updates
 are not flexible enough to allow customizing the image they use.
 """
-return self.get_default_execution_environment()
+return get_control_plane_execution_environment()

 def get_project_path(self, check_if_exists=True):
 local_path = os.path.basename(self.local_path)
@@ -552,14 +553,18 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
 websocket_data.update(dict(project_id=self.project.id))
 return websocket_data

+@property
+def can_run_on_control_plane(self):
+return True

 @property
 def event_class(self):
+if self.has_unpartitioned_events:
+return UnpartitionedProjectUpdateEvent
 return ProjectUpdateEvent

 @property
 def task_impact(self):
-if settings.IS_K8S:
-return 0
 return 0 if self.job_type == 'run' else 1

 @property
@@ -620,6 +625,8 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
 organization_groups = []
 template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
 selected_groups = template_groups + organization_groups
+if not any([not group.is_container_group for group in selected_groups]):
+selected_groups = selected_groups + list(self.control_plane_instance_group)
 if not selected_groups:
 return self.global_instance_groups
 return selected_groups
@@ -49,6 +49,7 @@ from awx.main.utils import (
 getattr_dne,
 polymorphic,
 schedule_task_manager,
+get_event_partition_epoch,
 )
 from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
 from awx.main.redact import UriCleaner, REPLACE_STR
@@ -366,8 +367,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
 for fd, val in eager_fields.items():
 setattr(unified_job, fd, val)

-unified_job.execution_environment = self.resolve_execution_environment()
-
 # NOTE: slice workflow jobs _get_parent_field_name method
 # is not correct until this is set
 if not parent_field_name:
@@ -718,6 +717,9 @@ class UnifiedJob(
 editable=False,
 help_text=_("The version of Ansible Core installed in the execution environment."),
 )
+work_unit_id = models.CharField(
+max_length=255, blank=True, default=None, editable=False, null=True, help_text=_("The Receptor work unit ID associated with this job.")
+)

 def get_absolute_url(self, request=None):
 RealClass = self.get_real_instance_class()
@@ -737,6 +739,13 @@ class UnifiedJob(
 def _get_task_class(cls):
 raise NotImplementedError # Implement in subclasses.

+@property
+def can_run_on_control_plane(self):
+if settings.IS_K8S:
+return False
+
+return True

 @property
 def can_run_containerized(self):
 return False
@@ -992,8 +1001,18 @@ class UnifiedJob(
 'main_systemjob': 'system_job_id',
 }[tablename]

+@property
+def has_unpartitioned_events(self):
+applied = get_event_partition_epoch()
+return applied and self.created and self.created < applied

 def get_event_queryset(self):
-return self.event_class.objects.filter(**{self.event_parent_key: self.id})
+kwargs = {
+self.event_parent_key: self.id,
+}
+if not self.has_unpartitioned_events:
+kwargs['job_created'] = self.created
+return self.event_class.objects.filter(**kwargs)

 @property
 def event_processing_finished(self):
@@ -1079,13 +1098,15 @@ class UnifiedJob(
 # .write() calls on the fly to maintain this interface
 _write = fd.write
 fd.write = lambda s: _write(smart_text(s))
+tbl = self._meta.db_table + 'event'
+created_by_cond = ''
+if self.has_unpartitioned_events:
+tbl = f'_unpartitioned_{tbl}'
+else:
+created_by_cond = f"job_created='{self.created.isoformat()}' AND "

-cursor.copy_expert(
-"copy (select stdout from {} where {}={} and stdout != '' order by start_line) to stdout".format(
-self._meta.db_table + 'event', self.event_parent_key, self.id
-),
-fd,
-)
+sql = f"copy (select stdout from {tbl} where {created_by_cond}{self.event_parent_key}={self.id} and stdout != '' order by start_line) to stdout" # nosql
+cursor.copy_expert(sql, fd)

 if hasattr(fd, 'name'):
 # If we're dealing with a physical file, use `sed` to clean
@@ -1404,14 +1425,26 @@ class UnifiedJob(
 return []
 return list(self.unified_job_template.instance_groups.all())

+@property
+def control_plane_instance_group(self):
+from awx.main.models.ha import InstanceGroup
+
+control_plane_instance_group = InstanceGroup.objects.filter(name=settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
+
+return list(control_plane_instance_group)

 @property
 def global_instance_groups(self):
 from awx.main.models.ha import InstanceGroup

-default_instance_group = InstanceGroup.objects.filter(name='tower')
-if default_instance_group.exists():
-return [default_instance_group.first()]
-return []
+default_instance_group_names = [settings.DEFAULT_EXECUTION_QUEUE_NAME]
+if not settings.IS_K8S:
+default_instance_group_names.append(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME)
+
+default_instance_groups = InstanceGroup.objects.filter(name__in=default_instance_group_names)
+
+return list(default_instance_groups)

 def awx_meta_vars(self):
 """
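Taken together with the events.py changes, the new `has_unpartitioned_events` / `get_event_queryset` pair means callers no longer filter event tables directly. A usage sketch (illustrative only; `job` is any saved `UnifiedJob` subclass instance, and the helper name is made up):

```python
def failed_parent_uuids(job):
    # get_event_queryset() adds job_created=job.created for partitioned rows
    # and resolves to the _unpartitioned_* proxy table for pre-migration jobs,
    # so callers can keep chaining ordinary queryset filters.
    return list(
        job.get_event_queryset()
        .filter(failed=True)
        .exclude(parent_uuid=None)
        .values_list('parent_uuid', flat=True)
        .distinct()
    )
```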
@@ -258,6 +258,10 @@ class WorkflowJobNode(WorkflowNodeBase):
 models.Index(fields=['identifier']),
 ]

+@property
+def event_processing_finished(self):
+return True

 def get_absolute_url(self, request=None):
 return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)

@@ -591,6 +595,9 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
 def _get_related_jobs(self):
 return WorkflowJob.objects.filter(workflow_job_template=self)

+def resolve_execution_environment(self):
+return None # EEs are not meaningful for workflows


 class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin, WebhookMixin):
 class Meta:
@@ -620,6 +627,10 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
 def workflow_nodes(self):
 return self.workflow_job_nodes

+@property
+def event_processing_finished(self):
+return True

 def _get_parent_field_name(self):
 if self.job_template_id:
 # This is a workflow job which is a container for slice jobs
@@ -1,10 +1,13 @@
 import collections
+import json
 import logging
 from base64 import b64encode
+from urllib import parse as urlparse

 from django.conf import settings
 from kubernetes import client, config
 from django.utils.functional import cached_property
+from django.utils.translation import ugettext_lazy as _

 from awx.main.utils.common import parse_yaml_or_json
 from awx.main.utils.execution_environments import get_default_pod_spec
@@ -51,6 +54,96 @@ class PodManager(object):

 return pods

+def create_secret(self, job):
+registry_cred = job.execution_environment.credential
+host = registry_cred.get_input('host')
+# urlparse requires '//' to be provided if scheme is not specified
+original_parsed = urlparse.urlsplit(host)
+if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:
+host = 'https://%s' % (host)
+parsed = urlparse.urlsplit(host)
+host = parsed.hostname
+if parsed.port:
+host = "{0}:{1}".format(host, parsed.port)
+
+username = registry_cred.get_input("username")
+password = registry_cred.get_input("password")
+
+# Construct container auth dict and base64 encode it
+token = b64encode("{}:{}".format(username, password).encode('UTF-8')).decode()
+auth_dict = json.dumps({"auths": {host: {"auth": token}}}, indent=4)
+auth_data = b64encode(str(auth_dict).encode('UTF-8')).decode()
+
+# Construct Secret object
+secret = client.V1Secret()
+secret_name = "automation-{0}-image-pull-secret-{1}".format(settings.INSTALL_UUID[:5], job.execution_environment.credential.id)
+secret.metadata = client.V1ObjectMeta(name="{}".format(secret_name))
+secret.type = "kubernetes.io/dockerconfigjson"
+secret.kind = "Secret"
+secret.data = {".dockerconfigjson": auth_data}
+
+# Check if secret already exists
+replace_secret = False
+try:
+existing_secret = self.kube_api.read_namespaced_secret(namespace=self.namespace, name=secret_name)
+if existing_secret.data != secret.data:
+replace_secret = True
+secret_exists = True
+except client.rest.ApiException as e:
+if e.status == 404:
+secret_exists = False
+else:
+error_msg = _('Invalid openshift or k8s cluster credential')
+if e.status == 403:
+error_msg = _(
+'Failed to create secret for container group {} because additional service account role rules are needed. Add get, create and delete role rules for secret resources for your cluster credential.'.format(
+job.instance_group.name
+)
+)
+full_error_msg = '{0}: {1}'.format(error_msg, str(e))
+logger.exception(full_error_msg)
+raise PermissionError(full_error_msg)
+
+if replace_secret:
+try:
+# Try to replace existing secret
+self.kube_api.delete_namespaced_secret(name=secret.metadata.name, namespace=self.namespace)
+self.kube_api.create_namespaced_secret(namespace=self.namespace, body=secret)
+except client.rest.ApiException as e:
+error_msg = _('Invalid openshift or k8s cluster credential')
+if e.status == 403:
+error_msg = _(
+'Failed to delete secret for container group {} because additional service account role rules are needed. Add create and delete role rules for secret resources for your cluster credential.'.format(
+job.instance_group.name
+)
+)
+full_error_msg = '{0}: {1}'.format(error_msg, str(e))
+logger.exception(full_error_msg)
+# let job continue for the case where secret was created manually and cluster cred doesn't have permission to create a secret
+except Exception as e:
+error_msg = 'Failed to create imagePullSecret for container group {}'.format(job.instance_group.name)
+logger.exception('{0}: {1}'.format(error_msg, str(e)))
+raise RuntimeError(error_msg)
+elif secret_exists and not replace_secret:
+pass
+else:
+# Create an image pull secret in namespace
+try:
+self.kube_api.create_namespaced_secret(namespace=self.namespace, body=secret)
+except client.rest.ApiException as e:
+if e.status == 403:
+error_msg = _(
+'Failed to create imagePullSecret: {}. Check that openshift or k8s credential has permission to create a secret.'.format(e.status)
+)
+logger.exception(error_msg)
+# let job continue for the case where secret was created manually and cluster cred doesn't have permission to create a secret
+except Exception:
+error_msg = 'Failed to create imagePullSecret for container group {}'.format(job.instance_group.name)
+logger.exception(error_msg)
+job.cancel(job_explanation=error_msg)
+
+return secret.metadata.name
+
 @property
 def namespace(self):
 return self.pod_definition['metadata']['namespace']
@@ -81,7 +174,7 @@ class PodManager(object):

 @property
 def pod_name(self):
-return f"awx-job-{self.task.id}"
+return f"automation-job-{self.task.id}"

 @property
 def pod_definition(self):
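The `.dockerconfigjson` payload that `create_secret()` builds above can be reproduced in isolation. A minimal sketch of just the encoding step, with no Kubernetes API calls (function name is illustrative):

```python
import json
from base64 import b64encode


def dockerconfigjson(host, username, password):
    # Same double encoding as create_secret(): the registry token is
    # base64 of "user:pass", then the whole auths document is base64 again.
    token = b64encode('{}:{}'.format(username, password).encode('UTF-8')).decode()
    auth_dict = json.dumps({"auths": {host: {"auth": token}}}, indent=4)
    return b64encode(auth_dict.encode('UTF-8')).decode()
```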
@@ -35,6 +35,7 @@ from awx.main.models import (
 from awx.main.scheduler.dag_workflow import WorkflowDAG
 from awx.main.utils.pglock import advisory_lock
 from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
+from awx.main.utils.common import create_partition
 from awx.main.signals import disable_activity_stream
 from awx.main.scheduler.dependency_graph import DependencyGraph
 from awx.main.utils import decrypt_field

@@ -301,6 +302,8 @@ class TaskManager:

         def post_commit():
             if task.status != 'failed' and type(task) is not WorkflowJob:
+                # Before task is dispatched, ensure that job_event partitions exist
+                create_partition(task.event_class._meta.db_table, start=task.created)
                 task_cls = task._get_task_class()
                 task_cls.apply_async(
                     [task.pk],

@@ -471,6 +474,7 @@ class TaskManager:
                 tasks_to_update_job_explanation.append(task)
                 continue
             preferred_instance_groups = task.preferred_instance_groups
+
             found_acceptable_queue = False
             if isinstance(task, WorkflowJob):
                 if task.unified_job_template_id in running_workflow_templates:

@@ -481,6 +485,7 @@ class TaskManager:
                 running_workflow_templates.add(task.unified_job_template_id)
                 self.start_task(task, None, task.get_jobs_fail_chain(), None)
                 continue
+
             for rampart_group in preferred_instance_groups:
                 if task.can_run_containerized and rampart_group.is_container_group:
                     self.graph[rampart_group.name]['graph'].add_job(task)

@@ -488,12 +493,12 @@ class TaskManager:
                     found_acceptable_queue = True
                     break

+                if not task.can_run_on_control_plane:
+                    logger.debug("Skipping group {}, task cannot run on control plane".format(rampart_group.name))
+                    continue
+
                 remaining_capacity = self.get_remaining_capacity(rampart_group.name)
-                if (
-                    task.task_impact > 0
-                    and not rampart_group.is_container_group  # project updates have a cost of zero
-                    and self.get_remaining_capacity(rampart_group.name) <= 0
-                ):
+                if task.task_impact > 0 and self.get_remaining_capacity(rampart_group.name) <= 0:
                     logger.debug("Skipping group {}, remaining_capacity {} <= 0".format(rampart_group.name, remaining_capacity))
                     continue

@@ -31,6 +31,7 @@ from crum.signals import current_user_getter
 # AWX
 from awx.main.models import (
     ActivityStream,
+    ExecutionEnvironment,
     Group,
     Host,
     InstanceGroup,

@@ -623,6 +624,12 @@ def deny_orphaned_approvals(sender, instance, **kwargs):
             approval.deny()


+@receiver(pre_delete, sender=ExecutionEnvironment)
+def remove_default_ee(sender, instance, **kwargs):
+    if instance.id == getattr(settings.DEFAULT_EXECUTION_ENVIRONMENT, 'id', None):
+        settings.DEFAULT_EXECUTION_ENVIRONMENT = None
+
+
 @receiver(post_save, sender=Session)
 def save_user_session_membership(sender, **kwargs):
     session = kwargs.get('instance', None)

@@ -4,7 +4,7 @@
 # All Rights Reserved.

 # Python
-from collections import OrderedDict, namedtuple
+from collections import OrderedDict, namedtuple, deque
 import errno
 import functools
 import importlib

@@ -28,10 +28,11 @@ import threading
 import concurrent.futures
 from base64 import b64encode
 import subprocess
+import sys

 # Django
 from django.conf import settings
-from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
+from django.db import transaction, DatabaseError, IntegrityError
 from django.db.models.fields.related import ForeignKey
 from django.utils.timezone import now
 from django.utils.encoding import smart_str

@@ -56,7 +57,7 @@ from receptorctl.socket_interface import ReceptorControl

 # AWX
 from awx import __version__ as awx_application_version
-from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
+from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS
 from awx.main.access import access_registry
 from awx.main.redact import UriCleaner
 from awx.main.models import (

@@ -86,7 +87,7 @@ from awx.main.exceptions import AwxTaskError, PostRunError
 from awx.main.queue import CallbackQueueDispatcher
 from awx.main.dispatch.publish import task
 from awx.main.dispatch import get_local_queuename, reaper
-from awx.main.utils import (
+from awx.main.utils.common import (
     update_scm_url,
     ignore_inventory_computed_fields,
     ignore_inventory_group_removal,

@@ -95,8 +96,10 @@ from awx.main.utils import (
     get_awx_version,
     deepmerge,
     parse_yaml_or_json,
+    cleanup_new_process,
+    create_partition,
 )
-from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec
+from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
 from awx.main.utils.ansible import read_ansible_config
 from awx.main.utils.external_logging import reconfigure_rsyslog
 from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja

@@ -470,6 +473,33 @@ def cluster_node_heartbeat():
             logger.exception('Error marking {} as lost'.format(other_inst.hostname))


+@task(queue=get_local_queuename)
+def awx_receptor_workunit_reaper():
+    """
+    When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
+    in a specific receptor directory. This directory on disk is a random 8 character string, e.g. qLL2JFNT
+    This is also called the work Unit ID in receptor, and is used in various receptor commands,
+    e.g. "work results qLL2JFNT"
+    After an AWX job executes, the receptor work unit directory is cleaned up by
+    issuing the work release command. In some cases the release process might fail, or
+    if AWX crashes during a job's execution, the work release command is never issued to begin with.
+    As such, this periodic task will obtain a list of all receptor work units, and find which ones
+    belong to AWX jobs that are in a completed state (status is canceled, error, or succeeded).
+    This task will call "work release" on each of these work units to clean up the files on disk.
+    """
+    if not settings.RECEPTOR_RELEASE_WORK:
+        return
+    logger.debug("Checking for unreleased receptor work units")
+    receptor_ctl = get_receptor_ctl()
+    receptor_work_list = receptor_ctl.simple_command("work list")
+
+    unit_ids = [id for id in receptor_work_list]
+    jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
+    for job in jobs_with_unreleased_receptor_units:
+        logger.debug(f"{job.log_format} is not active, reaping receptor work unit {job.work_unit_id}")
+        receptor_ctl.simple_command(f"work release {job.work_unit_id}")
+
+
 @task(queue=get_local_queuename)
 def awx_k8s_reaper():
     if not settings.RECEPTOR_RELEASE_WORK:

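The reaper above relies on two receptor operations, "work list" and "work release", issued over the receptor control socket. A minimal standalone sketch of the same cleanup idea, assuming that "work list" returns a mapping keyed by work unit ID (as the iteration above implies) and that the caller already knows which unit IDs belong to finished jobs:

from receptorctl.socket_interface import ReceptorControl

def release_finished_units(finished_unit_ids, socket_path='/var/run/receptor/receptor.sock'):
    # Connect to the receptor control socket and list all known work units.
    ctl = ReceptorControl(socket_path)
    work_units = ctl.simple_command("work list")  # assumed: mapping keyed by work unit ID
    for unit_id in work_units:
        if unit_id in finished_unit_ids:
            # Release the unit so receptor removes its on-disk directory.
            ctl.simple_command(f"work release {unit_id}")
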
@@ -680,48 +710,6 @@ def update_host_smart_inventory_memberships():
         smart_inventory.update_computed_fields()


-@task(queue=get_local_queuename)
-def migrate_legacy_event_data(tblname):
-    #
-    # NOTE: this function is not actually in use anymore,
-    # but has been intentionally kept for historical purposes,
-    # and to serve as an illustration if we ever need to perform
-    # bulk modification/migration of event data in the future.
-    #
-    if 'event' not in tblname:
-        return
-    with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
-        if acquired is False:
-            return
-        chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE
-
-        def _remaining():
-            try:
-                cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
-                return cursor.fetchone()[0]
-            except ProgrammingError:
-                # the table is gone (migration is unnecessary)
-                return None
-
-        with connection.cursor() as cursor:
-            total_rows = _remaining()
-            while total_rows:
-                with transaction.atomic():
-                    cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
-                    last_insert_pk = cursor.fetchone()
-                    if last_insert_pk is None:
-                        # this means that the SELECT from the old table was
-                        # empty, and there was nothing to insert (so we're done)
-                        break
-                    last_insert_pk = last_insert_pk[0]
-                    cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
-                logger.warn(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
-
-            if _remaining() is None:
-                cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
-                logger.warn(f'{tblname} primary key migration to bigint has finished')
-
-
 @task(queue=get_local_queuename)
 def delete_inventory(inventory_id, user_id, retries=5):
     # Delete inventory as user

@@ -769,6 +757,10 @@ def with_path_cleanup(f):
     return _wrapped


+def get_receptor_ctl():
+    return ReceptorControl('/var/run/receptor/receptor.sock')
+
+
 class BaseTask(object):
     model = None
     event_model = None

@@ -779,6 +771,8 @@ class BaseTask(object):
         self.parent_workflow_job_id = None
         self.host_map = {}
         self.guid = GuidMiddleware.get_guid()
+        self.job_created = None
+        self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)

     def update_model(self, pk, _attempt=0, **updates):
         """Reload the model instance from the database and update the

@@ -840,7 +834,7 @@ class BaseTask(object):
             username = cred.get_input('username')
             password = cred.get_input('password')
             token = "{}:{}".format(username, password)
-            auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
+            auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
             authfile.write(json.dumps(auth_data, indent=4))
             params["container_options"].append(f'--authfile={authfile.name}')
         else:

@@ -853,7 +847,9 @@ class BaseTask(object):
         if settings.AWX_ISOLATION_SHOW_PATHS:
             params['container_volume_mounts'] = []
             for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
-                params['container_volume_mounts'].append(f'{this_path}:{this_path}:Z')
+                # Using z allows the dir to mounted by multiple containers
+                # Uppercase Z restricts access (in weird ways) to 1 container at a time
+                params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
         return params

     def build_private_data(self, instance, private_data_dir):

@@ -873,11 +869,12 @@ class BaseTask(object):

         path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
         os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
-        runner_project_folder = os.path.join(path, 'project')
-        if not os.path.exists(runner_project_folder):
-            # Ansible Runner requires that this directory exists.
-            # Specifically, when using process isolation
-            os.mkdir(runner_project_folder)
+        # Ansible runner requires that project exists,
+        # and we will write files in the other folders without pre-creating the folder
+        for subfolder in ('project', 'inventory', 'env'):
+            runner_subfolder = os.path.join(path, subfolder)
+            if not os.path.exists(runner_subfolder):
+                os.mkdir(runner_subfolder)
         return path

     def build_private_data_files(self, instance, private_data_dir):

@@ -921,7 +918,7 @@ class BaseTask(object):
                 # Instead, ssh private key file is explicitly passed via an
                 # env variable.
                 else:
                     handle, path = tempfile.mkstemp(dir=private_data_dir)
-                    handle, path = tempfile.mkstemp(dir=private_data_dir)
+                    handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
                     f = os.fdopen(handle, 'w')
                     f.write(data)
                     f.close()

@@ -994,6 +991,9 @@ class BaseTask(object):

         env['AWX_PRIVATE_DATA_DIR'] = private_data_dir

+        if self.instance.execution_environment is None:
+            raise RuntimeError('The project could not sync because there is no Execution Environment.')
+
         ee_cred = self.instance.execution_environment.credential
         if ee_cred:
             verify_ssl = ee_cred.get_input('verify_ssl')

@@ -1031,7 +1031,6 @@ class BaseTask(object):
             self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
             json_data = json.dumps(script_data)
             path = os.path.join(private_data_dir, 'inventory')
-            os.makedirs(path, mode=0o700)
             fn = os.path.join(path, 'hosts')
             with open(fn, 'w') as f:
                 os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)

@@ -1057,9 +1056,6 @@ class BaseTask(object):
         os.chmod(path, stat.S_IRUSR)
         return path

-    def build_cwd(self, instance, private_data_dir):
-        raise NotImplementedError
-
     def build_credentials_list(self, instance):
         return []

@@ -1154,6 +1150,7 @@ class BaseTask(object):
         event_data.pop('parent_uuid', None)
         if self.parent_workflow_job_id:
             event_data['workflow_job_id'] = self.parent_workflow_job_id
+        event_data['job_created'] = self.job_created
         if self.host_map:
             host = event_data.get('event_data', {}).get('host', '').strip()
             if host:

@@ -1187,6 +1184,37 @@ class BaseTask(object):
         if 'event_data' in event_data:
             event_data['event_data']['guid'] = self.guid

+        # To prevent overwhelming the broadcast queue, skip some websocket messages
+        if self.recent_event_timings:
+            cpu_time = time.time()
+            first_window_time = self.recent_event_timings[0]
+            last_window_time = self.recent_event_timings[-1]
+
+            if event_data.get('event') in MINIMAL_EVENTS:
+                should_emit = True  # always send some types like playbook_on_stats
+            elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
+                should_emit = False  # exclude events with no output
+            else:
+                should_emit = any(
+                    [
+                        # if the 30th most recent websocket message was sent over 1 second ago
+                        cpu_time - first_window_time > 1.0,
+                        # if the very last websocket message came in over 1/30 seconds ago
+                        self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
+                        # if the queue is not yet full
+                        len(self.recent_event_timings) != self.recent_event_timings.maxlen,
+                    ]
+                )
+
+            if should_emit:
+                self.recent_event_timings.append(cpu_time)
+            else:
+                event_data.setdefault('event_data', {})
+                event_data['skip_websocket_message'] = True
+
+        elif self.recent_event_timings.maxlen:
+            self.recent_event_timings.append(time.time())
+
         event_data.setdefault(self.event_data_key, self.instance.id)
         self.dispatcher.dispatch(event_data)
         self.event_ct += 1

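The any() check added above amounts to a sliding-window rate limit over websocket emissions. A self-contained illustration of the same window logic, assuming a window of 30 entries (the actual value of MAX_WEBSOCKET_EVENT_RATE is not shown in this diff):

import time
from collections import deque

recent_event_timings = deque(maxlen=30)  # assumed window size

def should_emit(now=None):
    # Mirror of the window check: always emit while the window is not yet full,
    # when the oldest timestamp in the window is over a second old,
    # or when the newest one came in more than 1/30 of a second ago.
    now = time.time() if now is None else now
    if len(recent_event_timings) != recent_event_timings.maxlen:
        return True
    oldest, newest = recent_event_timings[0], recent_event_timings[-1]
    return (now - oldest > 1.0) or (recent_event_timings.maxlen * (now - newest) > 1.0)

def record_emit(now=None):
    recent_event_timings.append(time.time() if now is None else now)
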
@@ -1240,11 +1268,17 @@ class BaseTask(object):
             for k, v in self.safe_env.items():
                 if k in job_env:
                     job_env[k] = v
-            self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
+            from awx.main.signals import disable_activity_stream  # Circular import
+
+            with disable_activity_stream():
+                self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
         elif status_data['status'] == 'error':
             result_traceback = status_data.get('result_traceback', None)
             if result_traceback:
-                self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
+                from awx.main.signals import disable_activity_stream  # Circular import
+
+                with disable_activity_stream():
+                    self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)

     @with_path_cleanup
     def run(self, pk, **kwargs):

@@ -1279,6 +1313,8 @@ class BaseTask(object):
         if self.instance.spawned_by_workflow:
             self.parent_workflow_job_id = self.instance.get_workflow_job().id

+        self.job_created = str(self.instance.created)
+
         try:
             self.instance.send_notification_templates("running")
             private_data_dir = self.build_private_data_dir(self.instance)

@@ -1296,10 +1332,6 @@ class BaseTask(object):
             if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
                 raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)

-            # store a record of the venv used at runtime
-            if hasattr(self.instance, 'custom_virtualenv'):
-                self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
-
             # Fetch "cached" fact data from prior runs and put on the disk
             # where ansible expects to find it
             if getattr(self.instance, 'use_fact_cache', False):

@@ -1367,14 +1399,17 @@ class BaseTask(object):

             self.instance.log_lifecycle("running_playbook")
             if isinstance(self.instance, SystemJob):
-                cwd = self.build_cwd(self.instance, private_data_dir)
                 res = ansible_runner.interface.run(
-                    project_dir=cwd, event_handler=self.event_handler, finished_callback=self.finished_callback, status_handler=self.status_handler, **params
+                    project_dir=settings.BASE_DIR,
+                    event_handler=self.event_handler,
+                    finished_callback=self.finished_callback,
+                    status_handler=self.status_handler,
+                    **params,
                 )
             else:
                 receptor_job = AWXReceptorJob(self, params)
-                self.unit_id = receptor_job.unit_id
                 res = receptor_job.run()
+                self.unit_id = receptor_job.unit_id

                 if not res:
                     return

@@ -1528,8 +1563,8 @@ class RunJob(BaseTask):
         # Set environment variables for cloud credentials.
         cred_files = private_data_files.get('credentials', {})
         for cloud_cred in job.cloud_credentials:
-            if cloud_cred and cloud_cred.credential_type.namespace == 'openstack':
-                env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(cred_files.get(cloud_cred, '')))
+            if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
+                env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)

         for network_cred in job.network_credentials:
             env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')

@@ -1561,8 +1596,7 @@ class RunJob(BaseTask):
                 for path in config_values[config_setting].split(':'):
                     if path not in paths:
                         paths = [config_values[config_setting]] + paths
-                # FIXME: again, figure out more elegant way for inside container
-                paths = [os.path.join('/runner', folder)] + paths
+                paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
                 env[env_key] = os.pathsep.join(paths)

         return env

@@ -1636,9 +1670,6 @@ class RunJob(BaseTask):

         return args

-    def build_cwd(self, job, private_data_dir):
-        return os.path.join(private_data_dir, 'project')
-
     def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
         return job.playbook

@@ -1709,6 +1740,10 @@ class RunJob(BaseTask):
             error = _('Job could not start because it does not have a valid project.')
             self.update_model(job.pk, status='failed', job_explanation=error)
             raise RuntimeError(error)
+        elif job.execution_environment is None:
+            error = _('Job could not start because no Execution Environment could be found.')
+            self.update_model(job.pk, status='error', job_explanation=error)
+            raise RuntimeError(error)
         elif job.project.status in ('error', 'failed'):
             msg = _('The project revision for this job template is unknown due to a failed update.')
             job = self.update_model(job.pk, status='failed', job_explanation=msg)

@@ -1759,9 +1794,11 @@ class RunJob(BaseTask):
             )
             if branch_override:
                 sync_metafields['scm_branch'] = job.scm_branch
+                sync_metafields['scm_clean'] = True  # to accomidate force pushes
             if 'update_' not in sync_metafields['job_tags']:
                 sync_metafields['scm_revision'] = job_revision
             local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
+            create_partition(local_project_sync.event_class._meta.db_table, start=local_project_sync.created)
             # save the associated job before calling run() so that a
             # cancel() call on the job can cancel the project update
             job = self.update_model(job.pk, project_update=local_project_sync)

@@ -2012,9 +2049,6 @@ class RunProjectUpdate(BaseTask):

         self._write_extra_vars_file(private_data_dir, extra_vars)

-    def build_cwd(self, project_update, private_data_dir):
-        return os.path.join(private_data_dir, 'project')
-
     def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
         return os.path.join('project_update.yml')

@@ -2044,17 +2078,24 @@ class RunProjectUpdate(BaseTask):
             if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
                 logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
                 continue
+
+            if settings.IS_K8S:
+                instance_group = InventoryUpdate(inventory_source=inv_src).preferred_instance_groups[0]
+            else:
+                instance_group = project_update.instance_group
+
             local_inv_update = inv_src.create_inventory_update(
                 _eager_fields=dict(
                     launch_type='scm',
                     status='running',
-                    instance_group=project_update.instance_group,
+                    instance_group=instance_group,
                     execution_node=project_update.execution_node,
                     source_project_update=project_update,
                     celery_task_id=project_update.celery_task_id,
                 )
             )
             try:
+                create_partition(local_inv_update.event_class._meta.db_table, start=local_inv_update.created)
                 inv_update_class().run(local_inv_update.id)
             except Exception:
                 logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))

@@ -2135,8 +2176,6 @@ class RunProjectUpdate(BaseTask):
         if not os.path.exists(settings.PROJECTS_ROOT):
             os.mkdir(settings.PROJECTS_ROOT)
         project_path = instance.project.get_project_path(check_if_exists=False)
-        if not os.path.exists(project_path):
-            os.makedirs(project_path)  # used as container mount

         self.acquire_lock(instance)

@@ -2149,6 +2188,9 @@ class RunProjectUpdate(BaseTask):
             else:
                 self.original_branch = git_repo.active_branch

+        if not os.path.exists(project_path):
+            os.makedirs(project_path)  # used as container mount
+
         stage_path = os.path.join(instance.get_cache_path(), 'stage')
         if os.path.exists(stage_path):
             logger.warning('{0} unexpectedly existed before update'.format(stage_path))

@@ -2384,9 +2426,14 @@ class RunInventoryUpdate(BaseTask):
                 for path in config_values[config_setting].split(':'):
                     if path not in paths:
                         paths = [config_values[config_setting]] + paths
-                # FIXME: containers
-                paths = [os.path.join('/runner', folder)] + paths
+                paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
                 env[env_key] = os.pathsep.join(paths)
+        if 'ANSIBLE_COLLECTIONS_PATHS' in env:
+            paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
+        else:
+            paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
+        paths.append('/usr/share/automation-controller/collections')
+        env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)

         return env

@@ -2414,14 +2461,14 @@ class RunInventoryUpdate(BaseTask):

         # Add arguments for the source inventory file/script/thing
         rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
-        container_location = os.path.join('/runner', rel_path)  # TODO: make container paths elegant
+        container_location = os.path.join(CONTAINER_ROOT, rel_path)
         source_location = os.path.join(private_data_dir, rel_path)

         args.append('-i')
         args.append(container_location)

         args.append('--output')
-        args.append(os.path.join('/runner', 'artifacts', str(inventory_update.id), 'output.json'))
+        args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))

         if os.path.isdir(source_location):
             playbook_dir = container_location

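These hunks replace hard-coded '/runner' joins with CONTAINER_ROOT and to_container_path imported from awx.main.utils.execution_environments. Their implementations are not shown in this diff; the apparent intent is to translate a host-side path under the private data directory into the corresponding path under the container mount. A plausible sketch under that assumption (not the actual AWX helper):

import os

CONTAINER_ROOT = '/runner'  # assumed value of the imported constant

def to_container_path(path, private_data_dir):
    # Sketch: map a path inside the host-side private data dir onto the
    # directory where that dir is mounted inside the execution container.
    if not path.startswith(private_data_dir):
        raise ValueError(f'{path} is not inside the private data dir {private_data_dir}')
    return os.path.join(CONTAINER_ROOT, os.path.relpath(path, private_data_dir))
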
@@ -2453,29 +2500,17 @@ class RunInventoryUpdate(BaseTask):
         if injector is not None:
             content = injector.inventory_contents(inventory_update, private_data_dir)
             # must be a statically named file
-            inventory_path = os.path.join(private_data_dir, injector.filename)
+            inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
             with open(inventory_path, 'w') as f:
                 f.write(content)
             os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

-            rel_path = injector.filename
+            rel_path = os.path.join('inventory', injector.filename)
         elif src == 'scm':
             rel_path = os.path.join('project', inventory_update.source_path)

         return rel_path

-    def build_cwd(self, inventory_update, private_data_dir):
-        """
-        There is one case where the inventory "source" is in a different
-        location from the private data:
-        - SCM, where source needs to live in the project folder
-        """
-        src = inventory_update.source
-        container_dir = '/runner'  # TODO: make container paths elegant
-        if src == 'scm' and inventory_update.source_project_update:
-            return os.path.join(container_dir, 'project')
-        return container_dir
-
     def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
         return None

@@ -2751,9 +2786,6 @@ class RunAdHocCommand(BaseTask):
         module_args = sanitize_jinja(module_args)
         return module_args

-    def build_cwd(self, ad_hoc_command, private_data_dir):
-        return private_data_dir
-
     def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
         return None

@@ -2818,9 +2850,6 @@ class RunSystemJob(BaseTask):
         env.update(base_env)
         return env

-    def build_cwd(self, instance, private_data_dir):
-        return settings.BASE_DIR
-
     def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
         return None

@@ -2886,8 +2915,18 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, u
         update_inventory_computed_fields.delay(new_obj.id)


+class TransmitterThread(threading.Thread):
+    def run(self):
+        self.exc = None
+
+        try:
+            super().run()
+        except Exception:
+            self.exc = sys.exc_info()
+
+
 class AWXReceptorJob:
-    def __init__(self, task=None, runner_params=None):
+    def __init__(self, task, runner_params=None):
         self.task = task
         self.runner_params = runner_params
         self.unit_id = None

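TransmitterThread exists so that an exception raised while streaming the payload is not silently lost in the worker thread; the parent captures sys.exc_info() and can re-raise it after the work has been submitted. A generic, self-contained version of that pattern (names here are illustrative, not from the diff):

import sys
import threading

class CapturingThread(threading.Thread):
    # Store any exception raised by the target so the parent thread can re-raise it.
    def run(self):
        self.exc = None
        try:
            super().run()
        except Exception:
            self.exc = sys.exc_info()

def run_in_thread_and_reraise(target, *args):
    worker = CapturingThread(target=target, args=args)
    worker.start()
    worker.join()
    if worker.exc:
        # Re-raise in the caller, preserving the worker's traceback.
        raise worker.exc[1].with_traceback(worker.exc[2])
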
@@ -2898,7 +2937,7 @@ class AWXReceptorJob:

     def run(self):
         # We establish a connection to the Receptor socket
-        receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
+        receptor_ctl = get_receptor_ctl()

         try:
             return self._run_internal(receptor_ctl)

@@ -2913,16 +2952,23 @@ class AWXReceptorJob:
         # reading.
         sockin, sockout = socket.socketpair()

-        threading.Thread(target=self.transmit, args=[sockin]).start()
+        transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
+        transmitter_thread.start()
+
         # submit our work, passing
         # in the right side of our socketpair for reading.
         result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
         self.unit_id = result['unitid']
+        self.task.update_model(self.task.instance.pk, work_unit_id=result['unitid'])
+
         sockin.close()
         sockout.close()

+        if transmitter_thread.exc:
+            raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
+
+        transmitter_thread.join()
+
         resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
         # Both "processor" and "cancel_watcher" are spawned in separate threads.
         # We wait for the first one to return. If cancel_watcher returns first,

@@ -2953,27 +2999,30 @@ class AWXReceptorJob:
             logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
             self.task.update_model(self.task.instance.pk, status='pending')
             return

         # If ansible-runner ran, but an error occured at runtime, the traceback information
         # is saved via the status_handler passed in to the processor.
         if state_name == 'Succeeded':
             return res

-        raise RuntimeError(detail)
+        if self.task.instance.result_traceback is None:
+            raise RuntimeError(detail)

         return res

     # Spawned in a thread so Receptor can start reading before we finish writing, we
     # write our payload to the left side of our socketpair.
+    @cleanup_new_process
     def transmit(self, _socket):
         if not settings.IS_K8S and self.work_type == 'local':
             self.runner_params['only_transmit_kwargs'] = True

-        ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
-        # Socket must be shutdown here, or the reader will hang forever.
-        _socket.shutdown(socket.SHUT_WR)
+        try:
+            ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
+        finally:
+            # Socket must be shutdown here, or the reader will hang forever.
+            _socket.shutdown(socket.SHUT_WR)

+    @cleanup_new_process
     def processor(self, resultfile):
         return ansible_runner.interface.run(
             streamer='process',

@@ -2992,6 +3041,7 @@ class AWXReceptorJob:

         receptor_params = {
             "secret_kube_pod": spec_yaml,
+            "pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
         }

         if self.credential:

@@ -3015,6 +3065,7 @@ class AWXReceptorJob:

         return work_type

+    @cleanup_new_process
     def cancel_watcher(self, processor_future):
         while True:
             if processor_future.done():

@@ -3024,18 +3075,11 @@ class AWXReceptorJob:
                 result = namedtuple('result', ['status', 'rc'])
                 return result('canceled', 1)

-            if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
-                self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
-                self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
-
             time.sleep(1)

     @property
     def pod_definition(self):
-        if self.task:
-            ee = self.task.instance.resolve_execution_environment()
-        else:
-            ee = get_default_execution_environment()
+        ee = self.task.instance.execution_environment

         default_pod_spec = get_default_pod_spec()

@@ -3047,6 +3091,24 @@ class AWXReceptorJob:
         pod_spec['spec']['containers'][0]['image'] = ee.image
         pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']

+        # Enforce EE Pull Policy
+        pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
+        if self.task and self.task.instance.execution_environment:
+            if self.task.instance.execution_environment.pull:
+                pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
+
+        if self.task and self.task.instance.is_container_group_task:
+            # If EE credential is passed, create an imagePullSecret
+            if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
+                # Create pull secret in k8s cluster based on ee cred
+                from awx.main.scheduler.kubernetes import PodManager  # prevent circular import
+
+                pm = PodManager(self.task.instance)
+                secret_name = pm.create_secret(job=self.task.instance)
+
+                # Inject secret name into podspec
+                pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
+
         if self.task:
             pod_spec['metadata'] = deepmerge(
                 pod_spec.get('metadata', {}),

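With the pull-policy and imagePullSecret changes above, the rendered pod spec gains two standard Kubernetes fields. Roughly, the affected fragment looks like the following (the image, policy value, and secret name are illustrative placeholders; the real secret name comes from PodManager.create_secret shown earlier):

pod_spec_fragment = {
    'spec': {
        'imagePullSecrets': [{'name': 'example-image-pull-secret'}],  # hypothetical name
        'containers': [
            {
                'image': 'quay.io/example/ee:latest',  # hypothetical EE image
                'imagePullPolicy': 'Always',  # i.e. pull_options['always']
            }
        ],
    }
}
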
@@ -3057,7 +3119,7 @@ class AWXReceptorJob:

     @property
     def pod_name(self):
-        return f"awx-job-{self.task.instance.id}"
+        return f"automation-job-{self.task.instance.id}"

     @property
     def credential(self):

@@ -3,7 +3,7 @@ import pytest
 from unittest import mock
 from contextlib import contextmanager

-from awx.main.models import Credential
+from awx.main.models import Credential, UnifiedJob
 from awx.main.tests.factories import (
     create_organization,
     create_job_template,

@@ -81,7 +81,7 @@ def instance_group_factory():

 @pytest.fixture
 def default_instance_group(instance_factory, instance_group_factory):
-    return create_instance_group("tower", instances=[create_instance("hostA")])
+    return create_instance_group("default", instances=[create_instance("hostA")])


 @pytest.fixture

@@ -149,3 +149,29 @@ def mock_external_credential_input_sources():
     # test it explicitly.
     with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture:
         yield _fixture
+
+
+@pytest.fixture(scope='session', autouse=True)
+def mock_has_unpartitioned_events():
+    # has_unpartitioned_events determines if there are any events still
+    # left in the old, unpartitioned job events table. In order to work,
+    # this method looks up when the partition migration occurred. When
+    # Django's unit tests run, however, there will be no record of the migration.
+    # We mock this out to circumvent the migration query.
+    with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture:
+        yield _fixture
+
+
+@pytest.fixture(scope='session', autouse=True)
+def mock_get_event_queryset_no_job_created():
+    """
+    SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the
+    job_created field. That field does not actually exist in a non-partition scenario.
+    """
+
+    def event_qs(self):
+        kwargs = {self.event_parent_key: self.id}
+        return self.event_class.objects.filter(**kwargs)
+
+    with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture:
+        yield _fixture

File diff suppressed because one or more lines are too long

@@ -1,14 +0,0 @@
-import json
-import os
-
-
-dir_path = os.path.dirname(os.path.realpath(__file__))
-
-with open(os.path.join(dir_path, 'insights_hosts.json')) as data_file:
-    TEST_INSIGHTS_HOSTS = json.load(data_file)
-
-with open(os.path.join(dir_path, 'insights.json')) as data_file:
-    TEST_INSIGHTS_PLANS = json.load(data_file)
-
-with open(os.path.join(dir_path, 'insights_remediations.json')) as data_file:
-    TEST_INSIGHTS_REMEDIATIONS = json.load(data_file)['data']

@@ -1,13 +0,0 @@
-{
-    "total": 1,
-    "count": 1,
-    "page": 1,
-    "per_page": 50,
-    "results": [
-        {
-            "id": "11111111-1111-1111-1111-111111111111",
-            "insights_id": "22222222-2222-2222-2222-222222222222",
-            "updated": "2019-03-19T21:59:09.213151-04:00"
-        }
-    ]
-}

@@ -1,33 +0,0 @@
-{
-    "data": [
-        {
-            "id": "9197ba55-0abc-4028-9bbe-269e530f8bd5",
-            "name": "Fix Critical CVEs",
-            "created_by": {
-                "username": "jharting@redhat.com",
-                "first_name": "Jozef",
-                "last_name": "Hartinger"
-            },
-            "created_at": "2018-12-05T08:19:36.641Z",
-            "updated_by": {
-                "username": "jharting@redhat.com",
-                "first_name": "Jozef",
-                "last_name": "Hartinger"
-            },
-            "updated_at": "2018-12-05T08:19:36.641Z",
-            "issue_count": 0,
-            "system_count": 0,
-            "needs_reboot": true
-        }
-    ],
-    "meta": {
-        "count": 0,
-        "total": 0
-    },
-    "links": {
-        "first": null,
-        "last": null,
-        "next": null,
-        "previous": null
-    }
-}
Some files were not shown because too many files have changed in this diff.