Mirror of https://github.com/ansible/awx.git (synced 2026-02-08 13:04:43 -03:30)

Compare commits (1106 commits)
Commit range: 827adbce76 through 1db88fe4f6.
.github/ISSUE_TEMPLATE.md (vendored): 5 lines changed
```diff
@@ -30,8 +30,9 @@ https://www.ansible.com/security

 ##### STEPS TO REPRODUCE

-<!-- For bugs, please show exactly how to reproduce the problem. For new
-features, show how the feature would be used. -->
+<!-- For new features, show how the feature would be used. For bugs, please show
+exactly how to reproduce the problem. Ideally, provide all steps and data needed
+to recreate the bug from a new awx install. -->

 ##### EXPECTED RESULTS

```
CHANGELOG.md: 56 lines changed
```diff
@@ -2,6 +2,62 @@

 This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

+## 10.0.0 (Mar 30, 2020)
+- As of AWX 10.0.0, the official AWX CLI no longer supports Python 2 (it requires at least Python 3.6) (https://github.com/ansible/awx/pull/6327)
+- AWX no longer relies on RabbitMQ; Redis is added as a new dependency (https://github.com/ansible/awx/issues/5443)
+- Altered AWX's event tables to allow more than ~2 billion total events (https://github.com/ansible/awx/issues/6010)
+- Improved the performance (time to execute, and memory consumption) of the periodic job cleanup system job (https://github.com/ansible/awx/pull/6166)
+- Updated Job Templates so they now have an explicit Organization field (it is no longer inferred from the associated Project) (https://github.com/ansible/awx/issues/3903)
+- Updated social-auth-core to address an upcoming GitHub API deprecation (https://github.com/ansible/awx/issues/5970)
+- Updated to ansible-runner 1.4.6 to address various bugs.
+- Updated Django to address CVE-2020-9402
+- Updated pyyaml version to address CVE-2017-18342
+- Fixed a bug which prevented the new `scm_branch` field from being used in custom notification templates (https://github.com/ansible/awx/issues/6258)
+- Fixed a race condition that sometimes causes success/failure notifications to include an incomplete list of hosts (https://github.com/ansible/awx/pull/6290)
+- Fixed a bug that can cause certain setting pages to lose unsaved form edits when a playbook is launched (https://github.com/ansible/awx/issues/5265)
+- Fixed a bug that can prevent the "Use TLS/SSL" field from properly saving when editing email notification templates (https://github.com/ansible/awx/issues/6383)
+- Fixed a race condition that sometimes broke event/stdout processing for jobs launched in container groups (https://github.com/ansible/awx/issues/6280)
+
+## 9.3.0 (Mar 12, 2020)
+- Added the ability to specify an OAuth2 token description in the AWX CLI (https://github.com/ansible/awx/issues/6122)
+- Added support for K8S service account annotations to the installer (https://github.com/ansible/awx/pull/6007)
+- Added support for K8S imagePullSecrets to the installer (https://github.com/ansible/awx/pull/5989)
+- Launching jobs (and workflows) using the --monitor flag in the AWX CLI now returns a non-zero exit code on job failure (https://github.com/ansible/awx/issues/5920)
+- Improved UI performance for various job views when many simultaneous users are logged into AWX (https://github.com/ansible/awx/issues/5883)
+- Updated to the latest version of Django to address a few open CVEs (https://github.com/ansible/awx/pull/6080)
+- Fixed a critical bug which can cause AWX to hang and stop launching playbooks after a period of time (https://github.com/ansible/awx/issues/5617)
+- Fixed a bug which caused delays in project update stdout for certain large SCM clones (as of Ansible 2.9+) (https://github.com/ansible/awx/pull/6254)
+- Fixed a bug which caused certain smart inventory filters to mistakenly return duplicate hosts (https://github.com/ansible/awx/pull/5972)
+- Fixed an unclear server error when creating smart inventories with the AWX collection (https://github.com/ansible/awx/issues/6250)
+- Fixed a bug that broke Grafana notification support (https://github.com/ansible/awx/issues/6137)
+- Fixed a UI bug which prevented users with read access to an organization from editing credentials for that organization (https://github.com/ansible/awx/pull/6241)
+- Fixed a bug which prevented workflow approval records from recording a `started` and `elapsed` date (https://github.com/ansible/awx/issues/6202)
+- Fixed a bug which caused workflow nodes to have a confusing option for `verbosity` (https://github.com/ansible/awx/issues/6196)
+- Fixed an RBAC bug which prevented projects and inventory schedules from being created by certain users in certain contexts (https://github.com/ansible/awx/issues/5717)
+- Fixed a bug that caused `role_path` in a project's config to not be respected due to an error processing `/etc/ansible/ansible.cfg` (https://github.com/ansible/awx/pull/6038)
+- Fixed a bug that broke inventory updates for installs with custom home directories for the awx user (https://github.com/ansible/awx/pull/6152)
+- Fixed a bug that broke fact data collection when AWX encounters invalid/unexpected fact data (https://github.com/ansible/awx/issues/5935)
+
+
+## 9.2.0 (Feb 12, 2020)
+- Added the ability to configure the convergence behavior of workflow nodes https://github.com/ansible/awx/issues/3054
+- AWX now allows for a configurable global limit for fork count (per-job run). The default maximum is 200. https://github.com/ansible/awx/pull/5604
+- Added the ability to specify AZURE_PUBLIC_CLOUD (for e.g., Azure Government KeyVault support) for the Azure credential plugin https://github.com/ansible/awx/issues/5138
+- Added support for several additional parameters for Satellite dynamic inventory https://github.com/ansible/awx/pull/5598
+- Added a new field to jobs for tracking the date/time a job is cancelled https://github.com/ansible/awx/pull/5610
+- Made a series of additional optimizations to the callback receiver to further improve stdout write speed for running playbooks https://github.com/ansible/awx/pull/5677 https://github.com/ansible/awx/pull/5739
+- Updated AWX to be compatible with Helm 3.x (https://github.com/ansible/awx/pull/5776)
+- Optimized AWX's job dependency/scheduling code to drastically improve processing time in scenarios where there are many pending jobs scheduled simultaneously https://github.com/ansible/awx/issues/5154
+- Fixed a bug which could cause SCM authentication details (basic auth passwords) to be reported to external loggers in certain failure scenarios (e.g., when a git clone fails and ansible itself prints an error message to stdout) https://github.com/ansible/awx/pull/5812
+- Fixed a k8s installer bug that caused installs to fail in certain situations https://github.com/ansible/awx/issues/5574
+- Fixed a number of issues that caused analytics gathering and reporting to run more often than necessary https://github.com/ansible/awx/pull/5721
+- Fixed a bug in the AWX CLI that prevented JSON-type settings from saving properly https://github.com/ansible/awx/issues/5528
+- Improved support for fetching custom virtualenv dependencies when AWX is installed behind a proxy https://github.com/ansible/awx/pull/5805
+- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
+- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
+- Fixed a bug that can cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
+- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448
+
 ## 9.1.1 (Jan 14, 2020)

 - Fixed a bug that caused database migrations on Kubernetes installs to hang https://github.com/ansible/awx/pull/5579
```
````diff
@@ -155,12 +155,11 @@ If you start a second terminal session, you can take a look at the running containers

-(host)$ docker ps
+$ docker ps
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-aa4a75d6d77b gcr.io/ansible-tower-engineering/awx_devel:devel "/tini -- /bin/sh ..." 23 seconds ago Up 15 seconds 0.0.0.0:5555->5555/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 22/tcp, 0.0.0.0:8080->8080/tcp tools_awx_1
-e4c0afeb548c postgres:10 "docker-entrypoint..." 26 seconds ago Up 23 seconds 5432/tcp tools_postgres_1
-0089699d5afd tools_logstash "/docker-entrypoin..." 26 seconds ago Up 25 seconds tools_logstash_1
-4d4ff0ced266 memcached:alpine "docker-entrypoint..." 26 seconds ago Up 25 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
-92842acd64cd rabbitmq:3-management "docker-entrypoint..." 26 seconds ago Up 24 seconds 4369/tcp, 5671-5672/tcp, 15671/tcp, 25672/tcp, 0.0.0.0:15672->15672/tcp tools_rabbitmq_1
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+44251b476f98 gcr.io/ansible-tower-engineering/awx_devel:devel "/entrypoint.sh /bin…" 27 seconds ago Up 23 seconds 0.0.0.0:6899->6899/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 0.0.0.0:8080->8080/tcp, 22/tcp, 0.0.0.0:8888->8888/tcp tools_awx_run_9e820694d57e
+b049a43817b4 memcached:alpine "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
+40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:6379->6379/tcp tools_redis_1
+b66a506d3007 postgres:10 "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:5432->5432/tcp tools_postgres_1
 ```
 **NOTE**
````
````diff
@@ -2,96 +2,8 @@

 ## Introduction

-Upgrades using Django migrations are not expected to work in AWX. As a result, to upgrade to a new version, it is necessary to export resources from the old AWX node and import them into a freshly-installed node with the new version. The recommended way to do this is to use the tower-cli send/receive feature.
+Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.
-
-This tool does __not__ support export/import of the following:
-* Logs/history
-* Credential passwords
-* LDAP/AWX config
+
+Users who wish to upgrade modern AWX installations should follow the instructions at:
-
-### Install & Configure Tower-CLI
-
-In terminal, pip install tower-cli (if you do not have pip already, install [here](https://pip.pypa.io/en/stable/installing/)):
-```
-$ pip install --upgrade ansible-tower-cli
-```
-
-The AWX host URL, user, and password must be set for the AWX instance to be exported:
-```
-$ tower-cli config host http://<old-awx-host.example.com>
-$ tower-cli config username <user>
-$ tower-cli config password <pass>
-```
-
-For more information on installing tower-cli look [here](http://tower-cli.readthedocs.io/en/latest/quickstart.html).
-
-
-### Export Resources
-
-Export all objects
-
-```$ tower-cli receive --all > assets.json```
-
-
-### Teardown Old AWX
-
-Clean up remnants of the old AWX install:
-
-```docker rm -f $(docker ps -aq)``` # remove all old awx containers
-
-```make clean-ui``` # clean up ui artifacts
-
-
-### Install New AWX version
-
-If you are installing AWX as a dev container, pull down the latest code or version you want from GitHub, build
-the image locally, then start the container
-
-```
-git pull # retrieve latest AWX changes from repository
-make docker-compose-build # build AWX image
-make docker-compose # run container
-```
-For other install methods, refer to the [Install.md](https://github.com/ansible/awx/blob/devel/INSTALL.md).
-
-
-### Import Resources
-
-Configure tower-cli for your new AWX host as shown earlier. Import from a JSON file named assets.json
-
-```
-$ tower-cli config host http://<new-awx-host.example.com>
-$ tower-cli config username <user>
-$ tower-cli config password <pass>
-$ tower-cli send assets.json
-```
-
---------------------------------------------------------------------------------
-
-## Additional Info
-
-If you have two running AWX hosts, it is possible to copy all assets from one instance to another
-
-```$ tower-cli receive --tower-host old-awx-host.example.com --all | tower-cli send --tower-host new-awx-host.example.com```
-
-
-#### More Granular Exports:
-
-Export all credentials
-
-```$ tower-cli receive --credential all > credentials.json```
-> Note: This exports the credentials with blank strings for passwords and secrets
-
-Export a credential named "My Credential"
-
-```$ tower-cli receive --credential "My Credential"```
-
-#### More Granular Imports:
-
-You could import anything except an organization defined in a JSON file named assets.json
-
-```$ tower-cli send --prevent organization assets.json```
+https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
````
INSTALL.md: 49 lines changed
````diff
@@ -41,6 +41,8 @@ This document provides a guide for installing AWX.
     + [Run the installer](#run-the-installer-2)
     + [Post-install](#post-install-2)
     + [Accessing AWX](#accessing-awx-2)
+- [Installing the AWX CLI](#installing-the-awx-cli)
+  * [Building the CLI Documentation](#building-the-cli-documentation)

 ## Getting started
@@ -128,7 +130,6 @@ For convenience, you can create a file called `vars.yml`:
 ```
 admin_password: 'adminpass'
 pg_password: 'pgpass'
-rabbitmq_password: 'rabbitpass'
 secret_key: 'mysupersecret'
 ```
@@ -506,10 +507,6 @@ If you wish to tag and push built images to a Docker registry, set the following

 > Username of the user that will push images to the registry. Defaults to *developer*.

-*docker_remove_local_images*
-
-> Due to the way that the docker_image module behaves, images will not be pushed to a remote repository if they are present locally. Set this to delete local versions of the images that will be pushed to the remote. This will fail if containers are currently running from those images.
-
 **Note**

 > These settings are ignored if using official images
@@ -559,16 +556,7 @@ $ ansible-playbook -i inventory -e docker_registry_password=password install.yml

 ### Post-install

-After the playbook run completes, Docker will report up to 5 running containers. If you chose to use an existing PostgresSQL database, then it will report 4. You can view the running containers using the `docker ps` command, as follows:
-
-```bash
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-e240ed8209cd awx_task:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 8052/tcp awx_task
-1cfd02601690 awx_web:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 0.0.0.0:443->8052/tcp awx_web
-55a552142bcd memcached:alpine "docker-entrypoint..." 2 minutes ago Up 2 minutes 11211/tcp memcached
-84011c072aad rabbitmq:3 "docker-entrypoint..." 2 minutes ago Up 2 minutes 4369/tcp, 5671-5672/tcp, 25672/tcp rabbitmq
-97e196120ab3 postgres:9.6 "docker-entrypoint..." 2 minutes ago Up 2 minutes 5432/tcp postgres
-```
+After the playbook run completes, Docker starts a series of containers that provide the services that make up AWX. You can view the running containers using the `docker ps` command.

 If you're deploying using Docker Compose, container names will be prefixed by the name of the folder where the docker-compose.yml file is created (by default, `awx`).
@@ -634,3 +622,34 @@ Added instance awx to tower
 The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).

 You will prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
+
+# Installing the AWX CLI
+
+`awx` is the official command-line client for AWX. It:
+
+* Uses naming and structure consistent with the AWX HTTP API
+* Provides consistent output formats with optional machine-parsable formats
+* To the extent possible, auto-detects API versions, available endpoints, and
+  feature support across multiple versions of AWX.
+
+Potential uses include:
+
+* Configuring and launching jobs/playbooks
+* Checking on the status and output of job runs
+* Managing objects like organizations, users, teams, etc...
+
+The preferred way to install the AWX CLI is through pip directly from GitHub:
+
+    pip install "https://github.com/ansible/awx/archive/$VERSION.tar.gz#egg=awxkit&subdirectory=awxkit"
+    awx --help
+
+...where ``$VERSION`` is the version of AWX you're running. To see a list of all available releases, visit: https://github.com/ansible/awx/releases
+
+## Building the CLI Documentation
+
+To build the docs, spin up a real AWX server, `pip install sphinx sphinxcontrib-autoprogram`, and run:
+
+    ~ TOWER_HOST=https://awx.example.org TOWER_USERNAME=example TOWER_PASSWORD=secret make clean html
+    ~ cd build/html/ && python -m http.server
+    Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ..
````
Makefile: 64 lines changed
```diff
@@ -122,7 +122,7 @@ clean-api:
 	rm -rf awx/projects

 clean-awxkit:
-	rm -rf awxkit/*.egg-info awxkit/.tox
+	rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*

 # convenience target to assert environment variables are defined
 guard-%:
@@ -167,8 +167,7 @@ virtualenv_awx:
 	fi; \
 	if [ ! -d "$(VENV_BASE)/awx" ]; then \
 		virtualenv -p $(PYTHON) $(VENV_BASE)/awx; \
-		$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP) && \
-		$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) flit; \
+		$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
 	fi; \
 	fi
@@ -266,28 +265,6 @@ migrate:
 dbchange:
 	$(MANAGEMENT_COMMAND) makemigrations

-server_noattach:
-	tmux new-session -d -s awx 'exec make uwsgi'
-	tmux rename-window 'AWX'
-	tmux select-window -t awx:0
-	tmux split-window -v 'exec make dispatcher'
-	tmux new-window 'exec make daphne'
-	tmux select-window -t awx:1
-	tmux rename-window 'WebSockets'
-	tmux split-window -h 'exec make runworker'
-	tmux split-window -v 'exec make nginx'
-	tmux new-window 'exec make receiver'
-	tmux select-window -t awx:2
-	tmux rename-window 'Extra Services'
-	tmux select-window -t awx:0
-
-server: server_noattach
-	tmux -2 attach-session -t awx
-
-# Use with iterm2's native tmux protocol support
-servercc: server_noattach
-	tmux -2 -CC attach-session -t awx
-
 supervisor:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
@@ -312,18 +289,11 @@ daphne:
 	fi; \
 	daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer

-runworker:
+wsbroadcast:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
-	$(PYTHON) manage.py runworker --only-channels websocket.*
-
-# Run the built-in development webserver (by default on http://localhost:8013).
-runserver:
-	@if [ "$(VENV_BASE)" ]; then \
-		. $(VENV_BASE)/awx/bin/activate; \
-	fi; \
-	$(PYTHON) manage.py runserver
+	$(PYTHON) manage.py run_wsbroadcast

 # Run to start the background task dispatcher for development.
 dispatcher:
@@ -401,34 +371,42 @@ prepare_collection_venv:
 	$(VENV_BASE)/awx/bin/pip install --target=$(COLLECTION_VENV) git+https://github.com/ansible/tower-cli.git

 COLLECTION_TEST_DIRS ?= awx_collection/test/awx
+COLLECTION_TEST_TARGET ?=
+COLLECTION_PACKAGE ?= awx
+COLLECTION_NAMESPACE ?= awx
+COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)

 test_collection:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/awx/bin/activate; \
 	fi; \
-	PYTHONPATH=$(COLLECTION_VENV):/awx_devel/awx_collection:$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
+	PYTHONPATH=$(COLLECTION_VENV):$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)

 flake8_collection:
 	flake8 awx_collection/  # Different settings, in main exclude list

 test_collection_all: prepare_collection_venv test_collection flake8_collection

-test_collection_sanity:
-	rm -rf sanity
-	mkdir -p sanity/ansible_collections/awx
-	cp -Ra awx_collection sanity/ansible_collections/awx/awx  # symlinks do not work
-	cd sanity/ansible_collections/awx/awx && git init && git add .  # requires both this file structure and a git repo, so there you go
-	cd sanity/ansible_collections/awx/awx && ansible-test sanity
+# WARNING: symlinking a collection is fundamentally unstable
+# this is for rapid development iteration with playbooks, do not use with other test targets
+symlink_collection:
+	rm -rf $(COLLECTION_INSTALL)
+	mkdir -p ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)  # in case it does not exist
+	ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)

 build_collection:
+	ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
 	ansible-galaxy collection build awx_collection --force --output-path=awx_collection

 install_collection: build_collection
-	rm -rf ~/.ansible/collections/ansible_collections/awx/awx
-	ansible-galaxy collection install awx_collection/awx-awx-$(VERSION).tar.gz
+	rm -rf $(COLLECTION_INSTALL)
+	ansible-galaxy collection install awx_collection/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(VERSION).tar.gz
+
+test_collection_sanity: install_collection
+	cd $(COLLECTION_INSTALL) && ansible-test sanity
+
+test_collection_integration: install_collection
+	cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)

 test_unit:
 	@if [ "$(VENV_BASE)" ]; then \
```
```diff
@@ -9,7 +9,7 @@ from functools import reduce
 # Django
 from django.core.exceptions import FieldError, ValidationError
 from django.db import models
-from django.db.models import Q
+from django.db.models import Q, CharField, IntegerField, BooleanField
 from django.db.models.fields import FieldDoesNotExist
 from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
 from django.contrib.contenttypes.models import ContentType
@@ -63,19 +63,19 @@ class TypeFilterBackend(BaseFilterBackend):
             raise ParseError(*e.args)


-def get_field_from_path(model, path):
+def get_fields_from_path(model, path):
     '''
     Given a Django ORM lookup path (possibly over multiple models)
-    Returns the last field in the line, and also the revised lookup path
+    Returns the fields in the line, and also the revised lookup path
     ex., given
         model=Organization
         path='project__timeout'
-    returns tuple of field at the end of the line as well as a corrected
-    path, for special cases we do substitutions
-        (<IntegerField for timeout>, 'project__timeout')
+    returns tuple of fields traversed as well and a corrected path,
+    for special cases we do substitutions
+        ([<IntegerField for timeout>], 'project__timeout')
     '''
     # Store of all the fields used to detect repeats
-    field_set = set([])
+    field_list = []
     new_parts = []
     for name in path.split('__'):
         if model is None:
@@ -111,13 +111,24 @@ def get_field_from_path(model, path):
             raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
         elif getattr(field, '__prevent_search__', False):
             raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
-        if field in field_set:
+        if field in field_list:
             # Field traversed twice, could create infinite JOINs, DoSing Tower
             raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
-        field_set.add(field)
+        field_list.append(field)
         model = getattr(field, 'related_model', None)

-    return field, '__'.join(new_parts)
+    return field_list, '__'.join(new_parts)
+
+
+def get_field_from_path(model, path):
+    '''
+    Given a Django ORM lookup path (possibly over multiple models)
+    Returns the last field in the line, and the revised lookup path
+    ex.
+        (<IntegerField for timeout>, 'project__timeout')
+    '''
+    field_list, new_path = get_fields_from_path(model, path)
+    return (field_list[-1], new_path)
```
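The switch from `field_set` to `field_list` keeps the repeat-traversal guard while preserving the order in which fields were crossed, which the later hunks depend on. A rough, self-contained illustration of that guard; the `Field` class and `walk_path` helper below are invented stand-ins for Django's field machinery, not AWX's actual API:

```python
# Minimal sketch of the loop check above, with plain objects standing in
# for Django field instances (the real code walks model._meta relations).
class Field:
    def __init__(self, name):
        self.name = name

def walk_path(fields_by_name, path):
    field_list = []
    for name in path.split('__'):
        field = fields_by_name[name]
        if field in field_list:
            # Traversing the same field twice could build unbounded JOINs.
            raise ValueError('Loops not allowed in filters, detected on field {}.'.format(field.name))
        field_list.append(field)
    return field_list

fields = {'project': Field('project'), 'timeout': Field('timeout')}
print([f.name for f in walk_path(fields, 'project__timeout')])  # ['project', 'timeout']
# walk_path(fields, 'project__project') would raise ValueError
```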
```diff
@@ -133,7 +144,11 @@ class FieldLookupBackend(BaseFilterBackend):
         'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
         'isnull', 'search')

-    def get_field_from_lookup(self, model, lookup):
+    # A list of fields that we know can be filtered on without the possiblity
+    # of introducing duplicates
+    NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)
+
+    def get_fields_from_lookup(self, model, lookup):

         if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
             path, suffix = lookup.rsplit('__', 1)
@@ -147,11 +162,16 @@ class FieldLookupBackend(BaseFilterBackend):
         # FIXME: Could build up a list of models used across relationships, use
         # those lookups combined with request.user.get_queryset(Model) to make
         # sure user cannot query using objects he could not view.
-        field, new_path = get_field_from_path(model, path)
+        field_list, new_path = get_fields_from_path(model, path)

         new_lookup = new_path
         new_lookup = '__'.join([new_path, suffix])
-        return field, new_lookup
+        return field_list, new_lookup
+
+    def get_field_from_lookup(self, model, lookup):
+        '''Method to match return type of single field, if needed.'''
+        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
+        return (field_list[-1], new_lookup)

     def to_python_related(self, value):
         value = force_text(value)
@@ -182,7 +202,10 @@ class FieldLookupBackend(BaseFilterBackend):
         except UnicodeEncodeError:
             raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

-        field, new_lookup = self.get_field_from_lookup(model, lookup)
+        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
+        field = field_list[-1]
+
+        needs_distinct = (not all(isinstance(f, self.NO_DUPLICATES_WHITELIST) for f in field_list))

         # Type names are stored without underscores internally, but are presented and
         # and serialized over the API containing underscores so we remove `_`
@@ -211,10 +234,10 @@ class FieldLookupBackend(BaseFilterBackend):
             for rm_field in related_model._meta.fields:
                 if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
                     new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
-            return value, new_lookups
+            return value, new_lookups, needs_distinct
         else:
             value = self.value_to_python_for_field(field, value)
-            return value, new_lookup
+            return value, new_lookup, needs_distinct

     def filter_queryset(self, request, queryset, view):
         try:
@@ -225,6 +248,7 @@ class FieldLookupBackend(BaseFilterBackend):
             chain_filters = []
             role_filters = []
             search_filters = {}
+            needs_distinct = False
             # Can only have two values: 'AND', 'OR'
             # If 'AND' is used, an iterm must satisfy all condition to show up in the results.
             # If 'OR' is used, an item just need to satisfy one condition to appear in results.
@@ -256,9 +280,12 @@ class FieldLookupBackend(BaseFilterBackend):
                     search_filter_relation = 'AND'
                     values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
                     for value in values:
-                        search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
+                        search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_text(value))
                         assert isinstance(new_keys, list)
                         search_filters[search_value] = new_keys
+                    # by definition, search *only* joins across relations,
+                    # so it _always_ needs a .distinct()
+                    needs_distinct = True
                     continue

                 # Custom chain__ and or__ filters, mutually exclusive (both can
@@ -282,7 +309,9 @@ class FieldLookupBackend(BaseFilterBackend):
                 for value in values:
                     if q_int:
                         value = int(value)
-                    value, new_key = self.value_to_python(queryset.model, key, value)
+                    value, new_key, distinct = self.value_to_python(queryset.model, key, value)
+                    if distinct:
+                        needs_distinct = True
                     if q_chain:
                         chain_filters.append((q_not, new_key, value))
                     elif q_or:
@@ -332,7 +361,9 @@ class FieldLookupBackend(BaseFilterBackend):
                 else:
                     q = Q(**{k:v})
                 queryset = queryset.filter(q)
-            queryset = queryset.filter(*args).distinct()
+            queryset = queryset.filter(*args)
+            if needs_distinct:
+                queryset = queryset.distinct()
             return queryset
         except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
             raise ParseError(e.args[0])
```
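Taken together, these hunks replace the unconditional `.distinct()` with a targeted one: a lookup only forces DISTINCT when it traverses a field type outside the scalar whitelist, since only relational hops can fan out a JOIN and duplicate rows. A minimal sketch of that decision, with stub classes standing in for the Django field types:

```python
# Sketch of the conditional-distinct decision introduced above. The stub
# classes stand in for django.db.models fields; only the isinstance check
# matters here.
class CharField: pass
class IntegerField: pass
class BooleanField: pass
class ManyToManyField: pass

# Scalar fields cannot multiply result rows, so they are safe to skip.
NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)

def lookup_needs_distinct(field_list):
    """True when any traversed field could fan out the JOIN and duplicate rows."""
    return not all(isinstance(f, NO_DUPLICATES_WHITELIST) for f in field_list)

assert lookup_needs_distinct([IntegerField()]) is False           # e.g. a timeout__gt filter
assert lookup_needs_distinct([ManyToManyField(), CharField()])    # e.g. a groups__name filter
```

Skipping the blanket `.distinct()` avoids paying a sort or hash cost in PostgreSQL on simple scalar filters, and this change plausibly relates to the 9.3.0 changelog entry about smart inventory filters mistakenly returning duplicate hosts.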
```diff
@@ -5,10 +5,12 @@
 import inspect
 import logging
 import time
 import uuid
+import urllib.parse

 # Django
 from django.conf import settings
+from django.core.cache import cache
 from django.db import connection
 from django.db.models.fields import FieldDoesNotExist
 from django.db.models.fields.related import OneToOneRel
@@ -192,7 +194,7 @@ class APIView(views.APIView):
             response.data['detail'] += ' To establish a login session, visit /api/login/.'
             logger.info(status_msg)
         else:
-            logger.warn(status_msg)
+            logger.warning(status_msg)
         response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
         time_started = getattr(self, 'time_started', None)
         response['X-API-Node'] = settings.CLUSTER_HOST_ID
@@ -548,6 +550,15 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
             })
         return d

+    def get_queryset(self):
+        if hasattr(self, 'parent_key'):
+            # Prefer this filtering because ForeignKey allows us more assumptions
+            parent = self.get_parent_object()
+            self.check_parent_access(parent)
+            qs = self.request.user.get_queryset(self.model)
+            return qs.filter(**{self.parent_key: parent})
+        return super(SubListCreateAPIView, self).get_queryset()
+
     def create(self, request, *args, **kwargs):
         # If the object ID was not specified, it probably doesn't exist in the
         # DB yet. We want to see if we can create it. The URL may choose to
```
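The new `SubListCreateAPIView.get_queryset` prefers direct ForeignKey filtering when the view declares a `parent_key`: the user's visible rows are narrowed by one key comparison instead of the generic relationship traversal. A toy version of that fast path, with plain dicts standing in for querysets (`sublist_queryset` is invented for this sketch):

```python
# Sketch of the parent_key fast path: filter the rows a user may see down
# to those pointing at a single parent via a known foreign-key column.
def sublist_queryset(visible_rows, parent_key, parent_id):
    """Keep only the rows owned by the given parent."""
    return [row for row in visible_rows if row.get(parent_key) == parent_id]

rows = [
    {'id': 1, 'inventory': 7},
    {'id': 2, 'inventory': 9},
    {'id': 3, 'inventory': 7},
]
print(sublist_queryset(rows, 'inventory', 7))  # rows 1 and 3
```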
```diff
@@ -964,6 +975,11 @@ class CopyAPIView(GenericAPIView):
         if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
             new_obj.admin_role.members.add(request.user)
         if sub_objs:
+            # store the copied object dict into memcached, because it's
+            # often too large for postgres' notification bus
+            # (which has a default maximum message size of 8k)
+            key = 'deep-copy-{}'.format(str(uuid.uuid4()))
+            cache.set(key, sub_objs, timeout=3600)
             permission_check_func = None
             if hasattr(type(self), 'deep_copy_permission_check_func'):
                 permission_check_func = (
@@ -971,7 +987,7 @@ class CopyAPIView(GenericAPIView):
             )
             trigger_delayed_deep_copy(
                 self.model.__module__, self.model.__name__,
-                obj.pk, new_obj.pk, request.user.pk, sub_objs,
+                obj.pk, new_obj.pk, request.user.pk, key,
                 permission_check_func=permission_check_func
             )
             serializer = self._get_copy_return_serializer(new_obj)
```
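The `CopyAPIView` hunks show a stash-then-signal pattern: the potentially large `sub_objs` payload goes into the shared cache under a random key, and only the short key travels through the task-dispatch path, whose underlying PostgreSQL notification bus has a default message-size limit of about 8 kB per the in-diff comment. A self-contained sketch of the pattern; `FakeCache`, `enqueue_deep_copy`, and `worker` are invented here, not AWX's actual API:

```python
import uuid

class FakeCache:
    """Stand-in for django.core.cache.cache (illustrative only)."""
    def __init__(self):
        self._data = {}
    def set(self, key, value, timeout=None):
        self._data[key] = value          # timeout ignored in this sketch
    def get(self, key):
        return self._data.get(key)

cache = FakeCache()

def enqueue_deep_copy(sub_objs, trigger):
    # Large payload goes to the cache; only the small key crosses the bus.
    key = 'deep-copy-{}'.format(uuid.uuid4())
    cache.set(key, sub_objs, timeout=3600)
    trigger(key)

def worker(key):
    sub_objs = cache.get(key)            # the task fetches the payload back
    print('copying', len(sub_objs), 'sub-objects')

enqueue_deep_copy([{'id': n} for n in range(1000)], worker)
```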
@@ -20,6 +20,7 @@ from rest_framework.fields import JSONField as DRFJSONField
|
||||
from rest_framework.request import clone_request
|
||||
|
||||
# AWX
|
||||
from awx.api.fields import ChoiceNullField
|
||||
from awx.main.fields import JSONField, ImplicitRoleField
|
||||
from awx.main.models import InventorySource, NotificationTemplate
|
||||
from awx.main.scheduler.kubernetes import PodManager
|
||||
@@ -59,7 +60,8 @@ class Metadata(metadata.SimpleMetadata):
|
||||
'type': _('Data type for this {}.'),
|
||||
'url': _('URL for this {}.'),
|
||||
'related': _('Data structure with URLs of related resources.'),
|
||||
'summary_fields': _('Data structure with name/description for related resources.'),
|
||||
'summary_fields': _('Data structure with name/description for related resources. '
|
||||
'The output for some objects may be limited for performance reasons.'),
|
||||
'created': _('Timestamp when this {} was created.'),
|
||||
'modified': _('Timestamp when this {} was last modified.'),
|
||||
}
|
||||
@@ -96,7 +98,15 @@ class Metadata(metadata.SimpleMetadata):
|
||||
field_info['children'] = self.get_serializer_info(field)
|
||||
|
||||
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
|
||||
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
|
||||
choices = [
|
||||
(choice_value, choice_name) for choice_value, choice_name in field.choices.items()
|
||||
]
|
||||
if not any(choice in ('', None) for choice, _ in choices):
|
||||
if field.allow_blank:
|
||||
choices = [("", "---------")] + choices
|
||||
if field.allow_null and not isinstance(field, ChoiceNullField):
|
||||
choices = [(None, "---------")] + choices
|
||||
field_info['choices'] = choices
|
||||
|
||||
# Indicate if a field is write-only.
|
||||
if getattr(field, 'write_only', False):
|
||||
|
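For reference, the new metadata logic only prepends "---------" placeholders when the field permits blank/null and no such choice already exists. A standalone sketch of that rule, using plain tuples in place of DRF field objects:

def annotate_choices(choices, allow_blank=False, allow_null=False):
    # Mirror of the hunk above: skip the placeholders entirely if a
    # blank or null choice is already present in the list.
    if not any(choice in ('', None) for choice, _ in choices):
        if allow_blank:
            choices = [("", "---------")] + choices
        if allow_null:
            choices = [(None, "---------")] + choices
    return choices

# annotate_choices([('run', 'Run')], allow_blank=True)
# -> [('', '---------'), ('run', 'Run')]
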
@@ -72,6 +72,7 @@ from awx.main.utils import (
prefetch_page_capabilities, get_external_account, truncate_stdout,
)
from awx.main.utils.filters import SmartFilter
from awx.main.utils.named_url_graph import reset_counters
from awx.main.redact import UriCleaner, REPLACE_STR

from awx.main.validators import vars_validate_or_raise
@@ -98,26 +99,19 @@ SUMMARIZABLE_FK_FIELDS = {
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'host': DEFAULT_SUMMARY_FIELDS,
'group': DEFAULT_SUMMARY_FIELDS,
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
@@ -125,7 +119,7 @@ SUMMARIZABLE_FK_FIELDS = {
'workflow_approval': DEFAULT_SUMMARY_FIELDS + ('timeout',),
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error', 'canceled_on'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
@@ -139,7 +133,7 @@ SUMMARIZABLE_FK_FIELDS = {
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'webhook_credential': DEFAULT_SUMMARY_FIELDS,
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
'credential_type': DEFAULT_SUMMARY_FIELDS,
}
@@ -354,6 +348,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl

def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
reset_counters()
named_url = node.generate_named_url(obj)
url_units[4] = named_url
return '/'.join(url_units)
@@ -649,7 +644,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
_capabilities_prefetch = [
'admin', 'execute',
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
'workflowjobtemplate.organization.workflow_admin']}
'organization.workflow_admin']}
]

class Meta:
@@ -719,7 +714,7 @@ class UnifiedJobSerializer(BaseSerializer):
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
@@ -891,6 +886,9 @@ class UserSerializer(BaseSerializer):
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
extra_kwargs = {
'last_login': {'read_only': True}
}

def to_representation(self, obj):
ret = super(UserSerializer, self).to_representation(obj)
@@ -1253,6 +1251,7 @@ class OrganizationSerializer(BaseSerializer):
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:organization_job_templates_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
@@ -1281,6 +1280,14 @@ class OrganizationSerializer(BaseSerializer):
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]

# Organization participation roles (admin, member) can't be assigned
# to a team. This provides a hint to the ui so it can know to not
# display these roles for team role selection.
for key in ('admin_role', 'member_role',):
if key in summary_dict.get('object_roles', {}):
summary_dict['object_roles'][key]['user_only'] = True

return summary_dict

def validate(self, attrs):
@@ -1394,12 +1401,6 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)

organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization

if 'allow_override' in attrs and self.instance:
# case where user is turning off this project setting
if self.instance.allow_override and not attrs['allow_override']:
@@ -1415,11 +1416,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
' '.join([str(pk) for pk in used_by])
)})

view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
# Only allow super users to create orgless projects
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
if get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
@@ -1549,20 +1546,15 @@ class InventorySerializer(BaseSerializerWithVariables):
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)


class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
'has_inventory_sources', 'total_inventory_sources',
'inventory_sources_with_failures', 'insights_credential',
'pending_deletion',)

def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
@@ -1612,7 +1604,7 @@ class InventorySerializer(BaseSerializerWithVariables):
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
raise models.base.ValidationError(str(e))
return host_filter

def validate(self, attrs):
@@ -1644,6 +1636,9 @@ class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']

has_active_failures = serializers.SerializerMethodField()
has_inventory_sources = serializers.SerializerMethodField()

class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
@@ -1757,6 +1752,14 @@ class HostSerializer(BaseSerializerWithVariables):
ret['last_job_host_summary'] = None
return ret

def get_has_active_failures(self, obj):
return bool(
obj.last_job_host_summary and obj.last_job_host_summary.failed
)

def get_has_inventory_sources(self, obj):
return obj.inventory_sources.exists()


class AnsibleFactsSerializer(BaseSerializer):
class Meta:
@@ -1769,17 +1772,10 @@ class AnsibleFactsSerializer(BaseSerializer):
class GroupSerializer(BaseSerializerWithVariables):
show_capabilities = ['copy', 'edit', 'delete']
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)

class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
fields = ('*', 'inventory', 'variables')

def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
@@ -2123,7 +2119,13 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)

if get_field_from_model_or_attrs('source') != 'scm':
if get_field_from_model_or_attrs('source') == 'scm':
if (('source' in attrs or 'source_project' in attrs) and
get_field_from_model_or_attrs('source_project') is None):
raise serializers.ValidationError(
{"source_project": _("Project required for scm type sources.")}
)
else:
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
@@ -2740,7 +2742,8 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
fields = ('*', 'job_type', 'inventory', 'project', 'playbook', 'scm_branch',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
'use_fact_cache', 'organization',)
read_only_fields = ('organization',)

def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
@@ -2755,6 +2758,8 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
if obj.organization_id:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization_id})
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
@@ -2823,7 +2828,7 @@ class JobTemplateMixin(object):
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
'id': x.id, 'status': x.status, 'finished': x.finished, 'canceled_on': x.canceled_on,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
@@ -2901,6 +2906,10 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
)
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
if obj.organization_id:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization_id})
if obj.webhook_credential_id:
res['webhook_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.webhook_credential_id})
return res

def validate(self, attrs):
@@ -3206,7 +3215,7 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs['allow_blank'] = False
field_kwargs.pop('max_length', None)
return field_class, field_kwargs

@@ -3391,6 +3400,8 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
if obj.webhook_credential_id:
res['webhook_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.webhook_credential_id})
return res

def validate_extra_vars(self, value):
@@ -3685,7 +3696,8 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',
'identifier',)

def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
@@ -3725,7 +3737,7 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
model = WorkflowJobNode
fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
'all_parents_must_converge', 'do_not_run', 'identifier')

def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
@@ -3833,7 +3845,7 @@ class JobEventSerializer(BaseSerializer):
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')

@@ -3842,13 +3854,9 @@ class JobEventSerializer(BaseSerializer):
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res

def get_summary_fields(self, obj):
@@ -4060,6 +4068,13 @@ class JobLaunchSerializer(BaseSerializer):
**attrs)
self._ignored_fields = rejected

# Basic validation - cannot run a playbook without a playbook
if not template.project:
errors['project'] = _("A project is required to run a job.")
elif template.project.status in ('error', 'failed'):
errors['playbook'] = _("Missing a revision to run due to failed project update.")

# cannot run a playbook without an inventory
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:

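The launch validation above accumulates per-field messages in an errors dict rather than raising on the first failure. A compact sketch of the same pattern, with illustrative attribute names:

from rest_framework import serializers

def validate_launch(template):
    errors = {}
    if not template.project:
        errors['project'] = "A project is required to run a job."
    elif template.project.status in ('error', 'failed'):
        errors['playbook'] = "Missing a revision to run due to failed project update."
    if template.inventory and template.inventory.pending_deletion:
        errors['inventory'] = "The inventory associated with this Job Template is being deleted."
    if errors:
        # Raising once with the full dict reports every problem together.
        raise serializers.ValidationError(errors)
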
@@ -10,6 +10,7 @@ from awx.api.views import (
OrganizationAdminsList,
OrganizationInventoriesList,
OrganizationProjectsList,
OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList,
OrganizationCredentialList,
@@ -33,6 +34,7 @@ urls = [
url(r'^(?P<pk>[0-9]+)/admins/$', OrganizationAdminsList.as_view(), name='organization_admins_list'),
url(r'^(?P<pk>[0-9]+)/inventories/$', OrganizationInventoriesList.as_view(), name='organization_inventories_list'),
url(r'^(?P<pk>[0-9]+)/projects/$', OrganizationProjectsList.as_view(), name='organization_projects_list'),
url(r'^(?P<pk>[0-9]+)/job_templates/$', OrganizationJobTemplatesList.as_view(), name='organization_job_templates_list'),
url(r'^(?P<pk>[0-9]+)/workflow_job_templates/$', OrganizationWorkflowJobTemplatesList.as_view(), name='organization_workflow_job_templates_list'),
url(r'^(?P<pk>[0-9]+)/teams/$', OrganizationTeamsList.as_view(), name='organization_teams_list'),
url(r'^(?P<pk>[0-9]+)/credentials/$', OrganizationCredentialList.as_view(), name='organization_credential_list'),

@@ -34,7 +34,9 @@ from awx.api.views import (
OAuth2ApplicationDetail,
)

from awx.api.views.metrics import MetricsView
from awx.api.views.metrics import (
MetricsView,
)

from .organization import urls as organization_urls
from .user import urls as user_urls

@@ -81,7 +81,8 @@ from awx.main.utils import (
getattrd,
get_pk_from_dict,
schedule_task_manager,
ignore_inventory_computed_fields
ignore_inventory_computed_fields,
set_environ
)
from awx.main.utils.encryption import encrypt_value
from awx.main.utils.filters import SmartFilter
@@ -110,6 +111,7 @@ from awx.api.views.organization import ( # noqa
OrganizationUsersList,
OrganizationAdminsList,
OrganizationProjectsList,
OrganizationJobTemplatesList,
OrganizationWorkflowJobTemplatesList,
OrganizationTeamsList,
OrganizationActivityStreamList,
@@ -204,20 +206,15 @@ class DashboardView(APIView):
'failed': ec2_inventory_failed.count()}

user_groups = get_user_queryset(request.user, models.Group)
groups_job_failed = (
models.Group.objects.filter(hosts_with_active_failures__gt=0) | models.Group.objects.filter(groups_with_active_failures__gt=0)
).count()
groups_inventory_failed = models.Group.objects.filter(inventory_sources__last_job_failed=True).count()
data['groups'] = {'url': reverse('api:group_list', request=request),
'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True",
'total': user_groups.count(),
'job_failed': groups_job_failed,
'inventory_failed': groups_inventory_failed}

user_hosts = get_user_queryset(request.user, models.Host)
user_hosts_failed = user_hosts.filter(has_active_failures=True)
user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True)
data['hosts'] = {'url': reverse('api:host_list', request=request),
'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True",
'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True",
'total': user_hosts.count(),
'failed': user_hosts_failed.count()}

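The dashboard hunk above replaces the denormalized has_active_failures flag with a lookup through the related summary record. Sketched side by side, assuming a Django queryset named user_hosts:

# Before: failure state read from a flag that had to be kept in sync.
failed = user_hosts.filter(has_active_failures=True)

# After: failure state follows the relation to the last job host
# summary, so no flag maintenance is needed.
failed = user_hosts.filter(last_job_host_summary__failed=True)
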
@@ -1611,7 +1608,8 @@ class HostInsights(GenericAPIView):

def _call_insights_api(self, url, session, headers):
try:
res = session.get(url, headers=headers, timeout=120)
with set_environ(**settings.AWX_TASK_ENV):
res = session.get(url, headers=headers, timeout=120)
except requests.exceptions.SSLError:
raise BadGateway(_('SSLError while trying to connect to {}').format(url))
except requests.exceptions.Timeout:
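Several hunks in this set wrap outbound HTTP calls in set_environ(**settings.AWX_TASK_ENV). One plausible shape for that helper, sketched here as an assumption rather than the actual awx.main.utils implementation:

import os
from contextlib import contextmanager

@contextmanager
def set_environ(**environ):
    # Apply the given variables (e.g. proxy settings) for the duration
    # of the block, then restore the previous environment exactly.
    old = {k: os.environ.get(k) for k in environ}
    os.environ.update(environ)
    try:
        yield
    finally:
        for k, v in old.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
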
@@ -2150,7 +2148,7 @@ class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
host__inventory_sources=inv_source
).delete()
r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r


@@ -2177,7 +2175,7 @@ class InventorySourceGroupsList(SubListDestroyAPIView):
group__inventory_sources=inv_source
).delete()
r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r


@@ -3268,7 +3266,7 @@ class WorkflowJobRelaunch(GenericAPIView):
jt = obj.job_template
if not jt:
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
elif not jt.inventory or min(jt.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.'))
new_workflow_job = obj.create_relaunch_workflow_job()
new_workflow_job.signal_start()
@@ -3819,6 +3817,12 @@ class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
relationship = 'hosts'
name = _('Job Event Hosts List')

def get_queryset(self):
parent_event = self.get_parent_object()
self.check_parent_access(parent_event)
qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
return qs


class BaseJobEventsList(NoTruncateMixin, SubListAPIView):

@@ -3841,8 +3845,7 @@ class HostJobEventsList(BaseJobEventsList):
def get_queryset(self):
parent_obj = self.get_parent_object()
self.check_parent_access(parent_obj)
qs = self.request.user.get_queryset(self.model).filter(
Q(host=parent_obj) | Q(hosts=parent_obj)).distinct()
qs = self.request.user.get_queryset(self.model).filter(host=parent_obj)
return qs


@@ -3858,9 +3861,7 @@ class JobJobEventsList(BaseJobEventsList):
def get_queryset(self):
job = self.get_parent_object()
self.check_parent_access(job)
qs = job.job_events
qs = qs.select_related('host')
qs = qs.prefetch_related('hosts', 'children')
qs = job.job_events.select_related('host').order_by('start_line')
return qs.all()


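The JobJobEventsList change above trades two prefetches for a single JOIN plus a stable sort. Illustrative usage, assuming a job object with a job_events relation:

qs = job.job_events.select_related('host').order_by('start_line')
for event in qs:
    # select_related('host') means this attribute access does not
    # issue an extra query per event.
    print(event.start_line, event.host)
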
@@ -4303,7 +4304,7 @@ class NotificationTemplateTest(GenericAPIView):
msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
if obj.notification_type in ('email', 'pagerduty'):
body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
elif obj.notification_type == 'webhook':
elif obj.notification_type in ('webhook', 'grafana'):
body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
else:
body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}

@@ -39,3 +39,4 @@ class MetricsView(APIView):
if (request.user.is_superuser or request.user.is_system_auditor):
return Response(metrics().decode('UTF-8'))
raise PermissionDenied()


@@ -4,10 +4,7 @@
import dateutil
import logging

from django.db.models import (
Count,
F,
)
from django.db.models import Count
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
@@ -175,28 +172,18 @@ class OrganizationCountsMixin(object):

inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
project_qs = Project.accessible_objects(self.request.user, 'read_role')
jt_qs = JobTemplate.accessible_objects(self.request.user, 'read_role')

# Produce counts of Foreign Key relationships
db_results['inventories'] = inv_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
db_results['inventories'] = inv_qs.values('organization').annotate(Count('organization')).order_by('organization')

db_results['teams'] = Team.accessible_objects(
self.request.user, 'read_role').values('organization').annotate(
Count('organization')).order_by('organization')

JT_project_reference = 'project__organization'
JT_inventory_reference = 'inventory__organization'
db_results['job_templates_project'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').exclude(
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
Count(JT_project_reference)).order_by(JT_project_reference)
db_results['job_templates'] = jt_qs.values('organization').annotate(Count('organization')).order_by('organization')

db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
Count(JT_inventory_reference)).order_by(JT_inventory_reference)

db_results['projects'] = project_qs\
.values('organization').annotate(Count('organization')).order_by('organization')
db_results['projects'] = project_qs.values('organization').annotate(Count('organization')).order_by('organization')

# Other members and admins of organization are always viewable
db_results['users'] = org_qs.annotate(
@@ -212,11 +199,7 @@ class OrganizationCountsMixin(object):
'admins': 0, 'projects': 0}

for res, count_qs in db_results.items():
if res == 'job_templates_project':
org_reference = JT_project_reference
elif res == 'job_templates_inventory':
org_reference = JT_inventory_reference
elif res == 'users':
if res == 'users':
org_reference = 'id'
else:
org_reference = 'organization'
@@ -229,14 +212,6 @@ class OrganizationCountsMixin(object):
continue
count_context[org_id][res] = entry['%s__count' % org_reference]

# Combine the counts for job templates by project and inventory
for org in org_id_list:
org_id = org['id']
count_context[org_id]['job_templates'] = 0
for related_path in ['job_templates_project', 'job_templates_inventory']:
if related_path in count_context[org_id]:
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)

full_context['related_field_counts'] = count_context

return full_context

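The counting code above relies on the values()/annotate() grouping idiom. A reduced sketch of that idiom, assuming a Project model with an organization ForeignKey:

from django.db.models import Count

counts = (Project.objects
          .values('organization')            # GROUP BY organization
          .annotate(Count('organization'))   # COUNT(*) per group
          .order_by('organization'))
# -> [{'organization': 1, 'organization__count': 4}, ...]
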
@@ -20,7 +20,7 @@ from awx.main.models import (
Role,
User,
Team,
InstanceGroup,
InstanceGroup
)
from awx.api.generics import (
ListCreateAPIView,
@@ -28,6 +28,7 @@ from awx.api.generics import (
SubListAPIView,
SubListCreateAttachDetachAPIView,
SubListAttachDetachAPIView,
SubListCreateAPIView,
ResourceAccessList,
BaseUsersList,
)
@@ -35,14 +36,13 @@ from awx.api.generics import (
from awx.api.serializers import (
OrganizationSerializer,
InventorySerializer,
ProjectSerializer,
UserSerializer,
TeamSerializer,
ActivityStreamSerializer,
RoleSerializer,
NotificationTemplateSerializer,
WorkflowJobTemplateSerializer,
InstanceGroupSerializer,
ProjectSerializer, JobTemplateSerializer, WorkflowJobTemplateSerializer
)
from awx.api.views.mixin import (
RelatedJobsPreventDeleteMixin,
@@ -94,7 +94,7 @@ class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPI
org_counts['projects'] = Project.accessible_objects(**access_kwargs).filter(
organization__id=org_id).count()
org_counts['job_templates'] = JobTemplate.accessible_objects(**access_kwargs).filter(
project__organization__id=org_id).count()
organization__id=org_id).count()

full_context['related_field_counts'] = {}
full_context['related_field_counts'][org_id] = org_counts
@@ -128,21 +128,27 @@ class OrganizationAdminsList(BaseUsersList):
ordering = ('username',)


class OrganizationProjectsList(SubListCreateAttachDetachAPIView):
class OrganizationProjectsList(SubListCreateAPIView):

model = Project
serializer_class = ProjectSerializer
parent_model = Organization
relationship = 'projects'
parent_key = 'organization'


class OrganizationWorkflowJobTemplatesList(SubListCreateAttachDetachAPIView):
class OrganizationJobTemplatesList(SubListCreateAPIView):

model = JobTemplate
serializer_class = JobTemplateSerializer
parent_model = Organization
parent_key = 'organization'


class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):

model = WorkflowJobTemplate
serializer_class = WorkflowJobTemplateSerializer
parent_model = Organization
relationship = 'workflows'
parent_key = 'organization'



@@ -20,6 +20,7 @@ from rest_framework import status
import requests

from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.ha import is_ha_environment
from awx.main.utils import (
get_awx_version,
@@ -37,6 +38,7 @@ from awx.main.models import (
InstanceGroup,
JobTemplate,
)
from awx.main.utils import set_environ

logger = logging.getLogger('awx.api.views.root')

@@ -190,7 +192,8 @@ class ApiV2SubscriptionView(APIView):
data['rh_password'] = settings.REDHAT_PASSWORD
try:
user, pw = data.get('rh_username'), data.get('rh_password')
validated = get_licenser().validate_rh(user, pw)
with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw)
if user:
settings.REDHAT_USERNAME = data['rh_username']
if pw:
@@ -202,10 +205,15 @@ class ApiV2SubscriptionView(APIView):
getattr(getattr(exc, 'response', None), 'status_code', None) == 401
):
msg = _("The provided credentials are invalid (HTTP 401).")
if isinstance(exc, (ValueError, OSError)) and exc.args:
elif isinstance(exc, requests.exceptions.ProxyError):
msg = _("Unable to connect to proxy server.")
elif isinstance(exc, requests.exceptions.ConnectionError):
msg = _("Could not connect to subscription service.")
elif isinstance(exc, (ValueError, OSError)) and exc.args:
msg = exc.args[0]
logger.exception(smart_text(u"Invalid license submitted."),
extra=dict(actor=request.user.username))
else:
logger.exception(smart_text(u"Invalid license submitted."),
extra=dict(actor=request.user.username))
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)

return Response(validated)
@@ -302,7 +310,8 @@ class ApiV2ConfigView(APIView):
# If the license is valid, write it to the database.
if license_data_validated['valid_key']:
settings.LICENSE = license_data
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
if not settings_registry.is_setting_read_only('TOWER_URL_BASE'):
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
return Response(license_data_validated)

logger.warning(smart_text(u"Invalid license submitted."),

awx/asgi.py
@@ -2,14 +2,15 @@
# All Rights Reserved.
import os
import logging
import django
from awx import __version__ as tower_version

# Prepare the AWX environment.
from awx import prepare_env, MODE
prepare_env() # NOQA

from django.core.wsgi import get_wsgi_application # NOQA
from channels.asgi import get_channel_layer
from channels.routing import get_default_application


"""
ASGI config for AWX project.
@@ -32,6 +33,5 @@ if MODE == 'production':


os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings")


channel_layer = get_channel_layer()
django.setup()
channel_layer = get_default_application()

@@ -11,7 +11,7 @@ from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.fields import ( # noqa
BooleanField, CharField, ChoiceField, DictField, EmailField,
BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField,
IntegerField, ListField, NullBooleanField
)


@@ -1,14 +1,9 @@
# Python
from collections import namedtuple
import contextlib
import logging
import re
import sys
import threading
import time
import traceback
import urllib.parse
from io import StringIO

# Django
from django.conf import LazySettings
@@ -28,8 +23,6 @@ from awx.conf import settings_registry
from awx.conf.models import Setting
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field

import cachetools

# FIXME: Gracefully handle when settings are accessed before the database is
# ready (or during migrations).

@@ -62,15 +55,6 @@ SETTING_CACHE_DEFAULTS = True
__all__ = ['SettingsWrapper', 'get_settings_to_cache', 'SETTING_CACHE_NOTSET']


def normalize_broker_url(value):
parts = value.rsplit('@', 1)
match = re.search('(amqp://[^:]+:)(.*)', parts[0])
if match:
prefix, password = match.group(1), match.group(2)
parts[0] = prefix + urllib.parse.quote(password)
return '@'.join(parts)


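For reference, this is what the removed helper did with a password containing ':' and '@' characters (the worked example is taken from the comment that also disappears later in this diff):

normalize_broker_url('amqp://guest:a@ns:ibl3#@localhost:5672//')
# -> 'amqp://guest:a%40ns%3Aibl3%23@localhost:5672//'
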
@contextlib.contextmanager
def _ctit_db_wrapper(trans_safe=False):
'''
@@ -91,42 +75,11 @@ def _ctit_db_wrapper(trans_safe=False):
transaction.set_rollback(False)
yield
except DBError:
# We want the _full_ traceback with the context
# First we get the current call stack, which constitutes the "top",
# it has the context up to the point where the context manager is used
top_stack = StringIO()
traceback.print_stack(file=top_stack)
top_lines = top_stack.getvalue().strip('\n').split('\n')
top_stack.close()
# Get "bottom" stack from the local error that happened
# inside of the "with" block this wraps
exc_type, exc_value, exc_traceback = sys.exc_info()
bottom_stack = StringIO()
traceback.print_tb(exc_traceback, file=bottom_stack)
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
# Glue together top and bottom where overlap is found
bottom_cutoff = 0
for i, line in enumerate(bottom_lines):
if line in top_lines:
# start of overlapping section, take overlap from bottom
top_lines = top_lines[:top_lines.index(line)]
bottom_cutoff = i
break
bottom_lines = bottom_lines[bottom_cutoff:]
tb_lines = top_lines + bottom_lines

tb_string = '\n'.join(
['Traceback (most recent call last):'] +
tb_lines +
['{}: {}'.format(exc_type.__name__, str(exc_value))]
)
bottom_stack.close()
# Log the combined stack
if trans_safe:
if 'check_migrations' not in sys.argv:
logger.debug('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
logger.exception('Database settings are not available, using defaults.')
else:
logger.debug('Error modifying something related to database settings.\n{}'.format(tb_string))
logger.exception('Error modifying something related to database settings.')
finally:
if trans_safe and is_atomic and rollback_set:
transaction.set_rollback(rollback_set)
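The replacement for all of the removed stack-splicing is logger.exception(), which records the active traceback on its own. A minimal sketch; risky_database_access is a placeholder for the wrapped block:

import logging
logger = logging.getLogger(__name__)

try:
    risky_database_access()
except Exception:
    # Called from an except block, logger.exception() appends the
    # current traceback to the message automatically.
    logger.exception('Database settings are not available, using defaults.')
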
@@ -138,12 +91,13 @@ def filter_sensitive(registry, key, value):
return value


# settings.__getattr__ is called *constantly*, and the LOG_AGGREGATOR_ ones are
# so ubiquitous when external logging is enabled that they should be kept in memory
# with a short TTL to avoid even having to contact memcached
# the primary use case for this optimization is the callback receiver
# when external logging is enabled
LOGGING_SETTINGS_CACHE = cachetools.TTLCache(maxsize=50, ttl=1)
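A quick illustration of the TTLCache introduced above: entries are served from process memory and silently evicted after the ttl elapses.

import cachetools

cache = cachetools.TTLCache(maxsize=50, ttl=1)
cache['LOG_AGGREGATOR_ENABLED'] = True
cache.get('LOG_AGGREGATOR_ENABLED')  # -> True within the same second
# After ttl seconds, cache.get() returns None and the value is
# re-fetched from the slower backing store.
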
class TransientSetting(object):

__slots__ = ('pk', 'value')

def __init__(self, pk, value):
self.pk = pk
self.value = value


class EncryptedCacheProxy(object):
@@ -173,7 +127,6 @@ class EncryptedCacheProxy(object):
def get(self, key, **kwargs):
value = self.cache.get(key, **kwargs)
value = self._handle_encryption(self.decrypter, key, value)
logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
return value

def set(self, key, value, log=True, **kwargs):
@@ -196,8 +149,6 @@ class EncryptedCacheProxy(object):
self.set(key, value, log=False, **kwargs)

def _handle_encryption(self, method, key, value):
TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])

if value is not empty and self.registry.is_setting_encrypted(key):
# If the setting exists in the database, we'll use its primary key
# as part of the AES key when encrypting/decrypting
@@ -447,28 +398,13 @@ class SettingsWrapper(UserSettingsHolder):
return self._get_default('SETTINGS_MODULE')

def __getattr__(self, name):
if name.startswith('LOG_AGGREGATOR_'):
cached = LOGGING_SETTINGS_CACHE.get(name)
if cached:
return cached
value = empty
if name in self.all_supported_settings:
with _ctit_db_wrapper(trans_safe=True):
value = self._get_local(name)
if value is not empty:
if name.startswith('LOG_AGGREGATOR_'):
LOGGING_SETTINGS_CACHE[name] = value
return value
value = self._get_default(name)
# sometimes users specify RabbitMQ passwords that contain
# unescaped : and @ characters that confused urlparse, e.g.,
# amqp://guest:a@ns:ibl3#@localhost:5672//
#
# detect these scenarios, and automatically escape the user's
# password so it just works
if name == 'BROKER_URL':
value = normalize_broker_url(value)
return value
return self._get_default(name)

def _set_local(self, name, value):
field = self.registry.get_setting_field(name)

[4 file diffs suppressed because they are too large]
awx/locale/zh/LC_MESSAGES/django.po (new file, 6083 lines; diff suppressed because it is too large)
@@ -307,7 +307,7 @@ class BaseAccess(object):

return True # User has access to both, permission check passed

def check_license(self, add_host_name=None, feature=None, check_expiration=True):
def check_license(self, add_host_name=None, feature=None, check_expiration=True, quiet=False):
validation_info = get_licenser().validate()
if validation_info.get('license_type', 'UNLICENSED') == 'open':
return
@@ -317,8 +317,10 @@ class BaseAccess(object):
validation_info['time_remaining'] = 99999999
validation_info['grace_period_remaining'] = 99999999

report_violation = lambda message: logger.error(message)

if quiet:
report_violation = lambda message: None
else:
report_violation = lambda message: logger.warning(message)
if (
validation_info.get('trial', False) is True or
validation_info['instance_count'] == 10 # basic 10 license
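The quiet flag above swaps the violation reporter for a no-op instead of branching at every call site. The same pattern in isolation, with an illustrative factory name:

import logging
logger = logging.getLogger(__name__)

def make_reporter(quiet=False):
    # Violations are still computed either way; quiet only controls
    # whether they reach the log.
    if quiet:
        return lambda message: None
    return lambda message: logger.warning(message)
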
@@ -787,7 +789,6 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
return self.user in obj.admin_role

def can_delete(self, obj):
self.check_license(check_expiration=False)
is_change_possible = self.can_change(obj, None)
if not is_change_possible:
return False
@@ -907,7 +908,7 @@ class HostAccess(BaseAccess):
model = Host
select_related = ('created_by', 'modified_by', 'inventory',
'last_job__job_template', 'last_job_host_summary__job',)
prefetch_related = ('groups',)
prefetch_related = ('groups', 'inventory_sources')

def filtered_queryset(self):
return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))
@@ -1409,7 +1410,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
'''

model = JobTemplate
select_related = ('created_by', 'modified_by', 'inventory', 'project',
select_related = ('created_by', 'modified_by', 'inventory', 'project', 'organization',
'next_schedule',)
prefetch_related = (
'instance_groups',
@@ -1433,16 +1434,11 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
Users who are able to create deploy jobs can also run normal and check (dry run) jobs.
'''
if not data: # So the browseable API will work
return (
Project.accessible_objects(self.user, 'use_role').exists() or
Inventory.accessible_objects(self.user, 'use_role').exists())
return Organization.accessible_objects(self.user, 'job_template_admin_role').exists()

# if reference_obj is provided, determine if it can be copied
reference_obj = data.get('reference_obj', None)

if 'survey_enabled' in data and data['survey_enabled']:
self.check_license(feature='surveys')

if self.user.is_superuser:
return True

@@ -1502,22 +1498,28 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
return self.user in obj.execute_role

def can_change(self, obj, data):
data_for_change = data
if self.user not in obj.admin_role and not self.user.is_superuser:
return False
if data is not None:
data = dict(data)
if data is None:
return True

if self.changes_are_non_sensitive(obj, data):
if 'survey_enabled' in data and obj.survey_enabled != data['survey_enabled'] and data['survey_enabled']:
self.check_license(feature='surveys')
return True
# standard type of check for organization - cannot change the value
# unless possessing the respective job_template_admin_role, otherwise non-blocking
if not self.check_related('organization', Organization, data, obj=obj, role_field='job_template_admin_role'):
return False

for required_field in ('inventory', 'project'):
required_obj = getattr(obj, required_field, None)
if required_field not in data_for_change and required_obj is not None:
data_for_change[required_field] = required_obj.pk
return self.can_read(obj) and (self.can_add(data_for_change) if data is not None else True)
data = dict(data)

if self.changes_are_non_sensitive(obj, data):
return True

for required_field, cls in (('inventory', Inventory), ('project', Project)):
is_mandatory = True
if not getattr(obj, '{}_id'.format(required_field)):
is_mandatory = False
if not self.check_related(required_field, cls, data, obj=obj, role_field='use_role', mandatory=is_mandatory):
return False
return True

def changes_are_non_sensitive(self, obj, data):
'''
@@ -1552,9 +1554,9 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == "instance_groups":
if not obj.project.organization:
if not obj.organization:
return False
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.project.organization.admin_role
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return self.user in obj.admin_role and self.user in sub_obj.use_role
return super(JobTemplateAccess, self).can_attach(
@@ -1585,6 +1587,7 @@ class JobAccess(BaseAccess):
select_related = ('created_by', 'modified_by', 'job_template', 'inventory',
'project', 'project_update',)
prefetch_related = (
'organization',
'unified_job_template',
'instance_group',
'credentials__credential_type',
@@ -1605,42 +1608,19 @@ class JobAccess(BaseAccess):

return qs.filter(
Q(job_template__in=JobTemplate.accessible_objects(self.user, 'read_role')) |
Q(inventory__organization__in=org_access_qs) |
Q(project__organization__in=org_access_qs)).distinct()

def related_orgs(self, obj):
orgs = []
if obj.inventory and obj.inventory.organization:
orgs.append(obj.inventory.organization)
if obj.project and obj.project.organization and obj.project.organization not in orgs:
orgs.append(obj.project.organization)
return orgs

def org_access(self, obj, role_types=['admin_role']):
orgs = self.related_orgs(obj)
for org in orgs:
for role_type in role_types:
role = getattr(org, role_type)
if self.user in role:
return True
return False
Q(organization__in=org_access_qs)).distinct()

def can_add(self, data, validate_license=True):
if validate_license:
self.check_license()

if not data: # So the browseable API will work
return True
return self.user.is_superuser
raise NotImplementedError('Direct job creation not possible in v2 API')

def can_change(self, obj, data):
return (obj.status == 'new' and
self.can_read(obj) and
self.can_add(data, validate_license=False))
raise NotImplementedError('Direct job editing not supported in v2 API')

@check_superuser
def can_delete(self, obj):
return self.org_access(obj)
if not obj.organization:
return False
return self.user in obj.organization.admin_role

def can_start(self, obj, validate_license=True):
if validate_license:
@@ -1660,6 +1640,7 @@ class JobAccess(BaseAccess):
except JobLaunchConfig.DoesNotExist:
config = None

# Standard permissions model
if obj.job_template and (self.user not in obj.job_template.execute_role):
return False

@@ -1674,24 +1655,17 @@ class JobAccess(BaseAccess):
if JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}):
return True

org_access = bool(obj.inventory) and self.user in obj.inventory.organization.inventory_admin_role
project_access = obj.project is None or self.user in obj.project.admin_role
credential_access = all([self.user in cred.use_role for cred in obj.credentials.all()])
# Standard permissions model without job template involved
if obj.organization and self.user in obj.organization.execute_role:
return True
elif not (obj.job_template or obj.organization):
raise PermissionDenied(_('Job has been orphaned from its job template and organization.'))
elif obj.job_template and config is not None:
raise PermissionDenied(_('Job was launched with prompted fields you do not have access to.'))
elif obj.job_template and config is None:
raise PermissionDenied(_('Job was launched with unknown prompted fields. Organization admin permissions required.'))

# job can be relaunched if user could make an equivalent JT
ret = org_access and credential_access and project_access
if not ret and self.save_messages and not self.messages:
if not obj.job_template:
pretext = _('Job has been orphaned from its job template.')
elif config is None:
pretext = _('Job was launched with unknown prompted fields.')
else:
pretext = _('Job was launched with prompted fields.')
if credential_access:
self.messages['detail'] = '{} {}'.format(pretext, _(' Organization level permissions required.'))
else:
self.messages['detail'] = '{} {}'.format(pretext, _(' You do not have permission to related resources.'))
return ret
return False

def get_method_capability(self, method, obj, parent_obj):
if method == 'start':
@@ -1704,10 +1678,16 @@ class JobAccess(BaseAccess):
def can_cancel(self, obj):
if not obj.can_cancel:
return False
# Delete access allows org admins to stop running jobs
if self.user == obj.created_by or self.can_delete(obj):
# Users may always cancel their own jobs
if self.user == obj.created_by:
return True
return obj.job_template is not None and self.user in obj.job_template.admin_role
# Users with direct admin to JT may cancel jobs started by anyone
if obj.job_template and self.user in obj.job_template.admin_role:
return True
# If orphaned, allow org JT admins to stop running jobs
if not obj.job_template and obj.organization and self.user in obj.organization.job_template_admin_role:
return True
return False


class SystemJobTemplateAccess(BaseAccess):
@@ -1942,11 +1922,11 @@ class WorkflowJobNodeAccess(BaseAccess):
# TODO: notification attachments?
class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
'''
I can only see/manage Workflow Job Templates if I'm a super user
I can see/manage Workflow Job Templates based on object roles
'''

model = WorkflowJobTemplate
select_related = ('created_by', 'modified_by', 'next_schedule',
select_related = ('created_by', 'modified_by', 'organization', 'next_schedule',
'admin_role', 'execute_role', 'read_role',)

def filtered_queryset(self):
@@ -1964,10 +1944,6 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if not data: # So the browseable API will work
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()

# will check this if surveys are added to WFJT
if 'survey_enabled' in data and data['survey_enabled']:
self.check_license(feature='surveys')

return (
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and
self.check_related('inventory', Inventory, data, role_field='use_role')
@@ -2036,7 +2012,7 @@ class WorkflowJobAccess(BaseAccess):
I can also cancel it if I started it
'''
model = WorkflowJob
select_related = ('created_by', 'modified_by',)
select_related = ('created_by', 'modified_by', 'organization',)

def filtered_queryset(self):
return WorkflowJob.objects.filter(
@@ -2238,7 +2214,7 @@ class JobEventAccess(BaseAccess):
'''

model = JobEvent
prefetch_related = ('hosts', 'job__job_template', 'host',)
prefetch_related = ('job__job_template', 'host',)

def filtered_queryset(self):
return self.model.objects.filter(
@@ -2330,6 +2306,7 @@ class UnifiedJobTemplateAccess(BaseAccess):
prefetch_related = (
'last_job',
'current_job',
'organization',
'credentials__credential_type',
Prefetch('labels', queryset=Label.objects.all().order_by('name')),
)
@@ -2369,6 +2346,7 @@ class UnifiedJobAccess(BaseAccess):
prefetch_related = (
'created_by',
'modified_by',
'organization',
'unified_job_node__workflow_job',
'unified_job_template',
'instance_group',
@@ -2399,8 +2377,7 @@ class UnifiedJobAccess(BaseAccess):
Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role')) |
Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs) |
Q(adhoccommand__inventory__id__in=inv_pk_qs) |
Q(job__inventory__organization__in=org_auditor_qs) |
Q(job__project__organization__in=org_auditor_qs)
Q(organization__in=org_auditor_qs)
)
return qs

@@ -2427,6 +2404,9 @@ class ScheduleAccess(BaseAccess):
def can_add(self, data):
if not JobLaunchConfigAccess(self.user).can_add(data):
return False
if not data:
return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists()

return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True)

@check_superuser

awx/main/analytics/broadcast_websocket.py (new file, 166 lines)
@@ -0,0 +1,166 @@
import datetime
import asyncio
import logging
import aioredis
import redis
import re

from prometheus_client import (
    generate_latest,
    Gauge,
    Counter,
    Enum,
    CollectorRegistry,
)

from django.conf import settings


BROADCAST_WEBSOCKET_REDIS_KEY_NAME = 'broadcast_websocket_stats'


logger = logging.getLogger('awx.main.analytics.broadcast_websocket')


def dt_to_seconds(dt):
    return int((dt - datetime.datetime(1970, 1, 1)).total_seconds())


def now_seconds():
    return dt_to_seconds(datetime.datetime.now())


# Second granularity; per-minute
class FixedSlidingWindow():
    def __init__(self, start_time=None):
        self.buckets = dict()
        self.start_time = start_time or now_seconds()

    def cleanup(self, now_bucket=None):
        now_bucket = now_bucket or now_seconds()
        if self.start_time + 60 <= now_bucket:
            self.start_time = now_bucket - 60 + 1

            # Delete old entries
            for k in list(self.buckets.keys()):
                if k < self.start_time:
                    del self.buckets[k]

    def record(self, ts=None):
        ts = ts or datetime.datetime.now()
        now_bucket = int((ts - datetime.datetime(1970, 1, 1)).total_seconds())

        val = self.buckets.get(now_bucket, 0)
        self.buckets[now_bucket] = val + 1

        self.cleanup(now_bucket)

    def render(self):
        self.cleanup()
        return sum(self.buckets.values()) or 0


class BroadcastWebsocketStatsManager():
    def __init__(self, event_loop, local_hostname):
        self._local_hostname = local_hostname

        self._event_loop = event_loop
        self._stats = dict()
        self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME

    def new_remote_host_stats(self, remote_hostname):
        self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname,
                                                               remote_hostname)
        return self._stats[remote_hostname]

    def delete_remote_host_stats(self, remote_hostname):
        del self._stats[remote_hostname]

    async def run_loop(self):
        try:
            redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
            while True:
                stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
                await redis_conn.set(self._redis_key, stats_data_str)

                await asyncio.sleep(settings.BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS)
        except Exception as e:
            logger.warn(e)
            await asyncio.sleep(settings.BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS)
            self.start()

    def start(self):
        self.async_task = self._event_loop.create_task(self.run_loop())
        return self.async_task

    @classmethod
    def get_stats_sync(cls):
        '''
        Stringified version of all the stats
        '''
        redis_conn = redis.Redis.from_url(settings.BROKER_URL)
        return redis_conn.get(BROADCAST_WEBSOCKET_REDIS_KEY_NAME)


class BroadcastWebsocketStats():
    def __init__(self, local_hostname, remote_hostname):
        self._local_hostname = local_hostname
        self._remote_hostname = remote_hostname
        self._registry = CollectorRegistry()

        # TODO: More robust replacement
        self.name = self.safe_name(self._local_hostname)
        self.remote_name = self.safe_name(self._remote_hostname)

        self._messages_received_total = Counter(f'awx_{self.remote_name}_messages_received_total',
                                                'Number of messages received, to be forwarded, by the broadcast websocket system',
                                                registry=self._registry)
        self._messages_received = Gauge(f'awx_{self.remote_name}_messages_received',
                                        'Number of forwarded messages received by the broadcast websocket system, for the duration of the current connection',
                                        registry=self._registry)
        self._connection = Enum(f'awx_{self.remote_name}_connection',
                                'Websocket broadcast connection',
                                states=['disconnected', 'connected'],
                                registry=self._registry)
        self._connection_start = Gauge(f'awx_{self.remote_name}_connection_start',
                                       'Time the connection was established',
                                       registry=self._registry)

        self._messages_received_per_minute = Gauge(f'awx_{self.remote_name}_messages_received_per_minute',
                                                   'Messages received per minute',
                                                   registry=self._registry)
        self._internal_messages_received_per_minute = FixedSlidingWindow()

    def safe_name(self, s):
        # Replace all non alpha-numeric characters with _
        return re.sub('[^0-9a-zA-Z]+', '_', s)

    def unregister(self):
        self._registry.unregister(f'awx_{self.remote_name}_messages_received')
        self._registry.unregister(f'awx_{self.remote_name}_connection')

    def record_message_received(self):
        self._internal_messages_received_per_minute.record()
        self._messages_received.inc()
        self._messages_received_total.inc()

    def record_connection_established(self):
        self._connection.state('connected')
        self._connection_start.set_to_current_time()
        self._messages_received.set(0)

    def record_connection_lost(self):
        self._connection.state('disconnected')

    def get_connection_duration(self):
        return (datetime.datetime.now() - self._connection_established_ts).total_seconds()

    def render(self):
        msgs_per_min = self._internal_messages_received_per_minute.render()
        self._messages_received_per_minute.set(msgs_per_min)

    def serialize(self):
        self.render()

        registry_data = generate_latest(self._registry).decode('UTF-8')
        return registry_data

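For orientation, a minimal usage sketch of the FixedSlidingWindow above (illustrative only, assuming the cleanup bound keeps the trailing 60 seconds as written): events older than a minute fall out of the count.

    import datetime

    win = FixedSlidingWindow(start_time=now_seconds() - 120)
    now = datetime.datetime.now()
    win.record(ts=now - datetime.timedelta(seconds=90))  # outside the trailing minute
    win.record(ts=now - datetime.timedelta(seconds=10))  # inside
    win.record()                                         # right now
    print(win.render())  # -> 2; the 90-second-old bucket is cleaned up
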
@@ -31,7 +31,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
'''


@register('config', '1.0')
@register('config', '1.1')
def config(since):
    license_info = get_license(show_key=False)
    install_type = 'traditional'
@@ -53,6 +53,7 @@ def config(since):
        'ansible_version': get_ansible_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
        'free_instances': license_info.get('free_instances', 0),
        'total_licensed_instances': license_info.get('instance_count', 0),
        'license_expiry': license_info.get('time_remaining', 0),
        'pendo_tracking': settings.PENDO_TRACKING_STATE,
        'authentication_backends': settings.AUTHENTICATION_BACKENDS,
@@ -256,7 +257,7 @@ def copy_tables(since, full_path):
    unified_job_query = '''COPY (SELECT main_unifiedjob.id,
                                        main_unifiedjob.polymorphic_ctype_id,
                                        django_content_type.model,
                                        main_project.organization_id,
                                        main_unifiedjob.organization_id,
                                        main_organization.name as organization_name,
                                        main_unifiedjob.created,
                                        main_unifiedjob.name,
@@ -274,10 +275,8 @@ def copy_tables(since, full_path):
                                        main_unifiedjob.job_explanation,
                                        main_unifiedjob.instance_group_id
                                 FROM main_unifiedjob
                                 JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
                                 JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
                                 JOIN main_project ON main_project.unifiedjobtemplate_ptr_id = main_job.project_id
                                 JOIN main_organization ON main_organization.id = main_project.organization_id
                                 JOIN main_organization ON main_organization.id = main_unifiedjob.organization_id
                                 WHERE main_unifiedjob.created > {}
                                 AND main_unifiedjob.launch_type != 'sync'
                                 ORDER BY main_unifiedjob.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))

@@ -15,7 +15,7 @@ from awx.conf.license import get_license
from awx.main.models import Job
from awx.main.access import access_registry
from awx.main.models.ha import TowerAnalyticsState
from awx.main.utils import get_awx_http_client_headers
from awx.main.utils import get_awx_http_client_headers, set_environ


__all__ = ['register', 'gather', 'ship', 'table_version']
@@ -134,13 +134,17 @@ def gather(dest=None, module=None, collection_type='scheduled'):
        settings.SYSTEM_UUID,
        run_now.strftime('%Y-%m-%d-%H%M%S%z')
    ])
    tgz = shutil.make_archive(
        os.path.join(os.path.dirname(dest), tarname),
        'gztar',
        dest
    )
    shutil.rmtree(dest)
    return tgz
    try:
        tgz = shutil.make_archive(
            os.path.join(os.path.dirname(dest), tarname),
            'gztar',
            dest
        )
        return tgz
    except Exception:
        logger.exception("Failed to write analytics archive file")
    finally:
        shutil.rmtree(dest)


def ship(path):
@@ -169,12 +173,13 @@ def ship(path):
    s = requests.Session()
    s.headers = get_awx_http_client_headers()
    s.headers.pop('Content-Type')
    response = s.post(url,
                      files=files,
                      verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
                      auth=(rh_user, rh_password),
                      headers=s.headers,
                      timeout=(31, 31))
    with set_environ(**settings.AWX_TASK_ENV):
        response = s.post(url,
                          files=files,
                          verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
                          auth=(rh_user, rh_password),
                          headers=s.headers,
                          timeout=(31, 31))
    if response.status_code != 202:
        return logger.exception('Upload failed with status {}, {}'.format(response.status_code,
                                                                          response.text))

@@ -616,6 +616,18 @@ register(
    category_slug='jobs',
)

register(
    'MAX_FORKS',
    field_class=fields.IntegerField,
    allow_null=False,
    default=200,
    label=_('Maximum number of forks per job.'),
    help_text=_('Saving a Job Template with more than this number of forks will result in an error. '
                'When set to 0, no limit is applied.'),
    category=_('Jobs'),
    category_slug='jobs',
)

register(
    'LOG_AGGREGATOR_HOST',
    field_class=fields.CharField,
@@ -787,6 +799,28 @@ register(
)


register(
    'AUTOMATION_ANALYTICS_LAST_GATHER',
    field_class=fields.DateTimeField,
    label=_('Last gather date for Automation Analytics.'),
    allow_null=True,
    category=_('System'),
    category_slug='system'
)


register(
    'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
    field_class=fields.IntegerField,
    label=_('Automation Analytics Gather Interval'),
    help_text=_('Interval (in seconds) between data gathering.'),
    default=14400,  # every 4 hours
    min_value=1800,  # every 30 minutes
    category=_('System'),
    category_slug='system'
)


def logging_validate(serializer, attrs):
    if not serializer.instance or \
            not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or \
@@ -811,10 +845,7 @@ def galaxy_validate(serializer, attrs):
    to save settings which obviously break all project updates.
    """
    prefix = 'PRIMARY_GALAXY_'

    from awx.main.constants import GALAXY_SERVER_FIELDS
    if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
        return attrs
    errors = {}

    def _new_value(setting_name):
        if setting_name in attrs:
@@ -823,10 +854,22 @@ def galaxy_validate(serializer, attrs):
            return ''
        return getattr(serializer.instance, setting_name, '')

    if not _new_value('PRIMARY_GALAXY_URL'):
        if _new_value('PUBLIC_GALAXY_ENABLED') is False:
            msg = _('A URL for Primary Galaxy must be defined before disabling public Galaxy.')
            # put error in both keys because UI has trouble with errors in toggles
            for key in ('PRIMARY_GALAXY_URL', 'PUBLIC_GALAXY_ENABLED'):
                errors.setdefault(key, [])
                errors[key].append(msg)
            raise serializers.ValidationError(errors)

    from awx.main.constants import GALAXY_SERVER_FIELDS
    if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
        return attrs

    galaxy_data = {}
    for subfield in GALAXY_SERVER_FIELDS:
        galaxy_data[subfield] = _new_value('{}{}'.format(prefix, subfield.upper()))
    errors = {}
    if not galaxy_data['url']:
        for k, v in galaxy_data.items():
            if v:

@@ -1,97 +1,264 @@
import json
import logging
import datetime
import hmac
import asyncio

from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user

from django.utils.encoding import smart_str
from django.http.cookie import parse_cookie
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
from django.utils.encoding import force_bytes
from django.contrib.auth.models import User

from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.layers import get_channel_layer
from channels.db import database_sync_to_async


logger = logging.getLogger('awx.main.consumers')
XRF_KEY = '_auth_user_xrf'


def discard_groups(message):
    if 'groups' in message.channel_session:
        for group in message.channel_session['groups']:
            Group(group).discard(message.reply_channel)
class WebsocketSecretAuthHelper:
    """
    Middlewareish for websockets to verify node websocket broadcast interconnect.

    Note: The "ish" is due to the channels routing interface. Routing occurs
    _after_ authentication; making it hard to apply this auth to _only_ a subset of
    websocket endpoints.
    """

    @classmethod
    def construct_secret(cls):
        nonce_serialized = "{}".format(int((datetime.datetime.utcnow() - datetime.datetime.fromtimestamp(0)).total_seconds()))
        payload_dict = {
            'secret': settings.BROADCAST_WEBSOCKET_SECRET,
            'nonce': nonce_serialized
        }
        payload_serialized = json.dumps(payload_dict)

        secret_serialized = hmac.new(force_bytes(settings.BROADCAST_WEBSOCKET_SECRET),
                                     msg=force_bytes(payload_serialized),
                                     digestmod='sha256').hexdigest()

        return 'HMAC-SHA256 {}:{}'.format(nonce_serialized, secret_serialized)


@channel_session_user_from_http
def ws_connect(message):
    headers = dict(message.content.get('headers', ''))
    message.reply_channel.send({"accept": True})
    message.content['method'] = 'FAKE'
    if message.user.is_authenticated:
        message.reply_channel.send(
            {"text": json.dumps({"accept": True, "user": message.user.id})}
        )
        # store the valid CSRF token from the cookie so we can compare it later
        # on ws_receive
        cookie_token = parse_cookie(
            smart_str(headers.get(b'cookie'))
        ).get('csrftoken')
        if cookie_token:
            message.channel_session[XRF_KEY] = cookie_token
    else:
        logger.error("Request user is not authenticated to use websocket.")
        message.reply_channel.send({"close": True})
    return None
    @classmethod
    def verify_secret(cls, s, nonce_tolerance=300):
        try:
            (prefix, payload) = s.split(' ')
            if prefix != 'HMAC-SHA256':
                raise ValueError('Unsupported encryption algorithm')
            (nonce_parsed, secret_parsed) = payload.split(':')
        except Exception:
            raise ValueError("Failed to parse secret")

        try:
            payload_expected = {
                'secret': settings.BROADCAST_WEBSOCKET_SECRET,
                'nonce': nonce_parsed,
            }
            payload_serialized = json.dumps(payload_expected)
        except Exception:
            raise ValueError("Failed to create hash to compare to secret.")

        secret_serialized = hmac.new(force_bytes(settings.BROADCAST_WEBSOCKET_SECRET),
                                     msg=force_bytes(payload_serialized),
                                     digestmod='sha256').hexdigest()

        if secret_serialized != secret_parsed:
            raise ValueError("Invalid secret")

        # Avoid timing attack and check the nonce after all the heavy lifting
        now = datetime.datetime.utcnow()
        nonce_parsed = datetime.datetime.fromtimestamp(int(nonce_parsed))
        if (now - nonce_parsed).total_seconds() > nonce_tolerance:
            raise ValueError("Potential replay attack or machine(s) time out of sync.")

        return True

    @classmethod
    def is_authorized(cls, scope):
        secret = ''
        for k, v in scope['headers']:
            if k.decode("utf-8") == 'secret':
                secret = v.decode("utf-8")
                break
        WebsocketSecretAuthHelper.verify_secret(secret)


@channel_session_user
def ws_disconnect(message):
    discard_groups(message)
class BroadcastConsumer(AsyncJsonWebsocketConsumer):

    async def connect(self):
        try:
            WebsocketSecretAuthHelper.is_authorized(self.scope)
        except Exception:
            # TODO: log ip of connected client
            logger.warn("Broadcast client failed to authorize.")
            await self.close()
            return

        # TODO: log ip of connected client
        logger.info("Broadcast client connected.")
        await self.accept()
        await self.channel_layer.group_add(settings.BROADCAST_WEBSOCKET_GROUP_NAME, self.channel_name)

    async def disconnect(self, code):
        # TODO: log ip of disconnected client
        logger.info("Client disconnected")

    async def internal_message(self, event):
        await self.send(event['text'])


@channel_session_user
def ws_receive(message):
    from awx.main.access import consumer_access
    user = message.user
    raw_data = message.content['text']
    data = json.loads(raw_data)
class EventConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        user = self.scope['user']
        if user and not user.is_anonymous:
            await self.accept()
            await self.send_json({"accept": True, "user": user.id})
            # store the valid CSRF token from the cookie so we can compare it later
            # on ws_receive
            cookie_token = self.scope['cookies'].get('csrftoken')
            if cookie_token:
                self.scope['session'][XRF_KEY] = cookie_token
        else:
            logger.error("Request user is not authenticated to use websocket.")
            # TODO: Carry over from channels 1 implementation
            # We should never .accept() the client and close without sending a close message
            await self.accept()
            await self.send_json({"close": True})
            await self.close()

    xrftoken = data.get('xrftoken')
    if (
        not xrftoken or
        XRF_KEY not in message.channel_session or
        xrftoken != message.channel_session[XRF_KEY]
    ):
        logger.error(
            "access denied to channel, XRF mismatch for {}".format(user.username)
        )
        message.reply_channel.send({
            "text": json.dumps({"error": "access denied to channel"})
        })
    @database_sync_to_async
    def user_can_see_object_id(self, user_access, oid):
        # At this point user is a channels.auth.UserLazyObject object
        # This causes problems with our generic role permissions checking.
        # Specifically, type(user) != User
        # Therefore, get the "real" User objects from the database before
        # calling the access permission methods
        user_access.user = User.objects.get(id=user_access.user.id)
        res = user_access.get_queryset().filter(pk=oid).exists()
        return res

    async def receive_json(self, data):
        from awx.main.access import consumer_access
        user = self.scope['user']
        xrftoken = data.get('xrftoken')
        if (
            not xrftoken or
            XRF_KEY not in self.scope["session"] or
            xrftoken != self.scope["session"][XRF_KEY]
        ):
            logger.error(f"access denied to channel, XRF mismatch for {user.username}")
            await self.send_json({"error": "access denied to channel"})
            return

        if 'groups' in data:
            groups = data['groups']
            new_groups = set()
            current_groups = set(self.scope['session'].pop('groups') if 'groups' in self.scope['session'] else [])
            for group_name, v in groups.items():
                if type(v) is list:
                    for oid in v:
                        name = '{}-{}'.format(group_name, oid)
                        access_cls = consumer_access(group_name)
                        if access_cls is not None:
                            user_access = access_cls(user)
                            if not await self.user_can_see_object_id(user_access, oid):
                                await self.send_json({"error": "access denied to channel {0} for resource id {1}".format(group_name, oid)})
                                continue
                        new_groups.add(name)
                else:
                    await self.send_json({"error": "access denied to channel"})
                    logger.error(f"groups must be a list, not {groups}")
                    return

            old_groups = current_groups - new_groups
            for group_name in old_groups:
                await self.channel_layer.group_discard(
                    group_name,
                    self.channel_name,
                )

            new_groups_exclusive = new_groups - current_groups
            for group_name in new_groups_exclusive:
                await self.channel_layer.group_add(
                    group_name,
                    self.channel_name
                )
            logger.debug(f"Channel {self.channel_name} left groups {old_groups} and joined {new_groups_exclusive}")
            self.scope['session']['groups'] = new_groups
            await self.send_json({
                "groups_current": list(new_groups),
                "groups_left": list(old_groups),
                "groups_joined": list(new_groups_exclusive)
            })

    async def internal_message(self, event):
        await self.send(event['text'])


def run_sync(func):
    event_loop = asyncio.new_event_loop()
    event_loop.run_until_complete(func)
    event_loop.close()


def _dump_payload(payload):
    try:
        return json.dumps(payload, cls=DjangoJSONEncoder)
    except ValueError:
        logger.error("Invalid payload to emit")
        return None


async def emit_channel_notification_async(group, payload):
    from awx.main.wsbroadcast import wrap_broadcast_msg  # noqa

    payload_dumped = _dump_payload(payload)
    if payload_dumped is None:
        return

    if 'groups' in data:
        discard_groups(message)
        groups = data['groups']
        current_groups = set(message.channel_session.pop('groups') if 'groups' in message.channel_session else [])
        for group_name, v in groups.items():
            if type(v) is list:
                for oid in v:
                    name = '{}-{}'.format(group_name, oid)
                    access_cls = consumer_access(group_name)
                    if access_cls is not None:
                        user_access = access_cls(user)
                        if not user_access.get_queryset().filter(pk=oid).exists():
                            message.reply_channel.send({"text": json.dumps(
                                {"error": "access denied to channel {0} for resource id {1}".format(group_name, oid)})})
                            continue
                    current_groups.add(name)
                    Group(name).add(message.reply_channel)
            else:
                current_groups.add(group_name)
                Group(group_name).add(message.reply_channel)
        message.channel_session['groups'] = list(current_groups)
    channel_layer = get_channel_layer()
    await channel_layer.group_send(
        group,
        {
            "type": "internal.message",
            "text": payload_dumped
        },
    )

    await channel_layer.group_send(
        settings.BROADCAST_WEBSOCKET_GROUP_NAME,
        {
            "type": "internal.message",
            "text": wrap_broadcast_msg(group, payload_dumped),
        },
    )


def emit_channel_notification(group, payload):
    try:
        Group(group).send({"text": json.dumps(payload, cls=DjangoJSONEncoder)})
    except ValueError:
        logger.error("Invalid payload emitting channel {} on topic: {}".format(group, payload))
    from awx.main.wsbroadcast import wrap_broadcast_msg  # noqa

    payload_dumped = _dump_payload(payload)
    if payload_dumped is None:
        return

    channel_layer = get_channel_layer()

    run_sync(channel_layer.group_send(
        group,
        {
            "type": "internal.message",
            "text": payload_dumped
        },
    ))

    run_sync(channel_layer.group_send(
        settings.BROADCAST_WEBSOCKET_GROUP_NAME,
        {
            "type": "internal.message",
            "text": wrap_broadcast_msg(group, payload_dumped),
        },
    ))

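To make the interconnect handshake concrete, a minimal sketch of the round trip between construct_secret() and verify_secret() above (assuming settings.BROADCAST_WEBSOCKET_SECRET is configured):

    # The broadcast client sends this value in a 'secret' header;
    # BroadcastConsumer.connect() validates it via is_authorized().
    token = WebsocketSecretAuthHelper.construct_secret()  # 'HMAC-SHA256 <nonce>:<hexdigest>'
    assert WebsocketSecretAuthHelper.verify_secret(token, nonce_tolerance=300)
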
@@ -43,7 +43,7 @@ aim_inputs = {
    'id': 'object_query',
    'label': _('Object Query'),
    'type': 'string',
    'help_text': _('Lookup query for the object. Ex: "Safe=TestSafe;Object=testAccountName123"'),
    'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
}, {
    'id': 'object_query_format',
    'label': _('Object Query Format'),

@@ -3,6 +3,16 @@ from .plugin import CredentialPlugin
from django.utils.translation import ugettext_lazy as _
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.common.credentials import ServicePrincipalCredentials
from msrestazure import azure_cloud


# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
clouds = [
    vars(azure_cloud)[n]
    for n in dir(azure_cloud)
    if n.startswith("AZURE_") and n.endswith("_CLOUD")
]
default_cloud = vars(azure_cloud)["AZURE_PUBLIC_CLOUD"]


azure_keyvault_inputs = {
@@ -24,6 +34,12 @@ azure_keyvault_inputs = {
        'id': 'tenant',
        'label': _('Tenant ID'),
        'type': 'string'
    }, {
        'id': 'cloud_name',
        'label': _('Cloud Environment'),
        'help_text': _('Specify which azure cloud environment to use.'),
        'choices': list(set([default_cloud.name] + [c.name for c in clouds])),
        'default': default_cloud.name
    }],
    'metadata': [{
        'id': 'secret_field',
@@ -42,6 +58,7 @@ azure_keyvault_inputs = {

def azure_keyvault_backend(**kwargs):
    url = kwargs['url']
    [cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]

    def auth_callback(server, resource, scope):
        credentials = ServicePrincipalCredentials(
@@ -49,7 +66,7 @@ def azure_keyvault_backend(**kwargs):
            client_id = kwargs['client'],
            secret = kwargs['secret'],
            tenant = kwargs['tenant'],
            resource = "https://vault.azure.net",
            resource = f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
        )
        token = credentials.token
        return token['token_type'], token['access_token']

@@ -64,7 +64,7 @@ class RecordedQueryLog(object):
        if not os.path.isdir(self.dest):
            os.makedirs(self.dest)
        progname = ' '.join(sys.argv)
        for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'runworker'):
        for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
            if match in progname:
                progname = match
                break

@@ -1,5 +1,62 @@
import psycopg2
import select

from contextlib import contextmanager

from django.conf import settings


NOT_READY = ([], [], [])


def get_local_queuename():
    return settings.CLUSTER_HOST_ID


class PubSub(object):
    def __init__(self, conn):
        assert conn.autocommit, "Connection must be in autocommit mode."
        self.conn = conn

    def listen(self, channel):
        with self.conn.cursor() as cur:
            cur.execute('LISTEN "%s";' % channel)

    def unlisten(self, channel):
        with self.conn.cursor() as cur:
            cur.execute('UNLISTEN "%s";' % channel)

    def notify(self, channel, payload):
        with self.conn.cursor() as cur:
            cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

    def events(self, select_timeout=5, yield_timeouts=False):
        while True:
            if select.select([self.conn], [], [], select_timeout) == NOT_READY:
                if yield_timeouts:
                    yield None
            else:
                self.conn.poll()
                while self.conn.notifies:
                    yield self.conn.notifies.pop(0)

    def close(self):
        self.conn.close()


@contextmanager
def pg_bus_conn():
    conf = settings.DATABASES['default']
    conn = psycopg2.connect(dbname=conf['NAME'],
                            host=conf['HOST'],
                            user=conf['USER'],
                            password=conf['PASSWORD'],
                            port=conf['PORT'],
                            **conf.get("OPTIONS", {}))
    # Django connection.cursor().connection doesn't have autocommit=True on
    conn.set_session(autocommit=True)
    pubsub = PubSub(conn)
    yield pubsub
    conn.close()

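A minimal sketch of the LISTEN/NOTIFY round trip this helper enables (assuming a reachable Postgres configured in Django settings; the channel name is illustrative):

    with pg_bus_conn() as bus:
        bus.listen('example_channel')
        bus.notify('example_channel', '{"msg": "hello"}')
        for event in bus.events(select_timeout=1, yield_timeouts=True):
            if event is not None:
                print(event.channel, event.payload)  # psycopg2 Notify object
            break
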
@@ -1,11 +1,10 @@
import logging
import socket

from django.conf import settings
import uuid
import json

from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.kombu import Connection
from kombu import Queue, Exchange, Producer, Consumer

from . import pg_bus_conn

logger = logging.getLogger('awx.main.dispatch')

@@ -20,15 +19,6 @@ class Control(object):
            raise RuntimeError('{} must be in {}'.format(service, self.services))
        self.service = service
        self.queuename = host or get_local_queuename()
        self.queue = Queue(self.queuename, Exchange(self.queuename), routing_key=self.queuename)

    def publish(self, msg, conn, **kwargs):
        producer = Producer(
            exchange=self.queue.exchange,
            channel=conn,
            routing_key=self.queuename
        )
        producer.publish(msg, expiration=5, **kwargs)

    def status(self, *args, **kwargs):
        return self.control_with_reply('status', *args, **kwargs)
@@ -36,24 +26,28 @@ class Control(object):
    def running(self, *args, **kwargs):
        return self.control_with_reply('running', *args, **kwargs)

    @classmethod
    def generate_reply_queue_name(cls):
        return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"

    def control_with_reply(self, command, timeout=5):
        logger.warn('checking {} {} for {}'.format(self.service, command, self.queuename))
        reply_queue = Queue(name="amq.rabbitmq.reply-to")
        reply_queue = Control.generate_reply_queue_name()
        self.result = None
        with Connection(settings.BROKER_URL) as conn:
            with Consumer(conn, reply_queue, callbacks=[self.process_message], no_ack=True):
                self.publish({'control': command}, conn, reply_to='amq.rabbitmq.reply-to')
                try:
                    conn.drain_events(timeout=timeout)
                except socket.timeout:
                    logger.error('{} did not reply within {}s'.format(self.service, timeout))
                    raise
        return self.result

        with pg_bus_conn() as conn:
            conn.listen(reply_queue)
            conn.notify(self.queuename,
                        json.dumps({'control': command, 'reply_to': reply_queue}))

            for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
                if reply is None:
                    logger.error(f'{self.service} did not reply within {timeout}s')
                    raise RuntimeError(f"{self.service} did not reply within {timeout}s")
                break

            return json.loads(reply.payload)

    def control(self, msg, **kwargs):
        with Connection(settings.BROKER_URL) as conn:
            self.publish(msg, conn)

    def process_message(self, body, message):
        self.result = body
        message.ack()
        with pg_bus_conn() as conn:
            conn.notify(self.queuename, json.dumps(msg))

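In practice the reply-queue round trip above is exercised roughly like this (a sketch; 'dispatcher' is one of the registered services):

    from awx.main.dispatch.control import Control

    # NOTIFY {'control': 'status', 'reply_to': ...} on the local queue,
    # then LISTEN on the generated reply queue for the answer
    print(Control('dispatcher').status())
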
@@ -1,42 +0,0 @@
from amqp.exceptions import PreconditionFailed
from django.conf import settings
from kombu.connection import Connection as KombuConnection
from kombu.transport import pyamqp

import logging

logger = logging.getLogger('awx.main.dispatch')


__all__ = ['Connection']


class Connection(KombuConnection):

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)

        class _Channel(pyamqp.Channel):

            def queue_declare(self, queue, *args, **kwargs):
                kwargs['durable'] = settings.BROKER_DURABILITY
                try:
                    return super(_Channel, self).queue_declare(queue, *args, **kwargs)
                except PreconditionFailed as e:
                    if "inequivalent arg 'durable'" in getattr(e, 'reply_text', None):
                        logger.error(
                            'queue {} durability is not {}, deleting and recreating'.format(
                                queue,
                                kwargs['durable']
                            )
                        )
                        self.queue_delete(queue)
                        return super(_Channel, self).queue_declare(queue, *args, **kwargs)

        class _Connection(pyamqp.Connection):
            Channel = _Channel

        class _Transport(pyamqp.Transport):
            Connection = _Connection

        self.transport_cls = _Transport
awx/main/dispatch/periodic.py (new file, 56 lines)
@@ -0,0 +1,56 @@
import logging
import os
import time
from multiprocessing import Process

from django.conf import settings
from django.db import connections
from schedule import Scheduler

from awx.main.dispatch.worker import TaskWorker

logger = logging.getLogger('awx.main.dispatch.periodic')


class Scheduler(Scheduler):

    def run_continuously(self):
        idle_seconds = max(
            1,
            min(self.jobs).period.total_seconds() / 2
        )

        def run():
            ppid = os.getppid()
            logger.warn('periodic beat started')
            while True:
                if os.getppid() != ppid:
                    # if the parent PID changes, this process has been orphaned
                    # via e.g., segfault or sigkill, we should exit too
                    pid = os.getpid()
                    logger.warn(f'periodic beat exiting gracefully pid:{pid}')
                    raise SystemExit()
                try:
                    for conn in connections.all():
                        # If the database connection has a hiccup, re-establish a new
                        # connection
                        conn.close_if_unusable_or_obsolete()
                    self.run_pending()
                except Exception:
                    logger.exception(
                        'encountered an error while scheduling periodic tasks'
                    )
                time.sleep(idle_seconds)

        process = Process(target=run)
        process.daemon = True
        process.start()


def run_continuously():
    scheduler = Scheduler()
    for task in settings.CELERYBEAT_SCHEDULE.values():
        apply_async = TaskWorker.resolve_callable(task['task']).apply_async
        total_seconds = task['schedule'].total_seconds()
        scheduler.every(total_seconds).seconds.do(apply_async)
    scheduler.run_continuously()

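For reference, the Scheduler above builds on the PyPI schedule package; a stripped-down sketch of the same polling pattern (illustrative only):

    import schedule  # the package the Scheduler above subclasses

    s = schedule.Scheduler()
    s.every(30).seconds.do(lambda: print('tick'))
    s.run_pending()  # the daemon Process above calls this repeatedly
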
@@ -1,7 +1,9 @@
import logging
import os
import random
import signal
import sys
import time
import traceback
from uuid import uuid4

@@ -72,9 +74,6 @@ class PoolWorker(object):
        if not body.get('uuid'):
            body['uuid'] = str(uuid4())
        uuid = body['uuid']
        logger.debug('delivered {} to worker[{}] qsize {}'.format(
            uuid, self.pid, self.qsize
        ))
        self.managed_tasks[uuid] = body
        self.queue.put(body, block=True, timeout=5)
        self.messages_sent += 1
@@ -247,7 +246,7 @@ class WorkerPool(object):
            ' qsize={{ w.managed_tasks|length }}'
            ' rss={{ w.mb }}MB'
            '{% for task in w.managed_tasks.values() %}'
            '\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
            '\n - {% if loop.index0 == 0 %}running {% if "age" in task %}for: {{ "%.1f" % task["age"] }}s {% endif %}{% else %}queued {% endif %}'
            '{{ task["uuid"] }} '
            '{% if "task" in task %}'
            '{{ task["task"].rsplit(".", 1)[-1] }}'
@@ -368,6 +367,26 @@ class AutoscalePool(WorkerPool):
                    logger.warn('scaling down worker pid:{}'.format(w.pid))
                    w.quit()
                    self.workers.remove(w)
                if w.alive:
                    # if we discover a task manager invocation that's been running
                    # too long, reap it (because otherwise it'll just hold the postgres
                    # advisory lock forever); the goal of this code is to discover
                    # deadlocks or other serious issues in the task manager that cause
                    # the task manager to never do more work
                    current_task = w.current_task
                    if current_task and isinstance(current_task, dict):
                        if current_task.get('task', '').endswith('tasks.run_task_manager'):
                            if 'started' not in current_task:
                                w.managed_tasks[
                                    current_task['uuid']
                                ]['started'] = time.time()
                            age = time.time() - current_task['started']
                            w.managed_tasks[current_task['uuid']]['age'] = age
                            if age > (60 * 5):
                                logger.error(
                                    f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}'
                                )  # noqa
                                os.kill(w.pid, signal.SIGTERM)

            for m in orphaned:
                # if all the workers are dead, spawn at least one

@@ -1,12 +1,12 @@
import inspect
import logging
import sys
import json
from uuid import uuid4

from django.conf import settings
from kombu import Exchange, Producer

from awx.main.dispatch.kombu import Connection
from . import pg_bus_conn

logger = logging.getLogger('awx.main.dispatch')

@@ -39,24 +39,22 @@ class task:
        add.apply_async([1, 1])
        Adder.apply_async([1, 1])

    # Tasks can also define a specific target queue or exchange type:
    # Tasks can also define a specific target queue or use the special fan-out queue tower_broadcast:

    @task(queue='slow-tasks')
    def snooze():
        time.sleep(10)

    @task(queue='tower_broadcast', exchange_type='fanout')
    @task(queue='tower_broadcast')
    def announce():
        print("Run this everywhere!")
    """

    def __init__(self, queue=None, exchange_type=None):
    def __init__(self, queue=None):
        self.queue = queue
        self.exchange_type = exchange_type

    def __call__(self, fn=None):
        queue = self.queue
        exchange_type = self.exchange_type

        class PublisherMixin(object):

@@ -73,9 +71,12 @@ class task:
            kwargs = kwargs or {}
            queue = (
                queue or
                getattr(cls.queue, 'im_func', cls.queue) or
                settings.CELERY_DEFAULT_QUEUE
                getattr(cls.queue, 'im_func', cls.queue)
            )
            if not queue:
                msg = f'{cls.name}: Queue value required and may not be None'
                logger.error(msg)
                raise ValueError(msg)
            obj = {
                'uuid': task_id,
                'args': args,
@@ -86,21 +87,8 @@ class task:
            if callable(queue):
                queue = queue()
            if not settings.IS_TESTING(sys.argv):
                with Connection(settings.BROKER_URL) as conn:
                    exchange = Exchange(queue, type=exchange_type or 'direct')
                    producer = Producer(conn)
                    logger.debug('publish {}({}, queue={})'.format(
                        cls.name,
                        task_id,
                        queue
                    ))
                    producer.publish(obj,
                                     serializer='json',
                                     compression='bzip2',
                                     exchange=exchange,
                                     declare=[exchange],
                                     delivery_mode="persistent",
                                     routing_key=queue)
                with pg_bus_conn() as conn:
                    conn.notify(queue, json.dumps(obj))
            return (obj, queue)

    # If the object we're wrapping *is* a class (e.g., RunJob), return

@@ -1,3 +1,3 @@
from .base import AWXConsumer, BaseWorker  # noqa
from .base import AWXConsumerRedis, AWXConsumerPG, BaseWorker  # noqa
from .callback import CallbackBrokerWorker  # noqa
from .task import TaskWorker  # noqa

@@ -5,14 +5,17 @@ import os
import logging
import signal
import sys
import redis
import json
import psycopg2
from uuid import UUID
from queue import Empty as QueueEmpty

from django import db
from kombu import Producer
from kombu.mixins import ConsumerMixin
from django.conf import settings

from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch import pg_bus_conn

if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -37,10 +40,11 @@ class WorkerSignalHandler:
        self.kill_now = True


class AWXConsumer(ConsumerMixin):
class AWXConsumerBase(object):
    def __init__(self, name, worker, queues=[], pool=None):
        self.should_stop = False

    def __init__(self, name, connection, worker, queues=[], pool=None):
        self.connection = connection
        self.name = name
        self.total_messages = 0
        self.queues = queues
        self.worker = worker
@@ -49,25 +53,15 @@ class AWXConsumer(ConsumerMixin):
            self.pool = WorkerPool()
        self.pool.init_workers(self.worker.work_loop)

    def get_consumers(self, Consumer, channel):
        logger.debug(self.listening_on)
        return [Consumer(queues=self.queues, accept=['json'],
                         callbacks=[self.process_task])]

    @property
    def listening_on(self):
        return 'listening on {}'.format([
            '{} [{}]'.format(q.name, q.exchange.type) for q in self.queues
        ])
        return f'listening on {self.queues}'

    def control(self, body, message):
    def control(self, body):
        logger.warn(body)
        control = body.get('control')
        if control in ('status', 'running'):
            producer = Producer(
                channel=self.connection,
                routing_key=message.properties['reply_to']
            )
            reply_queue = body['reply_to']
            if control == 'status':
                msg = '\n'.join([self.listening_on, self.pool.debug()])
            elif control == 'running':
@@ -75,20 +69,21 @@ class AWXConsumer(ConsumerMixin):
                for worker in self.pool.workers:
                    worker.calculate_managed_tasks()
                    msg.extend(worker.managed_tasks.keys())
            producer.publish(msg)

            with pg_bus_conn() as conn:
                conn.notify(reply_queue, json.dumps(msg))
        elif control == 'reload':
            for worker in self.pool.workers:
                worker.quit()
        else:
            logger.error('unrecognized control message: {}'.format(control))
        message.ack()

    def process_task(self, body, message):
    def process_task(self, body):
        if 'control' in body:
            try:
                return self.control(body, message)
                return self.control(body)
            except Exception:
                logger.exception("Exception handling control message:")
                logger.exception(f"Exception handling control message: {body}")
            return
        if len(self.pool):
            if "uuid" in body and body['uuid']:
@@ -102,21 +97,58 @@ class AWXConsumer(ConsumerMixin):
                queue = 0
        self.pool.write(queue, body)
        self.total_messages += 1
        message.ack()

    def run(self, *args, **kwargs):
        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)
        self.worker.on_start()
        super(AWXConsumer, self).run(*args, **kwargs)

        # Child should implement other things here

    def stop(self, signum, frame):
        self.should_stop = True  # this makes the kombu mixin stop consuming
        self.should_stop = True
        logger.warn('received {}, stopping'.format(signame(signum)))
        self.worker.on_stop()
        raise SystemExit()


class AWXConsumerRedis(AWXConsumerBase):
    def run(self, *args, **kwargs):
        super(AWXConsumerRedis, self).run(*args, **kwargs)
        self.worker.on_start()

        queue = redis.Redis.from_url(settings.BROKER_URL)
        while True:
            res = queue.blpop(self.queues)
            res = json.loads(res[1])
            self.process_task(res)
            if self.should_stop:
                return


class AWXConsumerPG(AWXConsumerBase):
    def run(self, *args, **kwargs):
        super(AWXConsumerPG, self).run(*args, **kwargs)

        logger.warn(f"Running worker {self.name} listening to queues {self.queues}")
        init = False

        while True:
            try:
                with pg_bus_conn() as conn:
                    for queue in self.queues:
                        conn.listen(queue)
                    if init is False:
                        self.worker.on_start()
                        init = True
                    for e in conn.events():
                        self.process_task(json.loads(e.payload))
                    if self.should_stop:
                        return
            except psycopg2.InterfaceError:
                logger.warn("Stale Postgres message bus connection, reconnecting")
                continue


class BaseWorker(object):

    def read(self, queue):
@@ -148,7 +180,6 @@ class BaseWorker(object):
        finally:
            if 'uuid' in body:
                uuid = body['uuid']
                logger.debug('task {} is finished'.format(uuid))
                finished.put(uuid)
        logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))

@@ -1,16 +1,23 @@
import cProfile
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from queue import Empty as QueueEmpty

from django.utils.timezone import now as tz_now
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError, IntegrityError

from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
                             InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
                             InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
                             Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail

from .base import BaseWorker
@@ -32,6 +39,7 @@ class CallbackBrokerWorker(BaseWorker):
    '''

    MAX_RETRIES = 2
    prof = None

    def __init__(self):
        self.buff = {}
@@ -42,6 +50,26 @@ class CallbackBrokerWorker(BaseWorker):
        except QueueEmpty:
            return {'event': 'FLUSH'}

    def toggle_profiling(self, *args):
        if self.prof:
            self.prof.disable()
            filename = f'callback-{os.getpid()}.pstats'
            filepath = os.path.join(tempfile.gettempdir(), filename)
            with open(filepath, 'w') as f:
                pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
            pstats.Stats(self.prof).dump_stats(filepath + '.raw')
            self.prof = False
            logger.error(f'profiling is disabled, wrote {filepath}')
        else:
            self.prof = cProfile.Profile()
            self.prof.enable()
            logger.error('profiling is enabled')

    def work_loop(self, *args, **kw):
        if settings.AWX_CALLBACK_PROFILE:
            signal.signal(signal.SIGUSR1, self.toggle_profiling)
        return super(CallbackBrokerWorker, self).work_loop(*args, **kw)

    def flush(self, force=False):
        now = tz_now()
        if (
@@ -111,19 +139,14 @@ class CallbackBrokerWorker(BaseWorker):
                    # have all the data we need to send out success/failure
                    # notification templates
                    uj = UnifiedJob.objects.get(pk=job_identifier)
                    if hasattr(uj, 'send_notification_templates'):
                        retries = 0
                        while retries < 5:
                            if uj.finished:
                                uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
                                break
                            else:
                                # wait a few seconds to avoid a race where the
                                # events are persisted _before_ the UJ.status
                                # changes from running -> successful
                                retries += 1
                                time.sleep(1)
                                uj = UnifiedJob.objects.get(pk=job_identifier)

                    if isinstance(uj, Job):
                        # *actual playbooks* send their success/failure
                        # notifications in response to the playbook_on_stats
                        # event handling code in main.models.events
                        pass
                    elif hasattr(uj, 'send_notification_templates'):
                        handle_success_and_failure_notifications.apply_async([uj.id])
                except Exception:
                    logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                return

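The SIGUSR1 toggle above can be driven externally; a hedged sketch (worker_pid is a placeholder for an actual callback worker PID, and settings.AWX_CALLBACK_PROFILE must be enabled when the worker starts):

    import os
    import signal

    os.kill(worker_pid, signal.SIGUSR1)  # first signal: enable cProfile
    os.kill(worker_pid, signal.SIGUSR1)  # second: write callback-<pid>.pstats to the temp dir
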
@@ -56,7 +56,8 @@ from awx.main import utils

__all__ = ['AutoOneToOneField', 'ImplicitRoleField', 'JSONField',
           'SmartFilterField', 'OrderedManyToManyField',
           'update_role_parentage_for_instance', 'is_implicit_parent']
           'update_role_parentage_for_instance',
           'is_implicit_parent']


# Provide a (better) custom error message for enum jsonschema validation
@@ -140,8 +141,9 @@ def resolve_role_field(obj, field):
            return []

        if len(field_components) == 1:
            role_cls = str(utils.get_current_apps().get_model('main', 'Role'))
            if not str(type(obj)) == role_cls:
            # use extremely generous duck typing to accommodate all possible forms
            # of the model that may be used during various migrations
            if obj._meta.model_name != 'role' or obj._meta.app_label != 'main':
                raise Exception(smart_text('{} refers to a {}, not a Role'.format(field, type(obj))))
            ret.append(obj.id)
        else:
@@ -197,18 +199,27 @@ def update_role_parentage_for_instance(instance):
    updates the parents listing for all the roles
    of a given instance if they have changed
    '''
    parents_removed = set()
    parents_added = set()
    for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
        cur_role = getattr(instance, implicit_role_field.name)
        original_parents = set(json.loads(cur_role.implicit_parents))
        new_parents = implicit_role_field._resolve_parent_roles(instance)
        cur_role.parents.remove(*list(original_parents - new_parents))
        cur_role.parents.add(*list(new_parents - original_parents))
        removals = original_parents - new_parents
        if removals:
            cur_role.parents.remove(*list(removals))
            parents_removed.add(cur_role.pk)
        additions = new_parents - original_parents
        if additions:
            cur_role.parents.add(*list(additions))
            parents_added.add(cur_role.pk)
        new_parents_list = list(new_parents)
        new_parents_list.sort()
        new_parents_json = json.dumps(new_parents_list)
        if cur_role.implicit_parents != new_parents_json:
            cur_role.implicit_parents = new_parents_json
            cur_role.save()
            cur_role.save(update_fields=['implicit_parents'])
    return (parents_added, parents_removed)


class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
@@ -256,20 +267,18 @@ class ImplicitRoleField(models.ForeignKey):
            field_names = [field_names]

        for field_name in field_names:
            # Handle the OR syntax for role parents
            if type(field_name) == tuple:
                continue

            if type(field_name) == bytes:
                field_name = field_name.decode('utf-8')

            if field_name.startswith('singleton:'):
                continue

            field_name, sep, field_attr = field_name.partition('.')
            field = getattr(cls, field_name)
            # Non existent fields will occur if ever a parent model is
            # moved inside a migration, needed for job_template_organization_field
            # migration in particular
            # consistency is assured by unit test awx.main.tests.functional
            field = getattr(cls, field_name, None)

            if type(field) is ReverseManyToOneDescriptor or \
            if field and type(field) is ReverseManyToOneDescriptor or \
                    type(field) is ManyToManyDescriptor:

                if '.' in field_attr:

@@ -15,7 +15,6 @@ import awx
from awx.main.utils import (
    get_system_task_capacity
)
from awx.main.queue import CallbackQueueDispatcher

logger = logging.getLogger('awx.isolated.manager')
playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')

@@ -32,12 +31,14 @@ def set_pythonpath(venv_libdir, env):

class IsolatedManager(object):

    def __init__(self, canceled_callback=None, check_callback=None, pod_manager=None):
    def __init__(self, event_handler, canceled_callback=None, check_callback=None, pod_manager=None):
        """
        :param event_handler: a callable used to persist event data from isolated nodes
        :param canceled_callback: a callable - which returns `True` or `False`
                                  - signifying if the job has been prematurely
                                    canceled
        """
        self.event_handler = event_handler
        self.canceled_callback = canceled_callback
        self.check_callback = check_callback
        self.started_at = None

@@ -208,7 +209,6 @@ class IsolatedManager(object):
        status = 'failed'
        rc = None
        last_check = time.time()
        dispatcher = CallbackQueueDispatcher()

        while status == 'failed':
            canceled = self.canceled_callback() if self.canceled_callback else False

@@ -238,7 +238,7 @@ class IsolatedManager(object):
            except json.decoder.JSONDecodeError:  # Just in case it's not fully here yet.
                pass

            self.consume_events(dispatcher)
            self.consume_events()

            last_check = time.time()

@@ -266,19 +266,18 @@ class IsolatedManager(object):

        # consume events one last time just to be sure we didn't miss anything
        # in the final sync
        self.consume_events(dispatcher)
        self.consume_events()

        # emit an EOF event
        event_data = {
            'event': 'EOF',
            'final_counter': len(self.handled_events)
        }
        event_data.setdefault(self.event_data_key, self.instance.id)
        dispatcher.dispatch(event_data)
        self.event_handler(event_data)

        return status, rc

    def consume_events(self, dispatcher):
    def consume_events(self):
        # discover new events and ingest them
        events_path = self.path_to('artifacts', self.ident, 'job_events')

@@ -302,16 +301,10 @@ class IsolatedManager(object):
                    # practice
                    # in this scenario, just ignore this event and try it
                    # again on the next sync
                    pass
                event_data.setdefault(self.event_data_key, self.instance.id)
                dispatcher.dispatch(event_data)
                    continue
                self.event_handler(event_data)
                self.handled_events.add(event)

                # handle artifacts
                if event_data.get('event_data', {}).get('artifact_data', {}):
                    self.instance.artifacts = event_data['event_data']['artifact_data']
                    self.instance.save(update_fields=['artifacts'])


    def cleanup(self):
        extravars = {

@@ -370,39 +363,37 @@ class IsolatedManager(object):
            private_data_dir
        )

        if runner_obj.status == 'successful':
            for instance in instance_qs:
                task_result = {}
                try:
                    task_result = runner_obj.get_fact_cache(instance.hostname)
                except Exception:
                    logger.exception('Failed to read status from isolated instances')
                if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
                    task_result = {
                        'cpu': task_result['awx_cpu'],
                        'mem': task_result['awx_mem'],
                        'capacity_cpu': task_result['awx_capacity_cpu'],
                        'capacity_mem': task_result['awx_capacity_mem'],
                        'version': task_result['awx_capacity_version']
                    }
                    IsolatedManager.update_capacity(instance, task_result)
                    logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
                elif instance.capacity == 0:
                    logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
                        instance.hostname))
                else:
                    logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
                    if instance.is_lost(isolated=True):
                        instance.capacity = 0
                        instance.save(update_fields=['capacity'])
                        logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
                            instance.hostname, instance.modified))
        for instance in instance_qs:
            task_result = {}
            try:
                task_result = runner_obj.get_fact_cache(instance.hostname)
            except Exception:
                logger.exception('Failed to read status from isolated instances')
            if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
                task_result = {
                    'cpu': task_result['awx_cpu'],
                    'mem': task_result['awx_mem'],
                    'capacity_cpu': task_result['awx_capacity_cpu'],
                    'capacity_mem': task_result['awx_capacity_mem'],
                    'version': task_result['awx_capacity_version']
                }
                IsolatedManager.update_capacity(instance, task_result)
                logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
            elif instance.capacity == 0:
                logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
                    instance.hostname))
            else:
                logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
                if instance.is_lost(isolated=True):
                    instance.capacity = 0
                    instance.save(update_fields=['capacity'])
                    logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
                        instance.hostname, instance.modified))
        finally:
            if os.path.exists(private_data_dir):
                shutil.rmtree(private_data_dir)

    def run(self, instance, private_data_dir, playbook, module, module_args,
            event_data_key, ident=None):
    def run(self, instance, private_data_dir, playbook, module, module_args, ident=None):
        """
        Run a job on an isolated host.

@@ -413,14 +404,12 @@ class IsolatedManager(object):
        :param playbook: the playbook to run
        :param module: the module to run
        :param module_args: the module args to use
        :param event_data_key: e.g., job_id, inventory_id, ...

        For a completed job run, this function returns (status, rc),
        representing the status and return code of the isolated
        `ansible-playbook` run.
        """
        self.ident = ident
        self.event_data_key = event_data_key
        self.instance = instance
        self.private_data_dir = private_data_dir
        self.runner_params = self.build_runner_params(

@@ -434,6 +423,5 @@ class IsolatedManager(object):
        else:
            # emit an EOF event
            event_data = {'event': 'EOF', 'final_counter': 0}
            event_data.setdefault(self.event_data_key, self.instance.id)
            CallbackQueueDispatcher().dispatch(event_data)
            self.event_handler(event_data)
        return status, rc

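Note on the hunks above: the IsolatedManager no longer constructs its own CallbackQueueDispatcher; callers now inject an event_handler callable. A minimal sketch of the wiring, assuming the caller still wants queue-backed persistence (the wrapper function below is illustrative, not the actual call site):

    from awx.main.queue import CallbackQueueDispatcher

    dispatcher = CallbackQueueDispatcher()

    def persist_event(event_data):
        # hand each isolated-node event to the callback queue, as before
        dispatcher.dispatch(event_data)

    manager = IsolatedManager(persist_event, canceled_callback=lambda: False)
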
@@ -9,6 +9,13 @@ class Command(BaseCommand):

    def handle(self, *args, **options):
        with connection.cursor() as cursor:
            start = {}
            for relation in (
                'main_jobevent', 'main_inventoryupdateevent',
                'main_projectupdateevent', 'main_adhoccommandevent'
            ):
                cursor.execute(f"SELECT MAX(id) FROM {relation};")
                start[relation] = cursor.fetchone()[0] or 0
            clear = False
            while True:
                lines = []

@@ -17,19 +24,15 @@ class Command(BaseCommand):
                    'main_projectupdateevent', 'main_adhoccommandevent'
                ):
                    lines.append(relation)
                    for label, interval in (
                        ('last minute: ', '1 minute'),
                        ('last 5 minutes:', '5 minutes'),
                        ('last hour: ', '1 hour'),
                    ):
                        cursor.execute(
                            f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE modified > now() - '{interval}'::interval;"
                        )
                        events = cursor.fetchone()[0] or 0
                        lines.append(f'↳ {label} {events}')
                    minimum = start[relation]
                    cursor.execute(
                        f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;"
                    )
                    events = cursor.fetchone()[0] or 0
                    lines.append(f'↳ last minute {events}')
                    lines.append('')
                if clear:
                    for i in range(20):
                    for i in range(12):
                        sys.stdout.write('\x1b[1A\x1b[2K')
                for l in lines:
                    print(l)

@@ -16,13 +16,12 @@ from awx.main.models import (
    Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
    SystemJob, WorkflowJob, Notification
)
from awx.main.signals import ( # noqa
    emit_update_inventory_on_created_or_deleted,
    emit_update_inventory_computed_fields,
from awx.main.signals import (
    disable_activity_stream,
    disable_computed_fields
)
from django.db.models.signals import post_save, post_delete, m2m_changed # noqa

from awx.main.management.commands.deletion import AWXCollector, pre_delete


class Command(BaseCommand):

@@ -60,27 +59,37 @@ class Command(BaseCommand):
                            action='store_true', dest='only_workflow_jobs',
                            help='Remove workflow jobs')

    def cleanup_jobs(self):
        #jobs_qs = Job.objects.exclude(status__in=('pending', 'running'))
        #jobs_qs = jobs_qs.filter(created__lte=self.cutoff)
        skipped, deleted = 0, 0
        jobs = Job.objects.filter(created__lt=self.cutoff)
        for job in jobs.iterator():
            job_display = '"%s" (%d host summaries, %d events)' % \
                (str(job),
                 job.job_host_summaries.count(), job.job_events.count())
            if job.status in ('pending', 'waiting', 'running'):
                action_text = 'would skip' if self.dry_run else 'skipping'
                self.logger.debug('%s %s job %s', action_text, job.status, job_display)
                skipped += 1
            else:
                action_text = 'would delete' if self.dry_run else 'deleting'
                self.logger.info('%s %s', action_text, job_display)
                if not self.dry_run:
                    job.delete()
                deleted += 1

        skipped += Job.objects.filter(created__gte=self.cutoff).count()
    def cleanup_jobs(self):
        skipped, deleted = 0, 0

        batch_size = 1000000

        while True:
            # get queryset for available jobs to remove
            qs = Job.objects.filter(created__lt=self.cutoff).exclude(status__in=['pending', 'waiting', 'running'])
            # get pk list for the first N (batch_size) objects
            pk_list = qs[0:batch_size].values_list('pk')
            # You cannot delete queries with sql LIMIT set, so we must
            # create a new query from this pk_list
            qs_batch = Job.objects.filter(pk__in=pk_list)
            just_deleted = 0
            if not self.dry_run:
                del_query = pre_delete(qs_batch)
                collector = AWXCollector(del_query.db)
                collector.collect(del_query)
                _, models_deleted = collector.delete()
                if models_deleted:
                    just_deleted = models_deleted['main.Job']
                deleted += just_deleted
            else:
                just_deleted = 0  # break from loop, this is dry run
                deleted = qs.count()

            if just_deleted == 0:
                break

        skipped += (Job.objects.filter(created__gte=self.cutoff) | Job.objects.filter(status__in=['pending', 'waiting', 'running'])).count()
        return skipped, deleted

    def cleanup_ad_hoc_commands(self):

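The batching above exists because a sliced Django queryset cannot be deleted (the ORM exposes no DELETE ... LIMIT), so the code materializes the first batch of primary keys and re-filters by pk__in to get a deletable queryset. A standalone sketch of the idiom under those assumptions, using plain queryset deletion rather than the AWXCollector fast path introduced below (cutoff is illustrative):

    BATCH_SIZE = 1000000  # matches the batch size used above

    while True:
        qs = Job.objects.filter(created__lt=cutoff).exclude(status__in=['pending', 'waiting', 'running'])
        pk_list = list(qs[:BATCH_SIZE].values_list('pk', flat=True))  # sliced: not deletable
        if not pk_list:
            break
        Job.objects.filter(pk__in=pk_list).delete()  # unsliced pk filter: deletable
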
awx/main/management/commands/deletion.py (new file, 177 lines)
@@ -0,0 +1,177 @@
from django.contrib.contenttypes.models import ContentType
from django.db.models.deletion import (
    DO_NOTHING, Collector, get_candidate_relations_to_delete,
)
from collections import Counter, OrderedDict
from django.db import transaction
from django.db.models import sql


def bulk_related_objects(field, objs, using):
    # This overrides the method in django.contrib.contenttypes.fields.py
    """
    Return all objects related to ``objs`` via this ``GenericRelation``.
    """
    return field.remote_field.model._base_manager.db_manager(using).filter(**{
        "%s__pk" % field.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
            field.model, for_concrete_model=field.for_concrete_model).pk,
        "%s__in" % field.object_id_field_name: list(objs.values_list('pk', flat=True))
    })


def pre_delete(qs):
    # taken from .delete method in django.db.models.query.py
    assert qs.query.can_filter(), \
        "Cannot use 'limit' or 'offset' with delete."

    if qs._fields is not None:
        raise TypeError("Cannot call delete() after .values() or .values_list()")

    del_query = qs._chain()

    # The delete is actually 2 queries - one to find related objects,
    # and one to delete. Make sure that the discovery of related
    # objects is performed on the same database as the deletion.
    del_query._for_write = True

    # Disable non-supported fields.
    del_query.query.select_for_update = False
    del_query.query.select_related = False
    del_query.query.clear_ordering(force_empty=True)
    return del_query


class AWXCollector(Collector):

    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Add 'objs' to the collection of objects to be deleted. If the call is
        the result of a cascade, 'source' should be the model that caused it,
        and 'nullable' should be set to True if the relation can be null.

        Return a list of all objects that were not already collected.
        """
        if not objs.exists():
            return objs
        model = objs.model
        self.data.setdefault(model, [])
        self.data[model].append(objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(
                source._meta.concrete_model, set()).add(model._meta.concrete_model)
        return objs

    def add_field_update(self, field, value, objs):
        """
        Schedule a field update. 'objs' must be a homogeneous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs.exists():
            return
        model = objs.model
        self.field_updates.setdefault(model, {})
        self.field_updates[model].setdefault((field, value), [])
        self.field_updates[model][(field, value)].append(objs)

    def collect(self, objs, source=None, nullable=False, collect_related=True,
                source_attr=None, reverse_dependency=False, keep_parents=False):
        """
        Add 'objs' to the collection of objects to be deleted as well as all
        parent instances. 'objs' must be a homogeneous iterable collection of
        model instances (e.g. a QuerySet). If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.

        If 'reverse_dependency' is True, 'source' will be deleted before the
        current model, rather than after. (Needed for cascading to parent
        models, the one case in which the cascade follows the forwards
        direction of an FK rather than the reverse direction.)
        If 'keep_parents' is True, data of parent models will not be deleted.
        """

        if hasattr(objs, 'polymorphic_disabled'):
            objs.polymorphic_disabled = True

        if self.can_fast_delete(objs):
            self.fast_deletes.append(objs)
            return
        new_objs = self.add(objs, source, nullable,
                            reverse_dependency=reverse_dependency)
        if not new_objs.exists():
            return

        model = new_objs.model

        if not keep_parents:
            # Recursively collect concrete model's parent models, but not their
            # related objects. These will be found by meta.get_fields()
            concrete_model = model._meta.concrete_model
            for ptr in concrete_model._meta.parents.keys():
                if ptr:
                    parent_objs = ptr.objects.filter(pk__in=new_objs.values_list('pk', flat=True))
                    self.collect(parent_objs, source=model,
                                 collect_related=False,
                                 reverse_dependency=True)
        if collect_related:
            parents = model._meta.parents
            for related in get_candidate_relations_to_delete(model._meta):
                # Preserve parent reverse relationships if keep_parents=True.
                if keep_parents and related.model in parents:
                    continue
                field = related.field
                if field.remote_field.on_delete == DO_NOTHING:
                    continue
                related_qs = self.related_objects(related, new_objs)
                if self.can_fast_delete(related_qs, from_field=field):
                    self.fast_deletes.append(related_qs)
                elif related_qs:
                    field.remote_field.on_delete(self, field, related_qs, self.using)
            for field in model._meta.private_fields:
                if hasattr(field, 'bulk_related_objects'):
                    # It's something like generic foreign key.
                    sub_objs = bulk_related_objects(field, new_objs, self.using)
                    self.collect(sub_objs, source=model, nullable=True)

    def delete(self):
        self.sort()

        # collect pk_list before deletion (once things start to delete,
        # queries might not be able to retrieve the pk list)
        del_dict = OrderedDict()
        for model, instances in self.data.items():
            del_dict.setdefault(model, [])
            for inst in instances:
                del_dict[model] += list(inst.values_list('pk', flat=True))

        deleted_counter = Counter()

        with transaction.atomic(using=self.using, savepoint=False):

            # update fields
            for model, instances_for_fieldvalues in self.field_updates.items():
                for (field, value), instances in instances_for_fieldvalues.items():
                    for inst in instances:
                        query = sql.UpdateQuery(model)
                        query.update_batch(inst.values_list('pk', flat=True),
                                           {field.name: value}, self.using)
            # fast deletes
            for qs in self.fast_deletes:
                count = qs._raw_delete(using=self.using)
                deleted_counter[qs.model._meta.label] += count

            # delete instances
            for model, pk_list in del_dict.items():
                query = sql.DeleteQuery(model)
                count = query.delete_batch(pk_list, self.using)
                deleted_counter[model._meta.label] += count

        return sum(deleted_counter.values()), dict(deleted_counter)
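Taken together, pre_delete and AWXCollector are used in the three-step pattern shown in cleanup_jobs above; a condensed sketch for reference (the queryset is illustrative):

    qs = Job.objects.filter(pk__in=pk_list)      # any deletable queryset
    del_query = pre_delete(qs)                   # strip ordering/limits, pin the write DB
    collector = AWXCollector(del_query.db)
    collector.collect(del_query)                 # cascades queryset-at-a-time instead of per-row
    total, per_model = collector.delete()        # e.g. per_model.get('main.Job', 0)
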
@@ -1,8 +1,6 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved

import subprocess

from django.db import transaction
from django.core.management.base import BaseCommand, CommandError

@@ -33,18 +31,9 @@ class Command(BaseCommand):
        with advisory_lock('instance_registration_%s' % hostname):
            instance = Instance.objects.filter(hostname=hostname)
            if instance.exists():
                isolated = instance.first().is_isolated()
                instance.delete()
                print("Instance Removed")
                if isolated:
                    print('Successfully deprovisioned {}'.format(hostname))
                else:
                    result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
                    if result != 0:
                        print("Node deprovisioning may have failed when attempting to "
                              "remove the RabbitMQ instance {} from the cluster".format(hostname))
                    else:
                        print('Successfully deprovisioned {}'.format(hostname))
                print('Successfully deprovisioned {}'.format(hostname))
                print('(changed: True)')
            else:
                print('No instance found matching name {}'.format(hostname))

@@ -921,11 +921,14 @@ class Command(BaseCommand):
        available_instances = license_info.get('available_instances', 0)
        free_instances = license_info.get('free_instances', 0)
        time_remaining = license_info.get('time_remaining', 0)
        hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
        new_count = Host.objects.active_count()
        if time_remaining <= 0 and not license_info.get('demo', False):
            logger.error(LICENSE_EXPIRED_MESSAGE)
            if license_info.get('trial', False) is True:
        if time_remaining <= 0:
            if hard_error:
                logger.error(LICENSE_EXPIRED_MESSAGE)
                raise CommandError("License has expired!")
            else:
                logger.warning(LICENSE_EXPIRED_MESSAGE)
        # special check for tower-type inventory sources
        # but only if running the plugin
        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']

@@ -938,15 +941,11 @@ class Command(BaseCommand):
                'new_count': new_count,
                'available_instances': available_instances,
            }
            if license_info.get('demo', False):
                logger.error(DEMO_LICENSE_MESSAGE % d)
            else:
            if hard_error:
                logger.error(LICENSE_MESSAGE % d)
                if (
                    license_info.get('trial', False) is True or
                    license_info['instance_count'] == 10  # basic 10 license
                ):
                    raise CommandError('License count exceeded!')
            else:
                logger.warning(LICENSE_MESSAGE % d)

    def check_org_host_limit(self):
        license_info = get_licenser().validate()

@@ -1007,12 +1006,6 @@ class Command(BaseCommand):
        except re.error:
            raise CommandError('invalid regular expression for --host-filter')

        '''
        TODO: Remove this deprecation when we remove support for rax.py
        '''
        if self.source == "rax.py":
            logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")

        begin = time.time()
        self.load_inventory_from_database()

@@ -6,6 +6,7 @@ from awx.main.utils.pglock import advisory_lock
from awx.main.models import Instance, InstanceGroup

from django.core.management.base import BaseCommand, CommandError
from django.db import transaction


class InstanceNotFound(Exception):

@@ -31,7 +32,6 @@ class Command(BaseCommand):


    def get_create_update_instance_group(self, queuename, instance_percent, instance_min):
        ig = InstanceGroup.objects.filter(name=queuename)
        created = False
        changed = False

@@ -98,26 +98,27 @@ class Command(BaseCommand):
        if options.get('hostnames'):
            hostname_list = options.get('hostnames').split(",")

        with advisory_lock('instance_group_registration_{}'.format(queuename)):
            changed2 = False
            changed3 = False
            (ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
            if created:
                print("Creating instance group {}".format(ig.name))
            elif not created:
                print("Instance Group already registered {}".format(ig.name))
        with advisory_lock('cluster_policy_lock'):
            with transaction.atomic():
                changed2 = False
                changed3 = False
                (ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
                if created:
                    print("Creating instance group {}".format(ig.name))
                elif not created:
                    print("Instance Group already registered {}".format(ig.name))

            if ctrl:
                (ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
                if changed2:
                    print("Set controller group {} on {}.".format(ctrl, queuename))
                if ctrl:
                    (ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
                    if changed2:
                        print("Set controller group {} on {}.".format(ctrl, queuename))

            try:
                (instances, changed3) = self.add_instances_to_group(ig, hostname_list)
                for i in instances:
                    print("Added instance {} to {}".format(i.hostname, ig.name))
            except InstanceNotFound as e:
                instance_not_found_err = e
                try:
                    (instances, changed3) = self.add_instances_to_group(ig, hostname_list)
                    for i in instances:
                        print("Added instance {} to {}".format(i.hostname, ig.name))
                except InstanceNotFound as e:
                    instance_not_found_err = e

        if any([changed1, changed2, changed3]):
            print('(changed: True)')

@@ -3,10 +3,8 @@

from django.conf import settings
from django.core.management.base import BaseCommand
from kombu import Exchange, Queue

from awx.main.dispatch.kombu import Connection
from awx.main.dispatch.worker import AWXConsumer, CallbackBrokerWorker
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker


class Command(BaseCommand):

@@ -18,23 +16,15 @@ class Command(BaseCommand):
    help = 'Launch the job callback receiver'

    def handle(self, *arg, **options):
        with Connection(settings.BROKER_URL) as conn:
            consumer = None
            try:
                consumer = AWXConsumer(
                    'callback_receiver',
                    conn,
                    CallbackBrokerWorker(),
                    [
                        Queue(
                            settings.CALLBACK_QUEUE,
                            Exchange(settings.CALLBACK_QUEUE, type='direct'),
                            routing_key=settings.CALLBACK_QUEUE
                        )
                    ]
                )
                consumer.run()
            except KeyboardInterrupt:
                print('Terminating Callback Receiver')
                if consumer:
                    consumer.stop()
        consumer = None
        try:
            consumer = AWXConsumerRedis(
                'callback_receiver',
                CallbackBrokerWorker(),
                queues=[getattr(settings, 'CALLBACK_QUEUE', '')],
            )
            consumer.run()
        except KeyboardInterrupt:
            print('Terminating Callback Receiver')
            if consumer:
                consumer.stop()

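AWXConsumerRedis is defined elsewhere in this changeset; the mechanism underneath is a blocking pop from a Redis list keyed by the callback queue name. A generic redis-py illustration of that pattern, not the AWX implementation itself (queue name and handler are stand-ins):

    import json
    import redis

    conn = redis.Redis()  # assumes a locally reachable Redis
    while True:
        _, payload = conn.blpop('callback_tasks')  # blocks until an event arrives
        event = json.loads(payload)
        handle_event(event)  # stand-in for CallbackBrokerWorker's per-event work
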
@@ -1,21 +1,18 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from multiprocessing import Process

from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection, connections
from kombu import Exchange, Queue
from django.db import connection as django_connection

from awx.main.utils.handlers import AWXProxyHandler
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.kombu import Connection
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
from awx.main.dispatch import periodic

logger = logging.getLogger('awx.main.dispatch')

@@ -36,71 +33,6 @@ class Command(BaseCommand):
                            help=('cause the dispatcher to recycle all of its worker processes;'
                                  'running jobs will run to completion first'))

    def beat(self):
        from celery import Celery
        from celery.beat import PersistentScheduler
        from celery.apps import beat

        class AWXScheduler(PersistentScheduler):

            def __init__(self, *args, **kwargs):
                self.ppid = os.getppid()
                super(AWXScheduler, self).__init__(*args, **kwargs)

            def setup_schedule(self):
                super(AWXScheduler, self).setup_schedule()
                self.update_from_dict(settings.CELERYBEAT_SCHEDULE)

            def tick(self, *args, **kwargs):
                if os.getppid() != self.ppid:
                    # if the parent PID changes, this process has been orphaned
                    # via e.g., segfault or sigkill, we should exit too
                    raise SystemExit()
                return super(AWXScheduler, self).tick(*args, **kwargs)

            def apply_async(self, entry, producer=None, advance=True, **kwargs):
                for conn in connections.all():
                    # If the database connection has a hiccup, re-establish a new
                    # connection
                    conn.close_if_unusable_or_obsolete()
                task = TaskWorker.resolve_callable(entry.task)
                result, queue = task.apply_async()

                class TaskResult(object):
                    id = result['uuid']

                return TaskResult()

        sched_file = '/var/lib/awx/beat.db'
        app = Celery()
        app.conf.BROKER_URL = settings.BROKER_URL
        app.conf.CELERY_TASK_RESULT_EXPIRES = False

        # celery in py3 seems to have a bug where the celerybeat schedule
        # shelve can become corrupted; we've _only_ seen this in Ubuntu and py36
        # it can be avoided by detecting and removing the corrupted file
        # at some point, we'll just stop using celerybeat, because it's clearly
        # buggy, too -_-
        #
        # https://github.com/celery/celery/issues/4777
        sched = AWXScheduler(schedule_filename=sched_file, app=app)
        try:
            sched.setup_schedule()
        except Exception:
            logger.exception('{} is corrupted, removing.'.format(sched_file))
            sched._remove_db()
        finally:
            try:
                sched.close()
            except Exception:
                logger.exception('{} failed to sync/close'.format(sched_file))

        beat.Beat(
            30,
            app,
            schedule=sched_file, scheduler_cls=AWXScheduler
        ).run()

    def handle(self, *arg, **options):
        if options.get('status'):
            print(Control('dispatcher').status())

@@ -116,9 +48,10 @@ class Command(BaseCommand):
        # for the DB and memcached connections (that way lies race conditions)
        django_connection.close()
        django_cache.close()
        beat = Process(target=self.beat)
        beat.daemon = True
        beat.start()

        # spawn a daemon thread that periodically enqueues scheduled tasks
        # (like the node heartbeat)
        periodic.run_continuously()

        reaper.reap()
        consumer = None

@@ -128,30 +61,16 @@ class Command(BaseCommand):
        # in cpython itself:
        # https://bugs.python.org/issue37429
        AWXProxyHandler.disable()
        with Connection(settings.BROKER_URL) as conn:
            try:
                bcast = 'tower_broadcast_all'
                queues = [
                    Queue(q, Exchange(q), routing_key=q)
                    for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
                ]
                queues.append(
                    Queue(
                        construct_bcast_queue_name(bcast),
                        exchange=Exchange(bcast, type='fanout'),
                        routing_key=bcast,
                        reply=True
                    )
                )
                consumer = AWXConsumer(
                    'dispatcher',
                    conn,
                    TaskWorker(),
                    queues,
                    AutoscalePool(min_workers=4)
                )
                consumer.run()
            except KeyboardInterrupt:
                logger.debug('Terminating Task Dispatcher')
                if consumer:
                    consumer.stop()
        try:
            queues = ['tower_broadcast_all', get_local_queuename()]
            consumer = AWXConsumerPG(
                'dispatcher',
                TaskWorker(),
                queues,
                AutoscalePool(min_workers=4)
            )
            consumer.run()
        except KeyboardInterrupt:
            logger.debug('Terminating Task Dispatcher')
            if consumer:
                consumer.stop()

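Likewise, AWXConsumerPG drops the kombu broker in favor of Postgres, whose LISTEN/NOTIFY primitives can carry the queue names seen above. A generic psycopg2 illustration of the mechanism, not the AWX implementation (connection details and handler are stand-ins):

    import select
    import psycopg2

    conn = psycopg2.connect('dbname=awx')  # connection string illustrative
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    cur = conn.cursor()
    cur.execute('LISTEN tower_broadcast_all;')  # one LISTEN per queue name

    while True:
        if select.select([conn], [], [], 5) == ([], [], []):
            continue  # timed out waiting for a notification; poll again
        conn.poll()
        while conn.notifies:
            notify = conn.notifies.pop(0)
            dispatch(notify.payload)  # stand-in for TaskWorker dispatch
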
awx/main/management/commands/run_wsbroadcast.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import logging
import asyncio

from django.core.management.base import BaseCommand

from awx.main.wsbroadcast import BroadcastWebsocketManager


logger = logging.getLogger('awx.main.wsbroadcast')


class Command(BaseCommand):
    help = 'Launch the websocket broadcaster'

    def handle(self, *arg, **options):
        try:
            broadcast_websocket_mgr = BroadcastWebsocketManager()
            task = broadcast_websocket_mgr.start()

            loop = asyncio.get_event_loop()
            loop.run_until_complete(task)
        except KeyboardInterrupt:
            logger.debug('Terminating Websocket Broadcaster')
@@ -3,6 +3,7 @@

import sys
import logging
import os

from django.db import models
from django.conf import settings

@@ -78,8 +79,7 @@ class HostManager(models.Manager):
                self.core_filters = {}

                qs = qs & q
                unique_by_name = qs.order_by('name', 'pk').distinct('name')
                return qs.filter(pk__in=unique_by_name)
                return qs.order_by('name', 'pk').distinct('name')
        return qs


@@ -115,7 +115,7 @@ class InstanceManager(models.Manager):
                return node[0]
        raise RuntimeError("No instance found with the current cluster host id")

    def register(self, uuid=None, hostname=None):
    def register(self, uuid=None, hostname=None, ip_address=None):
        if not uuid:
            uuid = settings.SYSTEM_UUID
        if not hostname:

@@ -123,13 +123,23 @@ class InstanceManager(models.Manager):
        with advisory_lock('instance_registration_%s' % hostname):
            instance = self.filter(hostname=hostname)
            if instance.exists():
                return (False, instance[0])
            instance = self.create(uuid=uuid, hostname=hostname, capacity=0)
                instance = instance.get()
                if instance.ip_address != ip_address:
                    instance.ip_address = ip_address
                    instance.save(update_fields=['ip_address'])
                    return (True, instance)
                else:
                    return (False, instance)
            instance = self.create(uuid=uuid,
                                   hostname=hostname,
                                   ip_address=ip_address,
                                   capacity=0)
            return (True, instance)

    def get_or_register(self):
        if settings.AWX_AUTO_DEPROVISION_INSTANCES:
            return self.register()
            pod_ip = os.environ.get('MY_POD_IP')
            return self.register(ip_address=pod_ip)
        else:
            return (False, self.me())

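One subtlety in register() above: when the hostname already exists but the IP changed, the method saves the new ip_address and returns True as the first tuple element, so that value now signals "changed" rather than strictly "created". Illustrative usage (hostnames and addresses hypothetical):

    changed, instance = Instance.objects.register(hostname='awx-1', ip_address='10.0.0.5')
    # fresh hostname: record created, changed is True

    changed, instance = Instance.objects.register(hostname='awx-1', ip_address='10.0.0.9')
    # same hostname, new pod IP: ip_address updated in place, changed is True

    changed, instance = Instance.objects.register(hostname='awx-1', ip_address='10.0.0.9')
    # nothing to update: changed is False
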
@@ -192,21 +192,41 @@ class URLModificationMiddleware(MiddlewareMixin):
        )
        super().__init__(get_response)

    def _named_url_to_pk(self, node, named_url):
        kwargs = {}
        if not node.populate_named_url_query_kwargs(kwargs, named_url):
            return named_url
        return str(get_object_or_404(node.model, **kwargs).pk)
    @staticmethod
    def _hijack_for_old_jt_name(node, kwargs, named_url):
        try:
            int(named_url)
            return False
        except ValueError:
            pass
        JobTemplate = node.model
        name = urllib.parse.unquote(named_url)
        return JobTemplate.objects.filter(name=name).order_by('organization__created').first()

    def _convert_named_url(self, url_path):
    @classmethod
    def _named_url_to_pk(cls, node, resource, named_url):
        kwargs = {}
        if node.populate_named_url_query_kwargs(kwargs, named_url):
            return str(get_object_or_404(node.model, **kwargs).pk)
        if resource == 'job_templates' and '++' not in named_url:
            # special case for deprecated job template case
            # will not raise a 404 on its own
            jt = cls._hijack_for_old_jt_name(node, kwargs, named_url)
            if jt:
                return str(jt.pk)
        return named_url

    @classmethod
    def _convert_named_url(cls, url_path):
        url_units = url_path.split('/')
        # If the identifier is an empty string, it is always invalid.
        if len(url_units) < 6 or url_units[1] != 'api' or url_units[2] not in ['v2'] or not url_units[4]:
            return url_path
        resource = url_units[3]
        if resource in settings.NAMED_URL_MAPPINGS:
            url_units[4] = self._named_url_to_pk(settings.NAMED_URL_GRAPH[settings.NAMED_URL_MAPPINGS[resource]],
                                                 url_units[4])
            url_units[4] = cls._named_url_to_pk(
                settings.NAMED_URL_GRAPH[settings.NAMED_URL_MAPPINGS[resource]],
                resource, url_units[4])
        return '/'.join(url_units)

    def process_request(self, request):

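The net effect of the middleware change, sketched with hypothetical paths and pks:

    # canonical named-URL form (resolved via populate_named_url_query_kwargs):
    #   /api/v2/job_templates/Demo Template++Default/  ->  /api/v2/job_templates/7/
    # deprecated bare-name form, now caught by _hijack_for_old_jt_name
    # (the template in the oldest organization wins when names collide):
    #   /api/v2/job_templates/Demo Template/           ->  /api/v2/job_templates/7/
    # numeric identifiers short-circuit in _hijack_for_old_jt_name and pass through:
    #   /api/v2/job_templates/7/                       ->  /api/v2/job_templates/7/
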
@@ -3,15 +3,17 @@ from uuid import uuid4

from django.db import migrations

from awx.main.models import Instance


def _generate_new_uuid_for_iso_nodes(apps, schema_editor):
    Instance = apps.get_model('main', 'Instance')
    for instance in Instance.objects.all():
        if instance.is_isolated():
        # The below code is a copy paste of instance.is_isolated()
        # We can't call is_isolated because we are using the "old" version
        # of the Instance definition.
        if instance.rampart_groups.filter(controller__isnull=False).exists():
            instance.uuid = str(uuid4())
            instance.save()


class Migration(migrations.Migration):

awx/main/migrations/0102_v370_unifiedjob_canceled.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 2.2.4 on 2019-11-25 20:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0101_v370_generate_new_uuids_for_iso_nodes'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='canceled_on',
            field=models.DateTimeField(db_index=True, default=None, editable=False, help_text='The date and time when the cancel request was sent.', null=True),
        ),
    ]
awx/main/migrations/0103_v370_remove_computed_fields.py (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.16 on 2019-02-21 17:35
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0102_v370_unifiedjob_canceled'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='groups_with_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='has_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='has_inventory_sources',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='hosts_with_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='total_groups',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='total_hosts',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='host',
|
||||
name='has_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='host',
|
||||
name='has_inventory_sources',
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobhostsummary',
|
||||
name='failed',
|
||||
field=models.BooleanField(db_index=True, default=False, editable=False),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0104_v370_cleanup_old_scan_jts.py (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-15 20:01
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def cleanup_scan_jts(apps, schema_editor):
|
||||
JobTemplate = apps.get_model('main', 'JobTemplate')
|
||||
JobTemplate.objects.filter(job_type='scan').update(job_type='run')
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0103_v370_remove_computed_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(cleanup_scan_jts),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='job_type',
|
||||
field=models.CharField(choices=[('run', 'Run'), ('check', 'Check')], default='run', max_length=64),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0105_v370_remove_jobevent_parent_and_hosts.py (new file, 21 lines)
@@ -0,0 +1,21 @@
# Generated by Django 2.2.8 on 2020-01-15 18:01

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0104_v370_cleanup_old_scan_jts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='jobevent',
            name='parent',
        ),
        migrations.RemoveField(
            model_name='jobevent',
            name='hosts',
        ),
    ]
awx/main/migrations/0106_v370_remove_inventory_groups_with_active_failures.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# Generated by Django 2.2.8 on 2020-01-27 12:39

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0105_v370_remove_jobevent_parent_and_hosts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inventory',
            name='groups_with_active_failures',
        ),
    ]
awx/main/migrations/0107_v370_workflow_convergence_api_toggle.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# Generated by Django 2.2.4 on 2020-01-08 22:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0106_v370_remove_inventory_groups_with_active_failures'),
    ]

    operations = [
        migrations.AddField(
            model_name='workflowjobnode',
            name='all_parents_must_converge',
            field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='all_parents_must_converge',
            field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
        ),
    ]
awx/main/migrations/0108_v370_unifiedjob_dependencies_processed.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 2.2.8 on 2020-02-06 16:43

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0107_v370_workflow_convergence_api_toggle'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='dependencies_processed',
            field=models.BooleanField(default=False, editable=False, help_text='If True, the task manager has already processed potential dependencies for this job.'),
        ),
    ]
awx/main/migrations/0109_v370_job_template_organization_field.py (new file, 81 lines)
@@ -0,0 +1,81 @@
# Generated by Django 2.2.4 on 2019-08-07 19:56

import awx.main.utils.polymorphic
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion

from awx.main.migrations._rbac import (
    rebuild_role_parentage, rebuild_role_hierarchy,
    migrate_ujt_organization, migrate_ujt_organization_backward,
    restore_inventory_admins, restore_inventory_admins_backward
)


def rebuild_jt_parents(apps, schema_editor):
    rebuild_role_parentage(apps, schema_editor, models=('jobtemplate',))


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0108_v370_unifiedjob_dependencies_processed'),
    ]

    operations = [
        # backwards parents and ancestors caching
        migrations.RunPython(migrations.RunPython.noop, rebuild_jt_parents),
        # add new organization field for JT and all other unified jobs
        migrations.AddField(
            model_name='unifiedjob',
            name='tmp_organization',
            field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this unified job.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.Organization'),
        ),
        migrations.AddField(
            model_name='unifiedjobtemplate',
            name='tmp_organization',
            field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this template.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.Organization'),
        ),
        # while new and old fields exist, copy the organization fields
        migrations.RunPython(migrate_ujt_organization, migrate_ujt_organization_backward),
        # with data saved, remove old fields
        migrations.RemoveField(
            model_name='project',
            name='organization',
        ),
        migrations.RemoveField(
            model_name='workflowjobtemplate',
            name='organization',
        ),
        # now, with the old fields gone, safely rename the new field without conflicts
        migrations.RenameField(
            model_name='unifiedjobtemplate',
            old_name='tmp_organization',
            new_name='organization',
        ),
        migrations.RenameField(
            model_name='unifiedjob',
            old_name='tmp_organization',
            new_name='organization',
        ),
        # parentage of job template roles has genuinely changed at this point
        migrations.AlterField(
            model_name='jobtemplate',
            name='admin_role',
            field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.job_template_admin_role'], related_name='+', to='main.Role'),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='execute_role',
            field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role', 'organization.execute_role'], related_name='+', to='main.Role'),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='read_role',
            field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], related_name='+', to='main.Role'),
        ),
        # Re-compute the role parents and ancestors caching
        migrations.RunPython(rebuild_jt_parents, migrations.RunPython.noop),
        # for all permissions that will be removed, make them explicit
        migrations.RunPython(restore_inventory_admins, restore_inventory_admins_backward),
    ]
awx/main/migrations/0110_v370_instance_ip_address.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Generated by Django 2.2.8 on 2020-02-12 17:55

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0109_v370_job_template_organization_field'),
    ]

    operations = [
        migrations.AddField(
            model_name='instance',
            name='ip_address',
            field=models.CharField(blank=True, default=None, max_length=50, null=True, unique=True),
        ),
    ]
awx/main/migrations/0111_v370_delete_channelgroup.py (new file, 16 lines)
@@ -0,0 +1,16 @@
# Generated by Django 2.2.8 on 2020-02-17 14:50

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0110_v370_instance_ip_address'),
    ]

    operations = [
        migrations.DeleteModel(
            name='ChannelGroup',
        ),
    ]
awx/main/migrations/0112_v370_workflow_node_identifier.py (new file, 61 lines)
@@ -0,0 +1,61 @@
# Generated by Django 2.2.8 on 2020-03-14 02:29

from django.db import migrations, models
import uuid
import logging


logger = logging.getLogger('awx.main.migrations')


def create_uuid(apps, schema_editor):
    WorkflowJobTemplateNode = apps.get_model('main', 'WorkflowJobTemplateNode')
    ct = 0
    for node in WorkflowJobTemplateNode.objects.iterator():
        node.identifier = uuid.uuid4()
        node.save(update_fields=['identifier'])
        ct += 1
    if ct:
        logger.info(f'Automatically created uuid4 identifier for {ct} workflow nodes')


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0111_v370_delete_channelgroup'),
    ]

    operations = [
        migrations.AddField(
            model_name='workflowjobnode',
            name='identifier',
            field=models.CharField(blank=True, help_text='An identifier corresponding to the workflow job template node that this node was created from.', max_length=512),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='identifier',
            field=models.CharField(blank=True, null=True, help_text='An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node.', max_length=512),
        ),
        migrations.RunPython(create_uuid, migrations.RunPython.noop),  # this fixes the uuid4 issue
        migrations.AlterField(
            model_name='workflowjobtemplatenode',
            name='identifier',
            field=models.CharField(default=uuid.uuid4, help_text='An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node.', max_length=512),
        ),
        migrations.AlterUniqueTogether(
            name='workflowjobtemplatenode',
            unique_together={('identifier', 'workflow_job_template')},
        ),
        migrations.AddIndex(
            model_name='workflowjobnode',
            index=models.Index(fields=['identifier', 'workflow_job'], name='main_workfl_identif_87b752_idx'),
        ),
        migrations.AddIndex(
            model_name='workflowjobnode',
            index=models.Index(fields=['identifier'], name='main_workfl_identif_efdfe8_idx'),
        ),
        migrations.AddIndex(
            model_name='workflowjobtemplatenode',
            index=models.Index(fields=['identifier'], name='main_workfl_identif_0cc025_idx'),
        ),
    ]
awx/main/migrations/0113_v370_event_bigint.py (new file, 118 lines)
@@ -0,0 +1,118 @@
# Generated by Django 2.2.8 on 2020-02-21 16:31

from django.db import migrations, models, connection


def migrate_event_data(apps, schema_editor):
    # see: https://github.com/ansible/awx/issues/6010
    #
    # the goal of this function is to end with event tables (e.g., main_jobevent)
    # that have a bigint primary key (because the old usage of an integer
    # numeric isn't enough, as its range is about 2.1B, see:
    # https://www.postgresql.org/docs/9.1/datatype-numeric.html)

    # unfortunately, we can't do this with a simple ALTER TABLE, because
    # for tables with hundreds of millions or billions of rows, the ALTER TABLE
    # can take *hours* on modest hardware.
    #
    # the approach in this migration means that post-migration, event data will
    # *not* immediately show up, but will be repopulated over time progressively
    # the trade-off here is not having to wait hours for the full data migration
    # before you can start and run AWX again (including new playbook runs)
    for tblname in (
        'main_jobevent', 'main_inventoryupdateevent',
        'main_projectupdateevent', 'main_adhoccommandevent',
        'main_systemjobevent'
    ):
        with connection.cursor() as cursor:
            # rename the current event table
            cursor.execute(
                f'ALTER TABLE {tblname} RENAME TO _old_{tblname};'
            )
            # create a *new* table with the same schema
            cursor.execute(
                f'CREATE TABLE {tblname} (LIKE _old_{tblname} INCLUDING ALL);'
            )
            # alter the *new* table so that the primary key is a big int
            cursor.execute(
                f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;'
            )

            # recreate counter for the new table's primary key to
            # start where the *old* table left off (we have to do this because the
            # counter changed from an int to a bigint)
            cursor.execute(f'DROP SEQUENCE IF EXISTS "{tblname}_id_seq" CASCADE;')
            cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
            cursor.execute(
                f'ALTER TABLE "{tblname}" ALTER COLUMN "id" '
                f"SET DEFAULT nextval('{tblname}_id_seq');"
            )
            cursor.execute(
                f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);"
            )

            # replace the BTREE index on main_jobevent.job_id with
            # a BRIN index to drastically improve per-UJ lookup performance
            # see: https://info.crunchydata.com/blog/postgresql-brin-indexes-big-data-performance-with-minimal-storage
            if tblname == 'main_jobevent':
                cursor.execute("SELECT indexname FROM pg_indexes WHERE tablename='main_jobevent' AND indexdef LIKE '%USING btree (job_id)';")
                old_index = cursor.fetchone()[0]
                cursor.execute(f'DROP INDEX {old_index}')
                cursor.execute('CREATE INDEX main_jobevent_job_id_brin_idx ON main_jobevent USING brin (job_id);')

            # remove all of the indexes and constraints from the old table
            # (they just slow down the data migration)
            cursor.execute(f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename='_old_{tblname}' AND indexname != '{tblname}_pkey';")
            indexes = cursor.fetchall()

            cursor.execute(f"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = '_old_{tblname}'::regclass AND conname != '{tblname}_pkey';")
            constraints = cursor.fetchall()

            for indexname, indexdef in indexes:
                cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
            for conname, contype, condef in constraints:
                cursor.execute(f'ALTER TABLE _old_{tblname} DROP CONSTRAINT IF EXISTS {conname}')


class FakeAlterField(migrations.AlterField):

    def database_forwards(self, *args):
        # this is intentionally left blank, because we're
        # going to accomplish the migration with some custom raw SQL
        pass


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0112_v370_workflow_node_identifier'),
    ]

    operations = [
        migrations.RunPython(migrate_event_data),
        FakeAlterField(
            model_name='adhoccommandevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='inventoryupdateevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='jobevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='projectupdateevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='systemjobevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
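The comments above promise progressive repopulation of the renamed _old_ tables, but the backfill task itself is not part of this hunk. A hypothetical sketch of what a batched copy could look like, newest rows first so recent job output reappears quickly (not taken from AWX):

    def backfill(cursor, tblname='main_jobevent', batch=100000):
        # hypothetical helper -- the real repopulation task is not shown here
        cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
        high = cursor.fetchone()[0] or 0
        while high > 0:
            low = max(high - batch, 0)
            # copy one (low, high] window of rows into the new bigint-keyed table
            cursor.execute(
                f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} '
                f'WHERE id > {low} AND id <= {high};'
            )
            high = low
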
@@ -1,6 +1,9 @@
import logging
from time import time

from django.db.models import Subquery, OuterRef, F

from awx.main.fields import update_role_parentage_for_instance
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding

logger = logging.getLogger('rbac_migrations')

@@ -10,11 +13,11 @@ def create_roles(apps, schema_editor):
    '''
    Implicit role creation happens in our post_save hook for all of our
    resources. Here we iterate through all of our resource types and call
    .save() to ensure all that happens for every object in the system before we
    get busy with the actual migration work.
    .save() to ensure all that happens for every object in the system.

    This gets run after migrate_users, which does role creation for users a
    little differently.
    This can be used whenever new roles are introduced in a migration to
    create those roles for pre-existing objects that did not previously
    have them created via signals.
    '''

    models = [

@@ -35,7 +38,189 @@ def create_roles(apps, schema_editor):
            obj.save()


def delete_all_user_roles(apps, schema_editor):
    ContentType = apps.get_model('contenttypes', "ContentType")
    Role = apps.get_model('main', "Role")
    User = apps.get_model('auth', "User")
    user_content_type = ContentType.objects.get_for_model(User)
    for role in Role.objects.filter(content_type=user_content_type).iterator():
        role.delete()


UNIFIED_ORG_LOOKUPS = {
    # Job Templates had an implicit organization via their project
    'jobtemplate': 'project',
    # Inventory Sources had an implicit organization via their inventory
    'inventorysource': 'inventory',
    # Projects had an explicit organization in their subclass table
    'project': None,
    # Workflow JTs also had an explicit organization in their subclass table
    'workflowjobtemplate': None,
    # Jobs inherited project from job templates as a convenience field
    'job': 'project',
    # Inventory Updates had a convenience field of inventory
    'inventoryupdate': 'inventory',
    # Project Updates did not have a direct organization field, obtained it from project
    'projectupdate': 'project',
    # Workflow Jobs are handled same as project updates
    # Sliced jobs are a special case, but old data is not given special treatment for simplicity
    'workflowjob': 'workflow_job_template',
    # AdHocCommands do not have a template, but still migrate them
    'adhoccommand': 'inventory'
}


def implicit_org_subquery(UnifiedClass, cls, backward=False):
    """Returns a subquery that returns the so-called organization for objects
    in the class in question, before migration to the explicit unified org field.
    In some cases, this can still be applied post-migration.
    """
    if cls._meta.model_name not in UNIFIED_ORG_LOOKUPS:
        return None
    cls_name = cls._meta.model_name
    source_field = UNIFIED_ORG_LOOKUPS[cls_name]

    unified_field = UnifiedClass._meta.get_field(cls_name)
    unified_ptr = unified_field.remote_field.name
    if backward:
        qs = UnifiedClass.objects.filter(**{cls_name: OuterRef('id')}).order_by().values_list('tmp_organization')[:1]
    elif source_field is None:
        qs = cls.objects.filter(**{unified_ptr: OuterRef('id')}).order_by().values_list('organization')[:1]
    else:
        intermediary_field = cls._meta.get_field(source_field)
        intermediary_model = intermediary_field.related_model
        intermediary_reverse_rel = intermediary_field.remote_field.name
        qs = intermediary_model.objects.filter(**{
            # this filter leverages the fact that the Unified models have same pk as subclasses.
            # For instance... filters projects used in job template, where that job template
            # has the same id as the UJT from the outer reference (which it does)
intermediary_reverse_rel: OuterRef('id')}
|
||||
).order_by().values_list('organization')[:1]
|
||||
return Subquery(qs)
|
||||
|
||||
|
||||
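The Subquery returned here is correlated to the outer row through OuterRef('id'), and .order_by() clears any default ordering so the subquery yields a single column and single row. A minimal standalone sketch of the same pattern (model and reverse-relation names here are illustrative):

from django.db.models import OuterRef, Subquery

# for each JobTemplate row, look up organization_id on its related Project
org_sq = Project.objects.filter(
    jobtemplates=OuterRef('id')
).order_by().values_list('organization')[:1]
JobTemplate.objects.order_by().update(organization=Subquery(org_sq))
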
def _migrate_unified_organization(apps, unified_cls_name, backward=False):
    """Given a unified base model (either UJT or UJ),
    use the UNIFIED_ORG_LOOKUPS mapping to find the related model to get the org from,
    and save the organization for those objects to the temporary migration
    variable tmp_organization on the unified model
    (optimized method)
    """
    start = time()
    UnifiedClass = apps.get_model('main', unified_cls_name)
    ContentType = apps.get_model('contenttypes', 'ContentType')

    for cls in UnifiedClass.__subclasses__():
        cls_name = cls._meta.model_name
        if backward and UNIFIED_ORG_LOOKUPS.get(cls_name, 'not-found') is not None:
            logger.debug('Not reverse migrating {}, existing data should remain valid'.format(cls_name))
            continue
        logger.debug('{}Migrating {} to new organization field'.format('Reverse ' if backward else '', cls_name))

        sub_qs = implicit_org_subquery(UnifiedClass, cls, backward=backward)
        if sub_qs is None:
            logger.debug('Class {} has no organization migration'.format(cls_name))
            continue

        this_ct = ContentType.objects.get_for_model(cls)
        if backward:
            r = cls.objects.order_by().update(organization=sub_qs)
        else:
            r = UnifiedClass.objects.order_by().filter(polymorphic_ctype=this_ct).update(tmp_organization=sub_qs)
        if r:
            logger.info('Organization migration on {} affected {} rows.'.format(cls_name, r))
    logger.info('Unified organization migration completed in {:.4f} seconds'.format(time() - start))


def migrate_ujt_organization(apps, schema_editor):
    '''Move organization field to UJT and UJ models'''
    _migrate_unified_organization(apps, 'UnifiedJobTemplate')
    _migrate_unified_organization(apps, 'UnifiedJob')


def migrate_ujt_organization_backward(apps, schema_editor):
    '''Move organization field from UJT and UJ models back to their original places'''
    _migrate_unified_organization(apps, 'UnifiedJobTemplate', backward=True)
    _migrate_unified_organization(apps, 'UnifiedJob', backward=True)

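These forward/backward pairs have the (apps, schema_editor) signature that RunPython expects; a minimal sketch of the wiring in a migration file (migration name and dependency are placeholders):

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('main', '0100_placeholder')]  # placeholder dependency

    operations = [
        migrations.RunPython(migrate_ujt_organization,
                             migrate_ujt_organization_backward),
    ]
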
def _restore_inventory_admins(apps, schema_editor, backward=False):
    """With the JT.organization changes, admins of organizations connected to
    job templates via inventory will have their permissions demoted.
    This maintains current permissions over the migration by granting the
    permissions they used to have explicitly on the JT itself.
    """
    start = time()
    JobTemplate = apps.get_model('main', 'JobTemplate')
    User = apps.get_model('auth', 'User')
    changed_ct = 0
    jt_qs = JobTemplate.objects.filter(inventory__isnull=False)
    jt_qs = jt_qs.exclude(inventory__organization=F('project__organization'))
    jt_qs = jt_qs.only('id', 'admin_role_id', 'execute_role_id', 'inventory_id')
    for jt in jt_qs.iterator():
        org = jt.inventory.organization
        for jt_role, org_roles in (
            ('admin_role', ('admin_role', 'job_template_admin_role',)),
            ('execute_role', ('execute_role',))
        ):
            role_id = getattr(jt, '{}_id'.format(jt_role))

            user_qs = User.objects
            if not backward:
                # In this specific case, the names for the org roles and the JT roles were the same
                org_role_ids = [getattr(org, '{}_id'.format(role_name)) for role_name in org_roles]
                user_qs = user_qs.filter(roles__in=org_role_ids)
                # bizarre migration behavior - ancestors / descendents of the
                # migration version of the Role model are reversed, so use the current model briefly
                ancestor_ids = list(
                    Role.objects.filter(descendents=role_id).values_list('id', flat=True)
                )
                # same as Role.__contains__, filter for "user in jt.admin_role"
                user_qs = user_qs.exclude(roles__in=ancestor_ids)
            else:
                # use the database to filter the intersection of users who have
                # the JT role and either organization role
                user_qs = user_qs.filter(roles__in=[org.admin_role_id, org.execute_role_id])
                # in reverse, intersection of users who have both
                user_qs = user_qs.filter(roles=role_id)

            user_ids = list(user_qs.values_list('id', flat=True))
            if not user_ids:
                continue

            role = getattr(jt, jt_role)
            logger.debug('{} {} on jt {} for users {} via inventory.organization {}'.format(
                'Removing' if backward else 'Setting',
                jt_role, jt.pk, user_ids, org.pk
            ))
            if not backward:
                # in reverse, the explicit role becomes redundant
                role.members.add(*user_ids)
            else:
                role.members.remove(*user_ids)
            changed_ct += len(user_ids)

    if changed_ct:
        logger.info('{} explicit JT permission for {} users in {:.4f} seconds'.format(
            'Removed' if backward else 'Added',
            changed_ct, time() - start
        ))


def restore_inventory_admins(apps, schema_editor):
    _restore_inventory_admins(apps, schema_editor)


def restore_inventory_admins_backward(apps, schema_editor):
    _restore_inventory_admins(apps, schema_editor, backward=True)


def rebuild_role_hierarchy(apps, schema_editor):
    '''
    This should be called in any migration when ownerships are changed.
    Ex. I remove a user from the admin_role of a credential.
    Ancestors are cached from parents for performance, this re-computes ancestors.
    '''
    logger.info('Computing role roots..')
    start = time()
    roots = Role.objects \
@@ -46,14 +231,74 @@ def rebuild_role_hierarchy(apps, schema_editor):
    start = time()
    Role.rebuild_role_ancestor_list(roots, [])
    stop = time()
    logger.info('Rebuild completed in %f seconds' % (stop - start))
    logger.info('Rebuild ancestors completed in %f seconds' % (stop - start))
    logger.info('Done.')


def delete_all_user_roles(apps, schema_editor):
def rebuild_role_parentage(apps, schema_editor, models=None):
    '''
    This should be called in any migration when any parent_role entry
    is modified so that the cached parent fields will be updated. Ex:
        foo_role = ImplicitRoleField(
            parent_role=['bar_role']  # change to parent_role=['admin_role']
        )

    This is like rebuild_role_hierarchy, but that method updates ancestors,
    whereas this method updates parents.
    '''
    start = time()
    seen_models = set()
    model_ct = 0
    noop_ct = 0
    ContentType = apps.get_model('contenttypes', "ContentType")
    Role = apps.get_model('main', "Role")
    User = apps.get_model('auth', "User")
    user_content_type = ContentType.objects.get_for_model(User)
    for role in Role.objects.filter(content_type=user_content_type).iterator():
        role.delete()
    additions = set()
    removals = set()

    role_qs = Role.objects
    if models:
        # update_role_parentage_for_instance is expensive
        # if the models have been downselected, ignore those which are not in the list
        ct_ids = list(ContentType.objects.filter(
            model__in=[name.lower() for name in models]
        ).values_list('id', flat=True))
        role_qs = role_qs.filter(content_type__in=ct_ids)

    for role in role_qs.iterator():
        if not role.object_id:
            continue
        model_tuple = (role.content_type_id, role.object_id)
        if model_tuple in seen_models:
            continue
        seen_models.add(model_tuple)

        # The GenericForeignKey does not work right in migrations
        # with the usage as role.content_object
        # so we do the lookup ourselves with current migration models
        ct = role.content_type
        app = ct.app_label
        ct_model = apps.get_model(app, ct.model)
        content_object = ct_model.objects.get(pk=role.object_id)

        parents_added, parents_removed = update_role_parentage_for_instance(content_object)
        additions.update(parents_added)
        removals.update(parents_removed)
        if parents_added:
            model_ct += 1
            logger.debug('Added to parents of roles {} of {}'.format(parents_added, content_object))
        if parents_removed:
            model_ct += 1
            logger.debug('Removed from parents of roles {} of {}'.format(parents_removed, content_object))
        else:
            noop_ct += 1

    logger.debug('No changes to role parents for {} resources'.format(noop_ct))
    logger.debug('Added parents to {} roles'.format(len(additions)))
    logger.debug('Removed parents from {} roles'.format(len(removals)))
    if model_ct:
        logger.info('Updated implicit parents of {} resources'.format(model_ct))

    logger.info('Rebuild parentage completed in %f seconds' % (time() - start))

    # this is run because the ordinary signals for
    # Role.parents.add and Role.parents.remove are not called in migrations
    Role.rebuild_role_ancestor_list(list(additions), list(removals))

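Because RunPython callables only receive (apps, schema_editor), the optional models downselect can be bound ahead of time with functools.partial; a minimal sketch (the model list is illustrative):

from functools import partial
from django.db import migrations

operations = [
    migrations.RunPython(
        partial(rebuild_role_parentage, models=['jobtemplate', 'inventory']),
        migrations.RunPython.noop,
    ),
]
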
@@ -3,6 +3,7 @@

# Django
from django.conf import settings # noqa
from django.db import connection, ProgrammingError
from django.db.models.signals import pre_delete # noqa

# AWX
@@ -58,7 +59,6 @@ from awx.main.models.workflow import ( # noqa
    WorkflowJob, WorkflowJobNode, WorkflowJobOptions, WorkflowJobTemplate,
    WorkflowJobTemplateNode, WorkflowApproval, WorkflowApprovalTemplate,
)
from awx.main.models.channels import ChannelGroup # noqa
from awx.api.versioning import reverse
from awx.main.models.oauth import ( # noqa
    OAuth2AccessToken, OAuth2Application
@@ -80,6 +80,27 @@ User.add_to_class('can_access_with_errors', check_user_access_with_errors)
User.add_to_class('accessible_objects', user_accessible_objects)


def enforce_bigint_pk_migration():
    # see: https://github.com/ansible/awx/issues/6010
    # look at all the event tables and verify that they have been fully migrated
    # from the *old* int primary key table to the replacement bigint table
    # if not, attempt to migrate them in the background
    for tblname in (
        'main_jobevent', 'main_inventoryupdateevent',
        'main_projectupdateevent', 'main_adhoccommandevent',
        'main_systemjobevent'
    ):
        with connection.cursor() as cursor:
            try:
                cursor.execute(f'SELECT MAX(id) FROM _old_{tblname}')
                if cursor.fetchone():
                    from awx.main.tasks import migrate_legacy_event_data
                    migrate_legacy_event_data.apply_async([tblname])
            except ProgrammingError:
                # the table is gone (migration is unnecessary)
                pass


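The probe above works because SELECT MAX(id) raises ProgrammingError once the _old_* table has been dropped. The dispatched task is not shown here; conceptually the copy it performs amounts to something like the following rough illustration (this is not the actual migrate_legacy_event_data implementation, which batches the work):

from django.db import connection

def copy_legacy_events(tblname):
    # rough illustration only: move rows from the legacy int-pk table into
    # the bigint-pk replacement, then drop the old table when finished
    with connection.cursor() as cursor:
        cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname}')
        cursor.execute(f'DROP TABLE _old_{tblname}')
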
def cleanup_created_modified_by(sender, **kwargs):
    # work around a bug in django-polymorphic that doesn't properly
    # handle cascades for reverse foreign keys on the polymorphic base model

@@ -1,6 +0,0 @@
from django.db import models


class ChannelGroup(models.Model):
    group = models.CharField(max_length=200, unique=True)
    channels = models.TextField()
@@ -1136,7 +1136,7 @@ ManagedCredentialType(
        'help_text': ugettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.')
    },{
        'id': 'bearer_token',
        'label': ugettext_noop('API authentication bearer token.'),
        'label': ugettext_noop('API authentication bearer token'),
        'type': 'string',
        'secret': True,
    },{

@@ -4,7 +4,7 @@ import datetime
import logging
from collections import defaultdict

from django.db import models, DatabaseError
from django.db import models, DatabaseError, connection
from django.utils.dateparse import parse_datetime
from django.utils.text import Truncator
from django.utils.timezone import utc
@@ -356,15 +363,22 @@ class BasePlaybookEvent(CreatedModifiedModel):
                job_id=self.job_id, uuid__in=failed
            ).update(failed=True)

            # send success/failure notifications when we've finished handling the playbook_on_stats event
            from awx.main.tasks import handle_success_and_failure_notifications # circular import

            def _send_notifications():
                handle_success_and_failure_notifications.apply_async([self.job.id])
            connection.on_commit(_send_notifications)


        for field in ('playbook', 'play', 'task', 'role'):
            value = force_text(event_data.get(field, '')).strip()
            if value != getattr(self, field):
                setattr(self, field, value)
        if isinstance(self, JobEvent):
            analytics_logger.info(
                'Event data saved.',
                extra=dict(python_objects=dict(job_event=self))
            )
        analytics_logger.info(
            'Event data saved.',
            extra=dict(python_objects=dict(job_event=self))
        )

    @classmethod
    def create_from_data(cls, **kwargs):
@@ -431,6 +438,7 @@ class JobEvent(BasePlaybookEvent):
        ('job', 'parent_uuid'),
    ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    job = models.ForeignKey(
        'Job',
        related_name='job_events',
@@ -450,19 +458,6 @@ class JobEvent(BasePlaybookEvent):
        default='',
        editable=False,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='job_events',
        editable=False,
    )
    parent = models.ForeignKey(
        'self',
        related_name='children',
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        editable=False,
    )
    parent_uuid = models.CharField(
        max_length=1024,
        default='',
@@ -532,6 +527,7 @@ class ProjectUpdateEvent(BasePlaybookEvent):
        ('project_update', 'end_line'),
    ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    project_update = models.ForeignKey(
        'ProjectUpdate',
        related_name='project_update_events',
@@ -617,6 +613,7 @@ class BaseCommandEvent(CreatedModifiedModel):
        kwargs.pop('created', None)

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        kwargs.pop('workflow_job_id', None)
        event = cls(**kwargs)
        event._update_from_event_data()
        return event
@@ -682,6 +679,7 @@ class AdHocCommandEvent(BaseCommandEvent):
    FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]]
    EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    event = models.CharField(
        max_length=100,
        choices=EVENT_CHOICES,
@@ -744,6 +742,7 @@ class InventoryUpdateEvent(BaseCommandEvent):
        ('inventory_update', 'end_line'),
    ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    inventory_update = models.ForeignKey(
        'InventoryUpdate',
        related_name='inventory_update_events',
@@ -777,6 +776,7 @@ class SystemJobEvent(BaseCommandEvent):
        ('system_job', 'end_line'),
    ]

    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    system_job = models.ForeignKey(
        'SystemJob',
        related_name='system_job_events',

@@ -53,6 +53,13 @@ class Instance(HasPolicyEditsMixin, BaseModel):

    uuid = models.CharField(max_length=40)
    hostname = models.CharField(max_length=250, unique=True)
    ip_address = models.CharField(
        blank=True,
        null=True,
        default=None,
        max_length=50,
        unique=True,
    )
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    last_isolated_check = models.DateTimeField(

@@ -4,7 +4,6 @@
# Python
import datetime
import time
import itertools
import logging
import re
import copy
@@ -123,12 +122,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of groups in this inventory.'),
    )
    groups_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of groups in this inventory with active failures.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
@@ -339,139 +332,17 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):

        return data

    def update_host_computed_fields(self):
        '''
        Update computed fields for all hosts in this inventory.
        '''
        hosts_to_update = {}
        hosts_qs = self.hosts
        # Define queryset of all hosts with active failures.
        hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
        # Find all hosts that need the has_active_failures flag set.
        hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
        for host_pk in hosts_to_set.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_active_failures'] = True
        # Find all hosts that need the has_active_failures flag cleared.
        hosts_to_clear = hosts_qs.filter(has_active_failures=True).exclude(pk__in=hosts_with_active_failures)
        for host_pk in hosts_to_clear.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_active_failures'] = False
        # Define queryset of all hosts with cloud inventory sources.
        hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
        # Find all hosts that need the has_inventory_sources flag set.
        hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
        for host_pk in hosts_to_set.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_inventory_sources'] = True
        # Find all hosts that need the has_inventory_sources flag cleared.
        hosts_to_clear = hosts_qs.filter(has_inventory_sources=True).exclude(pk__in=hosts_with_cloud_inventory)
        for host_pk in hosts_to_clear.values_list('pk', flat=True):
            host_updates = hosts_to_update.setdefault(host_pk, {})
            host_updates['has_inventory_sources'] = False
        # Now apply updates to hosts where needed (in batches).
        all_update_pks = list(hosts_to_update.keys())

        def _chunk(items, chunk_size):
            for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
                yield (g[1] for g in group)

        for update_pks in _chunk(all_update_pks, 500):
            for host in hosts_qs.filter(pk__in=update_pks):
                host_updates = hosts_to_update[host.pk]
                for field, value in host_updates.items():
                    setattr(host, field, value)
                host.save(update_fields=host_updates.keys())

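The inner _chunk helper batches an iterable by grouping on index // chunk_size, yielding lazily instead of building intermediate lists; a quick standalone demonstration of the idiom:

import itertools

def _chunk(items, chunk_size):
    # one group is emitted per run of chunk_size consecutive indexes
    for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
        yield (g[1] for g in group)

for batch in _chunk(['a', 'b', 'c', 'd', 'e'], 2):
    print(list(batch))  # ['a', 'b'], then ['c', 'd'], then ['e']
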
    def update_group_computed_fields(self):
        '''
        Update computed fields for all active groups in this inventory.
        '''
        group_children_map = self.get_group_children_map()
        group_hosts_map = self.get_group_hosts_map()
        active_host_pks = set(self.hosts.values_list('pk', flat=True))
        failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
        # active_group_pks = set(self.groups.values_list('pk', flat=True))
        failed_group_pks = set() # Update below as we check each group.
        groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
        groups_to_update = {}

        # Build list of group pks to check, starting with the groups at the
        # deepest level within the tree.
        root_group_pks = set(self.root_groups.values_list('pk', flat=True))
        group_depths = {} # pk: max_depth

        def update_group_depths(group_pk, current_depth=0):
            max_depth = group_depths.get(group_pk, -1)
            # Arbitrarily limit depth to avoid hitting Python recursion limit (which defaults to 1000).
            if current_depth > 100:
                return
            if current_depth > max_depth:
                group_depths[group_pk] = current_depth
            for child_pk in group_children_map.get(group_pk, set()):
                update_group_depths(child_pk, current_depth + 1)
        for group_pk in root_group_pks:
            update_group_depths(group_pk)
        group_pks_to_check = [x[1] for x in sorted([(v,k) for k,v in group_depths.items()], reverse=True)]

        for group_pk in group_pks_to_check:
            # Get all children and host pks for this group.
            parent_pks_to_check = set([group_pk])
            parent_pks_checked = set()
            child_pks = set()
            host_pks = set()
            while parent_pks_to_check:
                for parent_pk in list(parent_pks_to_check):
                    c_ids = group_children_map.get(parent_pk, set())
                    child_pks.update(c_ids)
                    parent_pks_to_check.remove(parent_pk)
                    parent_pks_checked.add(parent_pk)
                    parent_pks_to_check.update(c_ids - parent_pks_checked)
                    h_ids = group_hosts_map.get(parent_pk, set())
                    host_pks.update(h_ids)
            # Define updates needed for this group.
            group_updates = groups_to_update.setdefault(group_pk, {})
            group_updates.update({
                'total_hosts': len(active_host_pks & host_pks),
                'has_active_failures': bool(failed_host_pks & host_pks),
                'hosts_with_active_failures': len(failed_host_pks & host_pks),
                'total_groups': len(child_pks),
                'groups_with_active_failures': len(failed_group_pks & child_pks),
                'has_inventory_sources': bool(group_pk in groups_with_cloud_pks),
            })
            if group_updates['has_active_failures']:
                failed_group_pks.add(group_pk)

        # Now apply updates to each group as needed (in batches).
        all_update_pks = list(groups_to_update.keys())
        for offset in range(0, len(all_update_pks), 500):
            update_pks = all_update_pks[offset:(offset + 500)]
            for group in self.groups.filter(pk__in=update_pks):
                group_updates = groups_to_update[group.pk]
                for field, value in list(group_updates.items()):
                    if getattr(group, field) != value:
                        setattr(group, field, value)
                    else:
                        group_updates.pop(field)
                if group_updates:
                    group.save(update_fields=group_updates.keys())

    def update_computed_fields(self, update_groups=True, update_hosts=True):
    def update_computed_fields(self):
        '''
        Update model fields that are computed from database relationships.
        '''
        logger.debug("Going to update inventory computed fields, pk={0}".format(self.pk))
        start_time = time.time()
        if update_hosts:
            self.update_host_computed_fields()
        if update_groups:
            self.update_group_computed_fields()
        active_hosts = self.hosts
        failed_hosts = active_hosts.filter(has_active_failures=True)
        failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
        active_groups = self.groups
        if self.kind == 'smart':
            active_groups = active_groups.none()
        failed_groups = active_groups.filter(has_active_failures=True)
        if self.kind == 'smart':
            active_inventory_sources = self.inventory_sources.none()
        else:
@@ -482,7 +353,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
            'total_hosts': active_hosts.count(),
            'hosts_with_active_failures': failed_hosts.count(),
            'total_groups': active_groups.count(),
            'groups_with_active_failures': failed_groups.count(),
            'has_inventory_sources': bool(active_inventory_sources.count()),
            'total_inventory_sources': active_inventory_sources.count(),
            'inventory_sources_with_failures': failed_inventory_sources.count(),
@@ -545,7 +415,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
                connection.vendor != 'sqlite'):
            # Minimal update of host_count for smart inventory host filter changes
            self.update_computed_fields(update_groups=False, update_hosts=False)
            self.update_computed_fields()

    def delete(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
@@ -556,9 +426,9 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
    '''
    def _get_related_jobs(self):
        return UnifiedJob.objects.non_polymorphic().filter(
            Q(Job___inventory=self) |
            Q(InventoryUpdate___inventory_source__inventory=self) |
            Q(AdHocCommand___inventory=self)
            Q(job__inventory=self) |
            Q(inventoryupdate__inventory=self) |
            Q(adhoccommand__inventory=self)
        )


@@ -631,18 +501,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
        editable=False,
        on_delete=models.SET_NULL,
    )
    has_active_failures = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether the last job failed for this host.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this host was created/updated from any external inventory sources.'),
    )
    inventory_sources = models.ManyToManyField(
        'InventorySource',
        related_name='hosts',
@@ -673,34 +531,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
    def get_absolute_url(self, request=None):
        return reverse('api:host_detail', kwargs={'pk': self.pk}, request=request)

    def update_computed_fields(self, update_inventory=True, update_groups=True):
        '''
        Update model fields that are computed from database relationships.
        '''
        has_active_failures = bool(self.last_job_host_summary and
                                   self.last_job_host_summary.failed)
        active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
        computed_fields = {
            'has_active_failures': has_active_failures,
            'has_inventory_sources': bool(active_inventory_sources.count()),
        }
        for field, value in computed_fields.items():
            if getattr(self, field) != value:
                setattr(self, field, value)
            else:
                computed_fields.pop(field)
        if computed_fields:
            self.save(update_fields=computed_fields.keys())
        # Groups and inventory may also need to be updated when host fields
        # change.
        # NOTE: I think this is no longer needed
        # if update_groups:
        #     for group in self.all_groups:
        #         group.update_computed_fields()
        # if update_inventory:
        #     self.inventory.update_computed_fields(update_groups=False,
        #                                           update_hosts=False)
    # Rebuild summary fields cache
    variables_dict = VarsDictProperty('variables')

    @property
@@ -815,42 +645,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
        blank=True,
        help_text=_('Hosts associated directly with this group.'),
    )
    total_hosts = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of hosts directly or indirectly in this group.'),
    )
    has_active_failures = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this group has any hosts with active failures.'),
    )
    hosts_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of hosts in this group with active failures.'),
    )
    total_groups = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Total number of child groups contained within this group.'),
    )
    groups_with_active_failures = models.PositiveIntegerField(
        default=0,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Number of child groups within this group that have active failures.'),
    )
    has_inventory_sources = models.BooleanField(
        default=False,
        editable=False,
        help_text=_('This field is deprecated and will be removed in a future release. '
                    'Flag indicating whether this group was created/updated from any external inventory sources.'),
    )
    inventory_sources = models.ManyToManyField(
        'InventorySource',
        related_name='groups',
@@ -925,32 +719,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
            mark_actual()
        activity_stream_delete(None, self)

    def update_computed_fields(self):
        '''
        Update model fields that are computed from database relationships.
        '''
        active_hosts = self.all_hosts
        failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
        active_groups = self.all_children
        # FIXME: May not be accurate unless we always update groups depth-first.
        failed_groups = active_groups.filter(has_active_failures=True)
        active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
        computed_fields = {
            'total_hosts': active_hosts.count(),
            'has_active_failures': bool(failed_hosts.count()),
            'hosts_with_active_failures': failed_hosts.count(),
            'total_groups': active_groups.count(),
            'groups_with_active_failures': failed_groups.count(),
            'has_inventory_sources': bool(active_inventory_sources.count()),
        }
        for field, value in computed_fields.items():
            if getattr(self, field) != value:
                setattr(self, field, value)
            else:
                computed_fields.pop(field)
        if computed_fields:
            self.save(update_fields=computed_fields.keys())

    variables_dict = VarsDictProperty('variables')

    def get_all_parents(self, except_pks=None):
@@ -1040,8 +808,8 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
    '''
    def _get_related_jobs(self):
        return UnifiedJob.objects.non_polymorphic().filter(
            Q(Job___inventory=self.inventory) |
            Q(InventoryUpdate___inventory_source__groups=self)
            Q(job__inventory=self.inventory) |
            Q(inventoryupdate__inventory_source__groups=self)
        )


@@ -1509,10 +1277,14 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
    @classmethod
    def _get_unified_job_field_names(cls):
        return set(f.name for f in InventorySourceOptions._meta.fields) | set(
            ['name', 'description', 'credentials', 'inventory']
            ['name', 'description', 'organization', 'credentials', 'inventory']
        )

    def save(self, *args, **kwargs):
        # if this is a new object, inherit organization from its inventory
        if not self.pk and self.inventory and self.inventory.organization_id and not self.organization_id:
            self.organization_id = self.inventory.organization_id

        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
@@ -1556,7 +1328,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
            self.update()
        if not getattr(_inventory_updates, 'is_updating', False):
            if self.inventory is not None:
                self.inventory.update_computed_fields(update_groups=False, update_hosts=False)
                self.inventory.update_computed_fields()

    def _get_current_status(self):
        if self.source:
@@ -2616,6 +2388,9 @@ class satellite6(PluginFileInjector):
        group_patterns = '[]'
        group_prefix = 'foreman_'
        want_hostcollections = 'False'
        want_ansible_ssh_host = 'False'
        rich_params = 'False'
        want_facts = 'True'
        foreman_opts = dict(inventory_update.source_vars_dict.items())
        foreman_opts.setdefault('ssl_verify', 'False')
        for k, v in foreman_opts.items():
@@ -2625,6 +2400,12 @@ class satellite6(PluginFileInjector):
                group_prefix = v
            elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
                want_hostcollections = v
            elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
                want_ansible_ssh_host = v
            elif k == 'satellite6_rich_params' and isinstance(v, bool):
                rich_params = v
            elif k == 'satellite6_want_facts' and isinstance(v, bool):
                want_facts = v
            else:
                cp.set(section, k, str(v))

@@ -2636,9 +2417,11 @@ class satellite6(PluginFileInjector):
        section = 'ansible'
        cp.add_section(section)
        cp.set(section, 'group_patterns', group_patterns)
        cp.set(section, 'want_facts', 'True')
        cp.set(section, 'want_facts', str(want_facts))
        cp.set(section, 'want_hostcollections', str(want_hostcollections))
        cp.set(section, 'group_prefix', group_prefix)
        cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
        cp.set(section, 'rich_params', str(rich_params))

        section = 'cache'
        cp.add_section(section)

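For reference, with the defaults above the [ansible] section of the generated foreman.ini comes out roughly as follows; this sketch reproduces it with plain ConfigParser (values vary with the satellite6_* source variables):

from configparser import ConfigParser
from io import StringIO

cp = ConfigParser()
cp.add_section('ansible')
cp.set('ansible', 'group_patterns', '[]')
cp.set('ansible', 'want_facts', 'True')
cp.set('ansible', 'want_hostcollections', 'False')
cp.set('ansible', 'group_prefix', 'foreman_')
cp.set('ansible', 'want_ansible_ssh_host', 'False')
cp.set('ansible', 'rich_params', 'False')
buf = StringIO()
cp.write(buf)
print(buf.getvalue())
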
@@ -13,6 +13,7 @@ from urllib.parse import urljoin

# Django
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
#from django.core.cache import cache
from django.utils.encoding import smart_str
@@ -28,7 +29,7 @@ from awx.api.versioning import reverse
from awx.main.models.base import (
    BaseModel, CreatedModifiedModel,
    prevent_search, accepts_json,
    JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    JOB_TYPE_CHOICES, NEW_JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
    VarsDictProperty
)
from awx.main.models.events import JobEvent, SystemJobEvent
@@ -198,12 +199,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        'labels', 'instance_groups', 'credentials', 'survey_spec'
    ]
    FIELDS_TO_DISCARD_AT_COPY = ['vault_credential', 'credential']
    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]

    class Meta:
        app_label = 'main'
        ordering = ('name',)

    job_type = models.CharField(
        max_length=64,
        choices=NEW_JOB_TYPE_CHOICES,
        default='run',
    )
    host_config_key = prevent_search(models.CharField(
        max_length=1024,
        blank=True,
@@ -256,13 +262,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
    )

    admin_role = ImplicitRoleField(
        parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role']
        parent_role=['organization.job_template_admin_role']
    )
    execute_role = ImplicitRoleField(
        parent_role=['admin_role', 'project.organization.execute_role', 'inventory.organization.execute_role'],
        parent_role=['admin_role', 'organization.execute_role'],
    )
    read_role = ImplicitRoleField(
        parent_role=['project.organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'],
        parent_role=[
            'organization.auditor_role',
            'inventory.organization.auditor_role', # partial support for old inheritance via inventory
            'execute_role', 'admin_role'
        ],
    )


@@ -273,7 +283,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
    @classmethod
    def _get_unified_job_field_names(cls):
        return set(f.name for f in JobOptions._meta.fields) | set(
            ['name', 'description', 'survey_passwords', 'labels', 'credentials',
            ['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials',
             'job_slice_number', 'job_slice_count']
        )

@@ -293,6 +303,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
    def resources_needed_to_start(self):
        return [fd for fd in ['project', 'inventory'] if not getattr(self, '{}_id'.format(fd))]

    def clean_forks(self):
        if settings.MAX_FORKS > 0 and self.forks > settings.MAX_FORKS:
            raise ValidationError(_(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.'))
        return self.forks

    def create_job(self, **kwargs):
        '''
        Create a new job based on this template.
@@ -308,6 +323,41 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        else:
            return self.job_slice_count

    def save(self, *args, **kwargs):
        update_fields = kwargs.get('update_fields', [])
        # if project is deleted for some reason, then keep the old organization
        # to retain ownership for organization admins
        if self.project and self.project.organization_id != self.organization_id:
            self.organization_id = self.project.organization_id
            if 'organization' not in update_fields and 'organization_id' not in update_fields:
                update_fields.append('organization_id')
        return super(JobTemplate, self).save(*args, **kwargs)

    def validate_unique(self, exclude=None):
        """Custom over-ride for JT specifically
        because organization is inferred from project after full_clean is finished
        thus the organization field is not yet set when validation happens
        """
        errors = []
        for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
            kwargs = {'name': self.name}
            if self.project:
                kwargs['organization'] = self.project.organization_id
            else:
                kwargs['organization'] = None
            qs = JobTemplate.objects.filter(**kwargs)
            if self.pk:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.append(
                    '%s with this (%s) combination already exists.' % (
                        JobTemplate.__name__,
                        ', '.join(set(ut) - {'polymorphic_ctype'})
                    )
                )
        if errors:
            raise ValidationError(errors)

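Since the (polymorphic_ctype, name, organization) constraint is enforced in validate_unique rather than at the database level, the same template name can coexist across organizations; a hypothetical illustration of the intended behavior (the objects below are stand-ins, not fixtures from this codebase):

jt_a = JobTemplate(name='deploy', project=project_in_org_a)
jt_a.validate_unique()    # ok - first 'deploy' in org A

jt_b = JobTemplate(name='deploy', project=project_in_org_b)
jt_b.validate_unique()    # ok - same name, different organization

jt_dup = JobTemplate(name='deploy', project=project_in_org_a)
jt_dup.validate_unique()  # raises ValidationError - duplicate within org A
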
    def create_unified_job(self, **kwargs):
        prevent_slicing = kwargs.pop('_prevent_slicing', False)
        slice_ct = self.get_effective_slice_ct(kwargs)
@@ -468,13 +518,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
        success_notification_templates = list(base_notification_templates.filter(
            unifiedjobtemplate_notification_templates_for_success__in=[self, self.project]))
        # Get Organization NotificationTemplates
        if self.project is not None and self.project.organization is not None:
        if self.organization is not None:
            error_notification_templates = set(error_notification_templates + list(base_notification_templates.filter(
                organization_notification_templates_for_errors=self.project.organization)))
                organization_notification_templates_for_errors=self.organization)))
            started_notification_templates = set(started_notification_templates + list(base_notification_templates.filter(
                organization_notification_templates_for_started=self.project.organization)))
                organization_notification_templates_for_started=self.organization)))
            success_notification_templates = set(success_notification_templates + list(base_notification_templates.filter(
                organization_notification_templates_for_success=self.project.organization)))
                organization_notification_templates_for_success=self.organization)))
        return dict(error=list(error_notification_templates),
                    started=list(started_notification_templates),
                    success=list(success_notification_templates))
@@ -577,7 +627,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
        for virtualenv in (
            self.job_template.custom_virtualenv if self.job_template else None,
            self.project.custom_virtualenv,
            self.project.organization.custom_virtualenv if self.project.organization else None
            self.organization.custom_virtualenv if self.organization else None
        ):
            if virtualenv:
                return virtualenv
@@ -730,8 +780,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

    @property
    def preferred_instance_groups(self):
        if self.project is not None and self.project.organization is not None:
            organization_groups = [x for x in self.project.organization.instance_groups.all()]
        if self.organization is not None:
            organization_groups = [x for x in self.organization.instance_groups.all()]
        else:
            organization_groups = []
        if self.inventory is not None:
@@ -818,8 +868,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
                continue
            host.ansible_facts = ansible_facts
            host.ansible_facts_modified = now()
            ansible_local_system_id = ansible_facts.get('ansible_local', {}).get('insights', {}).get('system_id', None)
            ansible_facts_system_id = ansible_facts.get('insights', {}).get('system_id', None)
            ansible_local = ansible_facts.get('ansible_local', {}).get('insights', {})
            ansible_facts = ansible_facts.get('insights', {})
            ansible_local_system_id = ansible_local.get('system_id', None) if isinstance(ansible_local, dict) else None
            ansible_facts_system_id = ansible_facts.get('system_id', None) if isinstance(ansible_facts, dict) else None
            if ansible_local_system_id:
                print("Setting local {}".format(ansible_local_system_id))
                logger.debug("Insights system_id {} found for host <{}, {}> in"
@@ -1060,7 +1112,7 @@ class JobHostSummary(CreatedModifiedModel):
    processed = models.PositiveIntegerField(default=0, editable=False)
    rescued = models.PositiveIntegerField(default=0, editable=False)
    skipped = models.PositiveIntegerField(default=0, editable=False)
    failed = models.BooleanField(default=False, editable=False)
    failed = models.BooleanField(default=False, editable=False, db_index=True)

    def __str__(self):
        host = getattr_dne(self, 'host')
@@ -1095,7 +1147,6 @@ class JobHostSummary(CreatedModifiedModel):
            update_fields.append('last_job_host_summary_id')
        if update_fields:
            self.host.save(update_fields=update_fields)
        #self.host.update_computed_fields()


class SystemJobOptions(BaseModel):
@@ -1132,7 +1183,7 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):

    @classmethod
    def _get_unified_job_field_names(cls):
        return ['name', 'description', 'job_type', 'extra_vars']
        return ['name', 'description', 'organization', 'job_type', 'extra_vars']

    def get_absolute_url(self, request=None):
        return reverse('api:system_job_template_detail', kwargs={'pk': self.pk}, request=request)

@@ -269,22 +269,20 @@ class JobNotificationMixin(object):
        'timeout', 'use_fact_cache', 'launch_type', 'status', 'failed', 'started', 'finished',
        'elapsed', 'job_explanation', 'execution_node', 'controller_node', 'allow_simultaneous',
        'scm_revision', 'diff_mode', 'job_slice_number', 'job_slice_count', 'custom_virtualenv',
        'approval_status', 'approval_node_name', 'workflow_url',
        {'host_status_counts': ['skipped', 'ok', 'changed', 'failures', 'dark']},
        {'playbook_counts': ['play_count', 'task_count']},
        'approval_status', 'approval_node_name', 'workflow_url', 'scm_branch',
        {'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark',
                                'processed', 'rescued', 'ignored']},
        {'summary_fields': [{'inventory': ['id', 'name', 'description', 'has_active_failures',
                                           'total_hosts', 'hosts_with_active_failures', 'total_groups',
                                           'groups_with_active_failures', 'has_inventory_sources',
                                           'has_inventory_sources',
                                           'total_inventory_sources', 'inventory_sources_with_failures',
                                           'organization_id', 'kind']},
                            {'project': ['id', 'name', 'description', 'status', 'scm_type']},
                            {'project_update': ['id', 'name', 'description', 'status', 'failed']},
                            {'job_template': ['id', 'name', 'description']},
                            {'unified_job_template': ['id', 'name', 'description', 'unified_job_type']},
                            {'instance_group': ['name', 'id']},
                            {'created_by': ['id', 'username', 'first_name', 'last_name']},
                            {'labels': ['count', 'results']},
                            {'source_workflow_job': ['description', 'elapsed', 'failed', 'id', 'name', 'status']}]}]
                            {'labels': ['count', 'results']}]}]

    @classmethod
    def context_stub(cls):
@@ -303,7 +301,7 @@ class JobNotificationMixin(object):
                   'finished': False,
                   'force_handlers': False,
                   'forks': 0,
                   'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0},
                   'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0, 'failed': False, 'processed': 0, 'rescued': 0},
                   'id': 42,
                   'job_explanation': 'Sample job explanation',
                   'job_slice_count': 1,
@@ -314,8 +312,8 @@ class JobNotificationMixin(object):
                   'limit': 'bar_limit',
                   'modified': datetime.datetime(2018, 12, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
                   'name': 'Stub JobTemplate',
                   'playbook_counts': {'play_count': 5, 'task_count': 10},
                   'playbook': 'ping.yml',
                   'scm_branch': '',
                   'scm_revision': '',
                   'skip_tags': '',
                   'start_at_task': '',
@@ -327,7 +325,6 @@ class JobNotificationMixin(object):
                                  'username': 'admin'},
                   'instance_group': {'id': 1, 'name': 'tower'},
                   'inventory': {'description': 'Sample inventory description',
                                 'groups_with_active_failures': 0,
                                 'has_active_failures': False,
                                 'has_inventory_sources': False,
                                 'hosts_with_active_failures': 0,
@@ -348,18 +345,10 @@ class JobNotificationMixin(object):
                              'name': 'Stub project',
                              'scm_type': 'git',
                              'status': 'successful'},
                   'project_update': {'id': 5, 'name': 'Stub Project Update', 'description': 'Project Update',
                                      'status': 'running', 'failed': False},
                   'unified_job_template': {'description': 'Sample unified job template description',
                                            'id': 39,
                                            'name': 'Stub Job Template',
                                            'unified_job_type': 'job'},
                   'source_workflow_job': {'description': 'Sample workflow job description',
                                           'elapsed': 0.000,
                                           'failed': False,
                                           'id': 88,
                                           'name': 'Stub WorkflowJobTemplate',
                                           'status': 'running'}},
                                            'unified_job_type': 'job'}},
                   'timeout': 0,
                   'type': 'job',
                   'url': '/api/v2/jobs/13/',
@@ -393,10 +382,20 @@ class JobNotificationMixin(object):
        The context will contain whitelisted content retrieved from a serialized job object
        (see JobNotificationMixin.JOB_FIELDS_WHITELIST), the job's friendly name,
        and a url to the job run."""
        context = {'job': {},
                   'job_friendly_name': self.get_notification_friendly_name(),
                   'url': self.get_ui_url(),
                   'job_metadata': json.dumps(self.notification_data(), indent=4)}
        job_context = {'host_status_counts': {}}
        summary = None
        if hasattr(self, 'job_host_summaries'):
            summary = self.job_host_summaries.first()
        if summary:
            from awx.api.serializers import JobHostSummarySerializer
            summary_data = JobHostSummarySerializer(summary).to_representation(summary)
            job_context['host_status_counts'] = summary_data
        context = {
            'job': job_context,
            'job_friendly_name': self.get_notification_friendly_name(),
            'url': self.get_ui_url(),
            'job_metadata': json.dumps(self.notification_data(), indent=4)
        }

        def build_context(node, fields, whitelisted_fields):
            for safe_field in whitelisted_fields:

@@ -6,7 +6,6 @@
# Django
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.utils.timezone import now as tz_now
@@ -106,12 +105,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
    RelatedJobsMixin
    '''
    def _get_related_jobs(self):
        project_ids = self.projects.all().values_list('id')
        return UnifiedJob.objects.non_polymorphic().filter(
            Q(Job___project__in=project_ids) |
            Q(ProjectUpdate___project__in=project_ids) |
            Q(InventoryUpdate___inventory_source__inventory__organization=self)
        )
        return UnifiedJob.objects.non_polymorphic().filter(organization=self)


class Team(CommonModelNameNotUnique, ResourceMixin):

@@ -254,13 +254,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
        app_label = 'main'
        ordering = ('id',)

    organization = models.ForeignKey(
        'Organization',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name='projects',
    )
    scm_update_on_launch = models.BooleanField(
        default=False,
        help_text=_('Update the project when a job is launched that uses the project.'),
@@ -329,9 +322,16 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
    @classmethod
    def _get_unified_job_field_names(cls):
        return set(f.name for f in ProjectOptions._meta.fields) | set(
            ['name', 'description']
            ['name', 'description', 'organization']
        )

    def clean_organization(self):
        if self.pk:
            old_org_id = getattr(self, '_prior_values_store', {}).get('organization_id', None)
            if self.organization_id != old_org_id and self.jobtemplates.exists():
                raise ValidationError({'organization': _('Organization cannot be changed when in use by job templates.')})
        return self.organization

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        pre_save_vals = getattr(self, '_prior_values_store', {})
@@ -450,8 +450,8 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
    '''
    def _get_related_jobs(self):
        return UnifiedJob.objects.non_polymorphic().filter(
            models.Q(Job___project=self) |
            models.Q(ProjectUpdate___project=self)
            models.Q(job__project=self) |
            models.Q(projectupdate__project=self)
        )

    def delete(self, *args, **kwargs):
@@ -584,8 +584,8 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage

    @property
    def preferred_instance_groups(self):
        if self.project is not None and self.project.organization is not None:
            organization_groups = [x for x in self.project.organization.instance_groups.all()]
        if self.organization is not None:
            organization_groups = [x for x in self.organization.instance_groups.all()]
        else:
            organization_groups = []
        template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]

@@ -3,6 +3,7 @@
|
||||
|
||||
# Python
|
||||
from io import StringIO
|
||||
import datetime
|
||||
import codecs
|
||||
import json
|
||||
import logging
|
||||
@@ -35,6 +36,7 @@ from awx.main.models.base import (
|
||||
NotificationFieldsModel,
|
||||
prevent_search
|
||||
)
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch.control import Control as ControlDispatcher
|
||||
from awx.main.registrar import activity_stream_registrar
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
|
||||
@@ -101,7 +103,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
ordering = ('name',)
|
||||
# unique_together here is intentionally commented out. Please make sure sub-classes of this model
|
||||
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
|
||||
#unique_together = [('polymorphic_ctype', 'name')]
|
||||
#unique_together = [('polymorphic_ctype', 'name', 'organization')]
|
||||
|
||||
    old_pk = models.PositiveIntegerField(
        null=True,
@@ -156,6 +158,14 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
        default='ok',
        editable=False,
    )
    organization = models.ForeignKey(
        'Organization',
        blank=True,
        null=True,
        on_delete=polymorphic.SET_NULL,
        related_name='%(class)ss',
        help_text=_('The organization used to determine access to this template.'),
    )
    credentials = models.ManyToManyField(
        'Credential',
        related_name='%(class)ss',
@@ -623,6 +633,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        editable=False,
        help_text=_("The date and time the job was queued for starting."),
    )
    dependencies_processed = models.BooleanField(
        default=False,
        editable=False,
        help_text=_("If True, the task manager has already processed potential dependencies for this job.")
    )
    finished = models.DateTimeField(
        null=True,
        default=None,
@@ -630,6 +645,13 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        help_text=_("The date and time the job finished execution."),
        db_index=True,
    )
    canceled_on = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
        help_text=_("The date and time when the cancel request was sent."),
        db_index=True,
    )
    elapsed = models.DecimalField(
        max_digits=12,
        decimal_places=3,
@@ -687,6 +709,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        on_delete=polymorphic.SET_NULL,
        help_text=_('The Rampart/Instance group the job was run under'),
    )
    organization = models.ForeignKey(
        'Organization',
        blank=True,
        null=True,
        on_delete=polymorphic.SET_NULL,
        related_name='%(class)ss',
        help_text=_('The organization used to determine access to this unified job.'),
    )
    credentials = models.ManyToManyField(
        'Credential',
        related_name='%(class)ss',
@@ -833,7 +863,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
            self.unified_job_template = self._get_parent_instance()
            if 'unified_job_template' not in update_fields:
                update_fields.append('unified_job_template')

        if self.cancel_flag and not self.canceled_on:
            # Record the 'canceled' time.
            self.canceled_on = now()
            if 'canceled_on' not in update_fields:
                update_fields.append('canceled_on')
        # Okay; we're done. Perform the actual save.
        result = super(UnifiedJob, self).save(*args, **kwargs)

@@ -997,6 +1032,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
            dir=settings.JOBOUTPUT_ROOT,
            encoding='utf-8'
        )
        from awx.main.tasks import purge_old_stdout_files  # circular import
        purge_old_stdout_files.apply_async()

        # Before the addition of event-based stdout, older versions of
        # awx stored stdout as raw text blobs in a certain database column
@@ -1199,12 +1236,17 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
                    status_data['instance_group_name'] = self.instance_group.name
                else:
                    status_data['instance_group_name'] = None
            elif status in ['successful', 'failed', 'canceled'] and self.finished:
                status_data['finished'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ")
            status_data.update(self.websocket_emit_data())
            status_data['group_name'] = 'jobs'
            if getattr(self, 'unified_job_template_id', None):
                status_data['unified_job_template_id'] = self.unified_job_template_id
            emit_channel_notification('jobs-status_changed', status_data)

            if self.spawned_by_workflow:
                status_data['group_name'] = "workflow_events"
                status_data['workflow_job_template_id'] = self.unified_job_template.id
                emit_channel_notification('workflow_events-' + str(self.workflow_job_id), status_data)
        except IOError:  # includes socket errors
            logger.exception('%s failed to emit channel msg about status change', self.log_format)
@@ -1319,7 +1361,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        timeout = 5
        try:
            running = self.celery_task_id in ControlDispatcher(
                'dispatcher', self.execution_node
                'dispatcher', self.controller_node or self.execution_node
            ).running(timeout=timeout)
        except socket.timeout:
            logger.error('could not reach dispatcher on {} within {}s'.format(
@@ -1425,7 +1467,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        return r

    def get_queue_name(self):
        return self.controller_node or self.execution_node or settings.CELERY_DEFAULT_QUEUE
        return self.controller_node or self.execution_node or get_local_queuename()

    def is_isolated(self):
        return bool(self.controller_node)
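The `get_queue_name` change swaps the static Celery default queue for the dispatcher's local-queue helper, but the precedence is unchanged: controller node first (isolated jobs), then execution node, then the local fallback. A rough illustration of that fallback chain as plain Python, with made-up node names:

```python
def get_queue_name(controller_node, execution_node, local_queue):
    # first truthy value wins: isolated jobs route to their controller,
    # regular jobs to their execution node, anything not yet assigned
    # falls back to the local dispatcher queue
    return controller_node or execution_node or local_queue

assert get_queue_name('ctrl1', 'exec1', 'awx-1') == 'ctrl1'   # isolated job
assert get_queue_name(None, 'exec1', 'awx-1') == 'exec1'      # normal job
assert get_queue_name(None, None, 'awx-1') == 'awx-1'         # unassigned
```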
@@ -4,6 +4,7 @@
# Python
import json
import logging
from uuid import uuid4
from copy import copy
from urllib.parse import urljoin

@@ -79,6 +80,11 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
        symmetrical=False,
        related_name='%(class)ss_always',
    )
    all_parents_must_converge = models.BooleanField(
        default=False,
        help_text=_("If enabled then the node will only run if all of the parent nodes "
                    "have met the criteria to reach this node")
    )
    unified_job_template = models.ForeignKey(
        'UnifiedJobTemplate',
        related_name='%(class)ss',
@@ -102,7 +108,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
        '''
        return ['workflow_job', 'unified_job_template',
                'extra_data', 'survey_passwords',
                'inventory', 'credentials', 'char_prompts']
                'inventory', 'credentials', 'char_prompts', 'all_parents_must_converge']

    def create_workflow_job_node(self, **kwargs):
        '''
@@ -116,6 +122,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
                create_kwargs[field_name] = kwargs[field_name]
            elif hasattr(self, field_name):
                create_kwargs[field_name] = getattr(self, field_name)
        create_kwargs['identifier'] = self.identifier
        new_node = WorkflowJobNode.objects.create(**create_kwargs)
        if self.pk:
            allowed_creds = self.credentials.all()
@@ -130,7 +137,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
    FIELDS_TO_PRESERVE_AT_COPY = [
        'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
        'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
        'char_prompts'
        'char_prompts', 'all_parents_must_converge', 'identifier'
    ]
    REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']

@@ -139,6 +146,21 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
        related_name='workflow_job_template_nodes',
        on_delete=models.CASCADE,
    )
    identifier = models.CharField(
        max_length=512,
        default=uuid4,
        blank=False,
        help_text=_(
            'An identifier for this node that is unique within its workflow. '
            'It is copied to workflow job nodes corresponding to this node.'),
    )

    class Meta:
        app_label = 'main'
        unique_together = (("identifier", "workflow_job_template"),)
        indexes = [
            models.Index(fields=['identifier']),
        ]

    def get_absolute_url(self, request=None):
        return reverse('api:workflow_job_template_node_detail', kwargs={'pk': self.pk}, request=request)
@@ -208,6 +230,18 @@ class WorkflowJobNode(WorkflowNodeBase):
                    "semantics will mark this True if the node is in a path that will "
                    "decidedly not be run. A value of False means the node may not run."),
    )
    identifier = models.CharField(
        max_length=512,
        blank=True,  # blank denotes pre-migration job nodes
        help_text=_('An identifier corresponding to the workflow job template node that this node was created from.'),
    )

    class Meta:
        app_label = 'main'
        indexes = [
            models.Index(fields=["identifier", "workflow_job"]),
            models.Index(fields=['identifier']),
        ]

    def get_absolute_url(self, request=None):
        return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
@@ -330,7 +364,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
    @classmethod
    def _get_unified_job_field_names(cls):
        r = set(f.name for f in WorkflowJobOptions._meta.fields) | set(
            ['name', 'description', 'survey_passwords', 'labels', 'limit', 'scm_branch']
            ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch']
        )
        r.remove('char_prompts')  # needed due to copying launch config to launch config
        return r
@@ -371,19 +405,12 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl

    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
    FIELDS_TO_PRESERVE_AT_COPY = [
        'labels', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'
        'labels', 'organization', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'
    ]

    class Meta:
        app_label = 'main'

    organization = models.ForeignKey(
        'Organization',
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='workflows',
    )
    ask_inventory_on_launch = AskForField(
        blank=True,
        default=False,
@@ -744,6 +771,8 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):

    def signal_start(self, **kwargs):
        can_start = super(WorkflowApproval, self).signal_start(**kwargs)
        self.started = self.created
        self.save(update_fields=['started'])
        self.send_approval_notification('running')
        return can_start
@@ -2,6 +2,7 @@
# All Rights Reserved.

import datetime
import json
import logging
import requests
import dateutil.parser as dp
@@ -23,6 +24,33 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
    recipient_parameter = "grafana_url"
    sender_parameter = None

    DEFAULT_BODY = "{{ job_metadata }}"
    default_messages = {
        "started": {
            "body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
        },
        "success": {
            "body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
        },
        "error": {
            "body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
        },
        "workflow_approval": {
            "running": {
                "message": CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG, "body": None
            },
            "approved": {
                "message": CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG, "body": None
            },
            "timed_out": {
                "message": CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG, "body": None
            },
            "denied": {
                "message": CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG, "body": None
            }
        }
    }

    def __init__(self, grafana_key, dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True,
                 fail_silently=False, **kwargs):
        super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
@@ -34,6 +62,13 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
        self.isRegion = isRegion

    def format_body(self, body):
        # expect body to be a string representing a dict
        try:
            potential_body = json.loads(body)
            if isinstance(potential_body, dict):
                body = potential_body
        except json.JSONDecodeError:
            body = {}
        return body

    def send_messages(self, messages):
@@ -41,14 +76,16 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
        for m in messages:
            grafana_data = {}
            grafana_headers = {}
            try:
                epoch = datetime.datetime.utcfromtimestamp(0)
                grafana_data['time'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
                grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
            except ValueError:
                logger.error(smart_text(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
                if not self.fail_silently:
                    raise Exception(smart_text(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
            if 'started' in m.body:
                try:
                    epoch = datetime.datetime.utcfromtimestamp(0)
                    grafana_data['time'] = grafana_data['timeEnd'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
                    if m.body.get('finished'):
                        grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
                except ValueError:
                    logger.error(smart_text(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
                    if not self.fail_silently:
                        raise Exception(smart_text(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'], m.body['finished'])))
            grafana_data['isRegion'] = self.isRegion
            grafana_data['dashboardId'] = self.dashboardId
            grafana_data['panelId'] = self.panelId
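The Grafana annotation API takes `time`/`timeEnd` as Unix epoch milliseconds, which is why the backend subtracts the epoch from each parsed timestamp. A standalone sketch of that conversion using `dateutil`, with a made-up timestamp:

```python
import datetime
import dateutil.parser as dp

def to_epoch_ms(timestamp):
    # Grafana annotations take time/timeEnd as Unix epoch milliseconds
    epoch = datetime.datetime.utcfromtimestamp(0)
    parsed = dp.parse(timestamp).replace(tzinfo=None)
    return int((parsed - epoch).total_seconds() * 1000)

print(to_epoch_ms('2019-10-01T12:00:00Z'))  # -> 1569931200000
```

Defaulting `timeEnd` to the start time, then overwriting it only when `finished` is present, is what lets the new code annotate still-running jobs without raising a `KeyError`.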
@@ -4,15 +4,11 @@
# Python
import json
import logging
import os
import redis

# Django
from django.conf import settings

# Kombu
from awx.main.dispatch.kombu import Connection
from kombu import Exchange, Producer
from kombu.serialization import registry

__all__ = ['CallbackQueueDispatcher']

@@ -28,47 +24,12 @@ class AnsibleJSONEncoder(json.JSONEncoder):
        return super(AnsibleJSONEncoder, self).default(o)


registry.register(
    'json-ansible',
    lambda obj: json.dumps(obj, cls=AnsibleJSONEncoder),
    lambda obj: json.loads(obj),
    content_type='application/json',
    content_encoding='utf-8'
)


class CallbackQueueDispatcher(object):

    def __init__(self):
        self.callback_connection = getattr(settings, 'BROKER_URL', None)
        self.connection_queue = getattr(settings, 'CALLBACK_QUEUE', '')
        self.connection = None
        self.exchange = None
        self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
        self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
        self.connection = redis.Redis.from_url(settings.BROKER_URL)

    def dispatch(self, obj):
        if not self.callback_connection or not self.connection_queue:
            return
        active_pid = os.getpid()
        for retry_count in range(4):
            try:
                if not hasattr(self, 'connection_pid'):
                    self.connection_pid = active_pid
                if self.connection_pid != active_pid:
                    self.connection = None
                if self.connection is None:
                    self.connection = Connection(self.callback_connection)
                    self.exchange = Exchange(self.connection_queue, type='direct')

                producer = Producer(self.connection)
                producer.publish(obj,
                                 serializer='json-ansible',
                                 compression='bzip2',
                                 exchange=self.exchange,
                                 declare=[self.exchange],
                                 delivery_mode="persistent" if settings.PERSISTENT_CALLBACK_MESSAGES else "transient",
                                 routing_key=self.connection_queue)
                return
            except Exception as e:
                self.logger.info('Publish Job Event Exception: %r, retry=%d', e,
                                 retry_count, exc_info=True)
        self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))
@@ -8,7 +8,7 @@ REPLACE_STR = '$encrypted$'

class UriCleaner(object):
    REPLACE_STR = REPLACE_STR
    SENSITIVE_URI_PATTERN = re.compile(r'(\w+:(\/?\/?)[^\s]+)', re.MULTILINE)  # NOQA
    SENSITIVE_URI_PATTERN = re.compile(r'(\w{1,20}:(\/?\/?)[^\s]+)', re.MULTILINE)  # NOQA

    @staticmethod
    def remove_sensitive(cleartext):
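Bounding the scheme to `\w{1,20}` keeps the pattern from treating arbitrarily long word runs as a URI scheme. A rough sketch of how a pattern like this can scrub credential-bearing URLs out of free text (the blanket substitution here is simplified relative to the real `remove_sensitive`, which preserves the non-credential parts of the URI):

```python
import re

REPLACE_STR = '$encrypted$'
SENSITIVE_URI_PATTERN = re.compile(r'(\w{1,20}:(\/?\/?)[^\s]+)', re.MULTILINE)

def redact_uris(cleartext):
    # replace every scheme://... token wholesale
    return SENSITIVE_URI_PATTERN.sub(REPLACE_STR, cleartext)

print(redact_uris('fatal: could not read https://user:secret@git.example.com/repo.git'))
# -> fatal: could not read $encrypted$
```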
@@ -1,8 +1,15 @@
from channels.routing import route
from django.conf.urls import url
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from . import consumers


channel_routing = [
    route("websocket.connect", "awx.main.consumers.ws_connect", path=r'^/websocket/$'),
    route("websocket.disconnect", "awx.main.consumers.ws_disconnect", path=r'^/websocket/$'),
    route("websocket.receive", "awx.main.consumers.ws_receive", path=r'^/websocket/$'),
websocket_urlpatterns = [
    url(r'websocket/$', consumers.EventConsumer),
    url(r'websocket/broadcast/$', consumers.BroadcastConsumer),
]

application = ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(
        URLRouter(websocket_urlpatterns)
    ),
})
@@ -89,8 +89,8 @@ class SimpleDAG(object):
                run_status(n['node_object']),
                color
            )
        for label, edges in self.node_from_edges_by_label.iteritems():
            for from_node, to_nodes in edges.iteritems():
        for label, edges in self.node_from_edges_by_label.items():
            for from_node, to_nodes in edges.items():
                for to_node in to_nodes:
                    doc += "%s -> %s [ label=\"%s\" ];\n" % (
                        run_status(self.nodes[from_node]['node_object']),
@@ -140,36 +140,36 @@ class SimpleDAG(object):
    def find_ord(self, obj):
        return self.node_obj_to_node_index.get(obj, None)

    def _get_dependencies_by_label(self, node_index, label):
    def _get_children_by_label(self, node_index, label):
        return [self.nodes[index] for index in
                self.node_from_edges_by_label.get(label, {})
                    .get(node_index, [])]

    def get_dependencies(self, obj, label=None):
    def get_children(self, obj, label=None):
        this_ord = self.find_ord(obj)
        nodes = []
        if label:
            return self._get_dependencies_by_label(this_ord, label)
            return self._get_children_by_label(this_ord, label)
        else:
            nodes = []
            for l in self.node_from_edges_by_label.keys():
                nodes.extend(self._get_dependencies_by_label(this_ord, l))
                nodes.extend(self._get_children_by_label(this_ord, l))
            return nodes

    def _get_dependents_by_label(self, node_index, label):
    def _get_parents_by_label(self, node_index, label):
        return [self.nodes[index] for index in
                self.node_to_edges_by_label.get(label, {})
                    .get(node_index, [])]

    def get_dependents(self, obj, label=None):
    def get_parents(self, obj, label=None):
        this_ord = self.find_ord(obj)
        nodes = []
        if label:
            return self._get_dependents_by_label(this_ord, label)
            return self._get_parents_by_label(this_ord, label)
        else:
            nodes = []
            for l in self.node_to_edges_by_label.keys():
                nodes.extend(self._get_dependents_by_label(this_ord, l))
                nodes.extend(self._get_parents_by_label(this_ord, l))
            return nodes

    def get_root_nodes(self):
@@ -188,7 +188,7 @@ class SimpleDAG(object):
        while stack:
            node_obj = stack.pop()

            children = [node['node_object'] for node in self.get_dependencies(node_obj)]
            children = [node['node_object'] for node in self.get_children(node_obj)]
            children_to_add = list(filter(lambda node_obj: node_obj not in node_objs_visited, children))

            if children_to_add:
@@ -212,7 +212,7 @@ class SimpleDAG(object):
        if obj.id in obj_ids_processed:
            return

        for child in self.get_dependencies(obj):
        for child in self.get_children(obj):
            visit(child)
        obj_ids_processed.add(obj.id)
        nodes_sorted.appendleft(node)
@@ -55,7 +55,7 @@ class WorkflowDAG(SimpleDAG):

    def _are_relevant_parents_finished(self, node):
        obj = node['node_object']
        parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
        parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
        for p in parent_nodes:
            if p.do_not_run is True:
                continue
@@ -69,33 +69,55 @@ class WorkflowDAG(SimpleDAG):
                return False
        return True

    def _all_parents_met_convergence_criteria(self, node):
        # This function takes any node and checks that all of its parents have met their criteria to run the child.
        # It returns a boolean and is really only useful if the node is an ALL convergence node; it is
        # intended to be used in conjunction with the node property `all_parents_must_converge`
        obj = node['node_object']
        parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
        for p in parent_nodes:
            # node has a status
            if p.job and p.job.status in ["successful", "failed"]:
                if p.job and p.job.status == "successful":
                    status = "success_nodes"
                elif p.job and p.job.status == "failed":
                    status = "failure_nodes"
                # check that the node's status matches either a pathway of the same status or is an always path.
                if (p not in [node['node_object'] for node in self.get_parents(obj, status)] and
                        p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
                    return False
        return True

    def bfs_nodes_to_run(self):
        nodes = self.get_root_nodes()
        nodes_found = []
        node_ids_visited = set()

        for index, n in enumerate(nodes):
            obj = n['node_object']
            if obj.id in node_ids_visited:
                continue
            node_ids_visited.add(obj.id)

            if obj.do_not_run is True:
                continue
            if obj.job:
            elif obj.job:
                if obj.job.status in ['failed', 'error', 'canceled']:
                    nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
                                 self.get_dependencies(obj, 'always_nodes'))
                    nodes.extend(self.get_children(obj, 'failure_nodes') +
                                 self.get_children(obj, 'always_nodes'))
                elif obj.job.status == 'successful':
                    nodes.extend(self.get_dependencies(obj, 'success_nodes') +
                                 self.get_dependencies(obj, 'always_nodes'))
                    nodes.extend(self.get_children(obj, 'success_nodes') +
                                 self.get_children(obj, 'always_nodes'))
            elif obj.unified_job_template is None:
                nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
                             self.get_dependencies(obj, 'always_nodes'))
                nodes.extend(self.get_children(obj, 'failure_nodes') +
                             self.get_children(obj, 'always_nodes'))
            else:
                if self._are_relevant_parents_finished(n):
                # This catches root nodes or ANY convergence nodes
                if not obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
                    nodes_found.append(n)
                # This catches ALL convergence nodes
                elif obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
                    if self._all_parents_met_convergence_criteria(n):
                        nodes_found.append(n)

        return [n['node_object'] for n in nodes_found]

    def cancel_node_jobs(self):
@@ -135,8 +157,8 @@ class WorkflowDAG(SimpleDAG):

        for node in failed_nodes:
            obj = node['node_object']
            if (len(self.get_dependencies(obj, 'failure_nodes')) +
                    len(self.get_dependencies(obj, 'always_nodes'))) == 0:
            if (len(self.get_children(obj, 'failure_nodes')) +
                    len(self.get_children(obj, 'always_nodes'))) == 0:
                if obj.unified_job_template is None:
                    res = True
                    failed_unified_job_template_node_ids.append(str(obj.id))
@@ -145,8 +167,8 @@ class WorkflowDAG(SimpleDAG):
                    failed_path_nodes_id_status.append((str(obj.id), obj.job.status))

        if res is True:
            s = _("No error handle path for workflow job node(s) [{node_status}] workflow job "
                  "node(s) missing unified job template and error handle path [{no_ufjt}].")
            s = _("No error handling path for workflow job node(s) [{node_status}]. Workflow job "
                  "node(s) missing unified job template and error handling path [{no_ufjt}].")
            parms = {
                'node_status': '',
                'no_ufjt': '',
@@ -190,35 +212,48 @@ class WorkflowDAG(SimpleDAG):
                pass
            elif p.job:
                if p.job.status == 'successful':
                    if node in (self.get_dependencies(p, 'success_nodes') +
                                self.get_dependencies(p, 'always_nodes')):
                    if node in (self.get_children(p, 'success_nodes') +
                                self.get_children(p, 'always_nodes')):
                        return False
                elif p.job.status in ['failed', 'error', 'canceled']:
                    if node in (self.get_dependencies(p, 'failure_nodes') +
                                self.get_dependencies(p, 'always_nodes')):
                    if node in (self.get_children(p, 'failure_nodes') +
                                self.get_children(p, 'always_nodes')):
                        return False
                else:
                    return False
            elif p.do_not_run is False and p.unified_job_template is None:
                if node in (self.get_dependencies(p, 'failure_nodes') +
                            self.get_dependencies(p, 'always_nodes')):
            elif not p.do_not_run and p.unified_job_template is None:
                if node in (self.get_children(p, 'failure_nodes') +
                            self.get_children(p, 'always_nodes')):
                    return False
            else:
                return False
        return True


    r'''
    Determine if the current node is a convergence node by checking if all the
    parents are finished, then checking to see if all parents meet the needed
    path criteria to run the convergence child
    (i.e. parent must fail, parent must succeed, etc. to proceed).

    Return a list object
    '''
    def mark_dnr_nodes(self):
        root_nodes = self.get_root_nodes()
        nodes_marked_do_not_run = []

        for node in self.sort_nodes_topological():
            obj = node['node_object']

            if obj.do_not_run is False and not obj.job and node not in root_nodes:
                parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
                if self._are_all_nodes_dnr_decided(parent_nodes):
                    if self._should_mark_node_dnr(node, parent_nodes):
            parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
            if not obj.do_not_run and not obj.job and node not in root_nodes:
                if obj.all_parents_must_converge:
                    if any(p.do_not_run for p in parent_nodes) or not self._all_parents_met_convergence_criteria(node):
                        obj.do_not_run = True
                        nodes_marked_do_not_run.append(node)
                else:
                    if self._are_all_nodes_dnr_decided(parent_nodes):
                        if self._should_mark_node_dnr(node, parent_nodes):
                            obj.do_not_run = True
                            nodes_marked_do_not_run.append(node)

        return [n['node_object'] for n in nodes_marked_do_not_run]
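In short: an ALL-convergence node runs only when every parent finished on an edge (success, failure, or always) that actually leads to it, while an ANY node runs as soon as one relevant parent does. A toy evaluation of the ALL rule, detached from the real models (parents here are plain tuples, not `WorkflowJobNode` objects):

```python
def all_parents_converged(parents):
    # each parent is (job_status, edge_label) where edge_label is the
    # kind of edge leading from that parent to this node
    for status, edge in parents:
        if status == 'successful' and edge not in ('success_nodes', 'always_nodes'):
            return False
        if status == 'failed' and edge not in ('failure_nodes', 'always_nodes'):
            return False
    return True

# one parent succeeded into a failure-only edge, so the ALL node must not run
print(all_parents_converged([('successful', 'success_nodes'),
                             ('successful', 'failure_nodes')]))  # -> False
```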
@@ -23,6 +23,7 @@ from awx.main.models import (
    Project,
    ProjectUpdate,
    SystemJob,
    UnifiedJob,
    WorkflowApproval,
    WorkflowJob,
    WorkflowJobTemplate
@@ -74,21 +75,6 @@ class TaskManager():
                           key=lambda task: task.created)
        return all_tasks


    def get_latest_project_update_tasks(self, all_sorted_tasks):
        project_ids = set()
        for task in all_sorted_tasks:
            if isinstance(task, Job):
                project_ids.add(task.project_id)
        return ProjectUpdate.objects.filter(id__in=project_ids)

    def get_latest_inventory_update_tasks(self, all_sorted_tasks):
        inventory_ids = set()
        for task in all_sorted_tasks:
            if isinstance(task, Job):
                inventory_ids.add(task.inventory_id)
        return InventoryUpdate.objects.filter(id__in=inventory_ids)

    def get_running_workflow_jobs(self):
        graph_workflow_jobs = [wf for wf in
                               WorkflowJob.objects.filter(status='running')]
@@ -200,9 +186,6 @@ class TaskManager():
            schedule_task_manager()
        return result

    def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
        return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]

    def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
        from awx.main.tasks import handle_work_error, handle_work_success

@@ -364,10 +347,6 @@ class TaskManager():
    def should_update_inventory_source(self, job, latest_inventory_update):
        now = tz_now()

        # Already processed dependencies for this job
        if job.dependent_jobs.all():
            return False

        if latest_inventory_update is None:
            return True
        '''
@@ -393,8 +372,6 @@ class TaskManager():

    def should_update_related_project(self, job, latest_project_update):
        now = tz_now()
        if job.dependent_jobs.all():
            return False

        if latest_project_update is None:
            return True
@@ -426,18 +403,21 @@ class TaskManager():
            return True
        return False

    def generate_dependencies(self, task):
        dependencies = []
        if type(task) is Job:
    def generate_dependencies(self, undeped_tasks):
        created_dependencies = []
        for task in undeped_tasks:
            dependencies = []
            if not type(task) is Job:
                continue
            # TODO: Can remove task.project None check after scan-job-default-playbook is removed
            if task.project is not None and task.project.scm_update_on_launch is True:
                latest_project_update = self.get_latest_project_update(task)
                if self.should_update_related_project(task, latest_project_update):
                    project_task = self.create_project_update(task)
                    created_dependencies.append(project_task)
                    dependencies.append(project_task)
                else:
                    if latest_project_update.status in ['waiting', 'pending', 'running']:
                        dependencies.append(latest_project_update)
                        dependencies.append(latest_project_update)

            # Inventory created 2 seconds behind job
            try:
@@ -452,56 +432,20 @@ class TaskManager():
                latest_inventory_update = self.get_latest_inventory_update(inventory_source)
                if self.should_update_inventory_source(task, latest_inventory_update):
                    inventory_task = self.create_inventory_update(task, inventory_source)
                    created_dependencies.append(inventory_task)
                    dependencies.append(inventory_task)
                else:
                    if latest_inventory_update.status in ['waiting', 'pending', 'running']:
                        dependencies.append(latest_inventory_update)
                        dependencies.append(latest_inventory_update)

            if len(dependencies) > 0:
                self.capture_chain_failure_dependencies(task, dependencies)
        return dependencies

    def process_dependencies(self, dependent_task, dependency_tasks):
        for task in dependency_tasks:
            if self.is_job_blocked(task):
                logger.debug("Dependent {} is blocked from running".format(task.log_format))
                continue
            preferred_instance_groups = task.preferred_instance_groups
            found_acceptable_queue = False
            idle_instance_that_fits = None
            for rampart_group in preferred_instance_groups:
                if idle_instance_that_fits is None:
                    idle_instance_that_fits = rampart_group.find_largest_idle_instance()
                if not rampart_group.is_containerized and self.get_remaining_capacity(rampart_group.name) <= 0:
                    logger.debug("Skipping group {} capacity <= 0".format(rampart_group.name))
                    continue

                execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
                if execution_instance:
                    logger.debug("Starting dependent {} in group {} instance {}".format(
                        task.log_format, rampart_group.name, execution_instance.hostname))
                elif not execution_instance and idle_instance_that_fits:
                    if not rampart_group.is_containerized:
                        execution_instance = idle_instance_that_fits
                        logger.debug("Starting dependent {} in group {} on idle instance {}".format(
                            task.log_format, rampart_group.name, execution_instance.hostname))
                if execution_instance or rampart_group.is_containerized:
                    self.graph[rampart_group.name]['graph'].add_job(task)
                    tasks_to_fail = [t for t in dependency_tasks if t != task]
                    tasks_to_fail += [dependent_task]
                    self.start_task(task, rampart_group, tasks_to_fail, execution_instance)
                    found_acceptable_queue = True
                    break
                else:
                    logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
                        rampart_group.name, task.log_format, task.task_impact))
            if not found_acceptable_queue:
                logger.debug("Dependent {} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
        UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)
        return created_dependencies

    def process_pending_tasks(self, pending_tasks):
        running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
        for task in pending_tasks:
            self.process_dependencies(task, self.generate_dependencies(task))
            if self.is_job_blocked(task):
                logger.debug("{} is blocked from running".format(task.log_format))
                continue
@@ -574,13 +518,6 @@ class TaskManager():
    def calculate_capacity_consumed(self, tasks):
        self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)

    def would_exceed_capacity(self, task, instance_group):
        current_capacity = self.graph[instance_group]['consumed_capacity']
        capacity_total = self.graph[instance_group]['capacity_total']
        if current_capacity == 0:
            return False
        return (task.task_impact + current_capacity > capacity_total)

    def consume_capacity(self, task, instance_group):
        logger.debug('{} consumed {} capacity units from {} with prior total of {}'.format(
            task.log_format, task.task_impact, instance_group,
@@ -598,6 +535,9 @@ class TaskManager():
        self.process_running_tasks(running_tasks)

        pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
        undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
        dependencies = self.generate_dependencies(undeped_tasks)
        self.process_pending_tasks(dependencies)
        self.process_pending_tasks(pending_tasks)

    def _schedule(self):
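The refactor turns per-task dependency generation into a single batched pass: every pending task that still has `dependencies_processed=False` is handled in one loop, and the flag is then flipped for the whole batch with one UPDATE instead of a `save()` per task. A reduced sketch of that batching pattern, with the per-task inspection elided behind a hypothetical `make_updates_for` helper:

```python
def generate_dependencies(self, undeped_tasks):
    created_dependencies = []
    for task in undeped_tasks:
        # inspect project/inventory staleness and create any needed
        # project or inventory updates (hypothetical helper)
        created_dependencies.extend(self.make_updates_for(task))
    # one UPDATE for the whole batch instead of a save() per task
    UnifiedJob.objects.filter(
        pk__in=[task.pk for task in undeped_tasks]
    ).update(dependencies_processed=True)
    return created_dependencies
```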
@@ -5,11 +5,12 @@ import logging
# AWX
from awx.main.scheduler import TaskManager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename

logger = logging.getLogger('awx.main.scheduler')


@task()
@task(queue=get_local_queuename)
def run_task_manager():
    logger.debug("Running Tower task manager.")
    TaskManager().schedule()
@@ -6,10 +6,10 @@ import contextlib
import logging
import threading
import json
import pkg_resources
import sys

# Django
from django.db import connection
from django.conf import settings
from django.db.models.signals import (
    pre_save,
@@ -71,41 +71,6 @@ def get_current_user_or_none():
    return u


def emit_update_inventory_computed_fields(sender, **kwargs):
    logger.debug("In update inventory computed fields")
    if getattr(_inventory_updates, 'is_updating', False):
        return
    instance = kwargs['instance']
    if sender == Group.hosts.through:
        sender_name = 'group.hosts'
    elif sender == Group.parents.through:
        sender_name = 'group.parents'
    elif sender == Host.inventory_sources.through:
        sender_name = 'host.inventory_sources'
    elif sender == Group.inventory_sources.through:
        sender_name = 'group.inventory_sources'
    else:
        sender_name = str(sender._meta.verbose_name)
    if kwargs['signal'] == post_save:
        if sender == Job:
            return
        sender_action = 'saved'
    elif kwargs['signal'] == post_delete:
        sender_action = 'deleted'
    elif kwargs['signal'] == m2m_changed and kwargs['action'] in ('post_add', 'post_remove', 'post_clear'):
        sender_action = 'changed'
    else:
        return
    logger.debug('%s %s, updating inventory computed fields: %r %r',
                 sender_name, sender_action, sender, kwargs)
    try:
        inventory = instance.inventory
    except Inventory.DoesNotExist:
        pass
    else:
        update_inventory_computed_fields.delay(inventory.id, True)


def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
    if getattr(_inventory_updates, 'is_updating', False):
        return
@@ -124,7 +89,9 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
        pass
    else:
        if inventory is not None:
            update_inventory_computed_fields.delay(inventory.id, True)
            connection.on_commit(
                lambda: update_inventory_computed_fields.delay(inventory.id)
            )


def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs):
@@ -189,17 +156,26 @@ def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):

def save_related_job_templates(sender, instance, **kwargs):
    '''save_related_job_templates loops through all of the
    job templates that use an Inventory or Project that have had their
    job templates that use an Inventory that have had their
    Organization updated. This triggers the rebuilding of the RBAC hierarchy
    and ensures the proper access restrictions.
    '''
    if sender not in (Project, Inventory):
    if sender is not Inventory:
        raise ValueError('This signal callback is only intended for use with Project or Inventory')

    update_fields = kwargs.get('update_fields', None)
    if ((update_fields and not ('organization' in update_fields or 'organization_id' in update_fields)) or
            kwargs.get('created', False)):
        return

    if instance._prior_values_store.get('organization_id') != instance.organization_id:
        jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
        for jt in jtq:
            update_role_parentage_for_instance(jt)
            parents_added, parents_removed = update_role_parentage_for_instance(jt)
            if parents_added or parents_removed:
                logger.info('Permissions on JT {} changed due to inventory {} organization change from {} to {}.'.format(
                    jt.pk, instance.pk, instance._prior_values_store.get('organization_id'), instance.organization_id
                ))


def connect_computed_field_signals():
@@ -207,10 +183,6 @@ def connect_computed_field_signals():
    post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
    post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
    post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
    m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
    post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
@@ -219,7 +191,6 @@ def connect_computed_field_signals():

connect_computed_field_signals()

post_save.connect(save_related_job_templates, sender=Project)
post_save.connect(save_related_job_templates, sender=Inventory)
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
m2m_changed.connect(rbac_activity_stream, Role.members.through)
@@ -347,10 +318,6 @@ def disable_computed_fields():
    post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
    post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
    post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.parents.through)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
    m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
    post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
    post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
@@ -625,16 +592,6 @@ def deny_orphaned_approvals(sender, instance, **kwargs):
@receiver(post_save, sender=Session)
def save_user_session_membership(sender, **kwargs):
    session = kwargs.get('instance', None)
    if pkg_resources.get_distribution('channels').version >= '2':
        # If you get into this code block, it means we upgraded channels, but
        # didn't make the settings.SESSIONS_PER_USER feature work
        raise RuntimeError(
            'save_user_session_membership must be updated for channels>=2: '
            'http://channels.readthedocs.io/en/latest/one-to-two.html#requirements'
        )
    if 'runworker' in sys.argv:
        # don't track user session membership for websocket per-channel sessions
        return
    if not session:
        return
    user_id = session.get_decoded().get(SESSION_KEY, None)
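The switch from calling `.delay()` directly to wrapping it in `connection.on_commit(...)` defers dispatch until the surrounding transaction commits, so a worker cannot pick up the task before the row it needs is visible. A minimal sketch of the pattern in generic Django terms (the model and task names stand in for whatever the caller uses):

```python
from django.db import connection, transaction

def create_and_schedule(inventory_data):
    with transaction.atomic():
        inv = Inventory.objects.create(**inventory_data)
        # without on_commit, a fast worker could query for inv.id
        # before this transaction commits and find nothing
        connection.on_commit(
            lambda: update_inventory_computed_fields.delay(inv.id)
        )
    return inv
```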
@@ -26,7 +26,7 @@ import urllib.parse as urlparse

# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now, timedelta
from django.utils.encoding import smart_str
@@ -52,13 +52,14 @@ import ansible_runner
from awx import __version__ as awx_application_version
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
    Schedule, TowerScheduleState, Instance, InstanceGroup,
    UnifiedJob, Notification,
    Inventory, InventorySource, SmartInventoryMembership,
    Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob,
    JobEvent, ProjectUpdateEvent, InventoryUpdateEvent, AdHocCommandEvent, SystemJobEvent,
    build_safe_env
    build_safe_env, enforce_bigint_pk_migration
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError
@@ -134,6 +135,12 @@ def dispatch_startup():
    if Instance.objects.me().is_controller():
        awx_isolated_heartbeat()

    # at process startup, detect the need to migrate old event records from int
    # to bigint; at *some point* in the future, once certain versions of AWX
    # and Tower fall out of use/support, we can probably just _assume_ that
    # everybody has moved to bigint, and remove this code entirely
    enforce_bigint_pk_migration()


def inform_cluster_of_shutdown():
    try:
@@ -150,7 +157,7 @@ def inform_cluster_of_shutdown():
        logger.exception('Encountered problem with normal shutdown signal.')


@task()
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
    started_waiting = time.time()
    with advisory_lock('cluster_policy_lock', wait=True):
@@ -263,7 +270,7 @@ def apply_cluster_membership_policies():
    logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))


@task(queue='tower_broadcast_all', exchange_type='fanout')
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
    orig_len = len(setting_keys)
    for i in range(orig_len):
@@ -274,7 +281,7 @@ def handle_setting_changes(setting_keys):
    cache.delete_many(cache_keys)


@task(queue='tower_broadcast_all', exchange_type='fanout')
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
    # TODO: possibly implement some retry logic
    lock_file = project_path + '.lock'
@@ -292,7 +299,7 @@ def delete_project_files(project_path):
        logger.exception('Could not remove lock file {}'.format(lock_file))


@task(queue='tower_broadcast_all', exchange_type='fanout')
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
    if threshold == 0:
        cache.delete('awx-profile-sql-threshold')
@@ -306,7 +313,7 @@ def profile_sql(threshold=1, minutes=1):
    logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))


@task()
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
@@ -335,19 +342,33 @@ def send_notifications(notification_list, job_id=None):
            logger.exception('Error saving notification {} result.'.format(notification.id))


@task()
@task(queue=get_local_queuename)
def gather_analytics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField
    if not settings.INSIGHTS_TRACKING_STATE:
        return
    try:
        tgz = analytics.gather()
        if not tgz:
            return
        logger.debug('gathered analytics: {}'.format(tgz))
        analytics.ship(tgz)
    finally:
        if os.path.exists(tgz):
            os.remove(tgz)
    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    if last_gather:
        last_time = DateTimeField().to_internal_value(last_gather.value)
    else:
        last_time = None
    gather_time = now()
    if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
        with advisory_lock('gather_analytics_lock', wait=False) as acquired:
            if acquired is False:
                logger.debug('Not gathering analytics, another task holds lock')
                return
            try:
                tgz = analytics.gather()
                if not tgz:
                    return
                logger.info('gathered analytics: {}'.format(tgz))
                analytics.ship(tgz)
                settings.AUTOMATION_ANALYTICS_LAST_GATHER = gather_time
            finally:
                if os.path.exists(tgz):
                    os.remove(tgz)
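The new `gather_analytics` wraps the work in two guards: a last-gather interval check and a non-blocking PostgreSQL advisory lock, so only one node in the cluster ships data per interval. A minimal sketch of that cluster-singleton pattern on its own (the import path and `do_expensive_work` are assumptions for illustration):

```python
from awx.main.utils.pglock import advisory_lock  # import path is an assumption

def run_once_across_cluster():
    # wait=False makes acquisition non-blocking: every node but one bails
    # out immediately instead of queueing up behind the lock
    with advisory_lock('gather_analytics_lock', wait=False) as acquired:
        if acquired is False:
            return  # another node already holds the lock; skip this cycle
        do_expensive_work()  # hypothetical stand-in for gather()/ship()
```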
@task(queue=get_local_queuename)
@@ -474,10 +495,10 @@ def awx_isolated_heartbeat():
    # Slow pass looping over isolated IGs and their isolated instances
    if len(isolated_instance_qs) > 0:
        logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs])))
        isolated_manager.IsolatedManager().health_check(isolated_instance_qs)
        isolated_manager.IsolatedManager(CallbackQueueDispatcher.dispatch).health_check(isolated_instance_qs)


@task()
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
@@ -499,7 +520,7 @@ def awx_periodic_scheduler():

    invalid_license = False
    try:
        access_registry[Job](None).check_license()
        access_registry[Job](None).check_license(quiet=True)
    except PermissionDenied as e:
        invalid_license = e

@@ -534,7 +555,7 @@ def awx_periodic_scheduler():
        state.save()


@task()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -547,7 +568,7 @@ def handle_work_success(task_actual):
    schedule_task_manager()


@task()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
@@ -569,7 +590,7 @@ def handle_work_error(task_id, *args, **kwargs):
                first_instance = instance
                first_instance_type = each_task['type']

            if instance.celery_task_id != task_id and not instance.cancel_flag:
            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
@@ -587,8 +608,27 @@ def handle_work_error(task_id, *args, **kwargs):
        pass


@task()
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
    uj = UnifiedJob.objects.get(pk=job_id)
    retries = 0
    while retries < 5:
        if uj.finished:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
            return
        else:
            # wait a few seconds to avoid a race where the
            # events are persisted _before_ the UJ.status
            # changes from running -> successful
            retries += 1
            time.sleep(1)
            uj = UnifiedJob.objects.get(pk=job_id)

    logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")


@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
    '''
    Signal handler and wrapper around inventory.update_computed_fields to
    prevent unnecessary recursive calls.
@@ -599,7 +639,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
        return
    i = i[0]
    try:
        i.update_computed_fields(update_hosts=should_update_hosts)
        i.update_computed_fields()
    except DatabaseError as e:
        if 'did not affect any rows' in str(e):
            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
@@ -629,7 +669,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
    return False


@task()
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
    smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    changed_inventories = set([])
@@ -642,10 +682,52 @@ def update_host_smart_inventory_memberships():
            logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in changed_inventories:
        smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
        smart_inventory.update_computed_fields()


@task()
@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
    if 'event' not in tblname:
        return
    with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
        if acquired is False:
            return
        chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE

        def _remaining():
            try:
                cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
                return cursor.fetchone()[0]
            except ProgrammingError:
                # the table is gone (migration is unnecessary)
                return None

        with connection.cursor() as cursor:
            total_rows = _remaining()
            while total_rows:
                with transaction.atomic():
                    cursor.execute(
                        f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;'
                    )
                    last_insert_pk = cursor.fetchone()
                    if last_insert_pk is None:
                        # this means that the SELECT from the old table was
                        # empty, and there was nothing to insert (so we're done)
                        break
                    last_insert_pk = last_insert_pk[0]
                    cursor.execute(
                        f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});'
                    )
                logger.warn(
                    f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)'
                )

            if _remaining() is None:
                cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
                logger.warn(f'{tblname} primary key migration to bigint has finished')
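The chunked `INSERT ... RETURNING` / `DELETE` loop above moves rows from the renamed `_old_` table into the new bigint-keyed table one slice at a time, committing between slices so the migration never holds one long transaction. The same pattern in miniature, against a plain DB-API connection with hypothetical table names:

```python
# assumptions: `conn` is a psycopg-style connection with autocommit off,
# and demo_new/_old_demo share a schema with integer primary key `id`;
# the chunk size is illustrative
CHUNK = 5000

def migrate_in_chunks(conn):
    with conn.cursor() as cur:
        while True:
            cur.execute(
                f'INSERT INTO demo_new SELECT * FROM _old_demo '
                f'ORDER BY id DESC LIMIT {CHUNK} RETURNING id;'
            )
            if cur.fetchone() is None:  # old table drained; we are done
                break
            cur.execute(
                f'DELETE FROM _old_demo WHERE id IN '
                f'(SELECT id FROM _old_demo ORDER BY id DESC LIMIT {CHUNK});'
            )
            conn.commit()  # keep each slice in its own short transaction
        cur.execute('DROP TABLE IF EXISTS _old_demo;')
        conn.commit()
```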
@task(queue=get_local_queuename)
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
if user_id is None:
|
||||
@@ -1130,7 +1212,23 @@ class BaseTask(object):
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
should_write_event = False
|
||||
|
||||
if isinstance(self, RunProjectUpdate):
|
||||
# it's common for Ansible's SCM modules to print
|
||||
# error messages on failure that contain the plaintext
|
||||
# basic auth credentials (username + password)
|
||||
# it's also common for the nested event data itself (['res']['...'])
|
||||
# to contain unredacted text on failure
|
||||
# this is a _little_ expensive to filter
|
||||
# with regex, but project updates don't have many events,
|
||||
# so it *should* have a negligible performance impact
|
||||
try:
|
||||
event_data_json = json.dumps(event_data)
|
||||
event_data_json = UriCleaner.remove_sensitive(event_data_json)
|
||||
event_data = json.loads(event_data_json)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
self.event_ct += 1
|
||||
@@ -1142,13 +1240,17 @@ class BaseTask(object):
|
||||
self.instance.artifacts = event_data['event_data']['artifact_data']
|
||||
self.instance.save(update_fields=['artifacts'])
|
||||
|
||||
return should_write_event
|
||||
return False
|
||||
|
||||
def cancel_callback(self):
|
||||
'''
|
||||
Ansible runner callback to tell the job when/if it is canceled
|
||||
'''
|
||||
self.instance = self.update_model(self.instance.pk)
|
||||
unified_job_id = self.instance.pk
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
if not self.instance:
|
||||
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
|
||||
return True
|
||||
if self.instance.cancel_flag or self.instance.status == 'canceled':
|
||||
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
|
||||
if cancel_wait > 5:
|
||||
@@ -1338,6 +1440,7 @@ class BaseTask(object):
|
||||
if not params[v]:
|
||||
del params[v]
|
||||
|
||||
self.dispatcher = CallbackQueueDispatcher()
|
||||
if self.instance.is_isolated() or containerized:
|
||||
module_args = None
|
||||
if 'module_args' in params:
|
||||
@@ -1352,6 +1455,7 @@ class BaseTask(object):
|
||||
|
||||
ansible_runner.utils.dump_artifacts(params)
|
||||
isolated_manager_instance = isolated_manager.IsolatedManager(
|
||||
self.event_handler,
|
||||
canceled_callback=lambda: self.update_model(self.instance.pk).cancel_flag,
|
||||
check_callback=self.check_handler,
|
||||
pod_manager=pod_manager
|
||||
@@ -1361,11 +1465,9 @@ class BaseTask(object):
|
||||
params.get('playbook'),
|
||||
params.get('module'),
|
||||
module_args,
|
||||
event_data_key=self.event_data_key,
|
||||
ident=str(self.instance.pk))
|
||||
self.event_ct = len(isolated_manager_instance.handled_events)
|
||||
else:
|
||||
self.dispatcher = CallbackQueueDispatcher()
|
||||
res = ansible_runner.interface.run(**params)
|
||||
status = res.status
|
||||
rc = res.rc
|
||||
@@ -1443,7 +1545,7 @@ class BaseTask(object):
|
||||
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunJob(BaseTask):
|
||||
'''
|
||||
Run a job using ansible-playbook.
|
||||
@@ -1656,8 +1758,12 @@ class RunJob(BaseTask):
|
||||
args.append('--vault-id')
|
||||
args.append('{}@prompt'.format(vault_id))
|
||||
|
||||
if job.forks: # FIXME: Max limit?
|
||||
args.append('--forks=%d' % job.forks)
|
||||
if job.forks:
|
||||
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
|
||||
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
|
||||
args.append('--forks=%d' % settings.MAX_FORKS)
|
||||
else:
|
||||
args.append('--forks=%d' % job.forks)
|
||||
if job.force_handlers:
|
||||
args.append('--force-handlers')
|
||||
if job.limit:
|
||||
@@ -1769,7 +1875,7 @@ class RunJob(BaseTask):
|
||||
current_revision = git_repo.head.commit.hexsha
|
||||
if desired_revision == current_revision:
|
||||
job_revision = desired_revision
|
||||
logger.info('Skipping project sync for {} because commit is locally available'.format(job.log_format))
|
||||
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
|
||||
else:
|
||||
sync_needs = all_sync_needs
|
||||
except (ValueError, BadGitName):
|
||||
@@ -1868,10 +1974,11 @@ class RunJob(BaseTask):
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
if inventory is not None:
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunProjectUpdate(BaseTask):
|
||||
|
||||
model = ProjectUpdate
|
||||
@@ -1977,8 +2084,9 @@ class RunProjectUpdate(BaseTask):
                     continue
                 env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
                 env[env_key] = server[key]
-        # now set the precedence of galaxy servers
-        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
+        if galaxy_servers:
+            # now set the precedence of galaxy servers
+            env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
         return env

     def _build_scm_url_extra_vars(self, project_update):
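The new guard matters because an empty server list previously produced an empty `ANSIBLE_GALAXY_SERVER_LIST` value. A self-contained sketch of the corrected behavior, assuming `galaxy_servers` is a list of dicts keyed like the settings the loop above iterates:

```python
def build_galaxy_env(galaxy_servers):
    # Sketch: per-server vars are always emitted; the LIST var only when
    # at least one server is configured.
    env = {}
    for server in galaxy_servers:
        for key, value in server.items():
            if key == 'id':
                continue
            env_key = 'ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key).upper()
            env[env_key] = value
    if galaxy_servers:
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(s.get('id', 'unnamed') for s in galaxy_servers)
    return env


assert build_galaxy_env([]) == {}  # no stray empty SERVER_LIST
```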
@@ -2231,7 +2339,7 @@ class RunProjectUpdate(BaseTask):
             # force option is necessary because remote refs are not counted, although no information is lost
             git_repo.delete_head(tmp_branch_name, force=True)
         else:
-            copy_tree(project_path, destination_folder)
+            copy_tree(project_path, destination_folder, preserve_symlinks=1)

     def post_run_hook(self, instance, status):
         # To avoid hangs, very important to release lock even if errors happen here
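`distutils.dir_util.copy_tree` dereferences symlinks by default, so links inside a project directory used to be copied as full files; `preserve_symlinks=1` recreates them as links instead. A minimal demonstration with hypothetical paths (note that `distutils` is deprecated on newer Pythons, where `shutil.copytree(src, dst, symlinks=True)` is the equivalent):

```python
from distutils.dir_util import copy_tree

# hypothetical paths; preserve_symlinks=1 keeps links as links
copy_tree('/srv/projects/demo', '/srv/runner/demo', preserve_symlinks=1)
```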
@@ -2280,7 +2388,7 @@ class RunProjectUpdate(BaseTask):
         return getattr(settings, 'AWX_PROOT_ENABLED', False)


-@task()
+@task(queue=get_local_queuename)
 class RunInventoryUpdate(BaseTask):

     model = InventoryUpdate
@@ -2548,7 +2656,7 @@ class RunInventoryUpdate(BaseTask):
         )


-@task()
+@task(queue=get_local_queuename)
 class RunAdHocCommand(BaseTask):
     '''
     Run an ad hoc command using ansible.
@@ -2738,7 +2846,7 @@ class RunAdHocCommand(BaseTask):
             isolated_manager_instance.cleanup()


-@task()
+@task(queue=get_local_queuename)
 class RunSystemJob(BaseTask):

     model = SystemJob
@@ -2812,11 +2920,16 @@ def _reconstruct_relationships(copy_mapping):
             new_obj.save()


-@task()
+@task(queue=get_local_queuename)
 def deep_copy_model_obj(
     model_module, model_name, obj_pk, new_obj_pk,
-    user_pk, sub_obj_list, permission_check_func=None
+    user_pk, uuid, permission_check_func=None
 ):
+    sub_obj_list = cache.get(uuid)
+    if sub_obj_list is None:
+        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
+        return
+
     logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
     from awx.api.generics import CopyAPIView
     from awx.main.signals import disable_activity_stream
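The signature change replaces the inline `sub_obj_list` payload with a `uuid` key: the caller stashes the (potentially large) list in the shared cache, and the task reads it back, bailing out if the entry expired. A sketch of both halves of that hand-off, assuming Django's cache is configured; the helper names and timeout here are hypothetical:

```python
import uuid as uuid_mod

from django.core.cache import cache


def stash_payload(sub_obj_list, timeout=3600):  # timeout is an assumption
    key = str(uuid_mod.uuid4())
    cache.set(key, sub_obj_list, timeout=timeout)
    return key  # pass this as the task's `uuid` argument


def fetch_payload(key):
    payload = cache.get(key)
    if payload is None:
        return None  # expired or never written: the task logs and returns
    cache.delete(key)  # one-shot hand-off
    return payload
```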
@@ -2851,4 +2964,4 @@ def deep_copy_model_obj(
         ), permission_check_func[2])
         permission_check_func(creater, copy_mapping.values())
     if isinstance(new_obj, Inventory):
-        update_inventory_computed_fields.delay(new_obj.id, True)
+        update_inventory_computed_fields.delay(new_obj.id)

@@ -10,6 +2,8 @@ group_patterns = foo_group_patterns
 want_facts = True
 want_hostcollections = True
 group_prefix = foo_group_prefix
+want_ansible_ssh_host = True
+rich_params = True

 [cache]
 path = /tmp

@@ -2,6 +2,9 @@ from django.db import connection
 from django.db.models.signals import post_migrate
 from django.apps import apps
 from django.conf import settings
+from unittest import mock

+import contextlib
+

 def app_post_migration(sender, app_config, **kwargs):
@@ -23,3 +26,13 @@ if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':



+@contextlib.contextmanager
+def immediate_on_commit():
+    """
+    Context manager executing transaction.on_commit() hooks immediately as
+    if the connection was in auto-commit mode.
+    """
+    def on_commit(func):
+        func()
+    with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
+        yield patch
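A usage sketch for the helper that now lives in `awx.main.tests.functional`: wrapping a block in `immediate_on_commit()` makes `transaction.on_commit()` hooks fire inline instead of waiting for a real commit, which never happens inside a test transaction:

```python
from django.db import transaction

from awx.main.tests.functional import immediate_on_commit


def test_hook_fires_inline():
    ran = []
    with immediate_on_commit():
        # the patched connection runs the hook immediately
        transaction.on_commit(lambda: ran.append(True))
    assert ran == [True]
```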
@@ -599,9 +599,9 @@ class TestControlledBySCM:
         delete(inv_src.get_absolute_url(), admin_user, expect=204)
         assert scm_inventory.inventory_sources.count() == 0

-    def test_adding_inv_src_ok(self, post, scm_inventory, admin_user):
+    def test_adding_inv_src_ok(self, post, scm_inventory, project, admin_user):
         post(reverse('api:inventory_inventory_sources_list', kwargs={'pk': scm_inventory.id}),
-             {'name': 'new inv src', 'update_on_project_update': False, 'source': 'scm', 'overwrite_vars': True},
+             {'name': 'new inv src', 'source_project': project.pk, 'update_on_project_update': False, 'source': 'scm', 'overwrite_vars': True},
             admin_user, expect=201)

     def test_adding_inv_src_prohibited(self, post, scm_inventory, project, admin_user):

@@ -39,6 +39,26 @@ def test_extra_credentials(get, organization_factory, job_template_factory, cred
 @pytest.mark.django_db
+def test_job_relaunch_permission_denied_response(
+        post, get, inventory, project, credential, net_credential, machine_credential):
+    jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True)
+    jt.credentials.add(machine_credential)
+    jt_user = User.objects.create(username='jobtemplateuser')
+    jt.execute_role.members.add(jt_user)
+    with impersonate(jt_user):
+        job = jt.create_unified_job()
+
+    # User capability is shown for this
+    r = get(job.get_absolute_url(), jt_user, expect=200)
+    assert r.data['summary_fields']['user_capabilities']['start']
+
+    # Job has prompted extra_credential, launch denied w/ message
+    job.launch_config.credentials.add(net_credential)
+    r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
+    assert 'launched with prompted fields you do not have access to' in r.data['detail']
+
+
+@pytest.mark.django_db
 def test_job_relaunch_prompts_not_accepted_response(
         post, get, inventory, project, credential, net_credential, machine_credential):
     jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
     jt.credentials.add(machine_credential)
     jt_user = User.objects.create(username='jobtemplateuser')
@@ -53,8 +73,6 @@ def test_job_relaunch_permission_denied_response(
     # Job has prompted extra_credential, launch denied w/ message
     job.launch_config.credentials.add(net_credential)
     r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
-    assert 'launched with prompted fields' in r.data['detail']
-    assert 'do not have permission' in r.data['detail']


 @pytest.mark.django_db
@@ -153,7 +171,8 @@ def test_summary_fields_recent_jobs(job_template, admin_user, get):
         'id': job.id,
         'status': 'failed',
         'finished': job.finished,
-        'type': 'job'
+        'canceled_on': None,
+        'type': 'job'
     } for job in jobs[-10:][::-1]]


@@ -208,7 +227,8 @@ def test_block_related_unprocessed_events(mocker, organization, project, delete,
         status='finished',
         finished=time_of_finish,
         job_template=job_template,
-        project=project
+        project=project,
+        organization=project.organization
     )
     view = RelatedJobsPreventDeleteMixin()
     time_of_request = time_of_finish + relativedelta(seconds=2)

@@ -264,18 +264,6 @@ def test_job_launch_fails_without_credential_access(job_template_prompts, runtim
         dict(credentials=runtime_data['credentials']), rando, expect=403)


-@pytest.mark.django_db
-@pytest.mark.job_runtime_vars
-def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user):
-    job_template = job_template_prompts(True)
-
-    # Assure that changing the type of a scan job blocks the launch
-    response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
-                    dict(job_type='scan'), admin_user, expect=400)
-
-    assert 'job_type' in response.data
-
-
 @pytest.mark.django_db
 def test_job_launch_JT_with_validation(machine_credential, credential, deploy_jobtemplate):
     deploy_jobtemplate.extra_vars = '{"job_template_var": 3}'

@@ -6,7 +6,7 @@ import pytest
 # AWX
 from awx.api.serializers import JobTemplateSerializer
 from awx.api.versioning import reverse
-from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate
+from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate, Organization, Project
 from awx.main.migrations import _save_password_keys as save_password_keys

 # Django
@@ -30,14 +30,19 @@ def test_create(post, project, machine_credential, inventory, alice, grant_proje
     project.use_role.members.add(alice)
     if grant_inventory:
         inventory.use_role.members.add(alice)
+    project.organization.job_template_admin_role.members.add(alice)

-    r = post(reverse('api:job_template_list'), {
-        'name': 'Some name',
-        'project': project.id,
-        'inventory': inventory.id,
-        'playbook': 'helloworld.yml',
-    }, alice)
-    assert r.status_code == expect
+    post(
+        url=reverse('api:job_template_list'),
+        data={
+            'name': 'Some name',
+            'project': project.id,
+            'inventory': inventory.id,
+            'playbook': 'helloworld.yml'
+        },
+        user=alice,
+        expect=expect
+    )


 @pytest.mark.django_db
@@ -118,6 +123,26 @@ def test_extra_credential_unique_type_xfail(get, post, organization_factory, job
     assert response.data.get('count') == 1


+@pytest.mark.django_db
+def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, inventory, settings):
+    project.use_role.members.add(alice)
+    inventory.use_role.members.add(alice)
+    settings.MAX_FORKS = 10
+    response = post(
+        url=reverse('api:job_template_list'),
+        data={
+            'name': 'Some name',
+            'project': project.id,
+            'inventory': inventory.id,
+            'playbook': 'helloworld.yml',
+            'forks': 11,
+        },
+        user=alice,
+        expect=400
+    )
+    assert 'Maximum number of forks (10) exceeded' in str(response.data)
+
+
 @pytest.mark.django_db
 def test_attach_extra_credential(get, post, organization_factory, job_template_factory, credential):
     objs = organization_factory("org", superusers=['admin'])
@@ -494,6 +519,72 @@ def test_job_template_unset_custom_virtualenv(get, patch, organization_factory,
     assert resp.data['custom_virtualenv'] is None


+@pytest.mark.django_db
+def test_jt_organization_follows_project(post, patch, admin_user):
+    org1 = Organization.objects.create(name='foo1')
+    org2 = Organization.objects.create(name='foo2')
+    project_common = dict(scm_type='git', playbook_files=['helloworld.yml'])
+    project1 = Project.objects.create(name='proj1', organization=org1, **project_common)
+    project2 = Project.objects.create(name='proj2', organization=org2, **project_common)
+    r = post(
+        url=reverse('api:job_template_list'),
+        data={
+            "name": "fooo",
+            "ask_inventory_on_launch": True,
+            "project": project1.pk,
+            "playbook": "helloworld.yml"
+        },
+        user=admin_user,
+        expect=201
+    )
+    data = r.data
+    assert data['organization'] == project1.organization_id
+    data['project'] = project2.id
+    jt = JobTemplate.objects.get(pk=data['id'])
+    r = patch(
+        url=jt.get_absolute_url(),
+        data=data,
+        user=admin_user,
+        expect=200
+    )
+    assert r.data['organization'] == project2.organization_id
+
+
+@pytest.mark.django_db
+def test_jt_organization_field_is_read_only(patch, post, project, admin_user):
+    org = project.organization
+    jt = JobTemplate.objects.create(
+        name='foo_jt',
+        ask_inventory_on_launch=True,
+        project=project, playbook='helloworld.yml'
+    )
+    org2 = Organization.objects.create(name='foo2')
+    r = patch(
+        url=jt.get_absolute_url(),
+        data={'organization': org2.id},
+        user=admin_user,
+        expect=200
+    )
+    assert r.data['organization'] == org.id
+    assert JobTemplate.objects.get(pk=jt.pk).organization == org
+
+    # similar test, but on creation
+    r = post(
+        url=reverse('api:job_template_list'),
+        data={
+            'name': 'foobar',
+            'project': project.id,
+            'organization': org2.id,
+            'ask_inventory_on_launch': True,
+            'playbook': 'helloworld.yml'
+        },
+        user=admin_user,
+        expect=201
+    )
+    assert r.data['organization'] == org.id
+    assert JobTemplate.objects.get(pk=r.data['id']).organization == org
+
+
 @pytest.mark.django_db
 def test_callback_disallowed_null_inventory(project):
     jt = JobTemplate.objects.create(

@@ -1,8 +1,6 @@
 import pytest
 import base64
-import contextlib
 import json
-from unittest import mock

 from django.db import connection
 from django.test.utils import override_settings
@@ -12,22 +10,11 @@ from awx.main.utils.encryption import decrypt_value, get_encryption_key
 from awx.api.versioning import reverse, drf_reverse
 from awx.main.models.oauth import (OAuth2Application as Application,
                                    OAuth2AccessToken as AccessToken)
+from awx.main.tests.functional import immediate_on_commit
 from awx.sso.models import UserEnterpriseAuth
 from oauth2_provider.models import RefreshToken


-@contextlib.contextmanager
-def immediate_on_commit():
-    """
-    Context manager executing transaction.on_commit() hooks immediately as
-    if the connection was in auto-commit mode.
-    """
-    def on_commit(func):
-        func()
-    with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
-        yield patch
-
-
 @pytest.mark.django_db
 def test_personal_access_token_creation(oauth_application, post, alice):
     url = drf_reverse('api:oauth_authorization_root_view') + 'token/'

Some files were not shown because too many files have changed in this diff.