Mirror of https://github.com/ansible/awx.git
synced 2026-02-06 03:54:44 -03:30
Compare commits
775 Commits
CHANGELOG.md
@@ -2,6 +2,46 @@
This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

## 9.3.0 (Mar 12, 2020)
- Added the ability to specify an OAuth2 token description in the AWX CLI (https://github.com/ansible/awx/issues/6122)
- Added support for K8S service account annotations to the installer (https://github.com/ansible/awx/pull/6007)
- Added support for K8S imagePullSecrets to the installer (https://github.com/ansible/awx/pull/5989)
- Launching jobs (and workflows) using the --monitor flag in the AWX CLI now returns a non-zero exit code on job failure (https://github.com/ansible/awx/issues/5920)
- Improved UI performance for various job views when many simultaneous users are logged into AWX (https://github.com/ansible/awx/issues/5883)
- Updated to the latest version of Django to address a few open CVEs (https://github.com/ansible/awx/pull/6080)
- Fixed a critical bug which can cause AWX to hang and stop launching playbooks after a period of time (https://github.com/ansible/awx/issues/5617)
- Fixed a bug which caused delays in project update stdout for certain large SCM clones (as of Ansible 2.9+) (https://github.com/ansible/awx/pull/6254)
- Fixed a bug which caused certain smart inventory filters to mistakenly return duplicate hosts (https://github.com/ansible/awx/pull/5972)
- Fixed an unclear server error when creating smart inventories with the AWX collection (https://github.com/ansible/awx/issues/6250)
- Fixed a bug that broke Grafana notification support (https://github.com/ansible/awx/issues/6137)
- Fixed a UI bug which prevented users with read access to an organization from editing credentials for that organization (https://github.com/ansible/awx/pull/6241)
- Fixed a bug which prevented workflow approval records from recording a `started` and `elapsed` date (https://github.com/ansible/awx/issues/6202)
- Fixed a bug which caused workflow nodes to have a confusing option for `verbosity` (https://github.com/ansible/awx/issues/6196)
- Fixed an RBAC bug which prevented projects and inventory schedules from being created by certain users in certain contexts (https://github.com/ansible/awx/issues/5717)
- Fixed a bug that caused `role_path` in a project's config to not be respected due to an error processing `/etc/ansible/ansible.cfg` (https://github.com/ansible/awx/pull/6038)
- Fixed a bug that broke inventory updates for installs with custom home directories for the awx user (https://github.com/ansible/awx/pull/6152)
- Fixed a bug that broke fact data collection when AWX encounters invalid/unexpected fact data (https://github.com/ansible/awx/issues/5935)

## 9.2.0 (Feb 12, 2020)
- Added the ability to configure the convergence behavior of workflow nodes https://github.com/ansible/awx/issues/3054
- AWX now allows for a configurable global limit for fork count (per-job run). The default maximum is 200. https://github.com/ansible/awx/pull/5604
- Added the ability to specify AZURE_PUBLIC_CLOUD (for e.g., Azure Government KeyVault support) for the Azure credential plugin https://github.com/ansible/awx/issues/5138
- Added support for several additional parameters for Satellite dynamic inventory https://github.com/ansible/awx/pull/5598
- Added a new field to jobs for tracking the date/time a job is cancelled https://github.com/ansible/awx/pull/5610
- Made a series of additional optimizations to the callback receiver to further improve stdout write speed for running playbooks https://github.com/ansible/awx/pull/5677 https://github.com/ansible/awx/pull/5739
- Updated AWX to be compatible with Helm 3.x (https://github.com/ansible/awx/pull/5776)
- Optimized AWX's job dependency/scheduling code to drastically improve processing time in scenarios where there are many pending jobs scheduled simultaneously https://github.com/ansible/awx/issues/5154
- Fixed a bug which could cause SCM authentication details (basic auth passwords) to be reported to external loggers in certain failure scenarios (e.g., when a git clone fails and ansible itself prints an error message to stdout) https://github.com/ansible/awx/pull/5812
- Fixed a k8s installer bug that caused installs to fail in certain situations https://github.com/ansible/awx/issues/5574
- Fixed a number of issues that caused analytics gathering and reporting to run more often than necessary https://github.com/ansible/awx/pull/5721
- Fixed a bug in the AWX CLI that prevented JSON-type settings from saving properly https://github.com/ansible/awx/issues/5528
- Improved support for fetching custom virtualenv dependencies when AWX is installed behind a proxy https://github.com/ansible/awx/pull/5805
- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
- Fixed a bug that can cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448

## 9.1.1 (Jan 14, 2020)

- Fixed a bug that caused database migrations on Kubernetes installs to hang https://github.com/ansible/awx/pull/5579

@@ -2,96 +2,8 @@

## Introduction

Upgrades using Django migrations are not expected to work in AWX. As a result, to upgrade to a new version, it is necessary to export resources from the old AWX node and import them into a freshly-installed node with the new version. The recommended way to do this is to use the tower-cli send/receive feature.
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.

This tool does __not__ support export/import of the following:
* Logs/history
* Credential passwords
* LDAP/AWX config
Users who wish to upgrade modern AWX installations should follow the instructions at:

### Install & Configure Tower-CLI

In terminal, pip install tower-cli (if you do not have pip already, install [here](https://pip.pypa.io/en/stable/installing/)):
```
$ pip install --upgrade ansible-tower-cli
```

The AWX host URL, user, and password must be set for the AWX instance to be exported:
```
$ tower-cli config host http://<old-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
```

For more information on installing tower-cli look [here](http://tower-cli.readthedocs.io/en/latest/quickstart.html).

### Export Resources

Export all objects

```$ tower-cli receive --all > assets.json```

### Teardown Old AWX

Clean up remnants of the old AWX install:

```docker rm -f $(docker ps -aq)``` # remove all old awx containers

```make clean-ui``` # clean up ui artifacts

### Install New AWX version

If you are installing AWX as a dev container, pull down the latest code or version you want from GitHub, build the image locally, then start the container

```
git pull # retrieve latest AWX changes from repository
make docker-compose-build # build AWX image
make docker-compose # run container
```
For other install methods, refer to the [Install.md](https://github.com/ansible/awx/blob/devel/INSTALL.md).

### Import Resources

Configure tower-cli for your new AWX host as shown earlier. Import from a JSON file named assets.json

```
$ tower-cli config host http://<new-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
$ tower-cli send assets.json
```

--------------------------------------------------------------------------------

## Additional Info

If you have two running AWX hosts, it is possible to copy all assets from one instance to another

```$ tower-cli receive --tower-host old-awx-host.example.com --all | tower-cli send --tower-host new-awx-host.example.com```

#### More Granular Exports:

Export all credentials

```$ tower-cli receive --credential all > credentials.json```
> Note: This exports the credentials with blank strings for passwords and secrets

Export a credential named "My Credential"

```$ tower-cli receive --credential "My Credential"```

#### More Granular Imports:

You could import anything except an organization defined in a JSON file named assets.json

```$ tower-cli send --prevent organization assets.json```
https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions

@@ -506,10 +506,6 @@ If you wish to tag and push built images to a Docker registry, set the following

> Username of the user that will push images to the registry. Defaults to *developer*.

*docker_remove_local_images*

> Due to the way that the docker_image module behaves, images will not be pushed to a remote repository if they are present locally. Set this to delete local versions of the images that will be pushed to the remote. This will fail if containers are currently running from those images.

**Note**

> These settings are ignored if using official images

Makefile
@@ -122,7 +122,7 @@ clean-api:
rm -rf awx/projects

clean-awxkit:
rm -rf awxkit/*.egg-info awxkit/.tox
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*

# convenience target to assert environment variables are defined
guard-%:
@@ -167,8 +167,7 @@ virtualenv_awx:
fi; \
if [ ! -d "$(VENV_BASE)/awx" ]; then \
virtualenv -p $(PYTHON) $(VENV_BASE)/awx; \
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP) && \
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) flit; \
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
fi; \
fi

@@ -403,6 +402,7 @@ prepare_collection_venv:
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
COLLECTION_PACKAGE ?= awx
COLLECTION_NAMESPACE ?= awx
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)

test_collection:
@if [ "$(VENV_BASE)" ]; then \
@@ -415,20 +415,26 @@ flake8_collection:

test_collection_all: prepare_collection_venv test_collection flake8_collection

test_collection_sanity:
rm -rf sanity
mkdir -p sanity/ansible_collections/awx
cp -Ra awx_collection sanity/ansible_collections/awx/awx # symlinks do not work
cd sanity/ansible_collections/awx/awx && git init && git add . # requires both this file structure and a git repo, so there you go
cd sanity/ansible_collections/awx/awx && ansible-test sanity
# WARNING: symlinking a collection is fundamentally unstable
# this is for rapid development iteration with playbooks, do not use with other test targets
symlink_collection:
rm -rf $(COLLECTION_INSTALL)
mkdir -p ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE) # in case it does not exist
ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)

build_collection:
ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
ansible-galaxy collection build awx_collection --force --output-path=awx_collection

install_collection: build_collection
rm -rf ~/.ansible/collections/ansible_collections/awx/awx
ansible-galaxy collection install awx_collection/awx-awx-$(VERSION).tar.gz
rm -rf $(COLLECTION_INSTALL)
ansible-galaxy collection install awx_collection/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(VERSION).tar.gz

test_collection_sanity: install_collection
cd $(COLLECTION_INSTALL) && ansible-test sanity

test_collection_integration: install_collection
cd $(COLLECTION_INSTALL) && ansible-test integration

test_unit:
@if [ "$(VENV_BASE)" ]; then \
@@ -9,7 +9,7 @@ from functools import reduce
# Django
from django.core.exceptions import FieldError, ValidationError
from django.db import models
from django.db.models import Q
from django.db.models import Q, CharField, IntegerField, BooleanField
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
from django.contrib.contenttypes.models import ContentType
@@ -63,19 +63,19 @@ class TypeFilterBackend(BaseFilterBackend):
raise ParseError(*e.args)


def get_field_from_path(model, path):
def get_fields_from_path(model, path):
'''
Given a Django ORM lookup path (possibly over multiple models)
Returns the last field in the line, and also the revised lookup path
Returns the fields in the line, and also the revised lookup path
ex., given
model=Organization
path='project__timeout'
returns tuple of field at the end of the line as well as a corrected
path, for special cases we do substitutions
(<IntegerField for timeout>, 'project__timeout')
returns tuple of fields traversed as well and a corrected path,
for special cases we do substitutions
([<IntegerField for timeout>], 'project__timeout')
'''
# Store of all the fields used to detect repeats
field_set = set([])
field_list = []
new_parts = []
for name in path.split('__'):
if model is None:
@@ -111,13 +111,24 @@ def get_field_from_path(model, path):
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
elif getattr(field, '__prevent_search__', False):
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
if field in field_set:
if field in field_list:
# Field traversed twice, could create infinite JOINs, DoSing Tower
raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
field_set.add(field)
field_list.append(field)
model = getattr(field, 'related_model', None)

return field, '__'.join(new_parts)
return field_list, '__'.join(new_parts)


def get_field_from_path(model, path):
'''
Given a Django ORM lookup path (possibly over multiple models)
Returns the last field in the line, and the revised lookup path
ex.
(<IntegerField for timeout>, 'project__timeout')
'''
field_list, new_path = get_fields_from_path(model, path)
return (field_list[-1], new_path)
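
The hunk above changes `get_fields_from_path` to record every field it crosses in an ordered list rather than a set, and to return that whole list, while the new `get_field_from_path` wrapper keeps the old single-field return for existing callers. A minimal, self-contained sketch of the traversal-and-loop-check idea (`FakeField` and `walk_path` are stand-ins invented for illustration, not AWX code):

```
# Toy version of the traversal: keep an ordered list of fields crossed and
# reject a lookup path that visits the same field twice (runaway JOIN risk).
class FakeField:
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<FakeField %s>' % self.name

def walk_path(fields_by_name, path):
    field_list = []
    for name in path.split('__'):
        field = fields_by_name[name]
        if field in field_list:
            raise ValueError('Loops not allowed in filters, detected on field {}.'.format(field.name))
        field_list.append(field)
    return field_list

fields = {'project': FakeField('project'), 'timeout': FakeField('timeout')}
print(walk_path(fields, 'project__timeout'))   # [<FakeField project>, <FakeField timeout>]
# walk_path(fields, 'project__project') would raise: loop detected on 'project'
```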

class FieldLookupBackend(BaseFilterBackend):
@@ -133,7 +144,11 @@ class FieldLookupBackend(BaseFilterBackend):
'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
'isnull', 'search')

def get_field_from_lookup(self, model, lookup):
# A list of fields that we know can be filtered on without the possiblity
# of introducing duplicates
NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)

def get_fields_from_lookup(self, model, lookup):

if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
path, suffix = lookup.rsplit('__', 1)
@@ -147,11 +162,16 @@ class FieldLookupBackend(BaseFilterBackend):
# FIXME: Could build up a list of models used across relationships, use
# those lookups combined with request.user.get_queryset(Model) to make
# sure user cannot query using objects he could not view.
field, new_path = get_field_from_path(model, path)
field_list, new_path = get_fields_from_path(model, path)

new_lookup = new_path
new_lookup = '__'.join([new_path, suffix])
return field, new_lookup
return field_list, new_lookup

def get_field_from_lookup(self, model, lookup):
'''Method to match return type of single field, if needed.'''
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
return (field_list[-1], new_lookup)

def to_python_related(self, value):
value = force_text(value)
@@ -182,7 +202,10 @@ class FieldLookupBackend(BaseFilterBackend):
except UnicodeEncodeError:
raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

field, new_lookup = self.get_field_from_lookup(model, lookup)
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
field = field_list[-1]

needs_distinct = (not all(isinstance(f, self.NO_DUPLICATES_WHITELIST) for f in field_list))

# Type names are stored without underscores internally, but are presented and
# and serialized over the API containing underscores so we remove `_`
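
To make the whitelist logic above concrete, here is a hedged, self-contained sketch of how `needs_distinct` falls out of the traversed field list (the classes below are toy stand-ins, not Django's real field types):

```
# Stand-in field classes for illustration only; the real check uses Django's
# CharField/IntegerField/BooleanField via NO_DUPLICATES_WHITELIST above.
class CharField: pass
class IntegerField: pass
class BooleanField: pass
class ForeignKey: pass

NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)

def needs_distinct(field_list):
    # .distinct() is only required when the lookup crossed something other
    # than a plain scalar column, since relational joins can duplicate rows.
    return not all(isinstance(f, NO_DUPLICATES_WHITELIST) for f in field_list)

print(needs_distinct([CharField(), IntegerField()]))  # False: plain columns only
print(needs_distinct([ForeignKey(), CharField()]))    # True: a relation was crossed
```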

@@ -211,10 +234,10 @@ class FieldLookupBackend(BaseFilterBackend):
for rm_field in related_model._meta.fields:
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
return value, new_lookups
return value, new_lookups, needs_distinct
else:
value = self.value_to_python_for_field(field, value)
return value, new_lookup
return value, new_lookup, needs_distinct

def filter_queryset(self, request, queryset, view):
try:
@@ -225,6 +248,7 @@ class FieldLookupBackend(BaseFilterBackend):
chain_filters = []
role_filters = []
search_filters = {}
needs_distinct = False
# Can only have two values: 'AND', 'OR'
# If 'AND' is used, an iterm must satisfy all condition to show up in the results.
# If 'OR' is used, an item just need to satisfy one condition to appear in results.
@@ -256,9 +280,12 @@ class FieldLookupBackend(BaseFilterBackend):
search_filter_relation = 'AND'
values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
for value in values:
search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_text(value))
assert isinstance(new_keys, list)
search_filters[search_value] = new_keys
# by definition, search *only* joins across relations,
# so it _always_ needs a .distinct()
needs_distinct = True
continue

# Custom chain__ and or__ filters, mutually exclusive (both can
@@ -282,7 +309,9 @@ class FieldLookupBackend(BaseFilterBackend):
for value in values:
if q_int:
value = int(value)
value, new_key = self.value_to_python(queryset.model, key, value)
value, new_key, distinct = self.value_to_python(queryset.model, key, value)
if distinct:
needs_distinct = True
if q_chain:
chain_filters.append((q_not, new_key, value))
elif q_or:
@@ -332,7 +361,9 @@ class FieldLookupBackend(BaseFilterBackend):
else:
q = Q(**{k:v})
queryset = queryset.filter(q)
queryset = queryset.filter(*args).distinct()
queryset = queryset.filter(*args)
if needs_distinct:
queryset = queryset.distinct()
return queryset
except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
raise ParseError(e.args[0])

@@ -192,7 +192,7 @@ class APIView(views.APIView):
response.data['detail'] += ' To establish a login session, visit /api/login/.'
logger.info(status_msg)
else:
logger.warn(status_msg)
logger.warning(status_msg)
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
time_started = getattr(self, 'time_started', None)
response['X-API-Node'] = settings.CLUSTER_HOST_ID

@@ -20,6 +20,7 @@ from rest_framework.fields import JSONField as DRFJSONField
from rest_framework.request import clone_request

# AWX
from awx.api.fields import ChoiceNullField
from awx.main.fields import JSONField, ImplicitRoleField
from awx.main.models import InventorySource, NotificationTemplate
from awx.main.scheduler.kubernetes import PodManager
@@ -96,7 +97,15 @@ class Metadata(metadata.SimpleMetadata):
field_info['children'] = self.get_serializer_info(field)

if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
choices = [
(choice_value, choice_name) for choice_value, choice_name in field.choices.items()
]
if not any(choice in ('', None) for choice, _ in choices):
if field.allow_blank:
choices = [("", "---------")] + choices
if field.allow_null and not isinstance(field, ChoiceNullField):
choices = [(None, "---------")] + choices
field_info['choices'] = choices

# Indicate if a field is write-only.
if getattr(field, 'write_only', False):
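
As a quick way to see what the new OPTIONS-metadata branch does, a small self-contained sketch follows (the function and its keyword arguments are invented for illustration; the real logic operates on DRF serializer fields):

```
# Hedged sketch of the choice handling shown above, using plain values instead
# of a DRF field object.
def build_choices(raw_choices, allow_blank=False, allow_null=False, is_choice_null_field=False):
    choices = list(raw_choices)
    # Only prepend placeholder choices when the field permits blank/null and
    # no blank or null choice is already present.
    if not any(choice in ('', None) for choice, _ in choices):
        if allow_blank:
            choices = [("", "---------")] + choices
        if allow_null and not is_choice_null_field:
            choices = [(None, "---------")] + choices
    return choices

print(build_choices([('run', 'Run'), ('check', 'Check')], allow_blank=True))
# [('', '---------'), ('run', 'Run'), ('check', 'Check')]
```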

@@ -98,26 +98,19 @@ SUMMARIZABLE_FK_FIELDS = {
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'host': DEFAULT_SUMMARY_FIELDS,
'group': DEFAULT_SUMMARY_FIELDS,
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
@@ -125,7 +118,7 @@ SUMMARIZABLE_FK_FIELDS = {
'workflow_approval': DEFAULT_SUMMARY_FIELDS + ('timeout',),
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error', 'canceled_on'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
@@ -139,7 +132,7 @@ SUMMARIZABLE_FK_FIELDS = {
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'webhook_credential': DEFAULT_SUMMARY_FIELDS,
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
'credential_type': DEFAULT_SUMMARY_FIELDS,
}
@@ -719,7 +712,7 @@ class UnifiedJobSerializer(BaseSerializer):
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
@@ -1549,20 +1542,15 @@ class InventorySerializer(BaseSerializerWithVariables):
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)

class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
'has_inventory_sources', 'total_inventory_sources',
'inventory_sources_with_failures', 'insights_credential',
'pending_deletion',)

def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
@@ -1612,7 +1600,7 @@ class InventorySerializer(BaseSerializerWithVariables):
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
raise models.base.ValidationError(str(e))
return host_filter

def validate(self, attrs):
@@ -1644,6 +1632,9 @@ class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']

has_active_failures = serializers.SerializerMethodField()
has_inventory_sources = serializers.SerializerMethodField()

class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
@@ -1757,6 +1748,14 @@ class HostSerializer(BaseSerializerWithVariables):
ret['last_job_host_summary'] = None
return ret

def get_has_active_failures(self, obj):
return bool(
obj.last_job_host_summary and obj.last_job_host_summary.failed
)

def get_has_inventory_sources(self, obj):
return obj.inventory_sources.exists()


class AnsibleFactsSerializer(BaseSerializer):
class Meta:
@@ -1769,17 +1768,10 @@ class AnsibleFactsSerializer(BaseSerializer):
class GroupSerializer(BaseSerializerWithVariables):
show_capabilities = ['copy', 'edit', 'delete']
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)

class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
fields = ('*', 'inventory', 'variables')

def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
@@ -2123,7 +2115,13 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)

if get_field_from_model_or_attrs('source') != 'scm':
if get_field_from_model_or_attrs('source') == 'scm':
if (('source' in attrs or 'source_project' in attrs) and
get_field_from_model_or_attrs('source_project') is None):
raise serializers.ValidationError(
{"source_project": _("Project required for scm type sources.")}
)
else:
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
@@ -2823,7 +2821,7 @@ class JobTemplateMixin(object):
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
'id': x.id, 'status': x.status, 'finished': x.finished, 'canceled_on': x.canceled_on,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
@@ -3685,7 +3683,7 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',)

def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
@@ -3725,7 +3723,7 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
model = WorkflowJobNode
fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
'all_parents_must_converge', 'do_not_run',)

def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
@@ -3833,7 +3831,7 @@ class JobEventSerializer(BaseSerializer):
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')

@@ -3842,13 +3840,9 @@ class JobEventSerializer(BaseSerializer):
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res

def get_summary_fields(self, obj):
@@ -4060,6 +4054,13 @@ class JobLaunchSerializer(BaseSerializer):
**attrs)
self._ignored_fields = rejected

# Basic validation - cannot run a playbook without a playbook
if not template.project:
errors['project'] = _("A project is required to run a job.")
elif template.project.status in ('error', 'failed'):
errors['playbook'] = _("Missing a revision to run due to failed project update.")

# cannot run a playbook without an inventory
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:

@@ -81,7 +81,8 @@ from awx.main.utils import (
getattrd,
get_pk_from_dict,
schedule_task_manager,
ignore_inventory_computed_fields
ignore_inventory_computed_fields,
set_environ
)
from awx.main.utils.encryption import encrypt_value
from awx.main.utils.filters import SmartFilter
@@ -204,20 +205,15 @@ class DashboardView(APIView):
'failed': ec2_inventory_failed.count()}

user_groups = get_user_queryset(request.user, models.Group)
groups_job_failed = (
models.Group.objects.filter(hosts_with_active_failures__gt=0) | models.Group.objects.filter(groups_with_active_failures__gt=0)
).count()
groups_inventory_failed = models.Group.objects.filter(inventory_sources__last_job_failed=True).count()
data['groups'] = {'url': reverse('api:group_list', request=request),
'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True",
'total': user_groups.count(),
'job_failed': groups_job_failed,
'inventory_failed': groups_inventory_failed}

user_hosts = get_user_queryset(request.user, models.Host)
user_hosts_failed = user_hosts.filter(has_active_failures=True)
user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True)
data['hosts'] = {'url': reverse('api:host_list', request=request),
'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True",
'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True",
'total': user_hosts.count(),
'failed': user_hosts_failed.count()}

@@ -1611,7 +1607,8 @@ class HostInsights(GenericAPIView):

def _call_insights_api(self, url, session, headers):
try:
res = session.get(url, headers=headers, timeout=120)
with set_environ(**settings.AWX_TASK_ENV):
res = session.get(url, headers=headers, timeout=120)
except requests.exceptions.SSLError:
raise BadGateway(_('SSLError while trying to connect to {}').format(url))
except requests.exceptions.Timeout:
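
Both hunks that wrap outbound calls in `set_environ(**settings.AWX_TASK_ENV)` rely on the same idea: apply AWX's task environment (for example proxy variables) only for the duration of the request. The real helper lives in `awx.main.utils`; what follows is only an illustrative sketch of that pattern, with a made-up proxy URL:

```
import os
from contextlib import contextmanager

@contextmanager
def set_environ(**environ):
    # Temporarily overlay the given variables, then restore the old values.
    saved = {k: os.environ.get(k) for k in environ}
    os.environ.update({k: str(v) for k, v in environ.items()})
    try:
        yield
    finally:
        for k, old in saved.items():
            if old is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = old

with set_environ(HTTPS_PROXY='http://proxy.example.com:3128'):
    print(os.environ['HTTPS_PROXY'])   # only set inside the block
print('HTTPS_PROXY' in os.environ)     # False again afterwards (unless it was already set)
```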

@@ -2150,7 +2147,7 @@ class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
host__inventory_sources=inv_source
).delete()
r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r

@@ -2177,7 +2174,7 @@ class InventorySourceGroupsList(SubListDestroyAPIView):
group__inventory_sources=inv_source
).delete()
r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
update_inventory_computed_fields.delay(inv_source.inventory_id)
return r

@@ -3268,7 +3265,7 @@ class WorkflowJobRelaunch(GenericAPIView):
jt = obj.job_template
if not jt:
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
elif not jt.inventory or min(jt.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.'))
new_workflow_job = obj.create_relaunch_workflow_job()
new_workflow_job.signal_start()
@@ -3819,6 +3816,12 @@ class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
relationship = 'hosts'
name = _('Job Event Hosts List')

def get_queryset(self):
parent_event = self.get_parent_object()
self.check_parent_access(parent_event)
qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
return qs


class BaseJobEventsList(NoTruncateMixin, SubListAPIView):

@@ -3841,8 +3844,7 @@ class HostJobEventsList(BaseJobEventsList):
def get_queryset(self):
parent_obj = self.get_parent_object()
self.check_parent_access(parent_obj)
qs = self.request.user.get_queryset(self.model).filter(
Q(host=parent_obj) | Q(hosts=parent_obj)).distinct()
qs = self.request.user.get_queryset(self.model).filter(host=parent_obj)
return qs

@@ -3858,9 +3860,7 @@ class JobJobEventsList(BaseJobEventsList):
def get_queryset(self):
job = self.get_parent_object()
self.check_parent_access(job)
qs = job.job_events
qs = qs.select_related('host')
qs = qs.prefetch_related('hosts', 'children')
qs = job.job_events.select_related('host').order_by('start_line')
return qs.all()

@@ -4303,7 +4303,7 @@ class NotificationTemplateTest(GenericAPIView):
msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
if obj.notification_type in ('email', 'pagerduty'):
body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
elif obj.notification_type == 'webhook':
elif obj.notification_type in ('webhook', 'grafana'):
body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
else:
body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}

@@ -20,6 +20,7 @@ from rest_framework import status
import requests

from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.ha import is_ha_environment
from awx.main.utils import (
get_awx_version,
@@ -37,6 +38,7 @@ from awx.main.models import (
InstanceGroup,
JobTemplate,
)
from awx.main.utils import set_environ

logger = logging.getLogger('awx.api.views.root')

@@ -190,7 +192,8 @@ class ApiV2SubscriptionView(APIView):
data['rh_password'] = settings.REDHAT_PASSWORD
try:
user, pw = data.get('rh_username'), data.get('rh_password')
validated = get_licenser().validate_rh(user, pw)
with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw)
if user:
settings.REDHAT_USERNAME = data['rh_username']
if pw:
@@ -202,10 +205,15 @@ class ApiV2SubscriptionView(APIView):
getattr(getattr(exc, 'response', None), 'status_code', None) == 401
):
msg = _("The provided credentials are invalid (HTTP 401).")
if isinstance(exc, (ValueError, OSError)) and exc.args:
elif isinstance(exc, requests.exceptions.ProxyError):
msg = _("Unable to connect to proxy server.")
elif isinstance(exc, requests.exceptions.ConnectionError):
msg = _("Could not connect to subscription service.")
elif isinstance(exc, (ValueError, OSError)) and exc.args:
msg = exc.args[0]
logger.exception(smart_text(u"Invalid license submitted."),
extra=dict(actor=request.user.username))
else:
logger.exception(smart_text(u"Invalid license submitted."),
extra=dict(actor=request.user.username))
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)

return Response(validated)
@@ -302,7 +310,8 @@ class ApiV2ConfigView(APIView):
# If the license is valid, write it to the database.
if license_data_validated['valid_key']:
settings.LICENSE = license_data
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
if not settings_registry.is_setting_read_only('TOWER_URL_BASE'):
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
return Response(license_data_validated)

logger.warning(smart_text(u"Invalid license submitted."),

@@ -11,7 +11,7 @@ from django.utils.translation import ugettext_lazy as _

# Django REST Framework
from rest_framework.fields import ( # noqa
BooleanField, CharField, ChoiceField, DictField, EmailField,
BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField,
IntegerField, ListField, NullBooleanField
)


@@ -1,14 +1,11 @@
# Python
from collections import namedtuple
import contextlib
import logging
import re
import sys
import threading
import time
import traceback
import urllib.parse
from io import StringIO

# Django
from django.conf import LazySettings
@@ -28,8 +25,6 @@ from awx.conf import settings_registry
from awx.conf.models import Setting
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field

import cachetools

# FIXME: Gracefully handle when settings are accessed before the database is
# ready (or during migrations).

@@ -91,42 +86,11 @@ def _ctit_db_wrapper(trans_safe=False):
transaction.set_rollback(False)
yield
except DBError:
# We want the _full_ traceback with the context
# First we get the current call stack, which constitutes the "top",
# it has the context up to the point where the context manager is used
top_stack = StringIO()
traceback.print_stack(file=top_stack)
top_lines = top_stack.getvalue().strip('\n').split('\n')
top_stack.close()
# Get "bottom" stack from the local error that happened
# inside of the "with" block this wraps
exc_type, exc_value, exc_traceback = sys.exc_info()
bottom_stack = StringIO()
traceback.print_tb(exc_traceback, file=bottom_stack)
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
# Glue together top and bottom where overlap is found
bottom_cutoff = 0
for i, line in enumerate(bottom_lines):
if line in top_lines:
# start of overlapping section, take overlap from bottom
top_lines = top_lines[:top_lines.index(line)]
bottom_cutoff = i
break
bottom_lines = bottom_lines[bottom_cutoff:]
tb_lines = top_lines + bottom_lines

tb_string = '\n'.join(
['Traceback (most recent call last):'] +
tb_lines +
['{}: {}'.format(exc_type.__name__, str(exc_value))]
)
bottom_stack.close()
# Log the combined stack
if trans_safe:
if 'check_migrations' not in sys.argv:
logger.debug('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
logger.exception('Database settings are not available, using defaults.')
else:
logger.debug('Error modifying something related to database settings.\n{}'.format(tb_string))
logger.exception('Error modifying something related to database settings.')
finally:
if trans_safe and is_atomic and rollback_set:
transaction.set_rollback(rollback_set)
|
||||
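The change above replaces the hand-built, glued traceback with a plain logger.exception() call, which, when invoked inside an except block, records the traceback of the active exception automatically. A minimal illustration of that behavior (standard library only):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('example')

try:
    1 / 0
except ZeroDivisionError:
    # exc_info is implied; the traceback of the current exception is attached
    logger.exception('Database settings are not available, using defaults.')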
@@ -138,12 +102,13 @@ def filter_sensitive(registry, key, value):
|
||||
return value
|
||||
|
||||
|
||||
# settings.__getattr__ is called *constantly*, and the LOG_AGGREGATOR_ ones are
|
||||
# so ubiquitous when external logging is enabled that they should be kept in memory
|
||||
# with a short TTL to avoid even having to contact memcached
|
||||
# the primary use case for this optimization is the callback receiver
|
||||
# when external logging is enabled
|
||||
LOGGING_SETTINGS_CACHE = cachetools.TTLCache(maxsize=50, ttl=1)
|
||||
class TransientSetting(object):
|
||||
|
||||
__slots__ = ('pk', 'value')
|
||||
|
||||
def __init__(self, pk, value):
|
||||
self.pk = pk
|
||||
self.value = value
|
||||
|
||||
|
||||
class EncryptedCacheProxy(object):
|
||||
@@ -173,7 +138,6 @@ class EncryptedCacheProxy(object):
|
||||
def get(self, key, **kwargs):
|
||||
value = self.cache.get(key, **kwargs)
|
||||
value = self._handle_encryption(self.decrypter, key, value)
|
||||
logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
|
||||
return value
|
||||
|
||||
def set(self, key, value, log=True, **kwargs):
|
||||
@@ -196,8 +160,6 @@ class EncryptedCacheProxy(object):
|
||||
self.set(key, value, log=False, **kwargs)
|
||||
|
||||
def _handle_encryption(self, method, key, value):
|
||||
TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])
|
||||
|
||||
if value is not empty and self.registry.is_setting_encrypted(key):
|
||||
# If the setting exists in the database, we'll use its primary key
|
||||
# as part of the AES key when encrypting/decrypting
|
||||
@@ -447,17 +409,11 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
return self._get_default('SETTINGS_MODULE')
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name.startswith('LOG_AGGREGATOR_'):
|
||||
cached = LOGGING_SETTINGS_CACHE.get(name)
|
||||
if cached:
|
||||
return cached
|
||||
value = empty
|
||||
if name in self.all_supported_settings:
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
value = self._get_local(name)
|
||||
if value is not empty:
|
||||
if name.startswith('LOG_AGGREGATOR_'):
|
||||
LOGGING_SETTINGS_CACHE[name] = value
|
||||
return value
|
||||
value = self._get_default(name)
|
||||
# sometimes users specify RabbitMQ passwords that contain
|
||||
|
||||
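The LOGGING_SETTINGS_CACHE introduced above gives the __getattr__ fast path a one-second in-process memo for LOG_AGGREGATOR_* settings, so the callback receiver does not have to contact memcached or the database on every lookup. A minimal sketch of that read-through pattern with cachetools.TTLCache (illustrative only; load_from_backend is a made-up stand-in for the real settings lookup):

import cachetools

LOGGING_SETTINGS_CACHE = cachetools.TTLCache(maxsize=50, ttl=1)

def get_setting(name, load_from_backend):
    # Serve from the short-lived in-memory cache when possible;
    # entries silently expire after one second.
    cached = LOGGING_SETTINGS_CACHE.get(name)
    if cached is not None:
        return cached
    value = load_from_backend(name)   # e.g. database/memcached lookup
    LOGGING_SETTINGS_CACHE[name] = value
    return value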
Five large file diffs suppressed, including awx/locale/zh/LC_MESSAGES/django.po (6083 lines, new file).
@@ -307,7 +307,7 @@ class BaseAccess(object):
|
||||
|
||||
return True # User has access to both, permission check passed
|
||||
|
||||
def check_license(self, add_host_name=None, feature=None, check_expiration=True):
|
||||
def check_license(self, add_host_name=None, feature=None, check_expiration=True, quiet=False):
|
||||
validation_info = get_licenser().validate()
|
||||
if validation_info.get('license_type', 'UNLICENSED') == 'open':
|
||||
return
|
||||
@@ -317,8 +317,10 @@ class BaseAccess(object):
|
||||
validation_info['time_remaining'] = 99999999
|
||||
validation_info['grace_period_remaining'] = 99999999
|
||||
|
||||
report_violation = lambda message: logger.error(message)
|
||||
|
||||
if quiet:
|
||||
report_violation = lambda message: None
|
||||
else:
|
||||
report_violation = lambda message: logger.warning(message)
|
||||
if (
|
||||
validation_info.get('trial', False) is True or
|
||||
validation_info['instance_count'] == 10 # basic 10 license
|
||||
@@ -907,7 +909,7 @@ class HostAccess(BaseAccess):
|
||||
model = Host
|
||||
select_related = ('created_by', 'modified_by', 'inventory',
|
||||
'last_job__job_template', 'last_job_host_summary__job',)
|
||||
prefetch_related = ('groups',)
|
||||
prefetch_related = ('groups', 'inventory_sources')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))
|
||||
@@ -2238,7 +2240,7 @@ class JobEventAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = JobEvent
|
||||
prefetch_related = ('hosts', 'job__job_template', 'host',)
|
||||
prefetch_related = ('job__job_template', 'host',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
@@ -2427,6 +2429,9 @@ class ScheduleAccess(BaseAccess):
|
||||
def can_add(self, data):
|
||||
if not JobLaunchConfigAccess(self.user).can_add(data):
|
||||
return False
|
||||
if not data:
|
||||
return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists()
|
||||
|
||||
return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True)
|
||||
|
||||
@check_superuser
|
||||
|
||||
@@ -31,7 +31,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
|
||||
'''
|
||||
|
||||
|
||||
@register('config', '1.0')
|
||||
@register('config', '1.1')
|
||||
def config(since):
|
||||
license_info = get_license(show_key=False)
|
||||
install_type = 'traditional'
|
||||
@@ -53,6 +53,7 @@ def config(since):
|
||||
'ansible_version': get_ansible_version(),
|
||||
'license_type': license_info.get('license_type', 'UNLICENSED'),
|
||||
'free_instances': license_info.get('free_instances', 0),
|
||||
'total_licensed_instances': license_info.get('instance_count', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
'pendo_tracking': settings.PENDO_TRACKING_STATE,
|
||||
'authentication_backends': settings.AUTHENTICATION_BACKENDS,
|
||||
|
||||
@@ -15,7 +15,7 @@ from awx.conf.license import get_license
|
||||
from awx.main.models import Job
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.models.ha import TowerAnalyticsState
|
||||
from awx.main.utils import get_awx_http_client_headers
|
||||
from awx.main.utils import get_awx_http_client_headers, set_environ
|
||||
|
||||
|
||||
__all__ = ['register', 'gather', 'ship', 'table_version']
|
||||
@@ -169,12 +169,13 @@ def ship(path):
|
||||
s = requests.Session()
|
||||
s.headers = get_awx_http_client_headers()
|
||||
s.headers.pop('Content-Type')
|
||||
response = s.post(url,
|
||||
files=files,
|
||||
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
|
||||
auth=(rh_user, rh_password),
|
||||
headers=s.headers,
|
||||
timeout=(31, 31))
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
response = s.post(url,
|
||||
files=files,
|
||||
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
|
||||
auth=(rh_user, rh_password),
|
||||
headers=s.headers,
|
||||
timeout=(31, 31))
|
||||
if response.status_code != 202:
|
||||
return logger.exception('Upload failed with status {}, {}'.format(response.status_code,
|
||||
response.text))
|
||||
|
||||
@@ -616,6 +616,18 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'MAX_FORKS',
|
||||
field_class=fields.IntegerField,
|
||||
allow_null=False,
|
||||
default=200,
|
||||
label=_('Maximum number of forks per job.'),
|
||||
help_text=_('Saving a Job Template with more than this number of forks will result in an error. '
|
||||
'When set to 0, no limit is applied.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
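The new MAX_FORKS setting is enforced when a job template is saved (see the clean_forks hook added further down). A minimal, framework-free sketch of the same rule, where max_forks=0 means no limit:

def validate_forks(forks, max_forks=200):
    """Reject fork counts above the configured limit; 0 disables the limit."""
    if max_forks > 0 and forks > max_forks:
        raise ValueError('Maximum number of forks ({}) exceeded.'.format(max_forks))
    return forks

validate_forks(50)       # ok
# validate_forks(500)    # would raise ValueError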
|
||||
|
||||
register(
|
||||
'LOG_AGGREGATOR_HOST',
|
||||
field_class=fields.CharField,
|
||||
@@ -787,6 +799,28 @@ register(
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last gather date for Automation Analytics.'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system'
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
||||
field_class=fields.IntegerField,
|
||||
label=_('Automation Analytics Gather Interval'),
|
||||
help_text=_('Interval (in seconds) between data gathering.'),
|
||||
default=14400, # every 4 hours
|
||||
min_value=1800, # every 30 minutes
|
||||
category=_('System'),
|
||||
category_slug='system'
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or \
|
||||
not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or \
|
||||
@@ -811,10 +845,7 @@ def galaxy_validate(serializer, attrs):
|
||||
to save settings which obviously break all project updates.
|
||||
"""
|
||||
prefix = 'PRIMARY_GALAXY_'
|
||||
|
||||
from awx.main.constants import GALAXY_SERVER_FIELDS
|
||||
if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
|
||||
return attrs
|
||||
errors = {}
|
||||
|
||||
def _new_value(setting_name):
|
||||
if setting_name in attrs:
|
||||
@@ -823,10 +854,22 @@ def galaxy_validate(serializer, attrs):
|
||||
return ''
|
||||
return getattr(serializer.instance, setting_name, '')
|
||||
|
||||
if not _new_value('PRIMARY_GALAXY_URL'):
|
||||
if _new_value('PUBLIC_GALAXY_ENABLED') is False:
|
||||
msg = _('A URL for Primary Galaxy must be defined before disabling public Galaxy.')
|
||||
# put error in both keys because UI has trouble with errors in toggles
|
||||
for key in ('PRIMARY_GALAXY_URL', 'PUBLIC_GALAXY_ENABLED'):
|
||||
errors.setdefault(key, [])
|
||||
errors[key].append(msg)
|
||||
raise serializers.ValidationError(errors)
|
||||
|
||||
from awx.main.constants import GALAXY_SERVER_FIELDS
|
||||
if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
|
||||
return attrs
|
||||
|
||||
galaxy_data = {}
|
||||
for subfield in GALAXY_SERVER_FIELDS:
|
||||
galaxy_data[subfield] = _new_value('{}{}'.format(prefix, subfield.upper()))
|
||||
errors = {}
|
||||
if not galaxy_data['url']:
|
||||
for k, v in galaxy_data.items():
|
||||
if v:
|
||||
|
||||
@@ -43,7 +43,7 @@ aim_inputs = {
|
||||
'id': 'object_query',
|
||||
'label': _('Object Query'),
|
||||
'type': 'string',
|
||||
'help_text': _('Lookup query for the object. Ex: "Safe=TestSafe;Object=testAccountName123"'),
|
||||
'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
|
||||
}, {
|
||||
'id': 'object_query_format',
|
||||
'label': _('Object Query Format'),
|
||||
|
||||
@@ -3,6 +3,16 @@ from .plugin import CredentialPlugin
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from msrestazure import azure_cloud
|
||||
|
||||
|
||||
# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
|
||||
clouds = [
|
||||
vars(azure_cloud)[n]
|
||||
for n in dir(azure_cloud)
|
||||
if n.startswith("AZURE_") and n.endswith("_CLOUD")
|
||||
]
|
||||
default_cloud = vars(azure_cloud)["AZURE_PUBLIC_CLOUD"]
|
||||
|
||||
|
||||
azure_keyvault_inputs = {
|
||||
@@ -24,6 +34,12 @@ azure_keyvault_inputs = {
|
||||
'id': 'tenant',
|
||||
'label': _('Tenant ID'),
|
||||
'type': 'string'
|
||||
}, {
|
||||
'id': 'cloud_name',
|
||||
'label': _('Cloud Environment'),
|
||||
'help_text': _('Specify which azure cloud environment to use.'),
|
||||
'choices': list(set([default_cloud.name] + [c.name for c in clouds])),
|
||||
'default': default_cloud.name
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'secret_field',
|
||||
@@ -42,6 +58,7 @@ azure_keyvault_inputs = {
|
||||
|
||||
def azure_keyvault_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
[cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]
|
||||
|
||||
def auth_callback(server, resource, scope):
|
||||
credentials = ServicePrincipalCredentials(
|
||||
@@ -49,7 +66,7 @@ def azure_keyvault_backend(**kwargs):
|
||||
client_id = kwargs['client'],
|
||||
secret = kwargs['secret'],
|
||||
tenant = kwargs['tenant'],
|
||||
resource = "https://vault.azure.net",
|
||||
resource = f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
|
||||
)
|
||||
token = credentials.token
|
||||
return token['token_type'], token['access_token']
|
||||
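The cloud_name input above lets the plugin target sovereign Azure clouds: the KeyVault auth resource is derived from the selected cloud's keyvault_dns suffix instead of being hard-coded to the public cloud. Roughly (the exact suffix values come from msrestazure and may differ by version):

from msrestazure import azure_cloud

cloud = azure_cloud.AZURE_PUBLIC_CLOUD
# keyvault_dns is a suffix such as '.vault.azure.net'; dropping the text
# before the first '.' yields the OAuth resource URL for that cloud.
resource = "https://{}".format(cloud.suffixes.keyvault_dns.split('.', 1).pop())
# -> 'https://vault.azure.net' for the public cloud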
|
||||
56 awx/main/dispatch/periodic.py (new file)
@@ -0,0 +1,56 @@
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from multiprocessing import Process
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connections
|
||||
from schedule import Scheduler
|
||||
|
||||
from awx.main.dispatch.worker import TaskWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch.periodic')
|
||||
|
||||
|
||||
class Scheduler(Scheduler):
|
||||
|
||||
def run_continuously(self):
|
||||
idle_seconds = max(
|
||||
1,
|
||||
min(self.jobs).period.total_seconds() / 2
|
||||
)
|
||||
|
||||
def run():
|
||||
ppid = os.getppid()
|
||||
logger.warn(f'periodic beat started')
|
||||
while True:
|
||||
if os.getppid() != ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
pid = os.getpid()
|
||||
logger.warn(f'periodic beat exiting gracefully pid:{pid}')
|
||||
raise SystemExit()
|
||||
try:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
self.run_pending()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
'encountered an error while scheduling periodic tasks'
|
||||
)
|
||||
time.sleep(idle_seconds)
|
||||
|
||||
process = Process(target=run)
|
||||
process.daemon = True
|
||||
process.start()
|
||||
|
||||
|
||||
def run_continuously():
|
||||
scheduler = Scheduler()
|
||||
for task in settings.CELERYBEAT_SCHEDULE.values():
|
||||
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
|
||||
total_seconds = task['schedule'].total_seconds()
|
||||
scheduler.every(total_seconds).seconds.do(apply_async)
|
||||
scheduler.run_continuously()
|
||||
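The dispatcher beat above is built on the schedule library: each entry in CELERYBEAT_SCHEDULE becomes a recurring job, and a daemon child process wakes at half the shortest period (never less than one second) to run whatever is due. A minimal stand-alone illustration of the same schedule API, with hypothetical task names:

import time
import schedule

def cluster_heartbeat():
    print('heartbeat')   # placeholder for a real periodic task

scheduler = schedule.Scheduler()
scheduler.every(20).seconds.do(cluster_heartbeat)
scheduler.every(60).seconds.do(lambda: print('gather analytics'))

while True:
    scheduler.run_pending()   # runs whichever jobs are due
    time.sleep(1)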
@@ -1,7 +1,9 @@
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
@@ -72,9 +74,6 @@ class PoolWorker(object):
|
||||
if not body.get('uuid'):
|
||||
body['uuid'] = str(uuid4())
|
||||
uuid = body['uuid']
|
||||
logger.debug('delivered {} to worker[{}] qsize {}'.format(
|
||||
uuid, self.pid, self.qsize
|
||||
))
|
||||
self.managed_tasks[uuid] = body
|
||||
self.queue.put(body, block=True, timeout=5)
|
||||
self.messages_sent += 1
|
||||
@@ -247,7 +246,7 @@ class WorkerPool(object):
|
||||
' qsize={{ w.managed_tasks|length }}'
|
||||
' rss={{ w.mb }}MB'
|
||||
'{% for task in w.managed_tasks.values() %}'
|
||||
'\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
|
||||
'\n - {% if loop.index0 == 0 %}running {% if "age" in task %}for: {{ "%.1f" % task["age"] }}s {% endif %}{% else %}queued {% endif %}'
|
||||
'{{ task["uuid"] }} '
|
||||
'{% if "task" in task %}'
|
||||
'{{ task["task"].rsplit(".", 1)[-1] }}'
|
||||
@@ -368,6 +367,26 @@ class AutoscalePool(WorkerPool):
|
||||
logger.warn('scaling down worker pid:{}'.format(w.pid))
|
||||
w.quit()
|
||||
self.workers.remove(w)
|
||||
if w.alive:
|
||||
# if we discover a task manager invocation that's been running
|
||||
# too long, reap it (because otherwise it'll just hold the postgres
|
||||
# advisory lock forever); the goal of this code is to discover
|
||||
# deadlocks or other serious issues in the task manager that cause
|
||||
# the task manager to never do more work
|
||||
current_task = w.current_task
|
||||
if current_task and isinstance(current_task, dict):
|
||||
if current_task.get('task', '').endswith('tasks.run_task_manager'):
|
||||
if 'started' not in current_task:
|
||||
w.managed_tasks[
|
||||
current_task['uuid']
|
||||
]['started'] = time.time()
|
||||
age = time.time() - current_task['started']
|
||||
w.managed_tasks[current_task['uuid']]['age'] = age
|
||||
if age > (60 * 5):
|
||||
logger.error(
|
||||
f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}'
|
||||
) # noqa
|
||||
os.kill(w.pid, signal.SIGTERM)
|
||||
|
||||
for m in orphaned:
|
||||
# if all the workers are dead, spawn at least one
|
||||
|
||||
@@ -61,7 +61,7 @@ class AWXConsumer(ConsumerMixin):
|
||||
])
|
||||
|
||||
def control(self, body, message):
|
||||
logger.warn(body)
|
||||
logger.warn('Consumer received control message {}'.format(body))
|
||||
control = body.get('control')
|
||||
if control in ('status', 'running'):
|
||||
producer = Producer(
|
||||
@@ -148,7 +148,6 @@ class BaseWorker(object):
|
||||
finally:
|
||||
if 'uuid' in body:
|
||||
uuid = body['uuid']
|
||||
logger.debug('task {} is finished'.format(uuid))
|
||||
finished.put(uuid)
|
||||
logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
|
||||
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import cProfile
|
||||
import logging
|
||||
import os
|
||||
import pstats
|
||||
import signal
|
||||
import tempfile
|
||||
import time
|
||||
import traceback
|
||||
from queue import Empty as QueueEmpty
|
||||
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.conf import settings
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.db import DatabaseError, OperationalError, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError, IntegrityError
|
||||
|
||||
@@ -32,6 +37,7 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
'''
|
||||
|
||||
MAX_RETRIES = 2
|
||||
prof = None
|
||||
|
||||
def __init__(self):
|
||||
self.buff = {}
|
||||
@@ -42,6 +48,26 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
except QueueEmpty:
|
||||
return {'event': 'FLUSH'}
|
||||
|
||||
def toggle_profiling(self, *args):
|
||||
if self.prof:
|
||||
self.prof.disable()
|
||||
filename = f'callback-{os.getpid()}.pstats'
|
||||
filepath = os.path.join(tempfile.gettempdir(), filename)
|
||||
with open(filepath, 'w') as f:
|
||||
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
|
||||
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
|
||||
self.prof = False
|
||||
logger.error(f'profiling is disabled, wrote {filepath}')
|
||||
else:
|
||||
self.prof = cProfile.Profile()
|
||||
self.prof.enable()
|
||||
logger.error('profiling is enabled')
|
||||
|
||||
def work_loop(self, *args, **kw):
|
||||
if settings.AWX_CALLBACK_PROFILE:
|
||||
signal.signal(signal.SIGUSR1, self.toggle_profiling)
|
||||
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
|
||||
|
||||
def flush(self, force=False):
|
||||
now = tz_now()
|
||||
if (
|
||||
|
||||
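When AWX_CALLBACK_PROFILE is enabled, sending SIGUSR1 to a callback worker toggles profiling: the first signal starts a cProfile session, the second writes a human-readable report plus a raw stats dump to the system temp directory. The raw dump can then be inspected with the standard library, for example (the filename here is hypothetical; use whatever path the worker logged):

import pstats

# Path reported in the worker log, e.g. /tmp/callback-12345.pstats.raw
stats = pstats.Stats('/tmp/callback-12345.pstats.raw')
stats.sort_stats('cumulative').print_stats(20)   # top 20 entries by cumulative time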
@@ -370,33 +370,32 @@ class IsolatedManager(object):
|
||||
private_data_dir
|
||||
)
|
||||
|
||||
if runner_obj.status == 'successful':
|
||||
for instance in instance_qs:
|
||||
task_result = {}
|
||||
try:
|
||||
task_result = runner_obj.get_fact_cache(instance.hostname)
|
||||
except Exception:
|
||||
logger.exception('Failed to read status from isolated instances')
|
||||
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
|
||||
task_result = {
|
||||
'cpu': task_result['awx_cpu'],
|
||||
'mem': task_result['awx_mem'],
|
||||
'capacity_cpu': task_result['awx_capacity_cpu'],
|
||||
'capacity_mem': task_result['awx_capacity_mem'],
|
||||
'version': task_result['awx_capacity_version']
|
||||
}
|
||||
IsolatedManager.update_capacity(instance, task_result)
|
||||
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
|
||||
elif instance.capacity == 0:
|
||||
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
|
||||
instance.hostname))
|
||||
else:
|
||||
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
|
||||
if instance.is_lost(isolated=True):
|
||||
instance.capacity = 0
|
||||
instance.save(update_fields=['capacity'])
|
||||
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
|
||||
instance.hostname, instance.modified))
|
||||
for instance in instance_qs:
|
||||
task_result = {}
|
||||
try:
|
||||
task_result = runner_obj.get_fact_cache(instance.hostname)
|
||||
except Exception:
|
||||
logger.exception('Failed to read status from isolated instances')
|
||||
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
|
||||
task_result = {
|
||||
'cpu': task_result['awx_cpu'],
|
||||
'mem': task_result['awx_mem'],
|
||||
'capacity_cpu': task_result['awx_capacity_cpu'],
|
||||
'capacity_mem': task_result['awx_capacity_mem'],
|
||||
'version': task_result['awx_capacity_version']
|
||||
}
|
||||
IsolatedManager.update_capacity(instance, task_result)
|
||||
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
|
||||
elif instance.capacity == 0:
|
||||
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
|
||||
instance.hostname))
|
||||
else:
|
||||
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
|
||||
if instance.is_lost(isolated=True):
|
||||
instance.capacity = 0
|
||||
instance.save(update_fields=['capacity'])
|
||||
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
|
||||
instance.hostname, instance.modified))
|
||||
finally:
|
||||
if os.path.exists(private_data_dir):
|
||||
shutil.rmtree(private_data_dir)
|
||||
|
||||
@@ -9,6 +9,13 @@ class Command(BaseCommand):
|
||||
|
||||
def handle(self, *args, **options):
|
||||
with connection.cursor() as cursor:
|
||||
start = {}
|
||||
for relation in (
|
||||
'main_jobevent', 'main_inventoryupdateevent',
|
||||
'main_projectupdateevent', 'main_adhoccommandevent'
|
||||
):
|
||||
cursor.execute(f"SELECT MAX(id) FROM {relation};")
|
||||
start[relation] = cursor.fetchone()[0] or 0
|
||||
clear = False
|
||||
while True:
|
||||
lines = []
|
||||
@@ -17,19 +24,15 @@ class Command(BaseCommand):
|
||||
'main_projectupdateevent', 'main_adhoccommandevent'
|
||||
):
|
||||
lines.append(relation)
|
||||
for label, interval in (
|
||||
('last minute: ', '1 minute'),
|
||||
('last 5 minutes:', '5 minutes'),
|
||||
('last hour: ', '1 hour'),
|
||||
):
|
||||
cursor.execute(
|
||||
f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE modified > now() - '{interval}'::interval;"
|
||||
)
|
||||
events = cursor.fetchone()[0] or 0
|
||||
lines.append(f'↳ {label} {events}')
|
||||
minimum = start[relation]
|
||||
cursor.execute(
|
||||
f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;"
|
||||
)
|
||||
events = cursor.fetchone()[0] or 0
|
||||
lines.append(f'↳ last minute {events}')
|
||||
lines.append('')
|
||||
if clear:
|
||||
for i in range(20):
|
||||
for i in range(12):
|
||||
sys.stdout.write('\x1b[1A\x1b[2K')
|
||||
for l in lines:
|
||||
print(l)
|
||||
|
||||
@@ -16,13 +16,10 @@ from awx.main.models import (
|
||||
Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
|
||||
SystemJob, WorkflowJob, Notification
|
||||
)
|
||||
from awx.main.signals import ( # noqa
|
||||
emit_update_inventory_on_created_or_deleted,
|
||||
emit_update_inventory_computed_fields,
|
||||
from awx.main.signals import (
|
||||
disable_activity_stream,
|
||||
disable_computed_fields
|
||||
)
|
||||
from django.db.models.signals import post_save, post_delete, m2m_changed # noqa
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
@@ -921,11 +921,14 @@ class Command(BaseCommand):
|
||||
available_instances = license_info.get('available_instances', 0)
|
||||
free_instances = license_info.get('free_instances', 0)
|
||||
time_remaining = license_info.get('time_remaining', 0)
|
||||
hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
|
||||
new_count = Host.objects.active_count()
|
||||
if time_remaining <= 0 and not license_info.get('demo', False):
|
||||
logger.error(LICENSE_EXPIRED_MESSAGE)
|
||||
if license_info.get('trial', False) is True:
|
||||
if time_remaining <= 0:
|
||||
if hard_error:
|
||||
logger.error(LICENSE_EXPIRED_MESSAGE)
|
||||
raise CommandError("License has expired!")
|
||||
else:
|
||||
logger.warning(LICENSE_EXPIRED_MESSAGE)
|
||||
# special check for tower-type inventory sources
|
||||
# but only if running the plugin
|
||||
TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
|
||||
@@ -938,15 +941,11 @@ class Command(BaseCommand):
|
||||
'new_count': new_count,
|
||||
'available_instances': available_instances,
|
||||
}
|
||||
if license_info.get('demo', False):
|
||||
logger.error(DEMO_LICENSE_MESSAGE % d)
|
||||
else:
|
||||
if hard_error:
|
||||
logger.error(LICENSE_MESSAGE % d)
|
||||
if (
|
||||
license_info.get('trial', False) is True or
|
||||
license_info['instance_count'] == 10 # basic 10 license
|
||||
):
|
||||
raise CommandError('License count exceeded!')
|
||||
else:
|
||||
logger.warning(LICENSE_MESSAGE % d)
|
||||
|
||||
def check_org_host_limit(self):
|
||||
license_info = get_licenser().validate()
|
||||
@@ -1007,12 +1006,6 @@ class Command(BaseCommand):
|
||||
except re.error:
|
||||
raise CommandError('invalid regular expression for --host-filter')
|
||||
|
||||
'''
|
||||
TODO: Remove this deprecation when we remove support for rax.py
|
||||
'''
|
||||
if self.source == "rax.py":
|
||||
logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")
|
||||
|
||||
begin = time.time()
|
||||
self.load_inventory_from_database()
|
||||
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
import os
|
||||
import logging
|
||||
from multiprocessing import Process
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection, connections
|
||||
from django.db import connection as django_connection
|
||||
from kombu import Exchange, Queue
|
||||
|
||||
from awx.main.utils.handlers import AWXProxyHandler
|
||||
@@ -16,6 +14,7 @@ from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
|
||||
from awx.main.dispatch import periodic
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
@@ -36,71 +35,6 @@ class Command(BaseCommand):
|
||||
help=('cause the dispatcher to recycle all of its worker processes;'
|
||||
'running jobs will run to completion first'))
|
||||
|
||||
def beat(self):
|
||||
from celery import Celery
|
||||
from celery.beat import PersistentScheduler
|
||||
from celery.apps import beat
|
||||
|
||||
class AWXScheduler(PersistentScheduler):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.ppid = os.getppid()
|
||||
super(AWXScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def setup_schedule(self):
|
||||
super(AWXScheduler, self).setup_schedule()
|
||||
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
|
||||
|
||||
def tick(self, *args, **kwargs):
|
||||
if os.getppid() != self.ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
raise SystemExit()
|
||||
return super(AWXScheduler, self).tick(*args, **kwargs)
|
||||
|
||||
def apply_async(self, entry, producer=None, advance=True, **kwargs):
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
task = TaskWorker.resolve_callable(entry.task)
|
||||
result, queue = task.apply_async()
|
||||
|
||||
class TaskResult(object):
|
||||
id = result['uuid']
|
||||
|
||||
return TaskResult()
|
||||
|
||||
sched_file = '/var/lib/awx/beat.db'
|
||||
app = Celery()
|
||||
app.conf.BROKER_URL = settings.BROKER_URL
|
||||
app.conf.CELERY_TASK_RESULT_EXPIRES = False
|
||||
|
||||
# celery in py3 seems to have a bug where the celerybeat schedule
|
||||
# shelve can become corrupted; we've _only_ seen this in Ubuntu and py36
|
||||
# it can be avoided by detecting and removing the corrupted file
|
||||
# at some point, we'll just stop using celerybeat, because it's clearly
|
||||
# buggy, too -_-
|
||||
#
|
||||
# https://github.com/celery/celery/issues/4777
|
||||
sched = AWXScheduler(schedule_filename=sched_file, app=app)
|
||||
try:
|
||||
sched.setup_schedule()
|
||||
except Exception:
|
||||
logger.exception('{} is corrupted, removing.'.format(sched_file))
|
||||
sched._remove_db()
|
||||
finally:
|
||||
try:
|
||||
sched.close()
|
||||
except Exception:
|
||||
logger.exception('{} failed to sync/close'.format(sched_file))
|
||||
|
||||
beat.Beat(
|
||||
30,
|
||||
app,
|
||||
schedule=sched_file, scheduler_cls=AWXScheduler
|
||||
).run()
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
print(Control('dispatcher').status())
|
||||
@@ -116,9 +50,10 @@ class Command(BaseCommand):
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
beat = Process(target=self.beat)
|
||||
beat.daemon = True
|
||||
beat.start()
|
||||
|
||||
# spawn a daemon thread to periodically enqueue scheduled tasks
|
||||
# (like the node heartbeat)
|
||||
periodic.run_continuously()
|
||||
|
||||
reaper.reap()
|
||||
consumer = None
|
||||
|
||||
@@ -78,8 +78,7 @@ class HostManager(models.Manager):
|
||||
self.core_filters = {}
|
||||
|
||||
qs = qs & q
|
||||
unique_by_name = qs.order_by('name', 'pk').distinct('name')
|
||||
return qs.filter(pk__in=unique_by_name)
|
||||
return qs.order_by('name', 'pk').distinct('name')
|
||||
return qs
|
||||
|
||||
|
||||
|
||||
@@ -3,15 +3,17 @@ from uuid import uuid4
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from awx.main.models import Instance
|
||||
|
||||
|
||||
def _generate_new_uuid_for_iso_nodes(apps, schema_editor):
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
for instance in Instance.objects.all():
|
||||
if instance.is_isolated():
|
||||
# The below code is a copy paste of instance.is_isolated()
|
||||
# We can't call is_isolated because we are using the "old" version
|
||||
# of the Instance definition.
|
||||
if instance.rampart_groups.filter(controller__isnull=False).exists():
|
||||
instance.uuid = str(uuid4())
|
||||
instance.save()
|
||||
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
|
||||
18 awx/main/migrations/0102_v370_unifiedjob_canceled.py (new file)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.4 on 2019-11-25 20:53
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0101_v370_generate_new_uuids_for_iso_nodes'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='canceled_on',
|
||||
field=models.DateTimeField(db_index=True, default=None, editable=False, help_text='The date and time when the cancel request was sent.', null=True),
|
||||
),
|
||||
]
|
||||
52 awx/main/migrations/0103_v370_remove_computed_fields.py (new file)
@@ -0,0 +1,52 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.16 on 2019-02-21 17:35
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0102_v370_unifiedjob_canceled'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='groups_with_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='has_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='has_inventory_sources',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='hosts_with_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='total_groups',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='total_hosts',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='host',
|
||||
name='has_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='host',
|
||||
name='has_inventory_sources',
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobhostsummary',
|
||||
name='failed',
|
||||
field=models.BooleanField(db_index=True, default=False, editable=False),
|
||||
),
|
||||
]
|
||||
24 awx/main/migrations/0104_v370_cleanup_old_scan_jts.py (new file)
@@ -0,0 +1,24 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-15 20:01
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def cleanup_scan_jts(apps, schema_editor):
|
||||
JobTemplate = apps.get_model('main', 'JobTemplate')
|
||||
JobTemplate.objects.filter(job_type='scan').update(job_type='run')
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0103_v370_remove_computed_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(cleanup_scan_jts),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='job_type',
|
||||
field=models.CharField(choices=[('run', 'Run'), ('check', 'Check')], default='run', max_length=64),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,21 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-15 18:01
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0104_v370_cleanup_old_scan_jts'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='jobevent',
|
||||
name='parent',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='jobevent',
|
||||
name='hosts',
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,17 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-27 12:39
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0105_v370_remove_jobevent_parent_and_hosts'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='inventory',
|
||||
name='groups_with_active_failures',
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,23 @@
|
||||
# Generated by Django 2.2.4 on 2020-01-08 22:11
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0106_v370_remove_inventory_groups_with_active_failures'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='workflowjobnode',
|
||||
name='all_parents_must_converge',
|
||||
field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjobtemplatenode',
|
||||
name='all_parents_must_converge',
|
||||
field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.8 on 2020-02-06 16:43
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0107_v370_workflow_convergence_api_toggle'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='dependencies_processed',
|
||||
field=models.BooleanField(default=False, editable=False, help_text='If True, the task manager has already processed potential dependencies for this job.'),
|
||||
),
|
||||
]
|
||||
@@ -1136,7 +1136,7 @@ ManagedCredentialType(
|
||||
'help_text': ugettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.')
|
||||
},{
|
||||
'id': 'bearer_token',
|
||||
'label': ugettext_noop('API authentication bearer token.'),
|
||||
'label': ugettext_noop('API authentication bearer token'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
},{
|
||||
|
||||
@@ -360,11 +360,10 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
value = force_text(event_data.get(field, '')).strip()
|
||||
if value != getattr(self, field):
|
||||
setattr(self, field, value)
|
||||
if isinstance(self, JobEvent):
|
||||
analytics_logger.info(
|
||||
'Event data saved.',
|
||||
extra=dict(python_objects=dict(job_event=self))
|
||||
)
|
||||
analytics_logger.info(
|
||||
'Event data saved.',
|
||||
extra=dict(python_objects=dict(job_event=self))
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from_data(cls, **kwargs):
|
||||
@@ -450,19 +449,6 @@ class JobEvent(BasePlaybookEvent):
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
hosts = models.ManyToManyField(
|
||||
'Host',
|
||||
related_name='job_events',
|
||||
editable=False,
|
||||
)
|
||||
parent = models.ForeignKey(
|
||||
'self',
|
||||
related_name='children',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
parent_uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
@@ -617,6 +603,7 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
||||
kwargs.pop('workflow_job_id', None)
|
||||
event = cls(**kwargs)
|
||||
event._update_from_event_data()
|
||||
return event
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
# Python
|
||||
import datetime
|
||||
import time
|
||||
import itertools
|
||||
import logging
|
||||
import re
|
||||
import copy
|
||||
@@ -123,12 +122,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Total number of groups in this inventory.'),
|
||||
)
|
||||
groups_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Number of groups in this inventory with active failures.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
@@ -339,139 +332,17 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
|
||||
return data
|
||||
|
||||
def update_host_computed_fields(self):
|
||||
'''
|
||||
Update computed fields for all hosts in this inventory.
|
||||
'''
|
||||
hosts_to_update = {}
|
||||
hosts_qs = self.hosts
|
||||
# Define queryset of all hosts with active failures.
|
||||
hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
|
||||
# Find all hosts that need the has_active_failures flag set.
|
||||
hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
|
||||
for host_pk in hosts_to_set.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_active_failures'] = True
|
||||
# Find all hosts that need the has_active_failures flag cleared.
|
||||
hosts_to_clear = hosts_qs.filter(has_active_failures=True).exclude(pk__in=hosts_with_active_failures)
|
||||
for host_pk in hosts_to_clear.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_active_failures'] = False
|
||||
# Define queryset of all hosts with cloud inventory sources.
|
||||
hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
|
||||
# Find all hosts that need the has_inventory_sources flag set.
|
||||
hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
|
||||
for host_pk in hosts_to_set.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_inventory_sources'] = True
|
||||
# Find all hosts that need the has_inventory_sources flag cleared.
|
||||
hosts_to_clear = hosts_qs.filter(has_inventory_sources=True).exclude(pk__in=hosts_with_cloud_inventory)
|
||||
for host_pk in hosts_to_clear.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_inventory_sources'] = False
|
||||
# Now apply updates to hosts where needed (in batches).
|
||||
all_update_pks = list(hosts_to_update.keys())
|
||||
|
||||
def _chunk(items, chunk_size):
|
||||
for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
|
||||
yield (g[1] for g in group)
|
||||
|
||||
for update_pks in _chunk(all_update_pks, 500):
|
||||
for host in hosts_qs.filter(pk__in=update_pks):
|
||||
host_updates = hosts_to_update[host.pk]
|
||||
for field, value in host_updates.items():
|
||||
setattr(host, field, value)
|
||||
host.save(update_fields=host_updates.keys())
|
||||
|
||||
def update_group_computed_fields(self):
|
||||
'''
|
||||
Update computed fields for all active groups in this inventory.
|
||||
'''
|
||||
group_children_map = self.get_group_children_map()
|
||||
group_hosts_map = self.get_group_hosts_map()
|
||||
active_host_pks = set(self.hosts.values_list('pk', flat=True))
|
||||
failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
|
||||
# active_group_pks = set(self.groups.values_list('pk', flat=True))
|
||||
failed_group_pks = set() # Update below as we check each group.
|
||||
groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
|
||||
groups_to_update = {}
|
||||
|
||||
# Build list of group pks to check, starting with the groups at the
|
||||
# deepest level within the tree.
|
||||
root_group_pks = set(self.root_groups.values_list('pk', flat=True))
|
||||
group_depths = {} # pk: max_depth
|
||||
|
||||
def update_group_depths(group_pk, current_depth=0):
|
||||
max_depth = group_depths.get(group_pk, -1)
|
||||
# Arbitrarily limit depth to avoid hitting Python recursion limit (which defaults to 1000).
|
||||
if current_depth > 100:
|
||||
return
|
||||
if current_depth > max_depth:
|
||||
group_depths[group_pk] = current_depth
|
||||
for child_pk in group_children_map.get(group_pk, set()):
|
||||
update_group_depths(child_pk, current_depth + 1)
|
||||
for group_pk in root_group_pks:
|
||||
update_group_depths(group_pk)
|
||||
group_pks_to_check = [x[1] for x in sorted([(v,k) for k,v in group_depths.items()], reverse=True)]
|
||||
|
||||
for group_pk in group_pks_to_check:
|
||||
# Get all children and host pks for this group.
|
||||
parent_pks_to_check = set([group_pk])
|
||||
parent_pks_checked = set()
|
||||
child_pks = set()
|
||||
host_pks = set()
|
||||
while parent_pks_to_check:
|
||||
for parent_pk in list(parent_pks_to_check):
|
||||
c_ids = group_children_map.get(parent_pk, set())
|
||||
child_pks.update(c_ids)
|
||||
parent_pks_to_check.remove(parent_pk)
|
||||
parent_pks_checked.add(parent_pk)
|
||||
parent_pks_to_check.update(c_ids - parent_pks_checked)
|
||||
h_ids = group_hosts_map.get(parent_pk, set())
|
||||
host_pks.update(h_ids)
|
||||
# Define updates needed for this group.
|
||||
group_updates = groups_to_update.setdefault(group_pk, {})
|
||||
group_updates.update({
|
||||
'total_hosts': len(active_host_pks & host_pks),
|
||||
'has_active_failures': bool(failed_host_pks & host_pks),
|
||||
'hosts_with_active_failures': len(failed_host_pks & host_pks),
|
||||
'total_groups': len(child_pks),
|
||||
'groups_with_active_failures': len(failed_group_pks & child_pks),
|
||||
'has_inventory_sources': bool(group_pk in groups_with_cloud_pks),
|
||||
})
|
||||
if group_updates['has_active_failures']:
|
||||
failed_group_pks.add(group_pk)
|
||||
|
||||
# Now apply updates to each group as needed (in batches).
|
||||
all_update_pks = list(groups_to_update.keys())
|
||||
for offset in range(0, len(all_update_pks), 500):
|
||||
update_pks = all_update_pks[offset:(offset + 500)]
|
||||
for group in self.groups.filter(pk__in=update_pks):
|
||||
group_updates = groups_to_update[group.pk]
|
||||
for field, value in list(group_updates.items()):
|
||||
if getattr(group, field) != value:
|
||||
setattr(group, field, value)
|
||||
else:
|
||||
group_updates.pop(field)
|
||||
if group_updates:
|
||||
group.save(update_fields=group_updates.keys())
|
||||
|
||||
def update_computed_fields(self, update_groups=True, update_hosts=True):
|
||||
def update_computed_fields(self):
|
||||
'''
|
||||
Update model fields that are computed from database relationships.
|
||||
'''
|
||||
logger.debug("Going to update inventory computed fields, pk={0}".format(self.pk))
|
||||
start_time = time.time()
|
||||
if update_hosts:
|
||||
self.update_host_computed_fields()
|
||||
if update_groups:
|
||||
self.update_group_computed_fields()
|
||||
active_hosts = self.hosts
|
||||
failed_hosts = active_hosts.filter(has_active_failures=True)
|
||||
failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
|
||||
active_groups = self.groups
|
||||
if self.kind == 'smart':
|
||||
active_groups = active_groups.none()
|
||||
failed_groups = active_groups.filter(has_active_failures=True)
|
||||
if self.kind == 'smart':
|
||||
active_inventory_sources = self.inventory_sources.none()
|
||||
else:
|
||||
@@ -482,7 +353,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
'total_hosts': active_hosts.count(),
|
||||
'hosts_with_active_failures': failed_hosts.count(),
|
||||
'total_groups': active_groups.count(),
|
||||
'groups_with_active_failures': failed_groups.count(),
|
||||
'has_inventory_sources': bool(active_inventory_sources.count()),
|
||||
'total_inventory_sources': active_inventory_sources.count(),
|
||||
'inventory_sources_with_failures': failed_inventory_sources.count(),
|
||||
@@ -545,7 +415,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
|
||||
connection.vendor != 'sqlite'):
|
||||
# Minimal update of host_count for smart inventory host filter changes
|
||||
self.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
self.update_computed_fields()
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
@@ -631,18 +501,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
editable=False,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
has_active_failures = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether the last job failed for this host.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether this host was created/updated from any external inventory sources.'),
|
||||
)
|
||||
inventory_sources = models.ManyToManyField(
|
||||
'InventorySource',
|
||||
related_name='hosts',
|
||||
@@ -673,34 +531,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:host_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def update_computed_fields(self, update_inventory=True, update_groups=True):
|
||||
'''
|
||||
Update model fields that are computed from database relationships.
|
||||
'''
|
||||
has_active_failures = bool(self.last_job_host_summary and
|
||||
self.last_job_host_summary.failed)
|
||||
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
computed_fields = {
|
||||
'has_active_failures': has_active_failures,
|
||||
'has_inventory_sources': bool(active_inventory_sources.count()),
|
||||
}
|
||||
for field, value in computed_fields.items():
|
||||
if getattr(self, field) != value:
|
||||
setattr(self, field, value)
|
||||
else:
|
||||
computed_fields.pop(field)
|
||||
if computed_fields:
|
||||
self.save(update_fields=computed_fields.keys())
|
||||
# Groups and inventory may also need to be updated when host fields
|
||||
# change.
|
||||
# NOTE: I think this is no longer needed
|
||||
# if update_groups:
|
||||
# for group in self.all_groups:
|
||||
# group.update_computed_fields()
|
||||
# if update_inventory:
|
||||
# self.inventory.update_computed_fields(update_groups=False,
|
||||
# update_hosts=False)
|
||||
# Rebuild summary fields cache
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
|
||||
@property
|
||||
@@ -815,42 +645,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
blank=True,
|
||||
help_text=_('Hosts associated directly with this group.'),
|
||||
)
|
||||
total_hosts = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Total number of hosts directly or indirectly in this group.'),
|
||||
)
|
||||
has_active_failures = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether this group has any hosts with active failures.'),
|
||||
)
|
||||
hosts_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Number of hosts in this group with active failures.'),
|
||||
)
|
||||
total_groups = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Total number of child groups contained within this group.'),
|
||||
)
|
||||
groups_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Number of child groups within this group that have active failures.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether this group was created/updated from any external inventory sources.'),
|
||||
)
|
||||
inventory_sources = models.ManyToManyField(
|
||||
'InventorySource',
|
||||
related_name='groups',
|
||||
@@ -925,32 +719,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
mark_actual()
|
||||
activity_stream_delete(None, self)
|
||||
|
||||
def update_computed_fields(self):
|
||||
'''
|
||||
Update model fields that are computed from database relationships.
|
||||
'''
|
||||
active_hosts = self.all_hosts
|
||||
failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
|
||||
active_groups = self.all_children
|
||||
# FIXME: May not be accurate unless we always update groups depth-first.
|
||||
failed_groups = active_groups.filter(has_active_failures=True)
|
||||
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
computed_fields = {
|
||||
'total_hosts': active_hosts.count(),
|
||||
'has_active_failures': bool(failed_hosts.count()),
|
||||
'hosts_with_active_failures': failed_hosts.count(),
|
||||
'total_groups': active_groups.count(),
|
||||
'groups_with_active_failures': failed_groups.count(),
|
||||
'has_inventory_sources': bool(active_inventory_sources.count()),
|
||||
}
|
||||
for field, value in computed_fields.items():
|
||||
if getattr(self, field) != value:
|
||||
setattr(self, field, value)
|
||||
else:
|
||||
computed_fields.pop(field)
|
||||
if computed_fields:
|
||||
self.save(update_fields=computed_fields.keys())
|
||||
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
|
||||
def get_all_parents(self, except_pks=None):
|
||||
@@ -1556,7 +1324,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
self.update()
if not getattr(_inventory_updates, 'is_updating', False):
if self.inventory is not None:
self.inventory.update_computed_fields(update_groups=False, update_hosts=False)
self.inventory.update_computed_fields()

def _get_current_status(self):
if self.source:
@@ -2616,6 +2384,9 @@ class satellite6(PluginFileInjector):
group_patterns = '[]'
group_prefix = 'foreman_'
want_hostcollections = 'False'
want_ansible_ssh_host = 'False'
rich_params = 'False'
want_facts = 'True'
foreman_opts = dict(inventory_update.source_vars_dict.items())
foreman_opts.setdefault('ssl_verify', 'False')
for k, v in foreman_opts.items():
@@ -2625,6 +2396,12 @@ class satellite6(PluginFileInjector):
group_prefix = v
elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
want_hostcollections = v
elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
want_ansible_ssh_host = v
elif k == 'satellite6_rich_params' and isinstance(v, bool):
rich_params = v
elif k == 'satellite6_want_facts' and isinstance(v, bool):
want_facts = v
else:
cp.set(section, k, str(v))

@@ -2636,9 +2413,11 @@ class satellite6(PluginFileInjector):
section = 'ansible'
cp.add_section(section)
cp.set(section, 'group_patterns', group_patterns)
cp.set(section, 'want_facts', 'True')
cp.set(section, 'want_facts', str(want_facts))
cp.set(section, 'want_hostcollections', str(want_hostcollections))
cp.set(section, 'group_prefix', group_prefix)
cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
cp.set(section, 'rich_params', str(rich_params))

section = 'cache'
cp.add_section(section)

@@ -13,6 +13,7 @@ from urllib.parse import urljoin

# Django
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
#from django.core.cache import cache
from django.utils.encoding import smart_str
@@ -28,7 +29,7 @@ from awx.api.versioning import reverse
from awx.main.models.base import (
BaseModel, CreatedModifiedModel,
prevent_search, accepts_json,
JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
JOB_TYPE_CHOICES, NEW_JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
VarsDictProperty
)
from awx.main.models.events import JobEvent, SystemJobEvent
@@ -204,6 +205,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
app_label = 'main'
ordering = ('name',)

job_type = models.CharField(
max_length=64,
choices=NEW_JOB_TYPE_CHOICES,
default='run',
)
host_config_key = prevent_search(models.CharField(
max_length=1024,
blank=True,
@@ -293,6 +299,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
def resources_needed_to_start(self):
return [fd for fd in ['project', 'inventory'] if not getattr(self, '{}_id'.format(fd))]

def clean_forks(self):
if settings.MAX_FORKS > 0 and self.forks > settings.MAX_FORKS:
raise ValidationError(_(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.'))
return self.forks

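A small illustration of the rule clean_forks enforces: a MAX_FORKS of 0 (the shipped default) disables the limit, anything above a positive limit is rejected at validation time. The helper below is a sketch, not AWX code.

def validate_forks(forks, max_forks):
    # max_forks == 0 means "no limit configured".
    if max_forks > 0 and forks > max_forks:
        raise ValueError('Maximum number of forks ({}) exceeded.'.format(max_forks))
    return forks


assert validate_forks(5, 0) == 5      # no limit configured
assert validate_forks(10, 10) == 10   # at the limit is allowed
try:
    validate_forks(11, 10)
except ValueError as exc:
    print(exc)  # Maximum number of forks (10) exceeded.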
def create_job(self, **kwargs):
'''
Create a new job based on this template.
@@ -818,8 +829,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
ansible_local_system_id = ansible_facts.get('ansible_local', {}).get('insights', {}).get('system_id', None)
ansible_facts_system_id = ansible_facts.get('insights', {}).get('system_id', None)
ansible_local = ansible_facts.get('ansible_local', {}).get('insights', {})
ansible_facts = ansible_facts.get('insights', {})
ansible_local_system_id = ansible_local.get('system_id', None) if isinstance(ansible_local, dict) else None
ansible_facts_system_id = ansible_facts.get('system_id', None) if isinstance(ansible_facts, dict) else None
if ansible_local_system_id:
print("Setting local {}".format(ansible_local_system_id))
logger.debug("Insights system_id {} found for host <{}, {}> in"
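The isinstance() guards matter because gathered facts are arbitrary JSON: 'ansible_local.insights' or 'insights' may hold something other than a dict. A hedged sketch of the lookup with sample data:

def extract_insights_system_id(ansible_facts):
    # Either location may be missing, or may not be a dict at all,
    # so guard before calling .get() on it.
    local = ansible_facts.get('ansible_local', {}).get('insights', {})
    top = ansible_facts.get('insights', {})
    local_id = local.get('system_id') if isinstance(local, dict) else None
    top_id = top.get('system_id') if isinstance(top, dict) else None
    return local_id or top_id


print(extract_insights_system_id({'insights': {'system_id': 'abc-123'}}))  # abc-123
print(extract_insights_system_id({'insights': 'not-a-dict'}))              # None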
@@ -1060,7 +1073,7 @@ class JobHostSummary(CreatedModifiedModel):
processed = models.PositiveIntegerField(default=0, editable=False)
rescued = models.PositiveIntegerField(default=0, editable=False)
skipped = models.PositiveIntegerField(default=0, editable=False)
failed = models.BooleanField(default=False, editable=False)
failed = models.BooleanField(default=False, editable=False, db_index=True)

def __str__(self):
host = getattr_dne(self, 'host')
@@ -1095,7 +1108,6 @@ class JobHostSummary(CreatedModifiedModel):
update_fields.append('last_job_host_summary_id')
if update_fields:
self.host.save(update_fields=update_fields)
#self.host.update_computed_fields()


class SystemJobOptions(BaseModel):

@@ -270,21 +270,19 @@ class JobNotificationMixin(object):
'elapsed', 'job_explanation', 'execution_node', 'controller_node', 'allow_simultaneous',
'scm_revision', 'diff_mode', 'job_slice_number', 'job_slice_count', 'custom_virtualenv',
'approval_status', 'approval_node_name', 'workflow_url',
{'host_status_counts': ['skipped', 'ok', 'changed', 'failures', 'dark']},
{'playbook_counts': ['play_count', 'task_count']},
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark',
'processed', 'rescued', 'ignored']},
{'summary_fields': [{'inventory': ['id', 'name', 'description', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'organization_id', 'kind']},
{'project': ['id', 'name', 'description', 'status', 'scm_type']},
{'project_update': ['id', 'name', 'description', 'status', 'failed']},
{'job_template': ['id', 'name', 'description']},
{'unified_job_template': ['id', 'name', 'description', 'unified_job_type']},
{'instance_group': ['name', 'id']},
{'created_by': ['id', 'username', 'first_name', 'last_name']},
{'labels': ['count', 'results']},
{'source_workflow_job': ['description', 'elapsed', 'failed', 'id', 'name', 'status']}]}]
{'labels': ['count', 'results']}]}]

@classmethod
|
||||
def context_stub(cls):
|
||||
@@ -303,7 +301,7 @@ class JobNotificationMixin(object):
|
||||
'finished': False,
|
||||
'force_handlers': False,
|
||||
'forks': 0,
|
||||
'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0},
|
||||
'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0, 'failed': False, 'processed': 0, 'rescued': 0},
|
||||
'id': 42,
|
||||
'job_explanation': 'Sample job explanation',
|
||||
'job_slice_count': 1,
|
||||
@@ -314,7 +312,6 @@ class JobNotificationMixin(object):
|
||||
'limit': 'bar_limit',
|
||||
'modified': datetime.datetime(2018, 12, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
|
||||
'name': 'Stub JobTemplate',
|
||||
'playbook_counts': {'play_count': 5, 'task_count': 10},
|
||||
'playbook': 'ping.yml',
|
||||
'scm_revision': '',
|
||||
'skip_tags': '',
|
||||
@@ -327,7 +324,6 @@ class JobNotificationMixin(object):
|
||||
'username': 'admin'},
|
||||
'instance_group': {'id': 1, 'name': 'tower'},
|
||||
'inventory': {'description': 'Sample inventory description',
|
||||
'groups_with_active_failures': 0,
|
||||
'has_active_failures': False,
|
||||
'has_inventory_sources': False,
|
||||
'hosts_with_active_failures': 0,
|
||||
@@ -348,18 +344,10 @@ class JobNotificationMixin(object):
|
||||
'name': 'Stub project',
|
||||
'scm_type': 'git',
|
||||
'status': 'successful'},
|
||||
'project_update': {'id': 5, 'name': 'Stub Project Update', 'description': 'Project Update',
|
||||
'status': 'running', 'failed': False},
|
||||
'unified_job_template': {'description': 'Sample unified job template description',
|
||||
'id': 39,
|
||||
'name': 'Stub Job Template',
|
||||
'unified_job_type': 'job'},
|
||||
'source_workflow_job': {'description': 'Sample workflow job description',
|
||||
'elapsed': 0.000,
|
||||
'failed': False,
|
||||
'id': 88,
|
||||
'name': 'Stub WorkflowJobTemplate',
|
||||
'status': 'running'}},
|
||||
'unified_job_type': 'job'}},
|
||||
'timeout': 0,
|
||||
'type': 'job',
|
||||
'url': '/api/v2/jobs/13/',
|
||||
@@ -393,10 +381,20 @@ class JobNotificationMixin(object):
|
||||
The context will contain whitelisted content retrieved from a serialized job object
|
||||
(see JobNotificationMixin.JOB_FIELDS_WHITELIST), the job's friendly name,
|
||||
and a url to the job run."""
|
||||
context = {'job': {},
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_metadata': json.dumps(self.notification_data(), indent=4)}
|
||||
job_context = {'host_status_counts': {}}
|
||||
summary = None
|
||||
if hasattr(self, 'job_host_summaries'):
|
||||
summary = self.job_host_summaries.first()
|
||||
if summary:
|
||||
from awx.api.serializers import JobHostSummarySerializer
|
||||
summary_data = JobHostSummarySerializer(summary).to_representation(summary)
|
||||
job_context['host_status_counts'] = summary_data
|
||||
context = {
|
||||
'job': job_context,
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_metadata': json.dumps(self.notification_data(), indent=4)
|
||||
}
|
||||
|
||||
def build_context(node, fields, whitelisted_fields):
|
||||
for safe_field in whitelisted_fields:
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
# Python
|
||||
from io import StringIO
|
||||
import datetime
|
||||
import codecs
|
||||
import json
|
||||
import logging
|
||||
@@ -623,6 +624,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
editable=False,
|
||||
help_text=_("The date and time the job was queued for starting."),
|
||||
)
|
||||
dependencies_processed = models.BooleanField(
default=False,
editable=False,
help_text=_("If True, the task manager has already processed potential dependencies for this job.")
)
finished = models.DateTimeField(
|
||||
null=True,
|
||||
default=None,
|
||||
@@ -630,6 +636,13 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
help_text=_("The date and time the job finished execution."),
|
||||
db_index=True,
|
||||
)
|
||||
canceled_on = models.DateTimeField(
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
help_text=_("The date and time when the cancel request was sent."),
|
||||
db_index=True,
|
||||
)
|
||||
elapsed = models.DecimalField(
|
||||
max_digits=12,
|
||||
decimal_places=3,
|
||||
@@ -833,7 +846,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
self.unified_job_template = self._get_parent_instance()
|
||||
if 'unified_job_template' not in update_fields:
|
||||
update_fields.append('unified_job_template')
|
||||
|
||||
|
||||
if self.cancel_flag and not self.canceled_on:
|
||||
# Record the 'canceled' time.
|
||||
self.canceled_on = now()
|
||||
if 'canceled_on' not in update_fields:
|
||||
update_fields.append('canceled_on')
|
||||
# Okay; we're done. Perform the actual save.
|
||||
result = super(UnifiedJob, self).save(*args, **kwargs)
|
||||
|
||||
@@ -997,6 +1015,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
dir=settings.JOBOUTPUT_ROOT,
|
||||
encoding='utf-8'
|
||||
)
|
||||
from awx.main.tasks import purge_old_stdout_files # circular import
|
||||
purge_old_stdout_files.apply_async()
|
||||
|
||||
# Before the addition of event-based stdout, older versions of
|
||||
# awx stored stdout as raw text blobs in a certain database column
|
||||
@@ -1199,12 +1219,17 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
status_data['instance_group_name'] = self.instance_group.name
|
||||
else:
|
||||
status_data['instance_group_name'] = None
|
||||
elif status in ['successful', 'failed', 'canceled'] and self.finished:
|
||||
status_data['finished'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
status_data.update(self.websocket_emit_data())
|
||||
status_data['group_name'] = 'jobs'
|
||||
if getattr(self, 'unified_job_template_id', None):
|
||||
status_data['unified_job_template_id'] = self.unified_job_template_id
|
||||
emit_channel_notification('jobs-status_changed', status_data)
|
||||
|
||||
if self.spawned_by_workflow:
|
||||
status_data['group_name'] = "workflow_events"
|
||||
status_data['workflow_job_template_id'] = self.unified_job_template.id
|
||||
emit_channel_notification('workflow_events-' + str(self.workflow_job_id), status_data)
|
||||
except IOError: # includes socket errors
|
||||
logger.exception('%s failed to emit channel msg about status change', self.log_format)
|
||||
|
||||
@@ -79,6 +79,11 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
|
||||
symmetrical=False,
|
||||
related_name='%(class)ss_always',
|
||||
)
|
||||
all_parents_must_converge = models.BooleanField(
default=False,
help_text=_("If enabled then the node will only run if all of the parent nodes "
"have met the criteria to reach this node")
)
unified_job_template = models.ForeignKey(
|
||||
'UnifiedJobTemplate',
|
||||
related_name='%(class)ss',
|
||||
@@ -102,7 +107,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
|
||||
'''
|
||||
return ['workflow_job', 'unified_job_template',
|
||||
'extra_data', 'survey_passwords',
|
||||
'inventory', 'credentials', 'char_prompts']
|
||||
'inventory', 'credentials', 'char_prompts', 'all_parents_must_converge']
|
||||
|
||||
def create_workflow_job_node(self, **kwargs):
|
||||
'''
|
||||
@@ -130,7 +135,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
|
||||
FIELDS_TO_PRESERVE_AT_COPY = [
|
||||
'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
|
||||
'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
|
||||
'char_prompts'
|
||||
'char_prompts', 'all_parents_must_converge'
|
||||
]
|
||||
REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']
|
||||
|
||||
@@ -745,6 +750,8 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
|
||||
def signal_start(self, **kwargs):
|
||||
can_start = super(WorkflowApproval, self).signal_start(**kwargs)
|
||||
self.send_approval_notification('running')
|
||||
self.started = self.created
|
||||
self.save(update_fields=['started'])
|
||||
return can_start
|
||||
|
||||
def send_approval_notification(self, approval_status):
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
import dateutil.parser as dp
|
||||
@@ -23,6 +24,33 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
recipient_parameter = "grafana_url"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_BODY = "{{ job_metadata }}"
|
||||
default_messages = {
|
||||
"started": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"success": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"error": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"workflow_approval": {
|
||||
"running": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG, "body": None
|
||||
},
|
||||
"approved": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG, "body": None
|
||||
},
|
||||
"timed_out": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG, "body": None
|
||||
},
|
||||
"denied": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG, "body": None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def __init__(self, grafana_key,dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True,
|
||||
fail_silently=False, **kwargs):
|
||||
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
||||
@@ -34,6 +62,13 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
self.isRegion = isRegion
|
||||
|
||||
def format_body(self, body):
# expect body to be a string representing a dict
try:
potential_body = json.loads(body)
if isinstance(potential_body, dict):
body = potential_body
except json.JSONDecodeError:
body = {}
return body

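format_body tolerates bodies that are not valid JSON objects; roughly, behavior like this is expected (a standalone sketch mirroring the method above, not a test from the repo):

import json


def format_body(body):
    # Accept a JSON object, fall back to an empty dict for anything
    # that does not decode, and leave non-dict JSON untouched.
    try:
        potential_body = json.loads(body)
        if isinstance(potential_body, dict):
            body = potential_body
    except json.JSONDecodeError:
        body = {}
    return body


print(format_body('{"started": "2019-01-01T00:00:00Z"}'))  # parsed dict
print(format_body('not json at all'))                      # {}
print(format_body('[1, 2, 3]'))                            # original string kept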
def send_messages(self, messages):
@@ -41,14 +76,16 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
for m in messages:
grafana_data = {}
grafana_headers = {}
try:
epoch=datetime.datetime.utcfromtimestamp(0)
grafana_data['time'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
except ValueError:
logger.error(smart_text(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
if not self.fail_silently:
raise Exception(smart_text(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
if 'started' in m.body:
try:
epoch=datetime.datetime.utcfromtimestamp(0)
grafana_data['time'] = grafana_data['timeEnd'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
if m.body.get('finished'):
grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
except ValueError:
logger.error(smart_text(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
if not self.fail_silently:
raise Exception(smart_text(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
grafana_data['isRegion'] = self.isRegion
grafana_data['dashboardId'] = self.dashboardId
grafana_data['panelId'] = self.panelId

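Grafana annotations take millisecond epoch timestamps; the conversion used above boils down to something like this (dateutil is assumed available, as it is imported in this module):

import datetime
import dateutil.parser as dp


def to_epoch_millis(timestamp):
    # Parse the ISO timestamp, drop the timezone, and express it as
    # milliseconds since the Unix epoch, matching what Grafana expects.
    epoch = datetime.datetime.utcfromtimestamp(0)
    return int((dp.parse(timestamp).replace(tzinfo=None) - epoch).total_seconds() * 1000)


print(to_epoch_millis('2019-06-01T12:00:00Z'))  # 1559390400000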
@@ -8,7 +8,7 @@ REPLACE_STR = '$encrypted$'

class UriCleaner(object):
REPLACE_STR = REPLACE_STR
SENSITIVE_URI_PATTERN = re.compile(r'(\w+:(\/?\/?)[^\s]+)', re.MULTILINE)  # NOQA
SENSITIVE_URI_PATTERN = re.compile(r'(\w{1,20}:(\/?\/?)[^\s]+)', re.MULTILINE)  # NOQA

@staticmethod
def remove_sensitive(cleartext):

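The only change here is bounding the scheme match (\w{1,20} instead of \w+); the pattern still catches anything that looks like scheme:rest, for example URLs carrying basic-auth credentials. A rough illustration of the pattern only (it substitutes the whole match, unlike the real remove_sensitive(), which is more surgical):

import re

REPLACE_STR = '$encrypted$'
SENSITIVE_URI_PATTERN = re.compile(r'(\w{1,20}:(\/?\/?)[^\s]+)', re.MULTILINE)

text = 'cloning https://user:secret@git.example.com/repo.git failed'
print(SENSITIVE_URI_PATTERN.sub(REPLACE_STR, text))
# cloning $encrypted$ failed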
@@ -89,8 +89,8 @@ class SimpleDAG(object):
|
||||
run_status(n['node_object']),
|
||||
color
|
||||
)
|
||||
for label, edges in self.node_from_edges_by_label.iteritems():
|
||||
for from_node, to_nodes in edges.iteritems():
|
||||
for label, edges in self.node_from_edges_by_label.items():
|
||||
for from_node, to_nodes in edges.items():
|
||||
for to_node in to_nodes:
|
||||
doc += "%s -> %s [ label=\"%s\" ];\n" % (
|
||||
run_status(self.nodes[from_node]['node_object']),
|
||||
@@ -140,36 +140,36 @@ class SimpleDAG(object):
def find_ord(self, obj):
return self.node_obj_to_node_index.get(obj, None)

def _get_dependencies_by_label(self, node_index, label):
def _get_children_by_label(self, node_index, label):
return [self.nodes[index] for index in
self.node_from_edges_by_label.get(label, {})
.get(node_index, [])]

def get_dependencies(self, obj, label=None):
def get_children(self, obj, label=None):
this_ord = self.find_ord(obj)
nodes = []
if label:
return self._get_dependencies_by_label(this_ord, label)
return self._get_children_by_label(this_ord, label)
else:
nodes = []
for l in self.node_from_edges_by_label.keys():
nodes.extend(self._get_dependencies_by_label(this_ord, l))
nodes.extend(self._get_children_by_label(this_ord, l))
return nodes

def _get_dependents_by_label(self, node_index, label):
def _get_parents_by_label(self, node_index, label):
return [self.nodes[index] for index in
self.node_to_edges_by_label.get(label, {})
.get(node_index, [])]

def get_dependents(self, obj, label=None):
def get_parents(self, obj, label=None):
this_ord = self.find_ord(obj)
nodes = []
if label:
return self._get_dependents_by_label(this_ord, label)
return self._get_parents_by_label(this_ord, label)
else:
nodes = []
for l in self.node_to_edges_by_label.keys():
nodes.extend(self._get_dependents_by_label(this_ord, l))
nodes.extend(self._get_parents_by_label(this_ord, l))
return nodes

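The rename is purely terminological: edges still run from parent to child, so get_children() follows outgoing edges and get_parents() incoming ones, optionally filtered by edge label. A tiny stand-in for that bookkeeping (not the real SimpleDAG API; node names are hypothetical):

# edges maps label -> {parent: [children...]}.
edges = {
    'success_nodes': {'root': ['approve']},
    'failure_nodes': {'root': ['cleanup']},
}


def get_children(node, label=None):
    labels = [label] if label else edges.keys()
    return [c for l in labels for c in edges.get(l, {}).get(node, [])]


def get_parents(node, label=None):
    labels = [label] if label else edges.keys()
    return [p for l in labels for p, children in edges.get(l, {}).items() if node in children]


print(get_children('root'))                    # ['approve', 'cleanup']
print(get_children('root', 'success_nodes'))   # ['approve']
print(get_parents('approve'))                  # ['root']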
def get_root_nodes(self):
@@ -188,7 +188,7 @@ class SimpleDAG(object):
|
||||
while stack:
|
||||
node_obj = stack.pop()
|
||||
|
||||
children = [node['node_object'] for node in self.get_dependencies(node_obj)]
|
||||
children = [node['node_object'] for node in self.get_children(node_obj)]
|
||||
children_to_add = list(filter(lambda node_obj: node_obj not in node_objs_visited, children))
|
||||
|
||||
if children_to_add:
|
||||
@@ -212,7 +212,7 @@ class SimpleDAG(object):
|
||||
if obj.id in obj_ids_processed:
|
||||
return
|
||||
|
||||
for child in self.get_dependencies(obj):
|
||||
for child in self.get_children(obj):
|
||||
visit(child)
|
||||
obj_ids_processed.add(obj.id)
|
||||
nodes_sorted.appendleft(node)
|
||||
|
||||
@@ -55,7 +55,7 @@ class WorkflowDAG(SimpleDAG):
|
||||
|
||||
def _are_relevant_parents_finished(self, node):
|
||||
obj = node['node_object']
|
||||
parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
|
||||
parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
|
||||
for p in parent_nodes:
|
||||
if p.do_not_run is True:
|
||||
continue
|
||||
@@ -69,33 +69,55 @@ class WorkflowDAG(SimpleDAG):
|
||||
return False
|
||||
return True

def _all_parents_met_convergence_criteria(self, node):
# This function takes any node and checks that all of its parents have met their criteria to run the child.
# It returns a boolean and is really only useful if the node is an ALL convergence node; it is
# intended to be used in conjunction with the node property `all_parents_must_converge`.
obj = node['node_object']
parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
for p in parent_nodes:
# node has a status
if p.job and p.job.status in ["successful", "failed"]:
if p.job and p.job.status == "successful":
status = "success_nodes"
elif p.job and p.job.status == "failed":
status = "failure_nodes"
# check that the node's status matches either a pathway of the same status or an always path.
if (p not in [node['node_object'] for node in self.get_parents(obj, status)] and
p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
return False
return True

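In plain terms: an ALL-convergence node may run only if every finished parent reached it along an edge that matches that parent's outcome (success_nodes for successful, failure_nodes for failed, or always_nodes). A hedged, self-contained sketch of that rule, using plain tuples instead of workflow node objects:

def all_parents_met_convergence_criteria(parents):
    # parents: list of (job_status, edge_labels_leading_to_this_node)
    for status, labels in parents:
        if status == 'successful':
            required = 'success_nodes'
        elif status == 'failed':
            required = 'failure_nodes'
        else:
            continue  # parent not finished yet; handled elsewhere
        if required not in labels and 'always_nodes' not in labels:
            return False
    return True


print(all_parents_met_convergence_criteria(
    [('successful', {'success_nodes'}), ('failed', {'failure_nodes'})]))   # True
print(all_parents_met_convergence_criteria(
    [('successful', {'success_nodes'}), ('failed', {'success_nodes'})]))   # False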
def bfs_nodes_to_run(self):
|
||||
nodes = self.get_root_nodes()
|
||||
nodes_found = []
|
||||
node_ids_visited = set()
|
||||
|
||||
for index, n in enumerate(nodes):
|
||||
obj = n['node_object']
|
||||
if obj.id in node_ids_visited:
|
||||
continue
|
||||
node_ids_visited.add(obj.id)
|
||||
|
||||
if obj.do_not_run is True:
|
||||
continue
|
||||
|
||||
if obj.job:
|
||||
elif obj.job:
|
||||
if obj.job.status in ['failed', 'error', 'canceled']:
|
||||
nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
nodes.extend(self.get_children(obj, 'failure_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
elif obj.job.status == 'successful':
|
||||
nodes.extend(self.get_dependencies(obj, 'success_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
nodes.extend(self.get_children(obj, 'success_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
elif obj.unified_job_template is None:
|
||||
nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
nodes.extend(self.get_children(obj, 'failure_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
else:
|
||||
if self._are_relevant_parents_finished(n):
|
||||
# This catches root nodes or ANY convergence nodes
|
||||
if not obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
|
||||
nodes_found.append(n)
|
||||
# This catches ALL convergence nodes
|
||||
elif obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
|
||||
if self._all_parents_met_convergence_criteria(n):
|
||||
nodes_found.append(n)
|
||||
|
||||
return [n['node_object'] for n in nodes_found]
|
||||
|
||||
def cancel_node_jobs(self):
|
||||
@@ -135,8 +157,8 @@ class WorkflowDAG(SimpleDAG):
|
||||
|
||||
for node in failed_nodes:
|
||||
obj = node['node_object']
|
||||
if (len(self.get_dependencies(obj, 'failure_nodes')) +
|
||||
len(self.get_dependencies(obj, 'always_nodes'))) == 0:
|
||||
if (len(self.get_children(obj, 'failure_nodes')) +
|
||||
len(self.get_children(obj, 'always_nodes'))) == 0:
|
||||
if obj.unified_job_template is None:
|
||||
res = True
|
||||
failed_unified_job_template_node_ids.append(str(obj.id))
|
||||
@@ -145,8 +167,8 @@ class WorkflowDAG(SimpleDAG):
|
||||
failed_path_nodes_id_status.append((str(obj.id), obj.job.status))
|
||||
|
||||
if res is True:
|
||||
s = _("No error handle path for workflow job node(s) [{node_status}] workflow job "
|
||||
"node(s) missing unified job template and error handle path [{no_ufjt}].")
|
||||
s = _("No error handling path for workflow job node(s) [{node_status}]. Workflow job "
|
||||
"node(s) missing unified job template and error handling path [{no_ufjt}].")
|
||||
parms = {
|
||||
'node_status': '',
|
||||
'no_ufjt': '',
|
||||
@@ -190,35 +212,48 @@ class WorkflowDAG(SimpleDAG):
|
||||
pass
|
||||
elif p.job:
|
||||
if p.job.status == 'successful':
|
||||
if node in (self.get_dependencies(p, 'success_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
if node in (self.get_children(p, 'success_nodes') +
|
||||
self.get_children(p, 'always_nodes')):
|
||||
return False
|
||||
elif p.job.status in ['failed', 'error', 'canceled']:
|
||||
if node in (self.get_dependencies(p, 'failure_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
if node in (self.get_children(p, 'failure_nodes') +
|
||||
self.get_children(p, 'always_nodes')):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
elif p.do_not_run is False and p.unified_job_template is None:
|
||||
if node in (self.get_dependencies(p, 'failure_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
elif not p.do_not_run and p.unified_job_template is None:
|
||||
if node in (self.get_children(p, 'failure_nodes') +
|
||||
self.get_children(p, 'always_nodes')):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
r'''
|
||||
determine if the current node is a convergence node by checking if all the
|
||||
parents are finished then checking to see if all parents meet the needed
|
||||
path criteria to run the convergence child.
|
||||
(i.e. parent must fail, parent must succeed, etc. to proceed)
|
||||
|
||||
Return a list object
|
||||
'''
|
||||
def mark_dnr_nodes(self):
|
||||
root_nodes = self.get_root_nodes()
|
||||
nodes_marked_do_not_run = []
|
||||
|
||||
for node in self.sort_nodes_topological():
|
||||
obj = node['node_object']
|
||||
|
||||
if obj.do_not_run is False and not obj.job and node not in root_nodes:
|
||||
parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
|
||||
if self._are_all_nodes_dnr_decided(parent_nodes):
|
||||
if self._should_mark_node_dnr(node, parent_nodes):
|
||||
parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
|
||||
if not obj.do_not_run and not obj.job and node not in root_nodes:
|
||||
if obj.all_parents_must_converge:
|
||||
if any(p.do_not_run for p in parent_nodes) or not self._all_parents_met_convergence_criteria(node):
|
||||
obj.do_not_run = True
|
||||
nodes_marked_do_not_run.append(node)
|
||||
else:
|
||||
if self._are_all_nodes_dnr_decided(parent_nodes):
|
||||
if self._should_mark_node_dnr(node, parent_nodes):
|
||||
obj.do_not_run = True
|
||||
nodes_marked_do_not_run.append(node)
|
||||
|
||||
return [n['node_object'] for n in nodes_marked_do_not_run]
|
||||
|
||||
@@ -23,6 +23,7 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
SystemJob,
|
||||
UnifiedJob,
|
||||
WorkflowApproval,
|
||||
WorkflowJob,
|
||||
WorkflowJobTemplate
|
||||
@@ -74,21 +75,6 @@ class TaskManager():
|
||||
key=lambda task: task.created)
|
||||
return all_tasks
|
||||
|
||||
|
||||
def get_latest_project_update_tasks(self, all_sorted_tasks):
|
||||
project_ids = set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
project_ids.add(task.project_id)
|
||||
return ProjectUpdate.objects.filter(id__in=project_ids)
|
||||
|
||||
def get_latest_inventory_update_tasks(self, all_sorted_tasks):
|
||||
inventory_ids = set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return InventoryUpdate.objects.filter(id__in=inventory_ids)
|
||||
|
||||
def get_running_workflow_jobs(self):
|
||||
graph_workflow_jobs = [wf for wf in
|
||||
WorkflowJob.objects.filter(status='running')]
|
||||
@@ -200,9 +186,6 @@ class TaskManager():
|
||||
schedule_task_manager()
|
||||
return result
|
||||
|
||||
def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
|
||||
return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]
|
||||
|
||||
def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
|
||||
from awx.main.tasks import handle_work_error, handle_work_success
|
||||
|
||||
@@ -364,10 +347,6 @@ class TaskManager():
|
||||
def should_update_inventory_source(self, job, latest_inventory_update):
|
||||
now = tz_now()
|
||||
|
||||
# Already processed dependencies for this job
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_inventory_update is None:
|
||||
return True
|
||||
'''
|
||||
@@ -393,8 +372,6 @@ class TaskManager():
|
||||
|
||||
def should_update_related_project(self, job, latest_project_update):
|
||||
now = tz_now()
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_project_update is None:
|
||||
return True
|
||||
@@ -426,18 +403,21 @@ class TaskManager():
return True
return False

def generate_dependencies(self, task):
dependencies = []
if type(task) is Job:
def generate_dependencies(self, undeped_tasks):
created_dependencies = []
for task in undeped_tasks:
dependencies = []
if not type(task) is Job:
continue
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
if task.project is not None and task.project.scm_update_on_launch is True:
latest_project_update = self.get_latest_project_update(task)
if self.should_update_related_project(task, latest_project_update):
project_task = self.create_project_update(task)
created_dependencies.append(project_task)
dependencies.append(project_task)
else:
if latest_project_update.status in ['waiting', 'pending', 'running']:
dependencies.append(latest_project_update)
dependencies.append(latest_project_update)

# Inventory created 2 seconds behind job
try:
@@ -452,56 +432,20 @@ class TaskManager():
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
if self.should_update_inventory_source(task, latest_inventory_update):
inventory_task = self.create_inventory_update(task, inventory_source)
created_dependencies.append(inventory_task)
dependencies.append(inventory_task)
else:
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
dependencies.append(latest_inventory_update)
dependencies.append(latest_inventory_update)

if len(dependencies) > 0:
self.capture_chain_failure_dependencies(task, dependencies)
return dependencies

def process_dependencies(self, dependent_task, dependency_tasks):
|
||||
for task in dependency_tasks:
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("Dependent {} is blocked from running".format(task.log_format))
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
idle_instance_that_fits = None
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if idle_instance_that_fits is None:
|
||||
idle_instance_that_fits = rampart_group.find_largest_idle_instance()
|
||||
if not rampart_group.is_containerized and self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group {} capacity <= 0".format(rampart_group.name))
|
||||
continue
|
||||
|
||||
execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
|
||||
if execution_instance:
|
||||
logger.debug("Starting dependent {} in group {} instance {}".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname))
|
||||
elif not execution_instance and idle_instance_that_fits:
|
||||
if not rampart_group.is_containerized:
|
||||
execution_instance = idle_instance_that_fits
|
||||
logger.debug("Starting dependent {} in group {} on idle instance {}".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname))
|
||||
if execution_instance or rampart_group.is_containerized:
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
tasks_to_fail = [t for t in dependency_tasks if t != task]
|
||||
tasks_to_fail += [dependent_task]
|
||||
self.start_task(task, rampart_group, tasks_to_fail, execution_instance)
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
else:
|
||||
logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
|
||||
rampart_group.name, task.log_format, task.task_impact))
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("Dependent {} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
|
||||
UnifiedJob.objects.filter(pk__in = [task.pk for task in undeped_tasks]).update(dependencies_processed=True)
return created_dependencies

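The restructuring turns a per-task call into one pass over every pending task that still needs dependency processing, then flips dependencies_processed for the whole batch with a single UPDATE so later scheduler cycles skip them. Schematically (a sketch with illustrative names, not the TaskManager API):

def generate_dependencies(undeped_tasks, build_deps_for, mark_processed):
    # Collect the newly created dependency jobs across the whole batch ...
    created = []
    for task in undeped_tasks:
        created.extend(build_deps_for(task))
    # ... then mark every examined task as processed in one bulk update.
    mark_processed(list(undeped_tasks))
    return created


created = generate_dependencies(
    ['job-1', 'job-2'],
    build_deps_for=lambda t: ['project-update-for-' + t],
    mark_processed=lambda tasks: print('dependencies_processed=True for', tasks),
)
print(created)  # ['project-update-for-job-1', 'project-update-for-job-2']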
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("{} is blocked from running".format(task.log_format))
|
||||
continue
|
||||
@@ -574,13 +518,6 @@ class TaskManager():
|
||||
def calculate_capacity_consumed(self, tasks):
|
||||
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
|
||||
|
||||
def would_exceed_capacity(self, task, instance_group):
|
||||
current_capacity = self.graph[instance_group]['consumed_capacity']
|
||||
capacity_total = self.graph[instance_group]['capacity_total']
|
||||
if current_capacity == 0:
|
||||
return False
|
||||
return (task.task_impact + current_capacity > capacity_total)
|
||||
|
||||
def consume_capacity(self, task, instance_group):
|
||||
logger.debug('{} consumed {} capacity units from {} with prior total of {}'.format(
|
||||
task.log_format, task.task_impact, instance_group,
|
||||
@@ -598,6 +535,9 @@ class TaskManager():
|
||||
self.process_running_tasks(running_tasks)
|
||||
|
||||
pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
|
||||
undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
|
||||
dependencies = self.generate_dependencies(undeped_tasks)
|
||||
self.process_pending_tasks(dependencies)
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
|
||||
def _schedule(self):
|
||||
|
||||
@@ -10,6 +10,7 @@ import pkg_resources
|
||||
import sys
|
||||
|
||||
# Django
|
||||
from django.db import connection
|
||||
from django.conf import settings
|
||||
from django.db.models.signals import (
|
||||
pre_save,
|
||||
@@ -71,41 +72,6 @@ def get_current_user_or_none():
|
||||
return u
|
||||
|
||||
|
||||
def emit_update_inventory_computed_fields(sender, **kwargs):
|
||||
logger.debug("In update inventory computed fields")
|
||||
if getattr(_inventory_updates, 'is_updating', False):
|
||||
return
|
||||
instance = kwargs['instance']
|
||||
if sender == Group.hosts.through:
|
||||
sender_name = 'group.hosts'
|
||||
elif sender == Group.parents.through:
|
||||
sender_name = 'group.parents'
|
||||
elif sender == Host.inventory_sources.through:
|
||||
sender_name = 'host.inventory_sources'
|
||||
elif sender == Group.inventory_sources.through:
|
||||
sender_name = 'group.inventory_sources'
|
||||
else:
|
||||
sender_name = str(sender._meta.verbose_name)
|
||||
if kwargs['signal'] == post_save:
|
||||
if sender == Job:
|
||||
return
|
||||
sender_action = 'saved'
|
||||
elif kwargs['signal'] == post_delete:
|
||||
sender_action = 'deleted'
|
||||
elif kwargs['signal'] == m2m_changed and kwargs['action'] in ('post_add', 'post_remove', 'post_clear'):
|
||||
sender_action = 'changed'
|
||||
else:
|
||||
return
|
||||
logger.debug('%s %s, updating inventory computed fields: %r %r',
|
||||
sender_name, sender_action, sender, kwargs)
|
||||
try:
|
||||
inventory = instance.inventory
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
|
||||
|
||||
def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
|
||||
if getattr(_inventory_updates, 'is_updating', False):
|
||||
return
|
||||
@@ -124,7 +90,9 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id, True)
connection.on_commit(
lambda: update_inventory_computed_fields.delay(inventory.id)
)


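Dispatching the computed-fields task via connection.on_commit() means the task only fires once the surrounding transaction commits, so the worker cannot observe pre-commit state. A minimal illustration of the ordering, with a plain Python stand-in for Django's hook:

class FakeConnection:
    # Stand-in for django.db.connection: queue callbacks until "commit".
    def __init__(self):
        self._callbacks = []

    def on_commit(self, func):
        self._callbacks.append(func)

    def commit(self):
        for func in self._callbacks:
            func()


conn = FakeConnection()
conn.on_commit(lambda: print('update_inventory_computed_fields dispatched'))
print('still inside the transaction')  # runs first
conn.commit()                          # the callback runs only now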
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs):
|
||||
@@ -207,10 +175,6 @@ def connect_computed_field_signals():
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
|
||||
@@ -347,10 +311,6 @@ def disable_computed_fields():
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.parents.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
|
||||
|
||||
@@ -52,6 +52,7 @@ import ansible_runner
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.main.models import (
|
||||
Schedule, TowerScheduleState, Instance, InstanceGroup,
|
||||
UnifiedJob, Notification,
|
||||
@@ -337,17 +338,31 @@ def send_notifications(notification_list, job_id=None):

@task()
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
if not settings.INSIGHTS_TRACKING_STATE:
return
try:
tgz = analytics.gather()
if not tgz:
return
logger.debug('gathered analytics: {}'.format(tgz))
analytics.ship(tgz)
finally:
if os.path.exists(tgz):
os.remove(tgz)
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
if last_gather:
last_time = DateTimeField().to_internal_value(last_gather.value)
else:
last_time = None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
with advisory_lock('gather_analytics_lock', wait=False) as acquired:
if acquired is False:
logger.debug('Not gathering analytics, another task holds lock')
return
try:
tgz = analytics.gather()
if not tgz:
return
logger.info('gathered analytics: {}'.format(tgz))
analytics.ship(tgz)
settings.AUTOMATION_ANALYTICS_LAST_GATHER = gather_time
finally:
if os.path.exists(tgz):
os.remove(tgz)


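The new guard does two things: skip the run if the last successful gather is newer than AUTOMATION_ANALYTICS_GATHER_INTERVAL, and take a non-blocking advisory lock so only one node gathers at a time. The shape of that guard, sketched with stand-in objects and an illustrative 4-hour interval:

import datetime


def should_gather(last_time, gather_time, interval_seconds, acquire_lock):
    # Too soon since the last gather: nothing to do.
    if last_time and (gather_time - last_time).total_seconds() <= interval_seconds:
        return False
    # Someone else already holds the lock: let them do the work.
    return acquire_lock()


now = datetime.datetime(2019, 6, 1, 12, 0, 0)
recent = now - datetime.timedelta(hours=1)
print(should_gather(recent, now, 4 * 3600, acquire_lock=lambda: True))  # False (too soon)
print(should_gather(None, now, 4 * 3600, acquire_lock=lambda: True))    # True
print(should_gather(None, now, 4 * 3600, acquire_lock=lambda: False))   # False (lock held)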
@task(queue=get_local_queuename)
|
||||
@@ -499,7 +514,7 @@ def awx_periodic_scheduler():
|
||||
|
||||
invalid_license = False
|
||||
try:
|
||||
access_registry[Job](None).check_license()
|
||||
access_registry[Job](None).check_license(quiet=True)
|
||||
except PermissionDenied as e:
|
||||
invalid_license = e
|
||||
|
||||
@@ -569,7 +584,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
first_instance = instance
|
||||
first_instance_type = each_task['type']
|
||||
|
||||
if instance.celery_task_id != task_id and not instance.cancel_flag:
|
||||
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
|
||||
instance.status = 'failed'
|
||||
instance.failed = True
|
||||
if not instance.job_explanation:
|
||||
@@ -588,7 +603,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
|
||||
|
||||
@task()
|
||||
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
'''
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
prevent unnecessary recursive calls.
|
||||
@@ -599,7 +614,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
|
||||
return
|
||||
i = i[0]
|
||||
try:
|
||||
i.update_computed_fields(update_hosts=should_update_hosts)
|
||||
i.update_computed_fields()
|
||||
except DatabaseError as e:
|
||||
if 'did not affect any rows' in str(e):
|
||||
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
|
||||
@@ -642,7 +657,7 @@ def update_host_smart_inventory_memberships():
|
||||
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
|
||||
# Update computed fields for changed inventories outside atomic action
|
||||
for smart_inventory in changed_inventories:
|
||||
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task()
|
||||
@@ -1130,6 +1145,23 @@ class BaseTask(object):
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
|
||||
if isinstance(self, RunProjectUpdate):
|
||||
# it's common for Ansible's SCM modules to print
|
||||
# error messages on failure that contain the plaintext
|
||||
# basic auth credentials (username + password)
|
||||
# it's also common for the nested event data itself (['res']['...'])
|
||||
# to contain unredacted text on failure
|
||||
# this is a _little_ expensive to filter
|
||||
# with regex, but project updates don't have many events,
|
||||
# so it *should* have a negligible performance impact
|
||||
try:
|
||||
event_data_json = json.dumps(event_data)
|
||||
event_data_json = UriCleaner.remove_sensitive(event_data_json)
|
||||
event_data = json.loads(event_data_json)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
should_write_event = False
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
@@ -1148,7 +1180,11 @@ class BaseTask(object):
|
||||
'''
|
||||
Ansible runner callback to tell the job when/if it is canceled
|
||||
'''
|
||||
self.instance = self.update_model(self.instance.pk)
|
||||
unified_job_id = self.instance.pk
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
if not self.instance:
|
||||
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
|
||||
return True
|
||||
if self.instance.cancel_flag or self.instance.status == 'canceled':
|
||||
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
|
||||
if cancel_wait > 5:
|
||||
@@ -1656,8 +1692,12 @@ class RunJob(BaseTask):
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))

if job.forks:  # FIXME: Max limit?
args.append('--forks=%d' % job.forks)
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
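At job runtime the limit is enforced by clamping rather than failing: if a saved job still asks for more forks than MAX_FORKS allows, the --forks argument is capped and a warning is logged. A sketch of just that decision:

def forks_argument(job_forks, max_forks):
    # Returns the --forks value to pass to ansible-playbook, or None
    # when the job does not override forks at all.
    if not job_forks:
        return None
    if max_forks > 0 and job_forks > max_forks:
        return '--forks=%d' % max_forks   # clamp, as the warning above notes
    return '--forks=%d' % job_forks


print(forks_argument(0, 10))    # None
print(forks_argument(5, 10))    # --forks=5
print(forks_argument(50, 10))   # --forks=10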
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
@@ -1769,7 +1809,7 @@ class RunJob(BaseTask):
|
||||
current_revision = git_repo.head.commit.hexsha
|
||||
if desired_revision == current_revision:
|
||||
job_revision = desired_revision
|
||||
logger.info('Skipping project sync for {} because commit is locally available'.format(job.log_format))
|
||||
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
|
||||
else:
|
||||
sync_needs = all_sync_needs
|
||||
except (ValueError, BadGitName):
|
||||
@@ -1868,7 +1908,8 @@ class RunJob(BaseTask):
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
if inventory is not None:
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
|
||||
|
||||
@task()
|
||||
@@ -1977,8 +2018,9 @@ class RunProjectUpdate(BaseTask):
|
||||
continue
|
||||
env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
|
||||
env[env_key] = server[key]
|
||||
# now set the precedence of galaxy servers
|
||||
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
|
||||
if galaxy_servers:
|
||||
# now set the precedence of galaxy servers
|
||||
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
|
||||
return env
|
||||
|
||||
def _build_scm_url_extra_vars(self, project_update):
|
||||
@@ -2851,4 +2893,4 @@ def deep_copy_model_obj(
|
||||
), permission_check_func[2])
|
||||
permission_check_func(creater, copy_mapping.values())
|
||||
if isinstance(new_obj, Inventory):
|
||||
update_inventory_computed_fields.delay(new_obj.id, True)
|
||||
update_inventory_computed_fields.delay(new_obj.id)
|
||||
|
||||
@@ -10,6 +10,8 @@ group_patterns = foo_group_patterns
|
||||
want_facts = True
|
||||
want_hostcollections = True
|
||||
group_prefix = foo_group_prefix
|
||||
want_ansible_ssh_host = True
|
||||
rich_params = True
|
||||
|
||||
[cache]
|
||||
path = /tmp
|
||||
|
||||
@@ -2,6 +2,9 @@ from django.db import connection
|
||||
from django.db.models.signals import post_migrate
|
||||
from django.apps import apps
|
||||
from django.conf import settings
|
||||
from unittest import mock
|
||||
|
||||
import contextlib
|
||||
|
||||
|
||||
def app_post_migration(sender, app_config, **kwargs):
|
||||
@@ -23,3 +26,13 @@ if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':



@contextlib.contextmanager
def immediate_on_commit():
"""
Context manager executing transaction.on_commit() hooks immediately as
if the connection was in auto-commit mode.
"""
def on_commit(func):
func()
with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
yield patch

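Tests that assert on work dispatched via connection.on_commit() can wrap the relevant block in this helper so the hooks run immediately. A self-contained toy showing the same patching trick (the Connection class below is a stand-in for django.db.connection, used only for this demo):

import contextlib
from unittest import mock


class Connection:
    def __init__(self):
        self.pending = []

    def on_commit(self, func):
        self.pending.append(func)  # normally deferred until COMMIT


connection = Connection()


@contextlib.contextmanager
def immediate_on_commit(conn):
    # Same trick as the fixture above: make on_commit run hooks right away.
    with mock.patch.object(conn, 'on_commit', side_effect=lambda func: func()) as patched:
        yield patched


with immediate_on_commit(connection):
    connection.on_commit(lambda: print('hook ran immediately'))  # prints right away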
@@ -599,9 +599,9 @@ class TestControlledBySCM:
|
||||
delete(inv_src.get_absolute_url(), admin_user, expect=204)
|
||||
assert scm_inventory.inventory_sources.count() == 0
|
||||
|
||||
def test_adding_inv_src_ok(self, post, scm_inventory, admin_user):
|
||||
def test_adding_inv_src_ok(self, post, scm_inventory, project, admin_user):
|
||||
post(reverse('api:inventory_inventory_sources_list', kwargs={'pk': scm_inventory.id}),
|
||||
{'name': 'new inv src', 'update_on_project_update': False, 'source': 'scm', 'overwrite_vars': True},
|
||||
{'name': 'new inv src', 'source_project': project.pk, 'update_on_project_update': False, 'source': 'scm', 'overwrite_vars': True},
|
||||
admin_user, expect=201)
|
||||
|
||||
def test_adding_inv_src_prohibited(self, post, scm_inventory, project, admin_user):
|
||||
|
||||
@@ -153,7 +153,8 @@ def test_summary_fields_recent_jobs(job_template, admin_user, get):
|
||||
'id': job.id,
|
||||
'status': 'failed',
|
||||
'finished': job.finished,
|
||||
'type': 'job'
|
||||
'canceled_on': None,
|
||||
'type': 'job'
|
||||
} for job in jobs[-10:][::-1]]
|
||||
|
||||
|
||||
|
||||
@@ -264,18 +264,6 @@ def test_job_launch_fails_without_credential_access(job_template_prompts, runtim
|
||||
dict(credentials=runtime_data['credentials']), rando, expect=403)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.job_runtime_vars
|
||||
def test_job_block_scan_job_type_change(job_template_prompts, post, admin_user):
|
||||
job_template = job_template_prompts(True)
|
||||
|
||||
# Assure that changing the type of a scan job blocks the launch
|
||||
response = post(reverse('api:job_template_launch', kwargs={'pk':job_template.pk}),
|
||||
dict(job_type='scan'), admin_user, expect=400)
|
||||
|
||||
assert 'job_type' in response.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_launch_JT_with_validation(machine_credential, credential, deploy_jobtemplate):
|
||||
deploy_jobtemplate.extra_vars = '{"job_template_var": 3}'
|
||||
|
||||
@@ -118,6 +118,22 @@ def test_extra_credential_unique_type_xfail(get, post, organization_factory, job
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, inventory, settings):
|
||||
project.use_role.members.add(alice)
|
||||
inventory.use_role.members.add(alice)
|
||||
settings.MAX_FORKS = 10
|
||||
response = post(reverse('api:job_template_list'), {
|
||||
'name': 'Some name',
|
||||
'project': project.id,
|
||||
'inventory': inventory.id,
|
||||
'playbook': 'helloworld.yml',
|
||||
'forks': 11,
|
||||
}, alice)
|
||||
assert response.status_code == 400
|
||||
assert 'Maximum number of forks (10) exceeded' in str(response.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_attach_extra_credential(get, post, organization_factory, job_template_factory, credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
import pytest
|
||||
import base64
|
||||
import contextlib
|
||||
import json
|
||||
from unittest import mock
|
||||
|
||||
from django.db import connection
|
||||
from django.test.utils import override_settings
|
||||
@@ -12,22 +10,11 @@ from awx.main.utils.encryption import decrypt_value, get_encryption_key
|
||||
from awx.api.versioning import reverse, drf_reverse
|
||||
from awx.main.models.oauth import (OAuth2Application as Application,
|
||||
OAuth2AccessToken as AccessToken)
|
||||
from awx.main.tests.functional import immediate_on_commit
|
||||
from awx.sso.models import UserEnterpriseAuth
|
||||
from oauth2_provider.models import RefreshToken
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def immediate_on_commit():
|
||||
"""
|
||||
Context manager executing transaction.on_commit() hooks immediately as
|
||||
if the connection was in auto-commit mode.
|
||||
"""
|
||||
def on_commit(func):
|
||||
func()
|
||||
with mock.patch('django.db.connection.on_commit', side_effect=on_commit) as patch:
|
||||
yield patch
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_personal_access_token_creation(oauth_application, post, alice):
|
||||
url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
|
||||
|
||||
@@ -365,3 +365,77 @@ def test_zoneinfo(get, admin_user):
|
||||
url = reverse('api:schedule_zoneinfo')
|
||||
r = get(url, admin_user, expect=200)
|
||||
assert {'name': 'America/New_York'} in r.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_user_can_create_jt_schedule(options, post, project, inventory, alice):
|
||||
jt = JobTemplate.objects.create(
|
||||
name='test-jt',
|
||||
project=project,
|
||||
playbook='helloworld.yml',
|
||||
inventory=inventory
|
||||
)
|
||||
jt.save()
|
||||
url = reverse('api:schedule_list')
|
||||
|
||||
# can't create a schedule on the JT because we don't have execute rights
|
||||
params = {
|
||||
'name': 'My Example Schedule',
|
||||
'rrule': RRULE_EXAMPLE,
|
||||
'unified_job_template': jt.id,
|
||||
}
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# now we can, because we're allowed to execute the JT
|
||||
jt.execute_role.members.add(alice)
|
||||
assert 'POST' in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_user_can_create_project_schedule(options, post, project, alice):
|
||||
url = reverse('api:schedule_list')
|
||||
|
||||
# can't create a schedule on the project because we don't have update rights
|
||||
params = {
|
||||
'name': 'My Example Schedule',
|
||||
'rrule': RRULE_EXAMPLE,
|
||||
'unified_job_template': project.id,
|
||||
}
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# use role does *not* grant the ability to schedule
|
||||
project.use_role.members.add(alice)
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# now we can, because we're allowed to update project
|
||||
project.update_role.members.add(alice)
|
||||
assert 'POST' in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
def test_normal_user_can_create_inventory_update_schedule(options, post, inventory_source, alice):
    url = reverse('api:schedule_list')

    # can't create a schedule on the inventory source because we don't have update rights
    params = {
        'name': 'My Example Schedule',
        'rrule': RRULE_EXAMPLE,
        'unified_job_template': inventory_source.id,
    }
    assert 'POST' not in options(url, user=alice).data['actions'].keys()
    post(url, params, alice, expect=403)

    # use role does *not* grant the ability to schedule
    inventory_source.inventory.use_role.members.add(alice)
    assert 'POST' not in options(url, user=alice).data['actions'].keys()
    post(url, params, alice, expect=403)

    # now we can, because we're allowed to update the inventory
    inventory_source.inventory.update_role.members.add(alice)
    assert 'POST' in options(url, user=alice).data['actions'].keys()
    post(url, params, alice, expect=201)
|
||||
|
||||
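All three schedule tests above post the same module-level RRULE_EXAMPLE constant, whose definition sits outside this hunk. Any valid single-occurrence rule would do; a hypothetical stand-in value looks like this:

# hypothetical stand-in for the RRULE_EXAMPLE constant referenced above;
# AWX schedule rrules are a DTSTART plus an iCalendar RRULE in one string
RRULE_EXAMPLE = 'DTSTART:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=1'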
@@ -125,9 +125,9 @@ def project_playbooks():
@pytest.fixture
def run_computed_fields_right_away(request):

    def run_me(inventory_id, should_update_hosts=True):
    def run_me(inventory_id):
        i = Inventory.objects.get(id=inventory_id)
        i.update_computed_fields(update_hosts=should_update_hosts)
        i.update_computed_fields()

    mocked = mock.patch(
        'awx.main.signals.update_inventory_computed_fields.delay',
|
||||
|
||||
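The hunk is cut off before the patch is actually applied. A sketch of how a fixture like this typically finishes, assuming the usual mock.patch start/finalizer pattern (only the lines shown above are from the diff):

    # assumed continuation of the fixture: route the celery .delay() call to
    # run_me synchronously for the duration of the test
    mocked = mock.patch(
        'awx.main.signals.update_inventory_computed_fields.delay',
        new=run_me
    )
    mocked.start()
    request.addfinalizer(mocked.stop)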
@@ -11,6 +11,7 @@ from awx.main.signals import (
|
||||
# AWX models
|
||||
from awx.main.models.organization import Organization
|
||||
from awx.main.models import ActivityStream, Job
|
||||
from awx.main.tests.functional import immediate_on_commit
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -34,9 +35,10 @@ class TestComputedFields:
|
||||
|
||||
def test_computed_fields_normal_use(self, mocker, inventory):
|
||||
job = Job.objects.create(name='fake-job', inventory=inventory)
|
||||
with mocker.patch.object(update_inventory_computed_fields, 'delay'):
|
||||
job.delete()
|
||||
update_inventory_computed_fields.delay.assert_called_once_with(inventory.id, True)
|
||||
with immediate_on_commit():
|
||||
with mocker.patch.object(update_inventory_computed_fields, 'delay'):
|
||||
job.delete()
|
||||
update_inventory_computed_fields.delay.assert_called_once_with(inventory.id)
|
||||
|
||||
def test_disable_computed_fields(self, mocker, inventory):
|
||||
job = Job.objects.create(name='fake-job', inventory=inventory)
|
||||
|
||||
@@ -23,8 +23,11 @@ class TestJobNotificationMixin(object):
|
||||
'finished': bool,
|
||||
'force_handlers': bool,
|
||||
'forks': int,
|
||||
'host_status_counts': {'skipped': int, 'ok': int, 'changed': int,
|
||||
'failures': int, 'dark': int},
|
||||
'host_status_counts': {
|
||||
'skipped': int, 'ok': int, 'changed': int,
|
||||
'failures': int, 'dark': int, 'processed': int,
|
||||
'rescued': int, 'failed': bool
|
||||
},
|
||||
'id': int,
|
||||
'job_explanation': str,
|
||||
'job_slice_count': int,
|
||||
@@ -36,7 +39,6 @@ class TestJobNotificationMixin(object):
|
||||
'modified': datetime.datetime,
|
||||
'name': str,
|
||||
'playbook': str,
|
||||
'playbook_counts': {'play_count': int, 'task_count': int},
|
||||
'scm_revision': str,
|
||||
'skip_tags': str,
|
||||
'start_at_task': str,
|
||||
@@ -48,7 +50,6 @@ class TestJobNotificationMixin(object):
|
||||
'username': str},
|
||||
'instance_group': {'id': int, 'name': str},
|
||||
'inventory': {'description': str,
|
||||
'groups_with_active_failures': int,
|
||||
'has_active_failures': bool,
|
||||
'has_inventory_sources': bool,
|
||||
'hosts_with_active_failures': int,
|
||||
@@ -69,17 +70,10 @@ class TestJobNotificationMixin(object):
|
||||
'name': str,
|
||||
'scm_type': str,
|
||||
'status': str},
|
||||
'project_update': {'id': int, 'name': str, 'description': str, 'status': str, 'failed': bool},
|
||||
'unified_job_template': {'description': str,
|
||||
'id': int,
|
||||
'name': str,
|
||||
'unified_job_type': str},
|
||||
'source_workflow_job': {'description': str,
|
||||
'elapsed': float,
|
||||
'failed': bool,
|
||||
'id': int,
|
||||
'name': str,
|
||||
'status': str}},
|
||||
'unified_job_type': str}},
|
||||
|
||||
'timeout': int,
|
||||
'type': str,
|
||||
|
||||
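For orientation, a notification body that satisfies the widened host_status_counts spec above would carry something like the following (values are purely illustrative):

# illustrative host_status_counts fragment matching the expanded spec
host_status_counts = {
    'ok': 12, 'changed': 3, 'skipped': 1, 'failures': 0,
    'dark': 0, 'processed': 13, 'rescued': 0, 'failed': False,
}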
@@ -283,13 +283,13 @@ class TestTaskImpact:
|
||||
|
||||
def test_limit_task_impact(self, job_host_limit, run_computed_fields_right_away):
|
||||
job = job_host_limit(5, 2)
|
||||
job.inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory
|
||||
job.inventory.update_computed_fields()
|
||||
assert job.inventory.total_hosts == 5
|
||||
assert job.task_impact == 2 + 1 # forks becomes constraint
|
||||
|
||||
def test_host_task_impact(self, job_host_limit, run_computed_fields_right_away):
|
||||
job = job_host_limit(3, 5)
|
||||
job.inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory
|
||||
job.inventory.update_computed_fields()
|
||||
assert job.task_impact == 3 + 1 # hosts becomes constraint
|
||||
|
||||
def test_shard_task_impact(self, slice_job_factory, run_computed_fields_right_away):
|
||||
@@ -304,6 +304,7 @@ class TestTaskImpact:
|
||||
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
|
||||
for i in range(3)
|
||||
] == [1, 1, 1]
|
||||
jobs[0].inventory.update_computed_fields()
|
||||
assert [job.task_impact for job in jobs] == [2, 2, 2] # plus one base task impact
|
||||
# Uneven distribution - first job takes the extra host
|
||||
jobs[0].inventory.hosts.create(name='remainder_foo')
|
||||
@@ -311,5 +312,5 @@ class TestTaskImpact:
|
||||
len(jobs[0].inventory.get_script_data(slice_number=i + 1, slice_count=3)['all']['hosts'])
|
||||
for i in range(3)
|
||||
] == [2, 1, 1]
|
||||
jobs[0].inventory.refresh_from_db() # FIXME: computed fields operates on reloaded inventory
|
||||
jobs[0].inventory.update_computed_fields()
|
||||
assert [job.task_impact for job in jobs] == [3, 2, 2]
|
||||
|
||||
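The three task-impact tests above pin the same relationship between hosts, forks and task_impact. Written out as a sketch (assuming job_host_limit(hosts, forks) creates a job over that many inventory hosts with that forks value):

# the invariant these tests exercise, as a standalone sketch
def expected_task_impact(host_count, forks):
    # whichever of host count and forks is smaller drives the impact,
    # plus one base unit for the job itself
    return min(host_count, forks) + 1

assert expected_task_impact(5, 2) == 3  # forks is the constraint
assert expected_task_impact(3, 5) == 4  # hosts is the constraint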
@@ -67,7 +67,7 @@ def test_multi_group_with_shared_dependency(instance_factory, default_instance_g
|
||||
pu = p.project_updates.first()
|
||||
TaskManager.start_task.assert_called_once_with(pu,
|
||||
default_instance_group,
|
||||
[j1],
|
||||
[j1,j2],
|
||||
default_instance_group.instances.all()[0])
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.status = "successful"
|
||||
@@ -193,7 +193,7 @@ def test_instance_group_basic_policies(instance_factory, instance_group_factory)
|
||||
ig2 = InstanceGroup.objects.get(id=ig2.id)
|
||||
ig3 = InstanceGroup.objects.get(id=ig3.id)
|
||||
assert len(ig0.instances.all()) == 1
|
||||
assert i0 in ig0.instances.all()
|
||||
assert i0 in ig0.instances.all()
|
||||
assert len(InstanceGroup.objects.get(id=ig1.id).instances.all()) == 2
|
||||
assert i1 in ig1.instances.all()
|
||||
assert i2 in ig1.instances.all()
|
||||
|
||||
@@ -6,7 +6,7 @@ from datetime import timedelta
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.main.models import WorkflowJobTemplate, JobTemplate
|
||||
from awx.main.models import WorkflowJobTemplate, JobTemplate, Job
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -307,8 +307,8 @@ def test_shared_dependencies_launch(default_instance_group, job_template_factory
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.first()
|
||||
iu = ii.inventory_updates.first()
|
||||
TaskManager.start_task.assert_has_calls([mock.call(pu, default_instance_group, [iu, j1], instance),
|
||||
mock.call(iu, default_instance_group, [pu, j1], instance)])
|
||||
TaskManager.start_task.assert_has_calls([mock.call(iu, default_instance_group, [j1, j2, pu], instance),
|
||||
mock.call(pu, default_instance_group, [j1, j2, iu], instance)])
|
||||
pu.status = "successful"
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.save()
|
||||
@@ -383,3 +383,35 @@ def test_job_not_blocking_inventory_update(default_instance_group, job_template_
|
||||
dependency_graph = DependencyGraph(None)
|
||||
dependency_graph.add_job(job)
|
||||
assert not dependency_graph.is_job_blocked(inventory_update)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_generate_dependencies_only_once(job_template_factory):
|
||||
objects = job_template_factory('jt', organization='org1')
|
||||
|
||||
job = objects.job_template.create_job()
|
||||
job.status = "pending"
|
||||
job.name = "job_gen_dep"
|
||||
job.save()
|
||||
|
||||
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
# job starts with dependencies_processed as False
|
||||
assert not job.dependencies_processed
|
||||
# run one cycle of ._schedule() to generate dependencies
|
||||
TaskManager()._schedule()
|
||||
|
||||
# make sure dependencies_processed is now True
|
||||
job = Job.objects.filter(name="job_gen_dep")[0]
|
||||
assert job.dependencies_processed
|
||||
|
||||
# Run ._schedule() again, but make sure .generate_dependencies() is not
|
||||
# called with job in the argument list
|
||||
tm = TaskManager()
|
||||
tm.generate_dependencies = mock.MagicMock()
|
||||
tm._schedule()
|
||||
|
||||
# .call_args is tuple, (positional_args, kwargs), [0][0] then is
|
||||
# the first positional arg, i.e. the first argument of
|
||||
# .generate_dependencies()
|
||||
assert tm.generate_dependencies.call_args[0][0] == []
|
||||
|
||||
awx/main/tests/functional/test_credential_plugins.py (new file, 6 lines)
@@ -0,0 +1,6 @@
def test_imported_azure_cloud_sdk_vars():
    from awx.main.credential_plugins import azure_kv
    assert len(azure_kv.clouds) > 0
    assert all([hasattr(c, 'name') for c in azure_kv.clouds])
    assert all([hasattr(c, 'suffixes') for c in azure_kv.clouds])
    assert all([hasattr(c.suffixes, 'keyvault_dns') for c in azure_kv.clouds])
|
||||
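The new test only checks that azure_kv exposes a non-empty clouds list whose entries look like msrestazure Cloud definitions. A hedged sketch of the shape being asserted (how the plugin actually builds the list is an assumption here):

# assumed shape of awx.main.credential_plugins.azure_kv.clouds: msrestazure
# cloud definitions, each carrying a key vault DNS suffix
from msrestazure import azure_cloud

clouds = [v for v in vars(azure_cloud).values()
          if isinstance(v, azure_cloud.Cloud)]
for cloud in clouds:
    print(cloud.name, cloud.suffixes.keyvault_dns)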
@@ -60,7 +60,11 @@ INI_TEST_VARS = {
    'satellite6': {
        'satellite6_group_patterns': 'foo_group_patterns',
        'satellite6_group_prefix': 'foo_group_prefix',
        'satellite6_want_hostcollections': True
        'satellite6_want_hostcollections': True,
        'satellite6_want_ansible_ssh_host': True,
        'satellite6_rich_params': True,
        'satellite6_want_facts': True
    },
    'cloudforms': {
        'version': '2.4',
|
||||
|
||||
@@ -57,7 +57,7 @@ def test_empty_in(empty_value):
|
||||
@pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,'])
|
||||
def test_valid_in(valid_value):
|
||||
field_lookup = FieldLookupBackend()
|
||||
value, new_lookup = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
|
||||
value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
|
||||
assert 'foo' in value
|
||||
|
||||
|
||||
|
||||
@@ -89,6 +89,27 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
|
||||
hosts[1].save.assert_called_once_with()
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_malformed_fact(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
job.start_job_fact_cache(fact_cache, modified_times, 0)
|
||||
|
||||
for h in hosts:
|
||||
h.save = mocker.Mock()
|
||||
|
||||
for h in hosts:
|
||||
filepath = os.path.join(fact_cache, h.name)
|
||||
with open(filepath, 'w') as f:
|
||||
json.dump({'ansible_local': {'insights': 'this is an unexpected error from ansible'}}, f)
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
job.finish_job_fact_cache(fact_cache, modified_times)
|
||||
|
||||
for h in hosts:
|
||||
assert h.insights_system_id is None
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
modified_times = {}
|
||||
|
||||
@@ -171,6 +171,7 @@ class TestWorkflowJobCreate:
|
||||
with mocker.patch('awx.main.models.WorkflowJobNode.objects.create', mock_create):
|
||||
wfjt_node_no_prompts.create_workflow_job_node(workflow_job=workflow_job_unit)
|
||||
mock_create.assert_called_once_with(
|
||||
all_parents_must_converge=False,
|
||||
extra_data={},
|
||||
survey_passwords={},
|
||||
char_prompts=wfjt_node_no_prompts.char_prompts,
|
||||
@@ -185,6 +186,7 @@ class TestWorkflowJobCreate:
|
||||
workflow_job=workflow_job_unit
|
||||
)
|
||||
mock_create.assert_called_once_with(
|
||||
all_parents_must_converge=False,
|
||||
extra_data={},
|
||||
survey_passwords={},
|
||||
char_prompts=wfjt_node_with_prompts.char_prompts,
|
||||
|
||||
@@ -19,6 +19,7 @@ class WorkflowNode(object):
|
||||
self.job = job
|
||||
self.do_not_run = do_not_run
|
||||
self.unified_job_template = unified_job_template
|
||||
self.all_parents_must_converge = False
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -94,7 +95,7 @@ class TestDNR():
|
||||
(g, nodes) = workflow_dag_1
|
||||
|
||||
r'''
|
||||
S0
|
||||
0
|
||||
/\
|
||||
S / \
|
||||
/ \
|
||||
@@ -113,7 +114,7 @@ class TestDNR():
|
||||
assert 0 == len(do_not_run_nodes)
|
||||
|
||||
r'''
|
||||
S0
|
||||
0
|
||||
/\
|
||||
S / \
|
||||
/ \
|
||||
@@ -133,6 +134,260 @@ class TestDNR():
|
||||
assert nodes[3] == do_not_run_nodes[0]
|
||||
|
||||
|
||||
class TestAllWorkflowNodes():
|
||||
# test workflow convergence is functioning as expected
|
||||
@pytest.fixture
|
||||
def simple_all_convergence(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(4)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
|
||||
r'''
|
||||
0
|
||||
/\
|
||||
S / \ S
|
||||
/ \
|
||||
1 2
|
||||
\ /
|
||||
F \ / S
|
||||
\/
|
||||
3
|
||||
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[1], "success_nodes")
|
||||
g.add_edge(nodes[0], nodes[2], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "success_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='failed')
|
||||
nodes[2].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_simple_all_convergence(self, simple_all_convergence):
|
||||
(g, nodes) = simple_all_convergence
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "no nodes should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "Node 3, and only node 3, should be chosen to run"
|
||||
assert nodes[3] == nodes_to_run[0], "Only node 3 should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_1(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(3)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0
|
||||
|\ F
|
||||
| \
|
||||
S| 1
|
||||
| /
|
||||
|/ A
|
||||
2
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[1], "failure_nodes")
|
||||
g.add_edge(nodes[0], nodes[2], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[2], "always_nodes")
|
||||
nodes[2].all_parents_must_converge = True
|
||||
nodes[0].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_all_converge_edge_case_1(self, workflow_all_converge_1):
|
||||
(g, nodes) = workflow_all_converge_1
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 2 == len(dnr_nodes), "node[1] and node[2] should be marked DNR"
|
||||
assert nodes[1] == dnr_nodes[0], "Node 1 should be marked DNR"
|
||||
assert nodes[2] == dnr_nodes[1], "Node 2 should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_2(self, wf_node_generator):
|
||||
"""The ordering of _1 and this test, _2, is _slightly_ different.
|
||||
The hope is that topological sorting results in 2 being processed before 3
|
||||
and/or 3 before 2.
|
||||
"""
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(3)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0
|
||||
|\ S
|
||||
| \
|
||||
F| 1
|
||||
| /
|
||||
|/ A
|
||||
2
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[1], "success_nodes")
|
||||
g.add_edge(nodes[0], nodes[2], "failure_nodes")
|
||||
g.add_edge(nodes[1], nodes[2], "always_nodes")
|
||||
nodes[2].all_parents_must_converge = True
|
||||
nodes[0].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_all_converge_edge_case_2(self, workflow_all_converge_2):
|
||||
(g, nodes) = workflow_all_converge_2
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 1 == len(dnr_nodes), "1 and only 1 node should be marked DNR"
|
||||
assert nodes[2] == dnr_nodes[0], "Node 3 should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "Node 2, and only node 2, should be chosen to run"
|
||||
assert nodes[1] == nodes_to_run[0], "Only node 2 should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_will_run(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(4)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0 1 2
|
||||
S \ F | / S
|
||||
\ | /
|
||||
\ | /
|
||||
\|/
|
||||
|
|
||||
3
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "success_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='failed')
|
||||
nodes[2].job = Job(status='running')
|
||||
return (g, nodes)
|
||||
|
||||
def test_workflow_all_converge_will_run(self, workflow_all_converge_will_run):
|
||||
(g, nodes) = workflow_all_converge_will_run
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should get marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should run yet"
|
||||
|
||||
nodes[2].job.status = 'successful'
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "1 and only 1 node should want to run"
|
||||
assert nodes[3] == nodes_to_run[0], "Convergence node should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_dnr(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(4)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0 1 2
|
||||
S \ F | / F
|
||||
\ | /
|
||||
\ | /
|
||||
\|/
|
||||
|
|
||||
3
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "failure_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='running')
|
||||
nodes[2].job = Job(status='failed')
|
||||
return (g, nodes)
|
||||
|
||||
def test_workflow_all_converge_while_parent_runs(self, workflow_all_converge_dnr):
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should get marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "No nodes should run yet"
|
||||
|
||||
def test_workflow_all_converge_with_incorrect_parent(self, workflow_all_converge_dnr):
|
||||
# Another tick of the scheduler
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
nodes[1].job.status = 'successful'
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 1 == len(dnr_nodes), "1 and only 1 node should be marked DNR"
|
||||
assert nodes[3] == dnr_nodes[0], "Convergence node should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "Convergence node should NOT be chosen to run because it is DNR"
|
||||
|
||||
def test_workflow_all_converge_runs(self, workflow_all_converge_dnr):
|
||||
# Trick the scheduler again to make sure the convergence node actually runs
|
||||
(g, nodes) = workflow_all_converge_dnr
|
||||
nodes[1].job.status = 'failed'
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
assert 0 == len(dnr_nodes), "No nodes should be marked DNR"
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 1 == len(nodes_to_run), "Convergence node should be chosen to run"
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_all_converge_deep_dnr_tree(self, wf_node_generator):
|
||||
g = WorkflowDAG()
|
||||
nodes = [wf_node_generator() for i in range(7)]
|
||||
for n in nodes:
|
||||
g.add_node(n)
|
||||
r'''
|
||||
0 1 2
|
||||
\ | /
|
||||
S \ S| / F
|
||||
\ | /
|
||||
\|/
|
||||
|
|
||||
3
|
||||
/\
|
||||
S / \ S
|
||||
/ \
|
||||
4| | 5
|
||||
\ /
|
||||
S \ / S
|
||||
\/
|
||||
6
|
||||
'''
|
||||
g.add_edge(nodes[0], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[1], nodes[3], "success_nodes")
|
||||
g.add_edge(nodes[2], nodes[3], "failure_nodes")
|
||||
g.add_edge(nodes[3], nodes[4], "success_nodes")
|
||||
g.add_edge(nodes[3], nodes[5], "success_nodes")
|
||||
g.add_edge(nodes[4], nodes[6], "success_nodes")
|
||||
g.add_edge(nodes[5], nodes[6], "success_nodes")
|
||||
nodes[3].all_parents_must_converge = True
|
||||
nodes[4].all_parents_must_converge = True
|
||||
nodes[5].all_parents_must_converge = True
|
||||
nodes[6].all_parents_must_converge = True
|
||||
|
||||
nodes[0].job = Job(status='successful')
|
||||
nodes[1].job = Job(status='successful')
|
||||
nodes[2].job = Job(status='successful')
|
||||
return (g, nodes)
|
||||
|
||||
def test_workflow_all_converge_deep_dnr_tree(self, workflow_all_converge_deep_dnr_tree):
|
||||
(g, nodes) = workflow_all_converge_deep_dnr_tree
|
||||
dnr_nodes = g.mark_dnr_nodes()
|
||||
|
||||
assert 4 == len(dnr_nodes), "All nodes w/ no jobs should be marked DNR"
|
||||
assert nodes[3] in dnr_nodes
|
||||
assert nodes[4] in dnr_nodes
|
||||
assert nodes[5] in dnr_nodes
|
||||
assert nodes[6] in dnr_nodes
|
||||
|
||||
nodes_to_run = g.bfs_nodes_to_run()
|
||||
assert 0 == len(nodes_to_run), "All non-run nodes should be DNR and NOT candidates to run"
|
||||
|
||||
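Taken together, these fixtures pin down the semantics of all_parents_must_converge: the node waits for every parent to finish, runs only if every finished parent arrived on a satisfied edge, and is marked do-not-run as soon as any parent finishes on the wrong edge or is itself DNR. A distilled sketch of that decision (not the DependencyGraph implementation, just the rule the tests exercise):

# Distilled decision for a node with all_parents_must_converge=True, as
# exercised by the fixtures above. `parents` is a list of
# (job_status, edge_type) pairs; job_status is None while a parent has not
# run yet and 'dnr' if the parent itself was marked do-not-run.
def all_convergence_decision(parents):
    satisfied = {('successful', 'success_nodes'), ('failed', 'failure_nodes')}
    if any(status == 'dnr' for status, _ in parents):
        return 'dnr'                # DNR propagates through ALL-convergence nodes
    finished = [(s, e) for s, e in parents if s in ('successful', 'failed')]
    if any(e != 'always_nodes' and (s, e) not in satisfied for s, e in finished):
        return 'dnr'                # some parent finished on the "wrong" edge
    if len(finished) < len(parents):
        return 'wait'               # at least one parent is still pending/running
    return 'run'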
|
||||
class TestIsWorkflowDone():
|
||||
@pytest.fixture
|
||||
def workflow_dag_2(self, workflow_dag_1):
|
||||
@@ -212,8 +467,8 @@ class TestIsWorkflowDone():
|
||||
|
||||
assert g.is_workflow_done() is True
|
||||
assert g.has_workflow_failed() == \
|
||||
(True, smart_text(_("No error handle path for workflow job node(s) [({},{})] workflow job node(s)"
|
||||
" missing unified job template and error handle path [].").format(nodes[2].id, nodes[2].job.status)))
|
||||
(True, smart_text(_("No error handling path for workflow job node(s) [({},{})]. Workflow job node(s)"
|
||||
" missing unified job template and error handling path [].").format(nodes[2].id, nodes[2].job.status)))
|
||||
|
||||
def test_is_workflow_done_no_unified_job_tempalte_end(self, workflow_dag_failed):
|
||||
(g, nodes) = workflow_dag_failed
|
||||
@@ -222,8 +477,8 @@ class TestIsWorkflowDone():
|
||||
|
||||
assert g.is_workflow_done() is True
|
||||
assert g.has_workflow_failed() == \
|
||||
(True, smart_text(_("No error handle path for workflow job node(s) [] workflow job node(s) missing"
|
||||
" unified job template and error handle path [{}].").format(nodes[2].id)))
|
||||
(True, smart_text(_("No error handling path for workflow job node(s) []. Workflow job node(s) missing"
|
||||
" unified job template and error handling path [{}].").format(nodes[2].id)))
|
||||
|
||||
def test_is_workflow_done_no_unified_job_tempalte_begin(self, workflow_dag_1):
|
||||
(g, nodes) = workflow_dag_1
|
||||
@@ -233,22 +488,22 @@ class TestIsWorkflowDone():
|
||||
|
||||
assert g.is_workflow_done() is True
|
||||
assert g.has_workflow_failed() == \
|
||||
(True, smart_text(_("No error handle path for workflow job node(s) [] workflow job node(s) missing"
|
||||
" unified job template and error handle path [{}].").format(nodes[0].id)))
|
||||
(True, smart_text(_("No error handling path for workflow job node(s) []. Workflow job node(s) missing"
|
||||
" unified job template and error handling path [{}].").format(nodes[0].id)))
|
||||
|
||||
def test_canceled_should_fail(self, workflow_dag_canceled):
|
||||
(g, nodes) = workflow_dag_canceled
|
||||
|
||||
assert g.has_workflow_failed() == \
|
||||
(True, smart_text(_("No error handle path for workflow job node(s) [({},{})] workflow job node(s)"
|
||||
" missing unified job template and error handle path [].").format(nodes[0].id, nodes[0].job.status)))
|
||||
(True, smart_text(_("No error handling path for workflow job node(s) [({},{})]. Workflow job node(s)"
|
||||
" missing unified job template and error handling path [].").format(nodes[0].id, nodes[0].job.status)))
|
||||
|
||||
def test_failure_should_fail(self, workflow_dag_failure):
|
||||
(g, nodes) = workflow_dag_failure
|
||||
|
||||
assert g.has_workflow_failed() == \
|
||||
(True, smart_text(_("No error handle path for workflow job node(s) [({},{})] workflow job node(s)"
|
||||
" missing unified job template and error handle path [].").format(nodes[0].id, nodes[0].job.status)))
|
||||
(True, smart_text(_("No error handling path for workflow job node(s) [({},{})]. Workflow job node(s)"
|
||||
" missing unified job template and error handling path [].").format(nodes[0].id, nodes[0].job.status)))
|
||||
|
||||
|
||||
class TestBFSNodesToRun():
|
||||
|
||||
@@ -197,36 +197,6 @@ def test_change_jt_sensitive_data(job_template_with_ids, mocker, user_unit):
|
||||
})
|
||||
|
||||
|
||||
def test_jt_add_scan_job_check(job_template_with_ids, user_unit):
|
||||
"Assure that permissions to add scan jobs work correctly"
|
||||
|
||||
access = JobTemplateAccess(user_unit)
|
||||
project = job_template_with_ids.project
|
||||
inventory = job_template_with_ids.inventory
|
||||
project.use_role = Role()
|
||||
inventory.use_role = Role()
|
||||
organization = Organization(name='test-org')
|
||||
inventory.organization = organization
|
||||
organization.admin_role = Role()
|
||||
|
||||
def mock_get_object(Class, **kwargs):
|
||||
if Class == Project:
|
||||
return project
|
||||
elif Class == Inventory:
|
||||
return inventory
|
||||
else:
|
||||
raise Exception('Item requested has not been mocked')
|
||||
|
||||
|
||||
with mock.patch('awx.main.models.rbac.Role.__contains__', return_value=True):
|
||||
with mock.patch('awx.main.access.get_object_or_400', mock_get_object):
|
||||
assert access.can_add({
|
||||
'project': project.pk,
|
||||
'inventory': inventory.pk,
|
||||
'job_type': 'scan'
|
||||
})
|
||||
|
||||
|
||||
def mock_raise_none(self, add_host=False, feature=None, check_expiration=True):
|
||||
return None
|
||||
|
||||
|
||||
@@ -152,3 +152,10 @@ def test_uri_scm_cleartext_redact_and_replace(test_data):
|
||||
# Ensure the host didn't get redacted
|
||||
assert redacted_str.count(uri.host) == test_data['host_occurrences']
|
||||
|
||||
|
||||
@pytest.mark.timeout(1)
|
||||
def test_large_string_performance():
|
||||
length = 100000
|
||||
redacted = UriCleaner.remove_sensitive('x' * length)
|
||||
assert len(redacted) == length
|
||||
|
||||
|
||||
@@ -2146,7 +2146,14 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        inventory_update.source_vars = '{"satellite6_group_patterns": "[a,b,c]", "satellite6_group_prefix": "hey_", "satellite6_want_hostcollections": True}'
        inventory_update.source_vars = {
            'satellite6_group_patterns': '[a,b,c]',
            'satellite6_group_prefix': 'hey_',
            'satellite6_want_hostcollections': True,
            'satellite6_want_ansible_ssh_host': True,
            'satellite6_rich_params': True,
            'satellite6_want_facts': False
        }

        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
@@ -2159,6 +2166,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        assert config.get('ansible', 'group_patterns') == '[a,b,c]'
        assert config.get('ansible', 'group_prefix') == 'hey_'
        assert config.get('ansible', 'want_hostcollections') == 'True'
        assert config.get('ansible', 'want_ansible_ssh_host') == 'True'
        assert config.get('ansible', 'rich_params') == 'True'
        assert config.get('ansible', 'want_facts') == 'False'
|
||||
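Read together with the asserts, the satellite6_* source_vars are copied into the [ansible] section of the generated foreman INI file with the prefix stripped and the values stringified. A rough sketch of that mapping (illustrative helper, not the task's actual code):

# illustrative only: how the asserted [ansible] keys relate to source_vars
def foreman_ansible_section(source_vars):
    return {
        key[len('satellite6_'):]: str(value)
        for key, value in source_vars.items()
        if key.startswith('satellite6_')
    }

assert foreman_ansible_section({'satellite6_want_facts': False}) == {'want_facts': 'False'}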
|
||||
def test_cloudforms_source(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
|
||||
@@ -79,8 +79,8 @@ class mockHost:
|
||||
@mock.patch('awx.main.utils.filters.get_model', return_value=mockHost())
|
||||
class TestSmartFilterQueryFromString():
|
||||
@mock.patch(
|
||||
'awx.api.filters.get_field_from_path',
|
||||
lambda model, path: (model, path) # disable field filtering, because a__b isn't a real Host field
|
||||
'awx.api.filters.get_fields_from_path',
|
||||
lambda model, path: ([model], path) # disable field filtering, because a__b isn't a real Host field
|
||||
)
|
||||
@pytest.mark.parametrize("filter_string,q_expected", [
|
||||
('facts__facts__blank=""', Q(**{u"facts__facts__blank": u""})),
|
||||
|
||||
@@ -106,7 +106,7 @@ def could_be_inventory(project_path, dir_path, filename):
def read_ansible_config(project_path, variables_of_interest):
    fnames = ['/etc/ansible/ansible.cfg']
    if project_path:
        fnames.insert(0, os.path.join(project_path, 'ansible.cfg'))
        fnames.append(os.path.join(project_path, 'ansible.cfg'))
    values = {}
    try:
        parser = ConfigParser()
|
||||
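Assuming the function feeds the whole list to a single ConfigParser.read() call (standard semantics: later files win on duplicate keys), appending the project-level ansible.cfg instead of prepending it means the project copy now takes precedence over /etc/ansible/ansible.cfg. The underlying ConfigParser behaviour, in isolation:

# ConfigParser.read() semantics the reordering relies on; the project path
# here is a placeholder for os.path.join(project_path, 'ansible.cfg')
from configparser import ConfigParser

parser = ConfigParser()
parser.read(['/etc/ansible/ansible.cfg', '/srv/project/ansible.cfg'])
# missing files are silently skipped; for keys defined in both files the
# value from the later (project) file is the one that sticks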
|
||||
@@ -107,6 +107,17 @@ class LogstashFormatterBase(logging.Formatter):
|
||||
|
||||
class LogstashFormatter(LogstashFormatterBase):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.cluster_host_id = settings.CLUSTER_HOST_ID
|
||||
self.tower_uuid = None
|
||||
uuid = (
|
||||
getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
|
||||
getattr(settings, 'INSTALL_UUID', None)
|
||||
)
|
||||
if uuid:
|
||||
self.tower_uuid = uuid
|
||||
super(LogstashFormatter, self).__init__(*args, **kwargs)
|
||||
|
||||
def reformat_data_for_log(self, raw_data, kind=None):
|
||||
'''
|
||||
Process dictionaries from various contexts (job events, activity stream
|
||||
@@ -128,37 +139,6 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
data = json.loads(data)
|
||||
data_for_log = {}
|
||||
|
||||
def index_by_name(alist):
|
||||
"""Takes a list of dictionaries with `name` as a key in each dict
|
||||
and returns a dictionary indexed by those names"""
|
||||
adict = {}
|
||||
for item in alist:
|
||||
subdict = copy(item)
|
||||
if 'name' in subdict:
|
||||
name = subdict.get('name', None)
|
||||
elif 'path' in subdict:
|
||||
name = subdict.get('path', None)
|
||||
if name:
|
||||
# Logstash v2 can not accept '.' in a name
|
||||
name = name.replace('.', '_')
|
||||
adict[name] = subdict
|
||||
return adict
|
||||
|
||||
def convert_to_type(t, val):
|
||||
if t is float:
|
||||
val = val[:-1] if val.endswith('s') else val
|
||||
try:
|
||||
return float(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is int:
|
||||
try:
|
||||
return int(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is str:
|
||||
return val
|
||||
|
||||
if kind == 'job_events':
|
||||
job_event = raw_data['python_objects']['job_event']
|
||||
for field_object in job_event._meta.fields:
|
||||
@@ -198,6 +178,21 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
data_for_log['host_name'] = raw_data['host_name']
|
||||
data_for_log['job_id'] = raw_data['job_id']
|
||||
elif kind == 'performance':
|
||||
def convert_to_type(t, val):
|
||||
if t is float:
|
||||
val = val[:-1] if val.endswith('s') else val
|
||||
try:
|
||||
return float(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is int:
|
||||
try:
|
||||
return int(val)
|
||||
except ValueError:
|
||||
return val
|
||||
elif t is str:
|
||||
return val
|
||||
|
||||
request = raw_data['python_objects']['request']
|
||||
response = raw_data['python_objects']['response']
|
||||
|
||||
@@ -231,21 +226,8 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
log_kind = record.name[len('awx.analytics.'):]
|
||||
fields = self.reformat_data_for_log(fields, kind=log_kind)
|
||||
# General AWX metadata
|
||||
for log_name, setting_name in [
|
||||
('type', 'LOG_AGGREGATOR_TYPE'),
|
||||
('cluster_host_id', 'CLUSTER_HOST_ID'),
|
||||
('tower_uuid', 'LOG_AGGREGATOR_TOWER_UUID')]:
|
||||
if hasattr(settings, setting_name):
|
||||
fields[log_name] = getattr(settings, setting_name, None)
|
||||
elif log_name == 'type':
|
||||
fields[log_name] = 'other'
|
||||
|
||||
uuid = (
|
||||
getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
|
||||
getattr(settings, 'INSTALL_UUID', None)
|
||||
)
|
||||
if uuid:
|
||||
fields['tower_uuid'] = uuid
|
||||
fields['cluster_host_id'] = self.cluster_host_id
|
||||
fields['tower_uuid'] = self.tower_uuid
|
||||
return fields
|
||||
|
||||
def format(self, record):
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# Python
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import requests
|
||||
import time
|
||||
import threading
|
||||
@@ -18,6 +19,7 @@ from django.conf import settings
|
||||
|
||||
# requests futures, a dependency used by these handlers
|
||||
from requests_futures.sessions import FuturesSession
|
||||
import cachetools
|
||||
|
||||
# AWX
|
||||
from awx.main.utils.formatters import LogstashFormatter
|
||||
@@ -273,6 +275,16 @@ HANDLER_MAPPING = {
|
||||
}
|
||||
|
||||
|
||||
TTLCache = cachetools.TTLCache

if 'py.test' in os.environ.get('_', ''):
    # don't cache settings in unit tests
    class TTLCache(TTLCache):

        def __getitem__(self, item):
            raise KeyError()


class AWXProxyHandler(logging.Handler):
    '''
    Handler specific to the AWX external logging feature
@@ -316,6 +328,7 @@ class AWXProxyHandler(logging.Handler):
    def get_handler_class(self, protocol):
        return HANDLER_MAPPING.get(protocol, AWXNullHandler)

    @cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'get_handler')
    def get_handler(self, custom_settings=None, force_create=False):
        new_kwargs = {}
        use_settings = custom_settings or settings
@@ -342,10 +355,14 @@ class AWXProxyHandler(logging.Handler):
        self._handler.setFormatter(self.formatter)
        return self._handler

    @cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'should_audit')
    def should_audit(self):
        return settings.LOG_AGGREGATOR_AUDIT

    def emit(self, record):
        if AWXProxyHandler.thread_local.enabled:
            actual_handler = self.get_handler()
            if settings.LOG_AGGREGATOR_AUDIT:
            if self.should_audit():
                self.auditor.setLevel(settings.LOG_AGGREGATOR_LEVEL)
                self.auditor.emit(record)
            return actual_handler.emit(record)
|
||||
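The two cachetools decorators mean get_handler() and should_audit() consult settings at most once per 3-second window in each process, and the py.test TTLCache subclass above defeats that cache by turning every lookup into a miss. The same pattern in isolation:

# the caching pattern used above, in isolation
import time
import cachetools

@cachetools.cached(cache=cachetools.TTLCache(maxsize=1, ttl=3),
                   key=lambda *args, **kw: 'get_handler')
def expensive_lookup():
    return time.monotonic()

first = expensive_lookup()
assert expensive_lookup() == first   # served from the TTL cache
time.sleep(3.1)
assert expensive_lookup() != first   # entry expired, recomputed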
|
||||
@@ -366,6 +366,7 @@ class VMWareInventory(object):
|
||||
def _get_instances(self, inkwargs):
|
||||
''' Make API calls '''
|
||||
instances = []
|
||||
si = None
|
||||
try:
|
||||
si = SmartConnect(**inkwargs)
|
||||
except ssl.SSLError as connection_error:
|
||||
|
||||
@@ -5,7 +5,6 @@ import os
|
||||
import re # noqa
|
||||
import sys
|
||||
from datetime import timedelta
|
||||
from celery.schedules import crontab
|
||||
|
||||
# global settings
|
||||
from django.conf import global_settings
|
||||
@@ -435,13 +434,9 @@ CELERYBEAT_SCHEDULE = {
|
||||
'schedule': timedelta(seconds=60),
|
||||
'options': {'expires': 50,}
|
||||
},
|
||||
'purge_stdout_files': {
|
||||
'task': 'awx.main.tasks.purge_old_stdout_files',
|
||||
'schedule': timedelta(days=7)
|
||||
},
|
||||
'gather_analytics': {
|
||||
'task': 'awx.main.tasks.gather_analytics',
|
||||
'schedule': crontab(hour='*/6')
|
||||
'schedule': timedelta(minutes=5)
|
||||
},
|
||||
'task_manager': {
|
||||
'task': 'awx.main.scheduler.tasks.run_task_manager',
|
||||
@@ -455,7 +450,6 @@ CELERYBEAT_SCHEDULE = {
|
||||
},
|
||||
# 'isolated_heartbeat': set up at the end of production.py and development.py
|
||||
}
|
||||
AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3
|
||||
|
||||
AWX_CELERY_QUEUES_STATIC = [
|
||||
CELERY_DEFAULT_QUEUE,
|
||||
@@ -665,6 +659,9 @@ PENDO_TRACKING_STATE = "off"
|
||||
# Note: This setting may be overridden by database settings.
|
||||
INSIGHTS_TRACKING_STATE = False
|
||||
|
||||
# Last gather date for Analytics
|
||||
AUTOMATION_ANALYTICS_LAST_GATHER = None
|
||||
AUTOMATION_ANALYTICS_INTERVAL = 14400
|
||||
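With the beat entry above now firing every 5 minutes instead of on a 6-hour crontab, these two settings are what actually throttle the work: 14400 seconds is 4 hours between real gathers. A hedged sketch of the guard this implies (the real gather_analytics task may differ):

# assumed guard implied by LAST_GATHER/INTERVAL; illustrative only
from datetime import datetime, timedelta, timezone


def should_gather(last_gather, interval_seconds=14400, now=None):
    now = now or datetime.now(timezone.utc)
    if last_gather is None:
        return True
    return now - last_gather >= timedelta(seconds=interval_seconds)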
|
||||
# Default list of modules allowed for ad hoc commands.
|
||||
# Note: This setting may be overridden by database settings.
|
||||
@@ -1142,8 +1139,7 @@ LOGGING = {
|
||||
'handlers': ['null']
|
||||
},
|
||||
'awx.main.commands.run_callback_receiver': {
|
||||
'handlers': ['callback_receiver'],
|
||||
'level': 'INFO' # in debug mode, includes full callback data
|
||||
'handlers': ['callback_receiver'], # level handled by dynamic_level_filter
|
||||
},
|
||||
'awx.main.dispatch': {
|
||||
'handlers': ['dispatcher'],
|
||||
@@ -1221,6 +1217,9 @@ AWX_REQUEST_PROFILE = False
|
||||
#
|
||||
AWX_REQUEST_PROFILE_WITH_DOT = False
|
||||
|
||||
# Allow profiling callback workers via SIGUSR1
|
||||
AWX_CALLBACK_PROFILE = False
|
||||
|
||||
# Delete temporary directories created to store playbook run-time
|
||||
AWX_CLEANUP_PATHS = True
|
||||
|
||||
|
||||
@@ -179,3 +179,4 @@ else:
|
||||
os.environ['SDB_NOTIFY_HOST'] = os.popen('ip route').read().split(' ')[2]
|
||||
|
||||
WEBSOCKET_ORIGIN_WHITELIST = ['https://localhost:8043', 'https://localhost:3000']
|
||||
AWX_CALLBACK_PROFILE = True
|
||||
|
||||
@@ -44,7 +44,7 @@ JOBOUTPUT_ROOT = '/var/lib/awx/job_status/'
|
||||
SCHEDULE_METADATA_LOCATION = '/var/lib/awx/.tower_cycle'
|
||||
|
||||
# Ansible base virtualenv paths and enablement
|
||||
BASE_VENV_PATH = "/var/lib/awx/venv"
|
||||
BASE_VENV_PATH = os.path.realpath("/var/lib/awx/venv")
|
||||
ANSIBLE_VENV_PATH = os.path.join(BASE_VENV_PATH, "ansible")
|
||||
|
||||
# Tower base virtualenv paths and enablement
|
||||
|
||||
@@ -72,7 +72,7 @@ function AddEditCredentialsController (
|
||||
vm.form.credential_type._displayValue = credentialType.get('name');
|
||||
vm.isTestable = (isEditable && credentialType.get('kind') === 'external');
|
||||
|
||||
if (credential.get('related.input_sources.results.length' > 0)) {
|
||||
if (credential.get('related.input_sources.results').length > 0) {
|
||||
vm.form.credential_type._disabled = true;
|
||||
}
|
||||
|
||||
|
||||
@@ -40,7 +40,6 @@ function CredentialsResolve (
|
||||
return $q.all(promises)
|
||||
.then(models => {
|
||||
const typeId = models.credential.get('credential_type');
|
||||
const orgId = models.credential.get('organization');
|
||||
|
||||
Rest.setUrl(GetBasePath('credentials'));
|
||||
const params = { target_input_sources__target_credential: id };
|
||||
@@ -48,7 +47,9 @@ function CredentialsResolve (
|
||||
|
||||
const dependents = {
|
||||
credentialType: new CredentialType('get', typeId),
|
||||
organization: new Organization('get', orgId),
|
||||
organization: new Organization('get', {
|
||||
resource: models.credential.get('summary_fields.organization')
|
||||
}),
|
||||
credentialInputSources: models.credential.extend('GET', 'input_sources'),
|
||||
sourceCredentials: sourceCredentialsPromise
|
||||
};
|
||||
|
||||
@@ -25,15 +25,15 @@ function ListJobsController (
|
||||
|
||||
vm.strings = strings;
|
||||
|
||||
let newJobs = [];
|
||||
|
||||
// smart-search
|
||||
const name = 'jobs';
|
||||
const iterator = 'job';
|
||||
let paginateQuerySet = {};
|
||||
|
||||
let launchModalOpen = false;
|
||||
let refreshAfterLaunchClose = false;
|
||||
let pendingRefresh = false;
|
||||
let refreshTimerRunning = false;
|
||||
let newJobsTimerRunning = false;
|
||||
|
||||
vm.searchBasePath = SearchBasePath;
|
||||
|
||||
@@ -104,23 +104,53 @@ function ListJobsController (
|
||||
$scope.$emit('updateCount', vm.job_dataset.count, 'jobs');
|
||||
});
|
||||
|
||||
$scope.$on('ws-jobs', () => {
|
||||
if (!launchModalOpen) {
|
||||
if (!refreshTimerRunning) {
|
||||
refreshJobs();
|
||||
} else {
|
||||
pendingRefresh = true;
|
||||
const canAddRowsDynamically = () => {
|
||||
const orderByValue = _.get($state.params, 'job_search.order_by');
|
||||
const pageValue = _.get($state.params, 'job_search.page');
|
||||
const idInValue = _.get($state.params, 'job_search.id__in');
|
||||
|
||||
return (!idInValue && (!pageValue || pageValue === '1')
|
||||
&& (orderByValue === '-finished' || orderByValue === '-started'));
|
||||
};
|
||||
|
||||
const updateJobRow = (msg) => {
|
||||
// Loop across the jobs currently shown and update the row
|
||||
// if it exists
|
||||
for (let i = 0; i < vm.jobs.length; i++) {
|
||||
if (vm.jobs[i].id === msg.unified_job_id) {
|
||||
// Update the job status.
|
||||
vm.jobs[i].status = msg.status;
|
||||
if (msg.finished) {
|
||||
vm.jobs[i].finished = msg.finished;
|
||||
const orderByValue = _.get($state.params, 'job_search.order_by');
|
||||
if (orderByValue === '-finished') {
|
||||
// Attempt to sort the rows in the list by their finish
|
||||
// timestamp in descending order
|
||||
vm.jobs.sort((a, b) =>
|
||||
(!b.finished) - (!a.finished)
|
||||
|| new Date(b.finished) - new Date(a.finished));
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
refreshAfterLaunchClose = true;
|
||||
}
|
||||
};
|
||||
|
||||
$scope.$on('ws-jobs', (e, msg) => {
|
||||
if (msg.status === 'pending' && canAddRowsDynamically()) {
|
||||
newJobs.push(msg.unified_job_id);
|
||||
if (!launchModalOpen && !newJobsTimerRunning) {
|
||||
fetchNewJobs();
|
||||
}
|
||||
} else if (!newJobs.includes(msg.unified_job_id)) {
|
||||
updateJobRow(msg);
|
||||
}
|
||||
});
|
||||
|
||||
$scope.$on('launchModalOpen', (evt, isOpen) => {
|
||||
evt.stopPropagation();
|
||||
if (!isOpen && refreshAfterLaunchClose) {
|
||||
refreshAfterLaunchClose = false;
|
||||
refreshJobs();
|
||||
if (!isOpen && newJobs.length > 0) {
|
||||
fetchNewJobs();
|
||||
}
|
||||
launchModalOpen = isOpen;
|
||||
});
|
||||
@@ -289,22 +319,49 @@ function ListJobsController (
|
||||
});
|
||||
};
|
||||
|
||||
function refreshJobs () {
|
||||
qs.search(SearchBasePath, $state.params.job_search, { 'X-WS-Session-Quiet': true })
|
||||
const fetchNewJobs = () => {
|
||||
newJobsTimerRunning = true;
|
||||
const newJobIdsFilter = newJobs.join(',');
|
||||
newJobs = [];
|
||||
const newJobsSearchParams = Object.assign({}, $state.params.job_search);
|
||||
newJobsSearchParams.count_disabled = 1;
|
||||
newJobsSearchParams.id__in = newJobIdsFilter;
|
||||
delete newJobsSearchParams.page_size;
|
||||
const stringifiedSearchParams = qs.encodeQueryset(newJobsSearchParams, false);
|
||||
Rest.setUrl(`${vm.searchBasePath}${stringifiedSearchParams}`);
|
||||
Rest.get()
|
||||
.then(({ data }) => {
|
||||
vm.jobs = data.results;
|
||||
vm.job_dataset = data;
|
||||
vm.job_dataset.count += data.results.length;
|
||||
const pageSize = parseInt($state.params.job_search.page_size, 10) || 20;
|
||||
const joinedJobs = data.results.concat(vm.jobs);
|
||||
vm.jobs = joinedJobs.length > pageSize
|
||||
? joinedJobs.slice(0, pageSize)
|
||||
: joinedJobs;
|
||||
$timeout(() => {
|
||||
if (canAddRowsDynamically()) {
|
||||
if (newJobs.length > 0 && !launchModalOpen) {
|
||||
fetchNewJobs();
|
||||
} else {
|
||||
newJobsTimerRunning = false;
|
||||
}
|
||||
} else {
|
||||
// Bail out - one of [order_by, page, id__in] params has changed since we
|
||||
// received these new job messages
|
||||
newJobs = [];
|
||||
newJobsTimerRunning = false;
|
||||
}
|
||||
}, 5000);
|
||||
})
|
||||
.catch(({ data, status }) => {
|
||||
ProcessErrors($scope, data, status, null, {
|
||||
hdr: strings.get('error.HEADER'),
|
||||
msg: strings.get('error.CALL', {
|
||||
path: `${vm.searchBasePath}${stringifiedSearchParams}`,
|
||||
status
|
||||
})
|
||||
});
|
||||
});
|
||||
pendingRefresh = false;
|
||||
refreshTimerRunning = true;
|
||||
$timeout(() => {
|
||||
if (pendingRefresh) {
|
||||
refreshJobs();
|
||||
} else {
|
||||
refreshTimerRunning = false;
|
||||
}
|
||||
}, 5000);
|
||||
}
|
||||
};
|
||||
|
||||
vm.isCollapsed = true;
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ export const OUTPUT_ELEMENT_LAST = '#atStdoutMenuLast';
|
||||
export const OUTPUT_MAX_BUFFER_LENGTH = 1000;
|
||||
export const OUTPUT_MAX_LAG = 120;
|
||||
export const OUTPUT_NO_COUNT_JOB_TYPES = ['ad_hoc_command', 'system_job', 'inventory_update'];
|
||||
export const OUTPUT_ORDER_BY = 'counter';
|
||||
export const OUTPUT_ORDER_BY = 'start_line';
|
||||
export const OUTPUT_PAGE_CACHE = true;
|
||||
export const OUTPUT_PAGE_LIMIT = 5;
|
||||
export const OUTPUT_PAGE_SIZE = 50;
|
||||
|
||||
@@ -113,11 +113,6 @@ function projectsListController (
|
||||
// And we found the affected project
|
||||
$log.debug(`Received event for project: ${project.name}`);
|
||||
$log.debug(`Status changed to: ${data.status}`);
|
||||
if (data.status === 'successful' || data.status === 'failed' || data.status === 'canceled') {
|
||||
reloadList();
|
||||
} else {
|
||||
project.scm_update_tooltip = vm.strings.get('update.UPDATE_RUNNING');
|
||||
}
|
||||
project.status = data.status;
|
||||
buildTooltips(project);
|
||||
}
|
||||
|
||||
@@ -153,7 +153,10 @@ function TemplatesStrings (BaseString) {
|
||||
TIMED_OUT: t.s('APPROVAL TIMED OUT'),
|
||||
TIMEOUT: t.s('Timeout'),
|
||||
APPROVED: t.s('APPROVED'),
|
||||
DENIED: t.s('DENIED')
|
||||
DENIED: t.s('DENIED'),
|
||||
CONVERGENCE: t.s('Convergence'),
|
||||
ALL: t.s('All'),
|
||||
ANY: t.s('Any'),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,6 @@ function ListTemplatesController(
|
||||
qs,
|
||||
GetBasePath,
|
||||
ngToast,
|
||||
$timeout
|
||||
) {
|
||||
const vm = this || {};
|
||||
const [jobTemplate, workflowTemplate] = resolvedModels;
|
||||
@@ -32,10 +31,6 @@ function ListTemplatesController(
|
||||
const choices = workflowTemplate.options('actions.GET.type.choices')
|
||||
.concat(jobTemplate.options('actions.GET.type.choices'));
|
||||
|
||||
let launchModalOpen = false;
|
||||
let refreshAfterLaunchClose = false;
|
||||
let pendingRefresh = false;
|
||||
let refreshTimerRunning = false;
|
||||
let paginateQuerySet = {};
|
||||
|
||||
vm.strings = strings;
|
||||
@@ -120,25 +115,39 @@ function ListTemplatesController(
|
||||
setToolbarSort();
|
||||
}, true);
|
||||
|
||||
$scope.$on(`ws-jobs`, () => {
|
||||
if (!launchModalOpen) {
|
||||
if (!refreshTimerRunning) {
|
||||
refreshTemplates();
|
||||
} else {
|
||||
pendingRefresh = true;
|
||||
}
|
||||
} else {
|
||||
refreshAfterLaunchClose = true;
|
||||
}
|
||||
});
|
||||
$scope.$on(`ws-jobs`, (e, msg) => {
|
||||
if (msg.unified_job_template_id && vm.templates) {
|
||||
const template = vm.templates.find((t) => t.id === msg.unified_job_template_id);
|
||||
if (template) {
|
||||
if (msg.status === 'pending') {
|
||||
// This is a new job - add it to the front of the
|
||||
// recent_jobs array
|
||||
if (template.summary_fields.recent_jobs.length === 10) {
|
||||
template.summary_fields.recent_jobs.pop();
|
||||
}
|
||||
|
||||
$scope.$on('launchModalOpen', (evt, isOpen) => {
|
||||
evt.stopPropagation();
|
||||
if (!isOpen && refreshAfterLaunchClose) {
|
||||
refreshAfterLaunchClose = false;
|
||||
refreshTemplates();
|
||||
template.summary_fields.recent_jobs.unshift({
|
||||
id: msg.unified_job_id,
|
||||
status: msg.status,
|
||||
type: msg.type
|
||||
});
|
||||
} else {
|
||||
// This is an update to an existing job. Check to see
|
||||
// if we have it in our array of recent_jobs
|
||||
for (let i=0; i<template.summary_fields.recent_jobs.length; i++) {
|
||||
const recentJob = template.summary_fields.recent_jobs[i];
|
||||
if (recentJob.id === msg.unified_job_id) {
|
||||
recentJob.status = msg.status;
|
||||
if (msg.finished) {
|
||||
recentJob.finished = msg.finished;
|
||||
template.last_job_run = msg.finished;
|
||||
}
|
||||
break;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
launchModalOpen = isOpen;
|
||||
});
|
||||
|
||||
vm.isInvalid = (template) => {
|
||||
@@ -265,15 +274,6 @@ function ListTemplatesController(
|
||||
vm.templates = vm.dataset.results;
|
||||
})
|
||||
.finally(() => Wait('stop'));
|
||||
pendingRefresh = false;
|
||||
refreshTimerRunning = true;
|
||||
$timeout(() => {
|
||||
if (pendingRefresh) {
|
||||
refreshTemplates();
|
||||
} else {
|
||||
refreshTimerRunning = false;
|
||||
}
|
||||
}, 5000);
|
||||
}
|
||||
|
||||
function createErrorHandler(path, action) {
|
||||
@@ -483,8 +483,7 @@ ListTemplatesController.$inject = [
|
||||
'Wait',
|
||||
'QuerySet',
|
||||
'GetBasePath',
|
||||
'ngToast',
|
||||
'$timeout'
|
||||
'ngToast'
|
||||
];
|
||||
|
||||
export default ListTemplatesController;
|
||||
|
||||
@@ -598,6 +598,11 @@ table, tbody {
|
||||
}
|
||||
|
||||
.List-staticColumnLayout--groups {
|
||||
display: grid;
|
||||
grid-template-columns: @at-space @at-space-5x auto;
|
||||
}
|
||||
|
||||
.List-staticColumnLayout--hostNestedGroups {
|
||||
display: grid;
|
||||
grid-template-columns: @at-space @at-space-5x @at-space-5x auto;
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
const SUPPORTED_LOCALES = ['en', 'es', 'fr', 'ja', 'nl'];
|
||||
const SUPPORTED_LOCALES = ['en', 'es', 'fr', 'ja', 'nl', 'zh'];
|
||||
const DEFAULT_LOCALE = 'en';
|
||||
const BASE_PATH = global.$basePath ? `${global.$basePath}languages/` : '/static/languages/';
|
||||
|
||||
|
||||
@@ -58,6 +58,10 @@ export default ['i18n', function(i18n) {
|
||||
type: 'text',
|
||||
reset: 'ANSIBLE_FACT_CACHE_TIMEOUT',
|
||||
},
|
||||
MAX_FORKS: {
|
||||
type: 'text',
|
||||
reset: 'MAX_FORKS',
|
||||
},
|
||||
PROJECT_UPDATE_VVV: {
|
||||
type: 'toggleSwitch',
|
||||
},
|
||||
|
||||
@@ -79,6 +79,12 @@ export default ['i18n', function(i18n) {
|
||||
AUTOMATION_ANALYTICS_URL: {
|
||||
type: 'text',
|
||||
reset: 'AUTOMATION_ANALYTICS_URL',
|
||||
},
|
||||
AUTOMATION_ANALYTICS_GATHER_INTERVAL: {
|
||||
type: 'number',
|
||||
integer: true,
|
||||
min: 1800,
|
||||
reset: 'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
||||
}
|
||||
},
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ export default
|
||||
label: i18n._("Hosts")
|
||||
},
|
||||
{
|
||||
url: "/#/hosts?host_search=has_active_failures:true",
|
||||
url: "/#/hosts?host_search=last_job_host_summary__failed:true",
|
||||
number: scope.data.hosts.failed,
|
||||
label: i18n._("Failed Hosts"),
|
||||
isFailureCount: true
|
||||
|
||||
@@ -11,7 +11,7 @@ export default
|
||||
templateUrl: templateUrl('home/dashboard/lists/job-templates/job-templates-list')
|
||||
};
|
||||
|
||||
function link(scope, element, attr) {
|
||||
function link(scope) {
|
||||
|
||||
scope.$watch("data", function(data) {
|
||||
if (data) {
|
||||
@@ -22,7 +22,7 @@ export default
|
||||
scope.noJobTemplates = true;
|
||||
}
|
||||
}
|
||||
});
|
||||
}, true);
|
||||
|
||||
scope.canAddJobTemplate = false;
|
||||
let url = GetBasePath('job_templates');
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.