Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 03:24:50 -03:30)

Compare commits: 1097 commits
.github/BOTMETA.yml (vendored): 1 line changed

@@ -1,3 +1,4 @@
---
files:
  awx/ui/:
    labels: component:ui
.gitignore (vendored): 7 lines changed

@@ -135,9 +135,10 @@ use_dev_supervisor.txt

# Ansible module tests
awx_collection_test_venv/
awx_collection/*.tar.gz
awx_collection/galaxy.yml
/awx_collection_test_venv/
/awx_collection/*.tar.gz
/awx_collection/galaxy.yml
/sanity/

.idea/*
*.unison.tmp
.yamllint (new file): 12 lines added

@@ -0,0 +1,12 @@
---
ignore: |
  .tox
  awx/main/tests/data/inventory/plugins/**
  # vault files
  awx/main/tests/data/ansible_utils/playbooks/valid/vault.yml
  awx/ui/test/e2e/tests/smoke-vars.yml

extends: default

rules:
  line-length: disable
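The configuration above extends yamllint's default ruleset, disables the line-length check, and skips the listed test fixtures. A minimal sketch of running it, assuming yamllint is installed with pip and invoked from the repository root (where it picks up `.yamllint` automatically):

```bash
# Install the linter and lint the whole tree with the repo's .yamllint config.
pip install yamllint
yamllint .
```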
CHANGELOG.md (new file): 147 lines added

@@ -0,0 +1,147 @@
# Changelog

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

## 9.2.0 (Feb 12, 2020)
- Added the ability to configure the convergence behavior of workflow nodes https://github.com/ansible/awx/issues/3054
- AWX now allows for a configurable global limit for fork count (per-job run). The default maximum is 200. https://github.com/ansible/awx/pull/5604
- Added the ability to specify AZURE_PUBLIC_CLOUD (for e.g., Azure Government KeyVault support) for the Azure credential plugin https://github.com/ansible/awx/issues/5138
- Added support for several additional parameters for Satellite dynamic inventory https://github.com/ansible/awx/pull/5598
- Added a new field to jobs for tracking the date/time a job is cancelled https://github.com/ansible/awx/pull/5610
- Made a series of additional optimizations to the callback receiver to further improve stdout write speed for running playbooks https://github.com/ansible/awx/pull/5677 https://github.com/ansible/awx/pull/5739
- Updated AWX to be compatible with Helm 3.x (https://github.com/ansible/awx/pull/5776)
- Optimized AWX's job dependency/scheduling code to drastically improve processing time in scenarios where there are many pending jobs scheduled simultaneously https://github.com/ansible/awx/issues/5154
- Fixed a bug which could cause SCM authentication details (basic auth passwords) to be reported to external loggers in certain failure scenarios (e.g., when a git clone fails and ansible itself prints an error message to stdout) https://github.com/ansible/awx/pull/5812
- Fixed a k8s installer bug that caused installs to fail in certain situations https://github.com/ansible/awx/issues/5574
- Fixed a number of issues that caused analytics gathering and reporting to run more often than necessary https://github.com/ansible/awx/pull/5721
- Fixed a bug in the AWX CLI that prevented JSON-type settings from saving properly https://github.com/ansible/awx/issues/5528
- Improved support for fetching custom virtualenv dependencies when AWX is installed behind a proxy https://github.com/ansible/awx/pull/5805
- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
- Fixed a bug that can cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448

## 9.1.1 (Jan 14, 2020)

- Fixed a bug that caused database migrations on Kubernetes installs to hang https://github.com/ansible/awx/pull/5579
- Upgraded Python-level app dependencies in AWX virtual environment https://github.com/ansible/awx/pull/5407
- Running jobs no longer block associated inventory updates https://github.com/ansible/awx/pull/5519
- Fixed invalid_response SAML error https://github.com/ansible/awx/pull/5577
- Optimized the callback receiver to drastically improve the write speed of stdout for parallel jobs (https://github.com/ansible/awx/pull/5618)

## 9.1.0 (Dec 17, 2019)
- Added a command to generate a new SECRET_KEY and rekey the secrets in the database
- Removed project update locking when jobs using it are running
- Fixed slow queries for /api/v2/instances and /api/v2/instance_groups when smart inventories are used
- Fixed a partial password disclosure when special characters existed in the RabbitMQ password (CVE-2019-19342)
- Fixed hang in error handling for source control checkouts
- Fixed an error on subsequent job runs that override the branch of a project on an instance that did not have a prior project checkout
- Fixed an issue where jobs launched in isolated or container groups would incorrectly timeout
- Fixed an incorrect link to instance groups documentation in the user interface
- Fixed editing of inventory on Workflow templates
- Fixed multiple issues with OAuth2 token cleanup system jobs
- Fixed a bug that broke email notifications for workflow approval/deny https://github.com/ansible/awx/issues/5401
- Updated SAML implementation to automatically login if authorization already exists
- Updated AngularJS to 1.7.9 for CVE-2019-10768

## 9.0.1 (Nov 4, 2019)

- Fixed a bug in the installer that broke certain types of k8s installs https://github.com/ansible/awx/issues/5205

## 9.0.0 (Oct 31, 2019)

- Updated AWX images to use centos:8 as the parent image.
- Updated to ansible-runner 1.4.4 to address various bugs.
- Added oc and kubectl to the AWX images to support new container-based execution introduced in 8.0.0.
- Added some optimizations to speed up the deletion of large Inventory Groups.
- Fixed a bug that broke webhook launches for Job Templates that define a survey (https://github.com/ansible/awx/issues/5062).
- Fixed a bug in the CLI which incorrectly parsed launch time arguments for `awx job_templates launch` and `awx workflow_job_templates launch` (https://github.com/ansible/awx/issues/5093).
- Fixed a bug that caused inventory updates using "sourced from a project" to stop working (https://github.com/ansible/awx/issues/4750).
- Fixed a bug that caused Slack notifications to sometimes show the wrong bot avatar (https://github.com/ansible/awx/pull/5125).
- Fixed a bug that prevented the use of digits in Tower's URL settings (https://github.com/ansible/awx/issues/5081).

## 8.0.0 (Oct 21, 2019)

- The Ansible Tower Ansible modules have been migrated to a new official Ansible AWX collection: https://galaxy.ansible.com/awx/AWX
  Please note that this functionality is only supported in Ansible 2.9+
- AWX now supports the ability to launch jobs from external webhooks (GitHub and GitLab integration are supported).
- AWX now supports Container Groups, a new feature that allows you to schedule and run playbooks on single-use kubernetes pods on-demand.
- AWX now supports sending notifications when Workflow steps are approved, denied, or time out.
- AWX now records the user who approved or denied Workflow steps.
- AWX now supports fetching Ansible Collections from private galaxy servers.
- AWX now checks the user's ansible.cfg for paths where role/collections may live when running project updates.
- AWX now uses PostgreSQL 10 by default.
- AWX now warns more loudly about underlying AMQP connectivity issues (https://github.com/ansible/awx/pull/4857).
- Added a few optimizations to drastically improve dashboard performance for larger AWX installs (installs with several hundred thousand jobs or more).
- Updated to the latest version of Ansible's VMWare inventory script (which adds support for vmware_guest_facts).
- Deprecated /api/v2/inventory_scripts/ (this endpoint - and the Custom Inventory Script feature - will be removed in a future release of AWX).
- Fixed a bug which prevented Organization Admins from removing users from their own Organization (https://github.com/ansible/awx/issues/2979)
- Fixed a bug which sometimes caused cluster nodes to fail to re-join with a cryptic error, "No instance found with the current cluster host id" (https://github.com/ansible/awx/issues/4294)
- Fixed a bug that prevented the use of launch-time passphrases when using credential plugins (https://github.com/ansible/awx/pull/4807)
- Fixed a bug that caused notifications assigned at the Organization level not to take effect for Workflows in that Organization (https://github.com/ansible/awx/issues/4712)
- Fixed a bug which caused a notable amount of CPU overhead on RabbitMQ health checks (https://github.com/ansible/awx/pull/5009)
- Fixed a bug which sometimes caused the <return> key to stop functioning in <textarea> elements (https://github.com/ansible/awx/issues/4192)
- Fixed a bug which caused request contention when the same OAuth2.0 token was used in multiple simultaneous requests (https://github.com/ansible/awx/issues/4694)
- Fixed a bug related to parsing multiple choice survey options (https://github.com/ansible/awx/issues/4452).
- Fixed a bug that caused single-sign-on icons on the login page to fail to render in certain Windows browsers (https://github.com/ansible/awx/issues/3924)
- Fixed a number of bugs that caused certain OAuth2 settings to not be properly respected, such as REFRESH_TOKEN_EXPIRE_SECONDS.
- Fixed a number of bugs in the AWX CLI, including a bug which sometimes caused long lines of stdout output to be unexpectedly truncated.
- Fixed a number of bugs on the job details UI which sometimes caused auto-scrolling stdout to become stuck.
- Fixed a bug which caused LDAP authentication to fail if the TLD of the server URL contained digits (https://github.com/ansible/awx/issues/3646)
- Fixed a bug which broke HashiCorp Vault integration on older versions of HashiCorp Vault.

## 7.0.0 (Sept 4, 2019)

- AWX now detects and installs Ansible Collections defined in your project (note - this feature only works in Ansible 2.9+) (https://github.com/ansible/awx/issues/2534)
- AWX now includes an official command line client. Keep an eye out for a follow-up email on this mailing list for information on how to install it and try it out.
- Added the ability to provide a specific SCM branch on jobs (https://github.com/ansible/awx/issues/282)
- Added support for Workflow Approval Nodes, a new feature which allows you to add "pause and wait for approval" steps into your workflows (https://github.com/ansible/awx/issues/1206)
- Added the ability to specify a specific HTTP method for webhook notifications (POST vs PUT) (https://github.com/ansible/awx/pull/4124)
- Added the ability to specify a username and password for HTTP Basic Authorization for webhook notifications (https://github.com/ansible/awx/pull/4124)
- Added support for customizing the text content of notifications (https://github.com/ansible/awx/issues/79)
- Added the ability to enable and disable hosts in dynamic inventory (https://github.com/ansible/awx/pull/4420)
- Added the description (if any) to the Job Template list (https://github.com/ansible/awx/issues/4359)
- Added new metrics for instance hostnames and pending jobs to the /api/v2/metrics/ endpoint (https://github.com/ansible/awx/pull/4375)
- Changed AWX's on/off toggle buttons to a non-text based style to simplify internationalization (https://github.com/ansible/awx/pull/4425)
- Events emitted by ansible for adhoc commands are now sent to the external log aggregrator (https://github.com/ansible/awx/issues/4545)
- Fixed a bug which allowed a user to make an organization credential in another organization without permissions to that organization (https://github.com/ansible/awx/pull/4483)
- Fixed a bug that caused `extra_vars` on workflows to break when edited (https://github.com/ansible/awx/issues/4293)
- Fixed a slow SQL query that caused performance issues when large numbers of groups exist (https://github.com/ansible/awx/issues/4461)
- Fixed a few minor bugs in survey field validation (https://github.com/ansible/awx/pull/4509) (https://github.com/ansible/awx/pull/4479)
- Fixed a bug that sometimes resulted in orphaned `ansible_runner_pi` directories in `/tmp` after playbook execution (https://github.com/ansible/awx/pull/4409)
- Fixed a bug that caused the `is_system_auditor` flag in LDAP configuration to not work (https://github.com/ansible/awx/pull/4396)
- Fixed a bug which caused schedules to disappear from the UI when toggled off (https://github.com/ansible/awx/pull/4378)
- Fixed a bug that sometimes caused stdout content to contain extraneous blank lines in newer versions of Ansible (https://github.com/ansible/awx/pull/4391)
- Updated to the latest Django security release, 2.2.4 (https://github.com/ansible/awx/pull/4410) (https://www.djangoproject.com/weblog/2019/aug/01/security-releases/)
- Updated the default version of git to a version that includes support for x509 certificates (https://github.com/ansible/awx/issues/4362)
- Removed the deprecated `credential` field from `/api/v2/workflow_job_templates/N/` (as part of the `/api/v1/` removal in prior AWX versions - https://github.com/ansible/awx/pull/4490).

## 6.1.0 (Jul 18, 2019)

- Updated AWX to use Django 2.2.2.
- Updated the provided openstacksdk version to support new functionality (such as Nova scheduler_hints)
- Added the ability to specify a custom cacert for the HashiCorp Vault credential plugin
- Fixed a number of bugs related to path lookups for the HashiCorp Vault credential plugin
- Fixed a bug which prevented signed SSH certificates from working, including the HashiCorp Vault Signed SSH backend
- Fixed a bug which prevented custom logos from displaying on the login page (as a result of a new Content Security Policy in 6.0.0)
- Fixed a bug which broke websocket connectivity in Apple Safari (as a result of a new Content Security Policy in 6.0.0)
- Fixed a bug on the job output page that occasionally caused the "up" and "down" buttons to not load additional output
- Fixed a bug on the job output page that caused quoted task names to display incorrectly

## 6.0.0 (Jul 1, 2019)

- Removed support for "Any" notification templates and their API endpoints e.g., /api/v2/job_templates/N/notification_templates/any/ (https://github.com/ansible/awx/issues/4022)
- Fixed a bug which prevented credentials from properly being applied to inventory sources (https://github.com/ansible/awx/issues/4059)
- Fixed a bug which can cause the task dispatcher to hang indefinitely when external logging support (e.g., Splunk, Logstash) is enabled (https://github.com/ansible/awx/issues/4181)
- Fixed a bug which causes slow stdout display when running jobs against smart inventories. (https://github.com/ansible/awx/issues/3106)
- Fixed a bug that caused SSL verification flags to fail to be respected for LDAP authentication in certain environments. (https://github.com/ansible/awx/pull/4190)
- Added a simple Content Security Policy (https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP) to restrict access to third-party resources in the browser. (https://github.com/ansible/awx/pull/4167)
- Updated ovirt4 library dependencies to work with newer versions of oVirt (https://github.com/ansible/awx/issues/4138)

## 5.0.0 (Jun 21, 2019)

- Bump Django Rest Framework from 3.7.7 to 3.9.4
- Bump setuptools / pip dependencies
- Fixed bug where Recent Notification list would not appear
- Added notifications on job start
- Default to Ansible 2.8
@@ -2,96 +2,8 @@

## Introduction

Upgrades using Django migrations are not expected to work in AWX. As a result, to upgrade to a new version, it is necessary to export resources from the old AWX node and import them into a freshly-installed node with the new version. The recommended way to do this is to use the tower-cli send/receive feature.
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.

This tool does __not__ support export/import of the following:
* Logs/history
* Credential passwords
* LDAP/AWX config
Users who wish to upgrade modern AWX installations should follow the instructions at:

### Install & Configure Tower-CLI

In terminal, pip install tower-cli (if you do not have pip already, install [here](https://pip.pypa.io/en/stable/installing/)):
```
$ pip install --upgrade ansible-tower-cli
```

The AWX host URL, user, and password must be set for the AWX instance to be exported:
```
$ tower-cli config host http://<old-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
```

For more information on installing tower-cli look [here](http://tower-cli.readthedocs.io/en/latest/quickstart.html).


### Export Resources

Export all objects

```$ tower-cli receive --all > assets.json```


### Teardown Old AWX

Clean up remnants of the old AWX install:

```docker rm -f $(docker ps -aq)``` # remove all old awx containers

```make clean-ui``` # clean up ui artifacts


### Install New AWX version

If you are installing AWX as a dev container, pull down the latest code or version you want from GitHub, build the image locally, then start the container

```
git pull # retrieve latest AWX changes from repository
make docker-compose-build # build AWX image
make docker-compose # run container
```
For other install methods, refer to the [Install.md](https://github.com/ansible/awx/blob/devel/INSTALL.md).


### Import Resources

Configure tower-cli for your new AWX host as shown earlier. Import from a JSON file named assets.json

```
$ tower-cli config host http://<new-awx-host.example.com>
$ tower-cli config username <user>
$ tower-cli config password <pass>
$ tower-cli send assets.json
```

--------------------------------------------------------------------------------

## Additional Info

If you have two running AWX hosts, it is possible to copy all assets from one instance to another

```$ tower-cli receive --tower-host old-awx-host.example.com --all | tower-cli send --tower-host new-awx-host.example.com```


#### More Granular Exports:

Export all credentials

```$ tower-cli receive --credential all > credentials.json```
> Note: This exports the credentials with blank strings for passwords and secrets

Export a credential named "My Credential"

```$ tower-cli receive --credential "My Credential"```

#### More Granular Imports:

You could import anything except an organization defined in a JSON file named assets.json

```$ tower-cli send --prevent organization assets.json```
https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
INSTALL.md: 139 lines changed

@@ -4,41 +4,45 @@ This document provides a guide for installing AWX.

## Table of contents

- [Getting started](#getting-started)
- [Clone the repo](#clone-the-repo)
- [AWX branding](#awx-branding)
- [Prerequisites](#prerequisites)
- [System Requirements](#system-requirements)
- [AWX Tunables](#awx-tunables)
- [Choose a deployment platform](#choose-a-deployment-platform)
- [Official vs Building Images](#official-vs-building-images)
- [OpenShift](#openshift)
- [Prerequisites](#prerequisites-1)
- [Deploying to Minishift](#deploying-to-minishift)
- [Pre-build steps](#pre-build-steps)
- [PostgreSQL](#postgresql)
- [Start the build](#start-the-build)
- [Post build](#post-build)
- [Accessing AWX](#accessing-awx)
- [Kubernetes](#kubernetes)
- [Prerequisites](#prerequisites-2)
- [Pre-build steps](#pre-build-steps-1)
- [Configuring Helm](#configuring-helm)
- [Start the build](#start-the-build-1)
- [Accessing AWX](#accessing-awx-1)
- [SSL Termination](#ssl-termination)
- [Docker Compose](#docker-compose)
- [Prerequisites](#prerequisites-3)
- [Pre-build steps](#pre-build-steps-2)
- [Deploying to a remote host](#deploying-to-a-remote-host)
- [Inventory variables](#inventory-variables)
- [Installing AWX](#installing-awx)
* [Getting started](#getting-started)
+ [Clone the repo](#clone-the-repo)
+ [AWX branding](#awx-branding)
+ [Prerequisites](#prerequisites)
+ [System Requirements](#system-requirements)
+ [AWX Tunables](#awx-tunables)
+ [Choose a deployment platform](#choose-a-deployment-platform)
+ [Official vs Building Images](#official-vs-building-images)
* [Upgrading from previous versions](#upgrading-from-previous-versions)
* [OpenShift](#openshift)
+ [Prerequisites](#prerequisites-1)
+ [Pre-install steps](#pre-install-steps)
- [Deploying to Minishift](#deploying-to-minishift)
- [PostgreSQL](#postgresql)
+ [Run the installer](#run-the-installer)
+ [Post-install](#post-install)
+ [Accessing AWX](#accessing-awx)
* [Kubernetes](#kubernetes)
+ [Prerequisites](#prerequisites-2)
+ [Pre-install steps](#pre-install-steps-1)
+ [Configuring Helm](#configuring-helm)
+ [Run the installer](#run-the-installer-1)
+ [Post-install](#post-install-1)
+ [Accessing AWX](#accessing-awx-1)
+ [SSL Termination](#ssl-termination)
* [Docker-Compose](#docker-compose)
+ [Prerequisites](#prerequisites-3)
+ [Pre-install steps](#pre-install-steps-2)
- [Deploying to a remote host](#deploying-to-a-remote-host)
- [Inventory variables](#inventory-variables)
- [Docker registry](#docker-registry)
- [PostgreSQL](#postgresql-1)
- [Proxy settings](#proxy-settings)
- [Start the build](#start-the-build-2)
- [Post build](#post-build-2)
- [Accessing AWX](#accessing-awx-2)
- [PostgreSQL](#postgresql-1)
+ [Run the installer](#run-the-installer-2)
+ [Post-install](#post-install-2)
+ [Accessing AWX](#accessing-awx-2)
## Getting started

### Clone the repo

@@ -57,7 +61,7 @@ To install the assets, clone the `awx-logos` repo so that it is next to your `aw

Before you can run a deployment, you'll need the following installed in your local environment:

- [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) Requires Version 2.4+
- [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) Requires Version 2.8+
- [Docker](https://docs.docker.com/engine/installation/)
+ A recent version
- [docker](https://pypi.org/project/docker/) Python module

@@ -114,12 +118,34 @@ If these variables are present then all deployments will use these hosted images

> Multiple versions are provided. `latest` always pulls the most recent. You may also select version numbers at different granularities: 1, 1.0, 1.0.1, 1.0.0.123

## Upgrading from previous versions

Upgrading AWX involves rerunning the install playbook. Download a newer release from [https://github.com/ansible/awx/releases](https://github.com/ansible/awx/releases) and re-populate the inventory file with your customized variables.

For convenience, you can create a file called `vars.yml`:

```
admin_password: 'adminpass'
pg_password: 'pgpass'
rabbitmq_password: 'rabbitpass'
secret_key: 'mysupersecret'
```

And pass it to the installer:

```
$ ansible-playbook -i inventory install.yml -e @vars.yml
```

## OpenShift

### Prerequisites

To complete a deployment to OpenShift, you will obviously need access to an OpenShift cluster. For demo and testing purposes, you can use [Minishift](https://github.com/minishift/minishift) to create a single node cluster running inside a virtual machine.
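If you want to try this locally, a hypothetical Minishift invocation might look like the sketch below; the resource values and the `oc-env` step are illustrative assumptions, not taken from this guide.

```bash
# Start a single-node OpenShift VM with room for the AWX pods,
# then put the bundled `oc` client on the current shell's PATH.
minishift start --cpus 4 --memory 8GB
eval $(minishift oc-env)
```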
When using OpenShift for deploying AWX make sure you have correct privileges to add the security context 'privileged', otherwise the installation will fail. The privileged context is needed because of the use of [the bubblewrap tool](https://github.com/containers/bubblewrap) to add an additional layer of security when using containers.

You will also need to have the `oc` command in your PATH. The `install.yml` playbook will call out to `oc` when logging into, and creating objects on the cluster.

The default resource requests per-deployment requires:

@@ -131,9 +157,9 @@ This can be tuned by overriding the variables found in [/installer/roles/kuberne

For more detail on how resource requests are formed see: [https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources](https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources)

### Pre-build steps
### Pre-install steps

Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:
Before starting the install, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:

*openshift_host*

@@ -195,20 +221,20 @@ By default, AWX will deploy a PostgreSQL pod inside of your cluster. You will ne

If you wish to use an external database, in the inventory file, set the value of `pg_hostname`, and update `pg_username`, `pg_password`, `pg_admin_password`, `pg_database`, and `pg_port` with the connection information. When setting `pg_hostname` the installer will assume you have configured the database in that location and will not launch the postgresql pod.

### Start the build
### Run the installer

To start the build, you will pass two *extra* variables on the command line. The first is *openshift_password*, which is the password for the *openshift_user*, and the second is *docker_registry_password*, which is the password associated with *docker_registry_username*.
To start the install, you will pass two *extra* variables on the command line. The first is *openshift_password*, which is the password for the *openshift_user*, and the second is *docker_registry_password*, which is the password associated with *docker_registry_username*.

If you're using the OpenShift internal registry, then you'll pass an access token for the *docker_registry_password* value, rather than a password. The `oc whoami -t` command will generate the required token, as long as you're logged into the cluster via `oc cluster login`.

To start the build and deployment, run the following (docker_registry_password is optional if using official images):
Run the following command (docker_registry_password is optional if using official images):

```bash
# Start the build and deployment
# Start the install
$ ansible-playbook -i inventory install.yml -e openshift_password=developer -e docker_registry_password=$(oc whoami -t)
```

### Post build
### Post-install

After the playbook run completes, check the status of the deployment by running `oc get pods`:

@@ -325,9 +351,9 @@ This can be tuned by overriding the variables found in [/installer/roles/kuberne

For more detail on how resource requests are formed see: [https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)

### Pre-build steps
### Pre-install steps

Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section uncommenting when necessary. Make sure the openshift and standalone docker sections are commented out:
Before starting the install process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section uncommenting when necessary. Make sure the openshift and standalone docker sections are commented out:

*kubernetes_context*

@@ -347,7 +373,7 @@ If you want the AWX installer to manage creating the database pod (rather than i

Newer Kubernetes clusters with RBAC enabled will need to make sure a service account is created, make sure to follow the instructions here [https://docs.helm.sh/using_helm/#role-based-access-control](https://docs.helm.sh/using_helm/#role-based-access-control)
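A minimal sketch of that service-account setup, assuming Helm 2 with Tiller (which the linked guide covers; Helm 3 removes Tiller and does not need this step):

```bash
# Create a service account for Tiller, grant it cluster-admin,
# and initialize Helm to deploy Tiller under that service account.
kubectl --namespace kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
```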
### Start the build
|
||||
### Run the installer
|
||||
|
||||
After making changes to the `inventory` file use `ansible-playbook` to begin the install
|
||||
|
||||
@@ -355,7 +381,7 @@ After making changes to the `inventory` file use `ansible-playbook` to begin the
|
||||
$ ansible-playbook -i inventory install.yml
|
||||
```
|
||||
|
||||
### Post build
|
||||
### Post-install
|
||||
|
||||
After the playbook run completes, check the status of the deployment by running `kubectl get pods --namespace awx` (replace awx with the namespace you used):
|
||||
|
||||
@@ -403,7 +429,7 @@ Unlike Openshift's `Route` the Kubernetes `Ingress` doesn't yet handle SSL termi
|
||||
+ This also installs the `docker` Python module, which is incompatible with `docker-py`. If you have previously installed `docker-py`, please uninstall it.
|
||||
- [Docker Compose](https://docs.docker.com/compose/install/).
|
||||
|
||||
### Pre-build steps
|
||||
### Pre-install steps
|
||||
|
||||
#### Deploying to a remote host
|
||||
|
||||
@@ -434,7 +460,7 @@ If you choose to use the official images then the remote host will be the one to
|
||||
|
||||
#### Inventory variables
|
||||
|
||||
Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:
|
||||
Before starting the install process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:
|
||||
|
||||
*postgres_data_dir*
|
||||
|
||||
@@ -456,6 +482,10 @@ Before starting the build process, review the [inventory](./installer/inventory)
|
||||
|
||||
> When using docker-compose, the `docker-compose.yml` file will be created there (default `/tmp/awxcompose`).
|
||||
|
||||
*custom_venv_dir*
|
||||
|
||||
> Adds the custom venv environments from the local host to be passed into the containers at install.
|
||||
|
||||
*ca_trust_dir*
|
||||
|
||||
> If you're using a non trusted CA, provide a path where the untrusted Certs are stored on your Host.
|
||||
@@ -505,9 +535,9 @@ AWX requires access to a PostgreSQL database, and by default, one will be create
|
||||
|
||||
If you wish to use an external database, in the inventory file, set the value of `pg_hostname`, and update `pg_username`, `pg_password`, `pg_admin_password`, `pg_database`, and `pg_port` with the connection information.
|
||||
|
||||
### Start the build
|
||||
### Run the installer
|
||||
|
||||
If you are not pushing images to a Docker registry, start the build by running the following:
|
||||
If you are not pushing images to a Docker registry, start the install by running the following:
|
||||
|
||||
```bash
|
||||
# Set the working directory to installer
|
||||
@@ -527,7 +557,7 @@ $ cd installer
|
||||
$ ansible-playbook -i inventory -e docker_registry_password=password install.yml
|
||||
```
|
||||
|
||||
### Post build
|
||||
### Post-install
|
||||
|
||||
After the playbook run completes, Docker will report up to 5 running containers. If you chose to use an existing PostgresSQL database, then it will report 4. You can view the running containers using the `docker ps` command, as follows:
|
||||
|
||||
@@ -604,14 +634,3 @@ Added instance awx to tower
|
||||
The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).
|
||||
|
||||
You will prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
|
||||
|
||||
### Maintenance using docker-compose

After the installation, maintenance operations with docker-compose can be done by using the `docker-compose.yml` file created at the location pointed to by `docker_compose_dir`.

Among the possible operations, you may:

- Stop AWX: `docker-compose stop`
- Upgrade AWX: `docker-compose pull && docker-compose up --force-recreate`

See the [docker-compose documentation](https://docs.docker.com/compose/) for details.
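For instance, assuming the default `docker_compose_dir` of `/tmp/awxcompose`:

```bash
cd /tmp/awxcompose
# Stop the AWX containers
docker-compose stop
# Pull newer images and recreate the containers to upgrade
docker-compose pull && docker-compose up --force-recreate
```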
Makefile
@@ -26,6 +26,9 @@ DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
|
||||
# Python packages to install only from source (not from binary wheels)
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||
# to install the actual requirements
|
||||
VENV_BOOTSTRAP ?= pip==19.3.1 setuptools==41.6.0
|
||||
|
||||
# Determine appropriate shasum command
|
||||
UNAME_S := $(shell uname -s)
|
||||
@@ -100,7 +103,7 @@ clean-languages:
|
||||
find . -type f -regex ".*\.mo$$" -delete
|
||||
|
||||
# Remove temporary build files, compiled Python files.
|
||||
clean: clean-ui clean-api clean-dist
|
||||
clean: clean-ui clean-api clean-awxkit clean-dist
|
||||
rm -rf awx/public
|
||||
rm -rf awx/lib/site-packages
|
||||
rm -rf awx/job_status
|
||||
@@ -116,6 +119,10 @@ clean-api:
|
||||
find . -type d -name "__pycache__" -delete
|
||||
rm -f awx/awx_test.sqlite3*
|
||||
rm -rf requirements/vendor
|
||||
rm -rf awx/projects
|
||||
|
||||
clean-awxkit:
|
||||
rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*
|
||||
|
||||
# convenience target to assert environment variables are defined
|
||||
guard-%:
|
||||
@@ -126,16 +133,16 @@ guard-%:
|
||||
|
||||
virtualenv: virtualenv_ansible virtualenv_awx
|
||||
|
||||
# virtualenv_* targets do not use --system-site-packages to prevent bugs installing packages
|
||||
# but Ansible venvs are expected to have this, so that must be done after venv creation
|
||||
virtualenv_ansible:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
if [ ! -d "$(VENV_BASE)" ]; then \
|
||||
mkdir $(VENV_BASE); \
|
||||
fi; \
|
||||
if [ ! -d "$(VENV_BASE)/ansible" ]; then \
|
||||
virtualenv -p python --system-site-packages $(VENV_BASE)/ansible && \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed six packaging appdirs && \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed setuptools==36.0.1 && \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed pip==9.0.1; \
|
||||
virtualenv -p python $(VENV_BASE)/ansible && \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
|
||||
fi; \
|
||||
fi
|
||||
|
||||
@@ -145,36 +152,46 @@ virtualenv_ansible_py3:
|
||||
mkdir $(VENV_BASE); \
|
||||
fi; \
|
||||
if [ ! -d "$(VENV_BASE)/ansible" ]; then \
|
||||
$(PYTHON) -m venv --system-site-packages $(VENV_BASE)/ansible; \
|
||||
virtualenv -p $(PYTHON) $(VENV_BASE)/ansible; \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
|
||||
fi; \
|
||||
fi
|
||||
|
||||
# flit is needed for offline install of certain packages, specifically ptyprocess
# it is needed for setup, but not always recognized as a setup dependency
# similar to pip, setuptools, and wheel, these are all needed here as bootstrapping issues
|
||||
virtualenv_awx:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
if [ ! -d "$(VENV_BASE)" ]; then \
|
||||
mkdir $(VENV_BASE); \
|
||||
fi; \
|
||||
if [ ! -d "$(VENV_BASE)/awx" ]; then \
|
||||
$(PYTHON) -m venv --system-site-packages $(VENV_BASE)/awx; \
|
||||
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --ignore-installed docutils==0.14; \
|
||||
virtualenv -p $(PYTHON) $(VENV_BASE)/awx; \
|
||||
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP) && \
|
||||
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) flit; \
|
||||
fi; \
|
||||
fi
|
||||
|
||||
# --ignore-installed flag is not used because *.txt files should specify exact versions
|
||||
requirements_ansible: virtualenv_ansible
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt
|
||||
# Same effect as using --system-site-packages flag on venv creation
|
||||
rm $(shell ls -d $(VENV_BASE)/ansible/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
|
||||
requirements_ansible_py3: virtualenv_ansible_py3
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
|
||||
# Same effect as using --system-site-packages flag on venv creation
|
||||
rm $(shell ls -d $(VENV_BASE)/ansible/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
|
||||
requirements_ansible_dev:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -182,13 +199,13 @@ requirements_ansible_dev:
|
||||
fi
|
||||
|
||||
# Install third-party requirements needed for AWX's environment.
|
||||
# this does not use system site packages intentionally
|
||||
requirements_awx: virtualenv_awx
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements.txt requirements/requirements_local.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
|
||||
cat requirements/requirements.txt requirements/requirements_local.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
|
||||
cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
echo "include-system-site-packages = true" >> $(VENV_BASE)/awx/lib/python$(PYTHON_VERSION)/pyvenv.cfg
|
||||
$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
|
||||
|
||||
requirements_awx_dev:
|
||||
@@ -196,7 +213,7 @@ requirements_awx_dev:
|
||||
|
||||
requirements: requirements_ansible requirements_awx
|
||||
|
||||
requirements_dev: requirements requirements_awx_dev requirements_ansible_dev
|
||||
requirements_dev: requirements_awx requirements_ansible_py3 requirements_awx_dev requirements_ansible_dev
|
||||
|
||||
requirements_test: requirements
|
||||
|
||||
@@ -364,7 +381,7 @@ check: flake8 pep8 # pyflakes pylint
|
||||
|
||||
awx-link:
|
||||
cp -R /tmp/awx.egg-info /awx_devel/ || true
|
||||
sed -i "s/placeholder/$(shell git describe --long | sed 's/\./\\./g')/" /awx_devel/awx.egg-info/PKG-INFO
|
||||
sed -i "s/placeholder/$(shell cat VERSION)/" /awx_devel/awx.egg-info/PKG-INFO
|
||||
cp -f /tmp/awx.egg-link /venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
@@ -381,7 +398,6 @@ test:
|
||||
prepare_collection_venv:
|
||||
rm -rf $(COLLECTION_VENV)
|
||||
mkdir $(COLLECTION_VENV)
|
||||
ln -s /usr/lib/python2.7/site-packages/ansible $(COLLECTION_VENV)/ansible
|
||||
$(VENV_BASE)/awx/bin/pip install --target=$(COLLECTION_VENV) git+https://github.com/ansible/tower-cli.git
|
||||
|
||||
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
|
||||
@@ -392,16 +408,27 @@ test_collection:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONPATH=$(COLLECTION_VENV):/awx_devel/awx_collection:$PYTHONPATH py.test $(COLLECTION_TEST_DIRS)
|
||||
PYTHONPATH=$(COLLECTION_VENV):/awx_devel/awx_collection:$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
|
||||
|
||||
flake8_collection:
|
||||
flake8 awx_collection/ # Different settings, in main exclude list
|
||||
|
||||
test_collection_all: prepare_collection_venv test_collection flake8_collection
|
||||
|
||||
test_collection_sanity:
|
||||
rm -rf sanity
|
||||
mkdir -p sanity/ansible_collections/awx
|
||||
cp -Ra awx_collection sanity/ansible_collections/awx/awx # symlinks do not work
|
||||
cd sanity/ansible_collections/awx/awx && git init && git add . # requires both this file structure and a git repo, so there you go
|
||||
cd sanity/ansible_collections/awx/awx && ansible-test sanity
|
||||
|
||||
build_collection:
|
||||
ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
|
||||
ansible-galaxy collection build awx_collection --output-path=awx_collection
|
||||
ansible-galaxy collection build awx_collection --force --output-path=awx_collection
|
||||
|
||||
install_collection: build_collection
|
||||
rm -rf ~/.ansible/collections/ansible_collections/awx/awx
|
||||
ansible-galaxy collection install awx_collection/awx-awx-$(VERSION).tar.gz
|
||||
|
||||
test_unit:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -609,28 +636,34 @@ docker-auth:
|
||||
echo "$(IMAGE_REPOSITORY_AUTH)" | docker login -u oauth2accesstoken --password-stdin $(IMAGE_REPOSITORY_BASE); \
|
||||
fi;
|
||||
|
||||
# This directory is bind-mounted inside of the development container and
|
||||
# needs to be pre-created for permissions to be set correctly. Otherwise,
|
||||
# Docker will create this directory as root.
|
||||
awx/projects:
|
||||
@mkdir -p $@
|
||||
|
||||
# Docker isolated rampart
|
||||
docker-compose-isolated:
|
||||
docker-compose-isolated: awx/projects
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
|
||||
|
||||
# Docker Compose Development environment
|
||||
docker-compose: docker-auth
|
||||
docker-compose: docker-auth awx/projects
|
||||
CURRENT_UID=$(shell id -u) OS="$(shell docker info | grep 'Operating System')" TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
|
||||
|
||||
docker-compose-cluster: docker-auth
|
||||
docker-compose-cluster: docker-auth awx/projects
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
|
||||
|
||||
docker-compose-credential-plugins: docker-auth
|
||||
docker-compose-credential-plugins: docker-auth awx/projects
|
||||
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx
|
||||
|
||||
docker-compose-test: docker-auth
|
||||
docker-compose-test: docker-auth awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) OS="$(shell docker info | grep 'Operating System')" TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
|
||||
|
||||
docker-compose-runtest:
|
||||
docker-compose-runtest: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
|
||||
|
||||
docker-compose-build-swagger:
|
||||
docker-compose-build-swagger: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger
|
||||
|
||||
detect-schema-change: genschema
|
||||
@@ -638,7 +671,7 @@ detect-schema-change: genschema
|
||||
# Ignore differences in whitespace with -b
|
||||
diff -u -b reference-schema.json schema.json
|
||||
|
||||
docker-compose-clean:
|
||||
docker-compose-clean: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm -w /awx_devel --service-ports awx make clean
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
|
||||
|
||||
@@ -647,7 +680,6 @@ docker-compose-build: awx-devel-build
|
||||
# Base development image build
|
||||
awx-devel-build:
|
||||
docker build -t ansible/awx_devel -f tools/docker-compose/Dockerfile \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:devel \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
docker tag ansible/awx_devel $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
#docker push $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
@@ -667,10 +699,10 @@ docker-clean:
|
||||
docker-refresh: docker-clean docker-compose
|
||||
|
||||
# Docker Development Environment with Elastic Stack Connected
|
||||
docker-compose-elk: docker-auth
|
||||
docker-compose-elk: docker-auth awx/projects
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-cluster-elk: docker-auth
|
||||
docker-compose-cluster-elk: docker-auth awx/projects
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
prometheus:
|
||||
|
||||
@@ -24,31 +24,18 @@ except ImportError: # pragma: no cover
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
import django
|
||||
from django.db.backends.base import schema
|
||||
from django.db.backends.utils import names_digest
|
||||
import django # noqa: F401
|
||||
HAS_DJANGO = True
|
||||
except ImportError:
|
||||
HAS_DJANGO = False
|
||||
else:
|
||||
from django.db.backends.base import schema
|
||||
from django.db.backends.utils import names_digest
|
||||
|
||||
|
||||
if HAS_DJANGO is True:
|
||||
# This line exists to make sure we don't regress on FIPS support if we
|
||||
# upgrade Django; if you're upgrading Django and see this error,
|
||||
# update the version check below, and confirm that FIPS still works.
|
||||
# If operating in a FIPS environment, `hashlib.md5()` will raise a `ValueError`,
|
||||
# but will support the `usedforsecurity` keyword on RHEL and Centos systems.
|
||||
|
||||
# Keep an eye on https://code.djangoproject.com/ticket/28401
|
||||
target_version = '2.2.4'
|
||||
if django.__version__ != target_version:
|
||||
raise RuntimeError(
|
||||
"Django version other than {target} detected: {current}. "
|
||||
"Overriding `names_digest` is known to work for Django {target} "
|
||||
"and may not work in other Django versions.".format(target=target_version,
|
||||
current=django.__version__)
|
||||
)
|
||||
|
||||
# See upgrade blocker note in requirements/README.md
|
||||
try:
|
||||
names_digest('foo', 'bar', 'baz', length=8)
|
||||
except ValueError:
|
||||
@@ -86,7 +73,14 @@ def oauth2_getattribute(self, attr):
|
||||
# Custom method to override
|
||||
# oauth2_provider.settings.OAuth2ProviderSettings.__getattribute__
|
||||
from django.conf import settings
|
||||
val = settings.OAUTH2_PROVIDER.get(attr)
|
||||
val = None
|
||||
if 'migrate' not in sys.argv:
|
||||
# certain Django OAuth Toolkit migrations actually reference
|
||||
# setting lookups for references to model classes (e.g.,
|
||||
# oauth2_settings.REFRESH_TOKEN_MODEL)
|
||||
# If we're doing an OAuth2 setting lookup *while running* a migration,
|
||||
# don't do our usual "Configure Tower in Tower" database setting lookup
|
||||
val = settings.OAUTH2_PROVIDER.get(attr)
|
||||
if val is None:
|
||||
val = object.__getattribute__(self, attr)
|
||||
return val
|
||||
|
||||
@@ -62,3 +62,15 @@ register(
|
||||
category=_('Authentication'),
|
||||
category_slug='authentication',
|
||||
)
|
||||
register(
|
||||
'LOGIN_REDIRECT_OVERRIDE',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
required=False,
|
||||
default='',
|
||||
label=_('Login redirect override URL'),
|
||||
help_text=_('URL to which unauthorized users will be redirected to log in. '
|
||||
'If blank, users will be sent to the Tower login page.'),
|
||||
category=_('Authentication'),
|
||||
category_slug='authentication',
|
||||
)
|
||||
|
||||
@@ -9,7 +9,7 @@ from functools import reduce
|
||||
# Django
|
||||
from django.core.exceptions import FieldError, ValidationError
|
||||
from django.db import models
|
||||
from django.db.models import Q
|
||||
from django.db.models import Q, CharField, IntegerField, BooleanField
|
||||
from django.db.models.fields import FieldDoesNotExist
|
||||
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
@@ -63,19 +63,19 @@ class TypeFilterBackend(BaseFilterBackend):
|
||||
raise ParseError(*e.args)
|
||||
|
||||
|
||||
def get_field_from_path(model, path):
|
||||
def get_fields_from_path(model, path):
|
||||
'''
|
||||
Given a Django ORM lookup path (possibly over multiple models)
|
||||
Returns the last field in the line, and also the revised lookup path
|
||||
Returns the fields in the line, and also the revised lookup path
|
||||
ex., given
|
||||
model=Organization
|
||||
path='project__timeout'
|
||||
returns tuple of field at the end of the line as well as a corrected
|
||||
path, for special cases we do substitutions
|
||||
(<IntegerField for timeout>, 'project__timeout')
|
||||
returns tuple of fields traversed as well and a corrected path,
|
||||
for special cases we do substitutions
|
||||
([<IntegerField for timeout>], 'project__timeout')
|
||||
'''
|
||||
# Store of all the fields used to detect repeats
|
||||
field_set = set([])
|
||||
field_list = []
|
||||
new_parts = []
|
||||
for name in path.split('__'):
|
||||
if model is None:
|
||||
@@ -111,13 +111,24 @@ def get_field_from_path(model, path):
|
||||
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
|
||||
elif getattr(field, '__prevent_search__', False):
|
||||
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
|
||||
if field in field_set:
|
||||
if field in field_list:
|
||||
# Field traversed twice, could create infinite JOINs, DoSing Tower
|
||||
raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
|
||||
field_set.add(field)
|
||||
field_list.append(field)
|
||||
model = getattr(field, 'related_model', None)
|
||||
|
||||
return field, '__'.join(new_parts)
|
||||
return field_list, '__'.join(new_parts)
|
||||
|
||||
|
||||
def get_field_from_path(model, path):
|
||||
'''
|
||||
Given a Django ORM lookup path (possibly over multiple models)
|
||||
Returns the last field in the line, and the revised lookup path
|
||||
ex.
|
||||
(<IntegerField for timeout>, 'project__timeout')
|
||||
'''
|
||||
field_list, new_path = get_fields_from_path(model, path)
|
||||
return (field_list[-1], new_path)
|
||||
|
||||
|
||||
class FieldLookupBackend(BaseFilterBackend):
|
||||
@@ -133,7 +144,11 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
'regex', 'iregex', 'gt', 'gte', 'lt', 'lte', 'in',
|
||||
'isnull', 'search')
|
||||
|
||||
def get_field_from_lookup(self, model, lookup):
|
||||
# A list of fields that we know can be filtered on without the possibility
# of introducing duplicates
|
||||
NO_DUPLICATES_WHITELIST = (CharField, IntegerField, BooleanField)
|
||||
|
||||
def get_fields_from_lookup(self, model, lookup):
|
||||
|
||||
if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
|
||||
path, suffix = lookup.rsplit('__', 1)
|
||||
@@ -147,11 +162,16 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
# FIXME: Could build up a list of models used across relationships, use
|
||||
# those lookups combined with request.user.get_queryset(Model) to make
|
||||
# sure user cannot query using objects he could not view.
|
||||
field, new_path = get_field_from_path(model, path)
|
||||
field_list, new_path = get_fields_from_path(model, path)
|
||||
|
||||
new_lookup = new_path
|
||||
new_lookup = '__'.join([new_path, suffix])
|
||||
return field, new_lookup
|
||||
return field_list, new_lookup
|
||||
|
||||
def get_field_from_lookup(self, model, lookup):
|
||||
'''Method to match return type of single field, if needed.'''
|
||||
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
|
||||
return (field_list[-1], new_lookup)
|
||||
|
||||
def to_python_related(self, value):
|
||||
value = force_text(value)
|
||||
@@ -182,7 +202,10 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
except UnicodeEncodeError:
|
||||
raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)
|
||||
|
||||
field, new_lookup = self.get_field_from_lookup(model, lookup)
|
||||
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
|
||||
field = field_list[-1]
|
||||
|
||||
needs_distinct = (not all(isinstance(f, self.NO_DUPLICATES_WHITELIST) for f in field_list))
|
||||
|
||||
# Type names are stored without underscores internally, but are presented and
|
||||
# and serialized over the API containing underscores so we remove `_`
|
||||
@@ -211,10 +234,10 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
for rm_field in related_model._meta.fields:
|
||||
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
|
||||
new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
|
||||
return value, new_lookups
|
||||
return value, new_lookups, needs_distinct
|
||||
else:
|
||||
value = self.value_to_python_for_field(field, value)
|
||||
return value, new_lookup
|
||||
return value, new_lookup, needs_distinct
|
||||
|
||||
def filter_queryset(self, request, queryset, view):
|
||||
try:
|
||||
@@ -225,6 +248,7 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
chain_filters = []
|
||||
role_filters = []
|
||||
search_filters = {}
|
||||
needs_distinct = False
|
||||
# Can only have two values: 'AND', 'OR'
|
||||
# If 'AND' is used, an item must satisfy all conditions to show up in the results.
# If 'OR' is used, an item just needs to satisfy one condition to appear in results.
|
||||
@@ -256,9 +280,12 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
search_filter_relation = 'AND'
|
||||
values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
|
||||
for value in values:
|
||||
search_value, new_keys = self.value_to_python(queryset.model, key, force_text(value))
|
||||
search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_text(value))
|
||||
assert isinstance(new_keys, list)
|
||||
search_filters[search_value] = new_keys
|
||||
# by definition, search *only* joins across relations,
|
||||
# so it _always_ needs a .distinct()
|
||||
needs_distinct = True
|
||||
continue
|
||||
|
||||
# Custom chain__ and or__ filters, mutually exclusive (both can
|
||||
@@ -282,7 +309,9 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
for value in values:
|
||||
if q_int:
|
||||
value = int(value)
|
||||
value, new_key = self.value_to_python(queryset.model, key, value)
|
||||
value, new_key, distinct = self.value_to_python(queryset.model, key, value)
|
||||
if distinct:
|
||||
needs_distinct = True
|
||||
if q_chain:
|
||||
chain_filters.append((q_not, new_key, value))
|
||||
elif q_or:
|
||||
@@ -332,7 +361,9 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
else:
|
||||
q = Q(**{k:v})
|
||||
queryset = queryset.filter(q)
|
||||
queryset = queryset.filter(*args).distinct()
|
||||
queryset = queryset.filter(*args)
|
||||
if needs_distinct:
|
||||
queryset = queryset.distinct()
|
||||
return queryset
|
||||
except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
|
||||
raise ParseError(e.args[0])
|
||||
|
||||
@@ -574,7 +574,7 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
|
||||
status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Verify we have permission to add the object as given.
|
||||
if not request.user.can_access(self.model, 'add', serializer.initial_data):
|
||||
if not request.user.can_access(self.model, 'add', serializer.validated_data):
|
||||
raise PermissionDenied()
|
||||
|
||||
# save the object through the serializer, reload and returned the saved
|
||||
|
||||
@@ -158,9 +158,16 @@ class Metadata(metadata.SimpleMetadata):
|
||||
isinstance(field, JSONField) or
|
||||
isinstance(model_field, JSONField) or
|
||||
isinstance(field, DRFJSONField) or
|
||||
isinstance(getattr(field, 'model_field', None), JSONField)
|
||||
isinstance(getattr(field, 'model_field', None), JSONField) or
|
||||
field.field_name == 'credential_passwords'
|
||||
):
|
||||
field_info['type'] = 'json'
|
||||
elif (
|
||||
isinstance(field, ManyRelatedField) and
|
||||
field.field_name == 'credentials'
|
||||
# launch-time credentials
|
||||
):
|
||||
field_info['type'] = 'list_of_ids'
|
||||
elif isinstance(model_field, BooleanField):
|
||||
field_info['type'] = 'boolean'
|
||||
|
||||
|
||||
@@ -98,26 +98,19 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'total_hosts',
|
||||
'hosts_with_active_failures',
|
||||
'total_groups',
|
||||
'groups_with_active_failures',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources',
|
||||
'inventory_sources_with_failures',
|
||||
'organization_id',
|
||||
'kind',
|
||||
'insights_credential_id',),
|
||||
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
|
||||
'has_inventory_sources'),
|
||||
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
|
||||
'total_hosts',
|
||||
'hosts_with_active_failures',
|
||||
'total_groups',
|
||||
'groups_with_active_failures',
|
||||
'has_inventory_sources'),
|
||||
'host': DEFAULT_SUMMARY_FIELDS,
|
||||
'group': DEFAULT_SUMMARY_FIELDS,
|
||||
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
|
||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job': DEFAULT_SUMMARY_FIELDS,
|
||||
@@ -125,7 +118,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'workflow_approval': DEFAULT_SUMMARY_FIELDS + ('timeout',),
|
||||
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
|
||||
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
|
||||
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
|
||||
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error', 'canceled_on'),
|
||||
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
|
||||
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
@@ -139,8 +132,9 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'insights_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
|
||||
'credential_type': DEFAULT_SUMMARY_FIELDS,
|
||||
}
|
||||
|
||||
|
||||
@@ -718,7 +712,7 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = UnifiedJob
|
||||
fields = ('*', 'unified_job_template', 'launch_type', 'status',
|
||||
'failed', 'started', 'finished', 'elapsed', 'job_args',
|
||||
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
|
||||
'job_cwd', 'job_env', 'job_explanation',
|
||||
'execution_node', 'controller_node',
|
||||
'result_traceback', 'event_processing_finished')
|
||||
@@ -1472,7 +1466,7 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
|
||||
|
||||
class Meta:
|
||||
model = ProjectUpdate
|
||||
fields = ('*', 'project', 'job_type', '-controller_node')
|
||||
fields = ('*', 'project', 'job_type', 'job_tags', '-controller_node')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ProjectUpdateSerializer, self).get_related(obj)
|
||||
@@ -1548,20 +1542,15 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
'admin', 'adhoc',
|
||||
{'copy': 'organization.inventory_admin'}
|
||||
]
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
|
||||
class Meta:
|
||||
model = Inventory
|
||||
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'groups_with_active_failures', 'has_inventory_sources',
|
||||
'total_inventory_sources', 'inventory_sources_with_failures',
|
||||
'insights_credential', 'pending_deletion',)
|
||||
'has_inventory_sources', 'total_inventory_sources',
|
||||
'inventory_sources_with_failures', 'insights_credential',
|
||||
'pending_deletion',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InventorySerializer, self).get_related(obj)
|
||||
@@ -1643,6 +1632,9 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
capabilities_prefetch = ['inventory.admin']
|
||||
|
||||
has_active_failures = serializers.SerializerMethodField()
|
||||
has_inventory_sources = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = Host
|
||||
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
|
||||
@@ -1756,6 +1748,14 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
ret['last_job_host_summary'] = None
|
||||
return ret
|
||||
|
||||
def get_has_active_failures(self, obj):
|
||||
return bool(
|
||||
obj.last_job_host_summary and obj.last_job_host_summary.failed
|
||||
)
|
||||
|
||||
def get_has_inventory_sources(self, obj):
|
||||
return obj.inventory_sources.exists()
|
||||
|
||||
|
||||
class AnsibleFactsSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
@@ -1768,17 +1768,10 @@ class AnsibleFactsSerializer(BaseSerializer):
|
||||
class GroupSerializer(BaseSerializerWithVariables):
|
||||
show_capabilities = ['copy', 'edit', 'delete']
|
||||
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Group
|
||||
fields = ('*', 'inventory', 'variables', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'groups_with_active_failures', 'has_inventory_sources')
|
||||
fields = ('*', 'inventory', 'variables')
|
||||
|
||||
def build_relational_field(self, field_name, relation_info):
|
||||
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
|
||||
@@ -2456,12 +2449,18 @@ class CredentialTypeSerializer(BaseSerializer):
|
||||
raise PermissionDenied(
|
||||
detail=_("Modifications not allowed for managed credential types")
|
||||
)
|
||||
|
||||
old_inputs = {}
|
||||
if self.instance:
|
||||
old_inputs = copy.deepcopy(self.instance.inputs)
|
||||
|
||||
ret = super(CredentialTypeSerializer, self).validate(attrs)
|
||||
|
||||
if self.instance and self.instance.credentials.exists():
|
||||
if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:
|
||||
if 'inputs' in attrs and old_inputs != self.instance.inputs:
|
||||
raise PermissionDenied(
|
||||
detail= _("Modifications to inputs are not allowed for credential types that are in use")
|
||||
)
|
||||
ret = super(CredentialTypeSerializer, self).validate(attrs)
|
||||
|
||||
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
|
||||
raise serializers.ValidationError({
|
||||
@@ -2816,7 +2815,7 @@ class JobTemplateMixin(object):
|
||||
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
|
||||
optimized_qs = uj_qs.non_polymorphic()
|
||||
return [{
|
||||
'id': x.id, 'status': x.status, 'finished': x.finished,
|
||||
'id': x.id, 'status': x.status, 'finished': x.finished, 'canceled_on': x.canceled_on,
|
||||
# Make type consistent with API top-level key, for instance workflow_job
|
||||
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
|
||||
} for x in optimized_qs[:10]]
|
||||
@@ -3678,7 +3677,7 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJobTemplateNode
|
||||
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
|
||||
@@ -3717,8 +3716,8 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJobNode
|
||||
fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'do_not_run',)
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'all_parents_must_converge', 'do_not_run',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
|
||||
@@ -3826,7 +3825,7 @@ class JobEventSerializer(BaseSerializer):
|
||||
model = JobEvent
|
||||
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
|
||||
'event_display', 'event_data', 'event_level', 'failed',
|
||||
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
|
||||
'changed', 'uuid', 'parent_uuid', 'host', 'host_name',
|
||||
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
|
||||
'verbosity')
|
||||
|
||||
@@ -3835,13 +3834,9 @@ class JobEventSerializer(BaseSerializer):
|
||||
res.update(dict(
|
||||
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
|
||||
))
|
||||
if obj.parent_id:
|
||||
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
|
||||
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
|
||||
if obj.host_id:
|
||||
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
|
||||
if obj.hosts.exists():
|
||||
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
|
||||
return res
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
@@ -3867,26 +3862,6 @@ class JobEventSerializer(BaseSerializer):
|
||||
return data
|
||||
|
||||
|
||||
class JobEventWebSocketSerializer(JobEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = JobEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'job_events'
|
||||
|
||||
|
||||
class ProjectUpdateEventSerializer(JobEventSerializer):
|
||||
stdout = serializers.SerializerMethodField()
|
||||
event_data = serializers.SerializerMethodField()
|
||||
@@ -3918,26 +3893,6 @@ class ProjectUpdateEventSerializer(JobEventSerializer):
|
||||
return {}
|
||||
|
||||
|
||||
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = ProjectUpdateEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'project_update_events'
|
||||
|
||||
|
||||
class AdHocCommandEventSerializer(BaseSerializer):
|
||||
|
||||
event_display = serializers.CharField(source='get_event_display', read_only=True)
|
||||
@@ -3969,26 +3924,6 @@ class AdHocCommandEventSerializer(BaseSerializer):
|
||||
return data
|
||||
|
||||
|
||||
class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = AdHocCommandEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'ad_hoc_command_events'
|
||||
|
||||
|
||||
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
|
||||
|
||||
class Meta:
|
||||
@@ -4004,26 +3939,6 @@ class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
|
||||
return res
|
||||
|
||||
|
||||
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = InventoryUpdateEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'inventory_update_events'
|
||||
|
||||
|
||||
class SystemJobEventSerializer(AdHocCommandEventSerializer):
|
||||
|
||||
class Meta:
|
||||
@@ -4039,26 +3954,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
|
||||
return res
|
||||
|
||||
|
||||
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
|
||||
created = serializers.SerializerMethodField()
|
||||
modified = serializers.SerializerMethodField()
|
||||
event_name = serializers.CharField(source='event')
|
||||
group_name = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = SystemJobEvent
|
||||
fields = ('*', 'event_name', 'group_name',)
|
||||
|
||||
def get_created(self, obj):
|
||||
return obj.created.isoformat()
|
||||
|
||||
def get_modified(self, obj):
|
||||
return obj.modified.isoformat()
|
||||
|
||||
def get_group_name(self, obj):
|
||||
return 'system_job_events'
|
||||
|
||||
|
||||
class JobLaunchSerializer(BaseSerializer):
|
||||
|
||||
# Representational fields
|
||||
@@ -4338,13 +4233,30 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
error_list = []
|
||||
collected_messages = []
|
||||
|
||||
def check_messages(messages):
|
||||
for message_type in messages:
|
||||
if message_type not in ('message', 'body'):
|
||||
error_list.append(_("Message type '{}' invalid, must be either 'message' or 'body'").format(message_type))
|
||||
continue
|
||||
message = messages[message_type]
|
||||
if message is None:
|
||||
continue
|
||||
if not isinstance(message, str):
|
||||
error_list.append(_("Expected string for '{}', found {}, ").format(message_type, type(message)))
|
||||
continue
|
||||
if message_type == 'message':
|
||||
if '\n' in message:
|
||||
error_list.append(_("Messages cannot contain newlines (found newline in {} event)".format(event)))
|
||||
continue
|
||||
collected_messages.append(message)
|
||||
|
||||
# Validate structure / content types
|
||||
if not isinstance(messages, dict):
|
||||
error_list.append(_("Expected dict for 'messages' field, found {}".format(type(messages))))
|
||||
else:
|
||||
for event in messages:
|
||||
if event not in ['started', 'success', 'error']:
|
||||
error_list.append(_("Event '{}' invalid, must be one of 'started', 'success', or 'error'").format(event))
|
||||
if event not in ('started', 'success', 'error', 'workflow_approval'):
|
||||
error_list.append(_("Event '{}' invalid, must be one of 'started', 'success', 'error', or 'workflow_approval'").format(event))
|
||||
continue
|
||||
event_messages = messages[event]
|
||||
if event_messages is None:
|
||||
@@ -4352,21 +4264,21 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
if not isinstance(event_messages, dict):
|
||||
error_list.append(_("Expected dict for event '{}', found {}").format(event, type(event_messages)))
|
||||
continue
|
||||
for message_type in event_messages:
|
||||
if message_type not in ['message', 'body']:
|
||||
error_list.append(_("Message type '{}' invalid, must be either 'message' or 'body'").format(message_type))
|
||||
continue
|
||||
message = event_messages[message_type]
|
||||
if message is None:
|
||||
continue
|
||||
if not isinstance(message, str):
|
||||
error_list.append(_("Expected string for '{}', found {}, ").format(message_type, type(message)))
|
||||
continue
|
||||
if message_type == 'message':
|
||||
if '\n' in message:
|
||||
error_list.append(_("Messages cannot contain newlines (found newline in {} event)".format(event)))
|
||||
if event == 'workflow_approval':
|
||||
for subevent in event_messages:
|
||||
if subevent not in ('running', 'approved', 'timed_out', 'denied'):
|
||||
error_list.append(_("Workflow Approval event '{}' invalid, must be one of "
|
||||
"'running', 'approved', 'timed_out', or 'denied'").format(subevent))
|
||||
continue
|
||||
collected_messages.append(message)
|
||||
subevent_messages = event_messages[subevent]
|
||||
if subevent_messages is None:
|
||||
continue
|
||||
if not isinstance(subevent_messages, dict):
|
||||
error_list.append(_("Expected dict for workflow approval event '{}', found {}").format(subevent, type(subevent_messages)))
|
||||
continue
|
||||
check_messages(subevent_messages)
|
||||
else:
|
||||
check_messages(event_messages)
|
||||
|
||||
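For readers following this validation logic, here is a sketch of a `messages` value that would pass all of the checks above; the wording of the messages is made up:

```python
# Hypothetical custom notification "messages" structure accepted by the validator:
# top-level keys are events, each event maps 'message'/'body' to strings
# (no newlines allowed in 'message'), and 'workflow_approval' nests its sub-events.
messages = {
    'started': {'message': 'Job started', 'body': None},
    'success': {'message': 'Job succeeded', 'body': None},
    'error': {'message': 'Job failed', 'body': None},
    'workflow_approval': {
        'running': {'message': 'Approval node running', 'body': None},
        'approved': {'message': 'Approval node approved', 'body': None},
        'timed_out': {'message': 'Approval node timed out', 'body': None},
        'denied': {'message': 'Approval node denied', 'body': None},
    },
}
```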
# Subclass to return name of undefined field
|
||||
class DescriptiveUndefined(StrictUndefined):
|
||||
@@ -4497,8 +4409,18 @@ class NotificationSerializer(BaseSerializer):
|
||||
'notification_type', 'recipients', 'subject', 'body')
|
||||
|
||||
def get_body(self, obj):
|
||||
if obj.notification_type == 'webhook' and 'body' in obj.body:
|
||||
return obj.body['body']
|
||||
if obj.notification_type in ('webhook', 'pagerduty'):
|
||||
if isinstance(obj.body, dict):
|
||||
if 'body' in obj.body:
|
||||
return obj.body['body']
|
||||
elif isinstance(obj.body, str):
|
||||
# attempt to load json string
|
||||
try:
|
||||
potential_body = json.loads(obj.body)
|
||||
if isinstance(potential_body, dict):
|
||||
return potential_body
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
return obj.body
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -4631,6 +4553,10 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
|
||||
|
||||
if isinstance(obj.unified_job_template, SystemJobTemplate):
|
||||
summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type
|
||||
|
||||
if 'inventory' in summary_fields:
|
||||
return summary_fields
|
||||
|
||||
@@ -4774,6 +4700,18 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
raise serializers.ValidationError(_('Isolated instances may not be added or removed from instances groups via the API.'))
|
||||
if self.instance and self.instance.controller_id is not None:
|
||||
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
|
||||
if value and self.instance and self.instance.is_containerized:
|
||||
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
|
||||
return value
|
||||
|
||||
def validate_policy_instance_percentage(self, value):
|
||||
if value and self.instance and self.instance.is_containerized:
|
||||
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
|
||||
return value
|
||||
|
||||
def validate_policy_instance_minimum(self, value):
|
||||
if value and self.instance and self.instance.is_containerized:
|
||||
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
|
||||
return value
|
||||
|
||||
def validate_name(self, value):
|
||||
|
||||
@@ -1,7 +1,7 @@
# Cancel Inventory Update

Make a GET request to this resource to determine if the inventory update can be
cancelled. The response will include the following field:
canceled. The response will include the following field:

* `can_cancel`: Indicates whether this update can be canceled (boolean,
read-only)
@@ -1,7 +1,7 @@
{% ifmeth GET %}
# Determine if a Job can be cancelled
# Determine if a Job can be canceled

Make a GET request to this resource to determine if the job can be cancelled.
Make a GET request to this resource to determine if the job can be canceled.
The response will include the following field:

* `can_cancel`: Indicates whether this job can be canceled (boolean, read-only)
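As an illustration of how this resource is typically used (hostname, job id, and token are placeholders):

```bash
# Read-only check: does the job still allow cancellation?
curl -s -H "Authorization: Bearer TOKEN" \
     https://awx.example.com/api/v2/jobs/42/cancel/
# expected shape of the response: {"can_cancel": true}
# a POST to the same URL actually requests the cancellation
```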
@@ -1,7 +1,7 @@
# Cancel Project Update

Make a GET request to this resource to determine if the project update can be
cancelled. The response will include the following field:
canceled. The response will include the following field:

* `can_cancel`: Indicates whether this update can be canceled (boolean,
read-only)
@@ -72,17 +72,17 @@ from awx.api.generics import (
|
||||
SubListDestroyAPIView
|
||||
)
|
||||
from awx.api.versioning import reverse
|
||||
from awx.conf.license import get_license
|
||||
from awx.main import models
|
||||
from awx.main.utils import (
|
||||
camelcase_to_underscore,
|
||||
extract_ansible_vars,
|
||||
get_awx_version,
|
||||
get_awx_http_client_headers,
|
||||
get_object_or_400,
|
||||
getattrd,
|
||||
get_pk_from_dict,
|
||||
schedule_task_manager,
|
||||
ignore_inventory_computed_fields
|
||||
ignore_inventory_computed_fields,
|
||||
set_environ
|
||||
)
|
||||
from awx.main.utils.encryption import encrypt_value
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
@@ -102,7 +102,7 @@ from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.api.views.mixin import (
|
||||
ControlledByScmMixin, InstanceGroupMembershipMixin,
|
||||
OrganizationCountsMixin, RelatedJobsPreventDeleteMixin,
|
||||
UnifiedJobDeletionMixin,
|
||||
UnifiedJobDeletionMixin, NoTruncateMixin,
|
||||
)
|
||||
from awx.api.views.organization import ( # noqa
|
||||
OrganizationList,
|
||||
@@ -205,20 +205,15 @@ class DashboardView(APIView):
|
||||
'failed': ec2_inventory_failed.count()}
|
||||
|
||||
user_groups = get_user_queryset(request.user, models.Group)
|
||||
groups_job_failed = (
|
||||
models.Group.objects.filter(hosts_with_active_failures__gt=0) | models.Group.objects.filter(groups_with_active_failures__gt=0)
|
||||
).count()
|
||||
groups_inventory_failed = models.Group.objects.filter(inventory_sources__last_job_failed=True).count()
|
||||
data['groups'] = {'url': reverse('api:group_list', request=request),
|
||||
'failures_url': reverse('api:group_list', request=request) + "?has_active_failures=True",
|
||||
'total': user_groups.count(),
|
||||
'job_failed': groups_job_failed,
|
||||
'inventory_failed': groups_inventory_failed}
|
||||
|
||||
user_hosts = get_user_queryset(request.user, models.Host)
|
||||
user_hosts_failed = user_hosts.filter(has_active_failures=True)
|
||||
user_hosts_failed = user_hosts.filter(last_job_host_summary__failed=True)
|
||||
data['hosts'] = {'url': reverse('api:host_list', request=request),
|
||||
'failures_url': reverse('api:host_list', request=request) + "?has_active_failures=True",
|
||||
'failures_url': reverse('api:host_list', request=request) + "?last_job_host_summary__failed=True",
|
||||
'total': user_hosts.count(),
|
||||
'failed': user_hosts_failed.count()}
|
||||
|
||||
@@ -383,6 +378,13 @@ class InstanceGroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAP
|
||||
serializer_class = serializers.InstanceGroupSerializer
|
||||
permission_classes = (InstanceGroupTowerPermission,)
|
||||
|
||||
def update_raw_data(self, data):
|
||||
if self.get_object().is_containerized:
|
||||
data.pop('policy_instance_percentage', None)
|
||||
data.pop('policy_instance_minimum', None)
|
||||
data.pop('policy_instance_list', None)
|
||||
return super(InstanceGroupDetail, self).update_raw_data(data)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
if instance.controller is not None:
|
||||
@@ -568,6 +570,7 @@ class TeamUsersList(BaseUsersList):
|
||||
serializer_class = serializers.UserSerializer
|
||||
parent_model = models.Team
|
||||
relationship = 'member_role.members'
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
class TeamRolesList(SubListAttachDetachAPIView):
|
||||
@@ -904,6 +907,7 @@ class UserList(ListCreateAPIView):
|
||||
model = models.User
|
||||
serializer_class = serializers.UserSerializer
|
||||
permission_classes = (UserPermission,)
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
class UserMeList(ListAPIView):
|
||||
@@ -911,6 +915,7 @@ class UserMeList(ListAPIView):
|
||||
model = models.User
|
||||
serializer_class = serializers.UserSerializer
|
||||
name = _('Me')
|
||||
ordering = ('username',)
|
||||
|
||||
def get_queryset(self):
|
||||
return self.model.objects.filter(pk=self.request.user.pk)
|
||||
@@ -1254,6 +1259,7 @@ class CredentialOwnerUsersList(SubListAPIView):
|
||||
serializer_class = serializers.UserSerializer
|
||||
parent_model = models.Credential
|
||||
relationship = 'admin_role.members'
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
class CredentialOwnerTeamsList(SubListAPIView):
|
||||
@@ -1375,6 +1381,7 @@ class CredentialExternalTest(SubDetailAPIView):
|
||||
|
||||
model = models.Credential
|
||||
serializer_class = serializers.EmptySerializer
|
||||
obj_permission_type = 'use'
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
@@ -1600,7 +1607,8 @@ class HostInsights(GenericAPIView):
|
||||
|
||||
def _call_insights_api(self, url, session, headers):
|
||||
try:
|
||||
res = session.get(url, headers=headers, timeout=120)
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
res = session.get(url, headers=headers, timeout=120)
|
||||
except requests.exceptions.SSLError:
|
||||
raise BadGateway(_('SSLError while trying to connect to {}').format(url))
|
||||
except requests.exceptions.Timeout:
|
||||
@@ -1632,18 +1640,6 @@ class HostInsights(GenericAPIView):
|
||||
|
||||
return session
|
||||
|
||||
def _get_headers(self):
|
||||
license = get_license(show_key=False).get('license_type', 'UNLICENSED')
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
'User-Agent': '{} {} ({})'.format(
|
||||
'AWX' if license == 'open' else 'Red Hat Ansible Tower',
|
||||
get_awx_version(),
|
||||
license
|
||||
)
|
||||
}
|
||||
|
||||
return headers
|
||||
|
||||
def _get_platform_info(self, host, session, headers):
|
||||
url = '{}/api/inventory/v1/hosts?insights_id={}'.format(
|
||||
@@ -1710,7 +1706,7 @@ class HostInsights(GenericAPIView):
|
||||
username = cred.get_input('username', default='')
|
||||
password = cred.get_input('password', default='')
|
||||
session = self._get_session(username, password)
|
||||
headers = self._get_headers()
|
||||
headers = get_awx_http_client_headers()
|
||||
|
||||
data = self._get_insights(host, session, headers)
|
||||
return Response(data, status=status.HTTP_200_OK)
|
||||
@@ -2136,13 +2132,22 @@ class InventorySourceHostsList(HostRelatedSearchMixin, SubListDestroyAPIView):
|
||||
def perform_list_destroy(self, instance_list):
|
||||
inv_source = self.get_parent_object()
|
||||
with ignore_inventory_computed_fields():
|
||||
# Activity stream doesn't record disassociation here anyway
|
||||
# no signals-related reason to not bulk-delete
|
||||
models.Host.groups.through.objects.filter(
|
||||
host__inventory_sources=inv_source
|
||||
).delete()
|
||||
r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
|
||||
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
|
||||
if not settings.ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC:
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
# job host summary deletion necessary to avoid deadlock
|
||||
models.JobHostSummary.objects.filter(host__inventory_sources=inv_source).update(host=None)
|
||||
models.Host.objects.filter(inventory_sources=inv_source).delete()
|
||||
r = super(InventorySourceHostsList, self).perform_list_destroy([])
|
||||
else:
|
||||
# Advance delete of group-host memberships to prevent deadlock
|
||||
# Activity stream doesn't record disassociation here anyway
|
||||
# no signals-related reason to not bulk-delete
|
||||
models.Host.groups.through.objects.filter(
|
||||
host__inventory_sources=inv_source
|
||||
).delete()
|
||||
r = super(InventorySourceHostsList, self).perform_list_destroy(instance_list)
|
||||
update_inventory_computed_fields.delay(inv_source.inventory_id)
|
||||
return r
|
||||
|
||||
|
||||
@@ -2157,12 +2162,19 @@ class InventorySourceGroupsList(SubListDestroyAPIView):
|
||||
def perform_list_destroy(self, instance_list):
|
||||
inv_source = self.get_parent_object()
|
||||
with ignore_inventory_computed_fields():
|
||||
# Same arguments for bulk delete as with host list
|
||||
models.Group.hosts.through.objects.filter(
|
||||
group__inventory_sources=inv_source
|
||||
).delete()
|
||||
r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
|
||||
update_inventory_computed_fields.delay(inv_source.inventory_id, True)
|
||||
if not settings.ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC:
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
models.Group.objects.filter(inventory_sources=inv_source).delete()
|
||||
r = super(InventorySourceGroupsList, self).perform_list_destroy([])
|
||||
else:
|
||||
# Advance delete of group-host memberships to prevent deadlock
|
||||
# Same arguments for bulk delete as with host list
|
||||
models.Group.hosts.through.objects.filter(
|
||||
group__inventory_sources=inv_source
|
||||
).delete()
|
||||
r = super(InventorySourceGroupsList, self).perform_list_destroy(instance_list)
|
||||
update_inventory_computed_fields.delay(inv_source.inventory_id)
|
||||
return r
|
||||
|
||||
|
||||
@@ -2534,7 +2546,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
if not isinstance(val, allow_types):
|
||||
return Response(dict(error=_("'{field_name}' in survey question {idx} expected to be {type_label}.").format(
|
||||
field_name=field_name, type_label=type_label, **context
|
||||
)))
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if survey_item['variable'] in variable_set:
|
||||
return Response(dict(error=_("'variable' '%(item)s' duplicated in survey question %(survey)s.") % {
|
||||
'item': survey_item['variable'], 'survey': str(idx)}), status=status.HTTP_400_BAD_REQUEST)
|
||||
@@ -2549,7 +2561,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
"'{survey_item[type]}' in survey question {idx} is not one of '{allowed_types}' allowed question types."
|
||||
).format(
|
||||
allowed_types=', '.join(JobTemplateSurveySpec.ALLOWED_TYPES.keys()), **context
|
||||
)))
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if 'default' in survey_item and survey_item['default'] != '':
|
||||
if not isinstance(survey_item['default'], JobTemplateSurveySpec.ALLOWED_TYPES[qtype]):
|
||||
type_label = 'string'
|
||||
@@ -2567,7 +2579,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
if survey_item[key] is not None and (not isinstance(survey_item[key], int)):
|
||||
return Response(dict(error=_(
|
||||
"The {min_or_max} limit in survey question {idx} expected to be integer."
|
||||
).format(min_or_max=key, **context)))
|
||||
).format(min_or_max=key, **context)), status=status.HTTP_400_BAD_REQUEST)
|
||||
# if it's a multiselect or multiple choice, it must have choices listed
# choices and defaults must come in as strings separated by \n characters.
|
||||
if qtype == 'multiselect' or qtype == 'multiplechoice':
|
||||
@@ -2577,7 +2589,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
else:
|
||||
return Response(dict(error=_(
|
||||
"Survey question {idx} of type {survey_item[type]} must specify choices.".format(**context)
|
||||
)))
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
# If there is a default string split it out removing extra /n characters.
|
||||
# Note: There can still be extra newline characters added in the API, these are sanitized out using .strip()
|
||||
if 'default' in survey_item:
|
||||
@@ -2591,11 +2603,11 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
if len(list_of_defaults) > 1:
|
||||
return Response(dict(error=_(
|
||||
"Multiple Choice (Single Select) can only have one default value.".format(**context)
|
||||
)))
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
if any(item not in survey_item['choices'] for item in list_of_defaults):
|
||||
return Response(dict(error=_(
|
||||
"Default choice must be answered from the choices listed.".format(**context)
|
||||
)))
|
||||
)), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Process encryption substitution
|
||||
if ("default" in survey_item and isinstance(survey_item['default'], str) and
|
||||
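For multiselect and multiplechoice questions the hunks above validate defaults that arrive as newline-separated strings: split on newlines, strip stray whitespace, reject more than one default for single-select, and require every default to be one of the listed choices. A self-contained approximation of that check (plain Python, not the actual view code):

    def validate_choice_defaults(qtype, default, choices):
        """Return an error string or None, mirroring the survey-spec checks above."""
        list_of_defaults = [d.strip() for d in str(default).splitlines() if d.strip()]
        if qtype == 'multiplechoice' and len(list_of_defaults) > 1:
            return 'Multiple Choice (Single Select) can only have one default value.'
        if any(item not in choices for item in list_of_defaults):
            return 'Default choice must be answered from the choices listed.'
        return None

    # examples
    print(validate_choice_defaults('multiplechoice', 'a\nb', ['a', 'b']))  # too many defaults
    print(validate_choice_defaults('multiselect', 'a\nc', ['a', 'b']))     # 'c' not a listed choice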
@@ -3253,7 +3265,7 @@ class WorkflowJobRelaunch(GenericAPIView):
|
||||
jt = obj.job_template
|
||||
if not jt:
|
||||
raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.'))
|
||||
elif not jt.inventory or min(jt.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
|
||||
elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count():
|
||||
raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.'))
|
||||
new_workflow_job = obj.create_relaunch_workflow_job()
|
||||
new_workflow_job.signal_start()
|
||||
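The relaunch guard above recomputes min(inventory host count, job_slice_count) using the workflow job's own inventory rather than the template's, and refuses the relaunch if that no longer matches the node count of the original sliced run. A compact restatement of the condition (a sketch, not the view itself):

    def can_relaunch_sliced(job_template, workflow_job):
        """Slice-count guard; attribute names follow the hunk above."""
        if not job_template:
            raise ValueError('Cannot relaunch slice workflow job orphaned from job template.')
        if not workflow_job.inventory:
            raise ValueError('Cannot relaunch sliced workflow job after slice count has changed.')
        expected = min(workflow_job.inventory.hosts.count(), job_template.job_slice_count)
        if expected != workflow_job.workflow_nodes.count():
            raise ValueError('Cannot relaunch sliced workflow job after slice count has changed.')
        return True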
@@ -3762,18 +3774,12 @@ class JobHostSummaryDetail(RetrieveAPIView):
|
||||
serializer_class = serializers.JobHostSummarySerializer
|
||||
|
||||
|
||||
class JobEventList(ListAPIView):
|
||||
class JobEventList(NoTruncateMixin, ListAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
if self.request.query_params.get('no_truncate'):
|
||||
context.update(no_truncate=True)
|
||||
return context
|
||||
|
||||
|
||||
class JobEventDetail(RetrieveAPIView):
|
||||
|
||||
@@ -3786,7 +3792,7 @@ class JobEventDetail(RetrieveAPIView):
|
||||
return context
|
||||
|
||||
|
||||
class JobEventChildrenList(SubListAPIView):
|
||||
class JobEventChildrenList(NoTruncateMixin, SubListAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
@@ -3810,8 +3816,14 @@ class JobEventHostsList(HostRelatedSearchMixin, SubListAPIView):
|
||||
relationship = 'hosts'
|
||||
name = _('Job Event Hosts List')
|
||||
|
||||
def get_queryset(self):
|
||||
parent_event = self.get_parent_object()
|
||||
self.check_parent_access(parent_event)
|
||||
qs = self.request.user.get_queryset(self.model).filter(job_events_as_primary_host=parent_event)
|
||||
return qs
|
||||
|
||||
class BaseJobEventsList(SubListAPIView):
|
||||
|
||||
class BaseJobEventsList(NoTruncateMixin, SubListAPIView):
|
||||
|
||||
model = models.JobEvent
|
||||
serializer_class = serializers.JobEventSerializer
|
||||
@@ -3832,8 +3844,7 @@ class HostJobEventsList(BaseJobEventsList):
|
||||
def get_queryset(self):
|
||||
parent_obj = self.get_parent_object()
|
||||
self.check_parent_access(parent_obj)
|
||||
qs = self.request.user.get_queryset(self.model).filter(
|
||||
Q(host=parent_obj) | Q(hosts=parent_obj)).distinct()
|
||||
qs = self.request.user.get_queryset(self.model).filter(host=parent_obj)
|
||||
return qs
|
||||
|
||||
|
||||
@@ -3849,9 +3860,7 @@ class JobJobEventsList(BaseJobEventsList):
|
||||
def get_queryset(self):
|
||||
job = self.get_parent_object()
|
||||
self.check_parent_access(job)
|
||||
qs = job.job_events
|
||||
qs = qs.select_related('host')
|
||||
qs = qs.prefetch_related('hosts', 'children')
|
||||
qs = job.job_events.select_related('host').order_by('start_line')
|
||||
return qs.all()
|
||||
|
||||
|
||||
@@ -4007,18 +4016,12 @@ class AdHocCommandRelaunch(GenericAPIView):
|
||||
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
|
||||
|
||||
|
||||
class AdHocCommandEventList(ListAPIView):
|
||||
class AdHocCommandEventList(NoTruncateMixin, ListAPIView):
|
||||
|
||||
model = models.AdHocCommandEvent
|
||||
serializer_class = serializers.AdHocCommandEventSerializer
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
if self.request.query_params.get('no_truncate'):
|
||||
context.update(no_truncate=True)
|
||||
return context
|
||||
|
||||
|
||||
class AdHocCommandEventDetail(RetrieveAPIView):
|
||||
|
||||
@@ -4031,7 +4034,7 @@ class AdHocCommandEventDetail(RetrieveAPIView):
|
||||
return context
|
||||
|
||||
|
||||
class BaseAdHocCommandEventsList(SubListAPIView):
|
||||
class BaseAdHocCommandEventsList(NoTruncateMixin, SubListAPIView):
|
||||
|
||||
model = models.AdHocCommandEvent
|
||||
serializer_class = serializers.AdHocCommandEventSerializer
|
||||
@@ -4297,8 +4300,15 @@ class NotificationTemplateTest(GenericAPIView):
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
notification = obj.generate_notification("Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE),
|
||||
{"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)})
|
||||
msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
|
||||
if obj.notification_type in ('email', 'pagerduty'):
|
||||
body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
|
||||
elif obj.notification_type == 'webhook':
|
||||
body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
|
||||
else:
|
||||
body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
|
||||
notification = obj.generate_notification(msg, body)
|
||||
|
||||
if not notification:
|
||||
return Response({}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
|
||||
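The test endpoint now shapes the notification body to the backend: email and pagerduty get a plain string, webhook gets a JSON string, and every other backend keeps the original dict payload. A small sketch of that dispatch (the helper name is illustrative):

    def build_test_notification(notification_type, obj_id, url_base):
        msg = "Tower Notification Test {} {}".format(obj_id, url_base)
        text = "Ansible Tower Test Notification {} {}".format(obj_id, url_base)
        if notification_type in ('email', 'pagerduty'):
            body = text                                  # plain string body
        elif notification_type == 'webhook':
            body = '{{"body": "{}"}}'.format(text)       # JSON string body
        else:
            body = {"body": text}                        # dict body for other backends
        return msg, body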
@@ -270,3 +270,11 @@ class ControlledByScmMixin(object):
|
||||
obj = super(ControlledByScmMixin, self).get_parent_object()
|
||||
self._reset_inv_src_rev(obj)
|
||||
return obj
|
||||
|
||||
|
||||
class NoTruncateMixin(object):
|
||||
def get_serializer_context(self):
|
||||
context = super().get_serializer_context()
|
||||
if self.request.query_params.get('no_truncate'):
|
||||
context.update(no_truncate=True)
|
||||
return context
|
||||
|
||||
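NoTruncateMixin only copies a ?no_truncate query parameter into the serializer context; the serializer is then free to skip stdout truncation when it sees the flag. A hedged sketch of how a serializer might consume it (the field name and truncation length are assumptions, not the actual AWX JobEventSerializer):

    from rest_framework import serializers

    class EventStdoutSerializer(serializers.Serializer):
        # illustrative serializer, not AWX's JobEventSerializer
        stdout = serializers.SerializerMethodField()

        MAX_LEN = 1024  # assumed truncation length

        def get_stdout(self, obj):
            text = obj.stdout or ''
            if self.context.get('no_truncate'):
                return text
            return text[:self.MAX_LEN]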
@@ -20,6 +20,7 @@ from rest_framework import status
|
||||
import requests
|
||||
|
||||
from awx.api.generics import APIView
|
||||
from awx.conf.registry import settings_registry
|
||||
from awx.main.ha import is_ha_environment
|
||||
from awx.main.utils import (
|
||||
get_awx_version,
|
||||
@@ -37,6 +38,7 @@ from awx.main.models import (
|
||||
InstanceGroup,
|
||||
JobTemplate,
|
||||
)
|
||||
from awx.main.utils import set_environ
|
||||
|
||||
logger = logging.getLogger('awx.api.views.root')
|
||||
|
||||
@@ -60,6 +62,7 @@ class ApiRootView(APIView):
|
||||
data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
|
||||
data['custom_logo'] = settings.CUSTOM_LOGO
|
||||
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
|
||||
data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
|
||||
return Response(data)
|
||||
|
||||
|
||||
@@ -189,7 +192,8 @@ class ApiV2SubscriptionView(APIView):
|
||||
data['rh_password'] = settings.REDHAT_PASSWORD
|
||||
try:
|
||||
user, pw = data.get('rh_username'), data.get('rh_password')
|
||||
validated = get_licenser().validate_rh(user, pw)
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
validated = get_licenser().validate_rh(user, pw)
|
||||
if user:
|
||||
settings.REDHAT_USERNAME = data['rh_username']
|
||||
if pw:
|
||||
@@ -201,10 +205,15 @@ class ApiV2SubscriptionView(APIView):
|
||||
getattr(getattr(exc, 'response', None), 'status_code', None) == 401
|
||||
):
|
||||
msg = _("The provided credentials are invalid (HTTP 401).")
|
||||
if isinstance(exc, (ValueError, OSError)) and exc.args:
|
||||
elif isinstance(exc, requests.exceptions.ProxyError):
|
||||
msg = _("Unable to connect to proxy server.")
|
||||
elif isinstance(exc, requests.exceptions.ConnectionError):
|
||||
msg = _("Could not connect to subscription service.")
|
||||
elif isinstance(exc, (ValueError, OSError)) and exc.args:
|
||||
msg = exc.args[0]
|
||||
logger.exception(smart_text(u"Invalid license submitted."),
|
||||
extra=dict(actor=request.user.username))
|
||||
else:
|
||||
logger.exception(smart_text(u"Invalid license submitted."),
|
||||
extra=dict(actor=request.user.username))
|
||||
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
return Response(validated)
|
||||
@@ -301,7 +310,8 @@ class ApiV2ConfigView(APIView):
|
||||
# If the license is valid, write it to the database.
|
||||
if license_data_validated['valid_key']:
|
||||
settings.LICENSE = license_data
|
||||
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
|
||||
if not settings_registry.is_setting_read_only('TOWER_URL_BASE'):
|
||||
settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
|
||||
return Response(license_data_validated)
|
||||
|
||||
logger.warning(smart_text(u"Invalid license submitted."),
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
from hashlib import sha1
|
||||
import hmac
|
||||
import json
|
||||
import logging
|
||||
import urllib.parse
|
||||
|
||||
@@ -151,13 +150,13 @@ class WebhookReceiverBase(APIView):
|
||||
'webhook_credential': obj.webhook_credential,
|
||||
'webhook_guid': event_guid,
|
||||
},
|
||||
'extra_vars': json.dumps({
|
||||
'extra_vars': {
|
||||
'tower_webhook_event_type': event_type,
|
||||
'tower_webhook_event_guid': event_guid,
|
||||
'tower_webhook_event_ref': event_ref,
|
||||
'tower_webhook_status_api': status_api,
|
||||
'tower_webhook_payload': request.data,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
new_job = obj.create_unified_job(**kwargs)
|
||||
|
||||
@@ -1,16 +1,17 @@
|
||||
# Python
|
||||
import os
|
||||
import re
|
||||
import logging
|
||||
import urllib.parse as urlparse
|
||||
from collections import OrderedDict
|
||||
|
||||
# Django
|
||||
from django.core.validators import URLValidator
|
||||
from django.core.validators import URLValidator, _lazy_re_compile
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.fields import ( # noqa
|
||||
BooleanField, CharField, ChoiceField, DictField, EmailField,
|
||||
BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField,
|
||||
IntegerField, ListField, NullBooleanField
|
||||
)
|
||||
|
||||
@@ -118,17 +119,42 @@ class StringListPathField(StringListField):
|
||||
|
||||
|
||||
class URLField(CharField):
|
||||
# these lines set up a custom regex that allow numbers in the
|
||||
# top-level domain
|
||||
tld_re = (
|
||||
r'\.' # dot
|
||||
r'(?!-)' # can't start with a dash
|
||||
r'(?:[a-z' + URLValidator.ul + r'0-9' + '-]{2,63}' # domain label, this line was changed from the original URLValidator
|
||||
r'|xn--[a-z0-9]{1,59})' # or punycode label
|
||||
r'(?<!-)' # can't end with a dash
|
||||
r'\.?' # may have a trailing dot
|
||||
)
|
||||
|
||||
host_re = '(' + URLValidator.hostname_re + URLValidator.domain_re + tld_re + '|localhost)'
|
||||
|
||||
regex = _lazy_re_compile(
|
||||
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
|
||||
r'(?:[^\s:@/]+(?::[^\s:@/]*)?@)?' # user:pass authentication
|
||||
r'(?:' + URLValidator.ipv4_re + '|' + URLValidator.ipv6_re + '|' + host_re + ')'
|
||||
r'(?::\d{2,5})?' # port
|
||||
r'(?:[/?#][^\s]*)?' # resource path
|
||||
r'\Z', re.IGNORECASE)
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
schemes = kwargs.pop('schemes', None)
|
||||
regex = kwargs.pop('regex', None)
|
||||
self.allow_plain_hostname = kwargs.pop('allow_plain_hostname', False)
|
||||
self.allow_numbers_in_top_level_domain = kwargs.pop('allow_numbers_in_top_level_domain', True)
|
||||
super(URLField, self).__init__(**kwargs)
|
||||
validator_kwargs = dict(message=_('Enter a valid URL'))
|
||||
if schemes is not None:
|
||||
validator_kwargs['schemes'] = schemes
|
||||
if regex is not None:
|
||||
validator_kwargs['regex'] = regex
|
||||
if self.allow_numbers_in_top_level_domain and regex is None:
|
||||
# default behavior is to allow numbers in the top level domain
|
||||
# if a custom regex isn't provided
|
||||
validator_kwargs['regex'] = URLField.regex
|
||||
self.validators.append(URLValidator(**validator_kwargs))
|
||||
|
||||
def to_representation(self, value):
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Tower
|
||||
from awx.main.utils.common import get_licenser
|
||||
|
||||
__all__ = ['get_license']
|
||||
|
||||
|
||||
def _get_validated_license_data():
|
||||
from awx.main.utils.common import get_licenser
|
||||
return get_licenser().validate()
|
||||
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
from awx.conf.migrations import _reencrypt
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
@@ -12,5 +11,8 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(_reencrypt.replace_aesecb_fernet),
|
||||
# This list is intentionally empty.
|
||||
# Tower 3.2 included several data migrations that are no longer
|
||||
# necessary (this list is now empty because Tower 3.2 is past EOL and
|
||||
# cannot be directly upgraded to modern versions of Tower)
|
||||
]
|
||||
|
||||
@@ -1,30 +1,13 @@
|
||||
import base64
|
||||
import hashlib
|
||||
|
||||
from django.utils.encoding import smart_str
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives.ciphers import Cipher
|
||||
from cryptography.hazmat.primitives.ciphers.algorithms import AES
|
||||
from cryptography.hazmat.primitives.ciphers.modes import ECB
|
||||
|
||||
from awx.conf import settings_registry
|
||||
|
||||
|
||||
__all__ = ['replace_aesecb_fernet', 'get_encryption_key', 'encrypt_field',
           'decrypt_value', 'decrypt_field', 'should_decrypt_field']
|
||||
|
||||
|
||||
def replace_aesecb_fernet(apps, schema_editor):
|
||||
from awx.main.utils.encryption import encrypt_field
|
||||
Setting = apps.get_model('conf', 'Setting')
|
||||
|
||||
for setting in Setting.objects.filter().order_by('pk'):
|
||||
if settings_registry.is_setting_encrypted(setting.key):
|
||||
if should_decrypt_field(setting.value):
|
||||
setting.value = decrypt_field(setting, 'value')
|
||||
setting.value = encrypt_field(setting, 'value')
|
||||
setting.save()
|
||||
__all__ = ['get_encryption_key', 'decrypt_field']
|
||||
|
||||
|
||||
def get_encryption_key(field_name, pk=None):
|
||||
@@ -76,38 +59,3 @@ def decrypt_field(instance, field_name, subfield=None):
|
||||
key = get_encryption_key(field_name, getattr(instance, 'pk', None))
|
||||
|
||||
return decrypt_value(key, value)
|
||||
|
||||
|
||||
def encrypt_field(instance, field_name, ask=False, subfield=None, skip_utf8=False):
|
||||
'''
|
||||
Return content of the given instance and field name encrypted.
|
||||
'''
|
||||
value = getattr(instance, field_name)
|
||||
if isinstance(value, dict) and subfield is not None:
|
||||
value = value[subfield]
|
||||
if not value or value.startswith('$encrypted$') or (ask and value == 'ASK'):
|
||||
return value
|
||||
if skip_utf8:
|
||||
utf8 = False
|
||||
else:
|
||||
utf8 = type(value) == str
|
||||
value = smart_str(value)
|
||||
key = get_encryption_key(field_name, getattr(instance, 'pk', None))
|
||||
encryptor = Cipher(AES(key), ECB(), default_backend()).encryptor()
|
||||
block_size = 16
|
||||
while len(value) % block_size != 0:
|
||||
value += '\x00'
|
||||
encrypted = encryptor.update(value) + encryptor.finalize()
|
||||
b64data = base64.b64encode(encrypted)
|
||||
tokens = ['$encrypted', 'AES', b64data]
|
||||
if utf8:
|
||||
# If the value to encrypt is utf-8, we need to add a marker so we
|
||||
# know to decode the data when it's decrypted later
|
||||
tokens.insert(1, 'UTF8')
|
||||
return '$'.join(tokens)
|
||||
|
||||
|
||||
def should_decrypt_field(value):
|
||||
if hasattr(value, 'startswith'):
|
||||
return value.startswith('$encrypted$') and '$AESCBC$' not in value
|
||||
return False
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
# Python
|
||||
from collections import namedtuple
|
||||
import contextlib
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import urllib.parse
|
||||
from io import StringIO
|
||||
|
||||
# Django
|
||||
from django.conf import LazySettings
|
||||
@@ -89,42 +86,11 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
transaction.set_rollback(False)
|
||||
yield
|
||||
except DBError:
|
||||
# We want the _full_ traceback with the context
|
||||
# First we get the current call stack, which constitutes the "top",
|
||||
# it has the context up to the point where the context manager is used
|
||||
top_stack = StringIO()
|
||||
traceback.print_stack(file=top_stack)
|
||||
top_lines = top_stack.getvalue().strip('\n').split('\n')
|
||||
top_stack.close()
|
||||
# Get "bottom" stack from the local error that happened
|
||||
# inside of the "with" block this wraps
|
||||
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||
bottom_stack = StringIO()
|
||||
traceback.print_tb(exc_traceback, file=bottom_stack)
|
||||
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
|
||||
# Glue together top and bottom where overlap is found
|
||||
bottom_cutoff = 0
|
||||
for i, line in enumerate(bottom_lines):
|
||||
if line in top_lines:
|
||||
# start of overlapping section, take overlap from bottom
|
||||
top_lines = top_lines[:top_lines.index(line)]
|
||||
bottom_cutoff = i
|
||||
break
|
||||
bottom_lines = bottom_lines[bottom_cutoff:]
|
||||
tb_lines = top_lines + bottom_lines
|
||||
|
||||
tb_string = '\n'.join(
|
||||
['Traceback (most recent call last):'] +
|
||||
tb_lines +
|
||||
['{}: {}'.format(exc_type.__name__, str(exc_value))]
|
||||
)
|
||||
bottom_stack.close()
|
||||
# Log the combined stack
|
||||
if trans_safe:
|
||||
if 'check_migrations' not in sys.argv:
|
||||
logger.debug('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
|
||||
logger.exception('Database settings are not available, using defaults.')
|
||||
else:
|
||||
logger.debug('Error modifying something related to database settings.\n{}'.format(tb_string))
|
||||
logger.exception('Error modifying something related to database settings.')
|
||||
finally:
|
||||
if trans_safe and is_atomic and rollback_set:
|
||||
transaction.set_rollback(rollback_set)
|
||||
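The manual top/bottom traceback splice is dropped because logging.Logger.exception() already appends the active exception's traceback to the message, so the wrapper only needs to decide whether the failure is noteworthy (and it stays quiet during migrate/check_migrations). A minimal sketch of the same pattern:

    import logging

    logger = logging.getLogger('awx.conf.settings')

    def read_setting_with_fallback(load, default):
        # logger.exception() captures the full traceback automatically
        try:
            return load()
        except Exception:
            logger.exception('Database settings are not available, using defaults.')
            return default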
@@ -136,6 +102,15 @@ def filter_sensitive(registry, key, value):
|
||||
return value
|
||||
|
||||
|
||||
class TransientSetting(object):
|
||||
|
||||
__slots__ = ('pk', 'value')
|
||||
|
||||
def __init__(self, pk, value):
|
||||
self.pk = pk
|
||||
self.value = value
|
||||
|
||||
|
||||
class EncryptedCacheProxy(object):
|
||||
|
||||
def __init__(self, cache, registry, encrypter=None, decrypter=None):
|
||||
@@ -163,7 +138,6 @@ class EncryptedCacheProxy(object):
|
||||
def get(self, key, **kwargs):
|
||||
value = self.cache.get(key, **kwargs)
|
||||
value = self._handle_encryption(self.decrypter, key, value)
|
||||
logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
|
||||
return value
|
||||
|
||||
def set(self, key, value, log=True, **kwargs):
|
||||
@@ -186,8 +160,6 @@ class EncryptedCacheProxy(object):
|
||||
self.set(key, value, log=False, **kwargs)
|
||||
|
||||
def _handle_encryption(self, method, key, value):
|
||||
TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])
|
||||
|
||||
if value is not empty and self.registry.is_setting_encrypted(key):
|
||||
# If the setting exists in the database, we'll use its primary key
|
||||
# as part of the AES key when encrypting/decrypting
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import pytest
|
||||
|
||||
from rest_framework.fields import ValidationError
|
||||
from awx.conf.fields import StringListBooleanField, StringListPathField, ListTuplesField
|
||||
from awx.conf.fields import StringListBooleanField, StringListPathField, ListTuplesField, URLField
|
||||
|
||||
|
||||
class TestStringListBooleanField():
|
||||
@@ -62,7 +62,7 @@ class TestListTuplesField():
|
||||
FIELD_VALUES = [
|
||||
([('a', 'b'), ('abc', '123')], [("a", "b"), ("abc", "123")]),
|
||||
]
|
||||
|
||||
|
||||
FIELD_VALUES_INVALID = [
|
||||
("abc", type("abc")),
|
||||
([('a', 'b', 'c'), ('abc', '123', '456')], type(('a',))),
|
||||
@@ -130,3 +130,25 @@ class TestStringListPathField():
|
||||
field.to_internal_value([value])
|
||||
assert e.value.detail[0] == "{} is not a valid path choice.".format(value)
|
||||
|
||||
|
||||
class TestURLField():
|
||||
regex = "^https://www.example.org$"
|
||||
|
||||
@pytest.mark.parametrize("url,schemes,regex, allow_numbers_in_top_level_domain, expect_no_error",[
|
||||
("ldap://www.example.org42", "ldap", None, True, True),
|
||||
("https://www.example.org42", "https", None, False, False),
|
||||
("https://www.example.org", None, regex, None, True),
|
||||
("https://www.example3.org", None, regex, None, False),
|
||||
("ftp://www.example.org", "https", None, None, False)
|
||||
])
|
||||
def test_urls(self, url, schemes, regex, allow_numbers_in_top_level_domain, expect_no_error):
|
||||
kwargs = {}
|
||||
kwargs.setdefault("allow_numbers_in_top_level_domain", allow_numbers_in_top_level_domain)
|
||||
kwargs.setdefault("schemes", schemes)
|
||||
kwargs.setdefault("regex", regex)
|
||||
field = URLField(**kwargs)
|
||||
if expect_no_error:
|
||||
field.run_validators(url)
|
||||
else:
|
||||
with pytest.raises(ValidationError):
|
||||
field.run_validators(url)
|
||||
|
||||
@@ -307,7 +307,7 @@ class BaseAccess(object):
|
||||
|
||||
return True # User has access to both, permission check passed
|
||||
|
||||
def check_license(self, add_host_name=None, feature=None, check_expiration=True):
|
||||
def check_license(self, add_host_name=None, feature=None, check_expiration=True, quiet=False):
|
||||
validation_info = get_licenser().validate()
|
||||
if validation_info.get('license_type', 'UNLICENSED') == 'open':
|
||||
return
|
||||
@@ -317,8 +317,10 @@ class BaseAccess(object):
|
||||
validation_info['time_remaining'] = 99999999
|
||||
validation_info['grace_period_remaining'] = 99999999
|
||||
|
||||
report_violation = lambda message: logger.error(message)
|
||||
|
||||
if quiet:
|
||||
report_violation = lambda message: None
|
||||
else:
|
||||
report_violation = lambda message: logger.warning(message)
|
||||
if (
|
||||
validation_info.get('trial', False) is True or
|
||||
validation_info['instance_count'] == 10 # basic 10 license
|
||||
@@ -465,7 +467,7 @@ class BaseAccess(object):
|
||||
else:
|
||||
relationship = 'members'
|
||||
return access_method(obj, parent_obj, relationship, skip_sub_obj_read_check=True, data={})
|
||||
except (ParseError, ObjectDoesNotExist):
|
||||
except (ParseError, ObjectDoesNotExist, PermissionDenied):
|
||||
return False
|
||||
return False
|
||||
|
||||
@@ -907,7 +909,7 @@ class HostAccess(BaseAccess):
|
||||
model = Host
|
||||
select_related = ('created_by', 'modified_by', 'inventory',
|
||||
'last_job__job_template', 'last_job_host_summary__job',)
|
||||
prefetch_related = ('groups',)
|
||||
prefetch_related = ('groups', 'inventory_sources')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))
|
||||
@@ -1660,26 +1662,19 @@ class JobAccess(BaseAccess):
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
config = None
|
||||
|
||||
if obj.job_template and (self.user not in obj.job_template.execute_role):
|
||||
return False
|
||||
|
||||
# Check if JT execute access (and related prompts) is sufficient
|
||||
if obj.job_template is not None:
|
||||
if config is None:
|
||||
prompts_access = False
|
||||
elif not config.has_user_prompts(obj.job_template):
|
||||
prompts_access = True
|
||||
elif obj.created_by_id != self.user.pk and vars_are_encrypted(config.extra_data):
|
||||
prompts_access = False
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with secret prompts provided by another user.')
|
||||
else:
|
||||
prompts_access = (
|
||||
JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}) and
|
||||
not config.has_unprompted(obj.job_template)
|
||||
)
|
||||
jt_access = self.user in obj.job_template.execute_role
|
||||
if prompts_access and jt_access:
|
||||
if config and obj.job_template:
|
||||
if not config.has_user_prompts(obj.job_template):
|
||||
return True
|
||||
elif not jt_access:
|
||||
return False
|
||||
elif obj.created_by_id != self.user.pk and vars_are_encrypted(config.extra_data):
|
||||
# never allowed, not even for org admins
|
||||
raise PermissionDenied(_('Job was launched with secret prompts provided by another user.'))
|
||||
elif not config.has_unprompted(obj.job_template):
|
||||
if JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}):
|
||||
return True
|
||||
|
||||
org_access = bool(obj.inventory) and self.user in obj.inventory.organization.inventory_admin_role
|
||||
project_access = obj.project is None or self.user in obj.project.admin_role
|
||||
@@ -2098,23 +2093,20 @@ class WorkflowJobAccess(BaseAccess):
|
||||
self.messages['detail'] = _('Workflow Job was launched with unknown prompts.')
|
||||
return False
|
||||
|
||||
# execute permission to WFJT is mandatory for any relaunch
|
||||
if self.user not in template.execute_role:
|
||||
return False
|
||||
|
||||
# Check if access to prompts to prevent relaunch
|
||||
if config.prompts_dict():
|
||||
if obj.created_by_id != self.user.pk and vars_are_encrypted(config.extra_data):
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with secret prompts provided by another user.')
|
||||
return False
|
||||
raise PermissionDenied(_("Job was launched with secret prompts provided by another user."))
|
||||
if not JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}):
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts you lack access to.')
|
||||
return False
|
||||
raise PermissionDenied(_('Job was launched with prompts you lack access to.'))
|
||||
if config.has_unprompted(template):
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts no longer accepted.')
|
||||
return False
|
||||
raise PermissionDenied(_('Job was launched with prompts no longer accepted.'))
|
||||
|
||||
# execute permission to WFJT is mandatory for any relaunch
|
||||
return (self.user in template.execute_role)
|
||||
return True # passed config checks
|
||||
|
||||
def can_recreate(self, obj):
|
||||
node_qs = obj.workflow_job_nodes.all().prefetch_related('inventory', 'credentials', 'unified_job_template')
|
||||
@@ -2248,7 +2240,7 @@ class JobEventAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = JobEvent
|
||||
prefetch_related = ('hosts', 'job__job_template', 'host',)
|
||||
prefetch_related = ('job__job_template', 'host',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
|
||||
@@ -52,7 +52,7 @@ def config(since):
|
||||
'tower_version': get_awx_version(),
|
||||
'ansible_version': get_ansible_version(),
|
||||
'license_type': license_info.get('license_type', 'UNLICENSED'),
|
||||
'free_instances': license_info.get('free instances', 0),
|
||||
'free_instances': license_info.get('free_instances', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
'pendo_tracking': settings.PENDO_TRACKING_STATE,
|
||||
'authentication_backends': settings.AUTHENTICATION_BACKENDS,
|
||||
@@ -166,6 +166,8 @@ def instance_info(since, include_hostnames=False):
|
||||
instances = models.Instance.objects.values_list('hostname').values(
|
||||
'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'last_isolated_check', 'enabled')
|
||||
for instance in instances:
|
||||
consumed_capacity = sum(x.task_impact for x in models.UnifiedJob.objects.filter(execution_node=instance['hostname'],
|
||||
status__in=('running', 'waiting')))
|
||||
instance_info = {
|
||||
'uuid': instance['uuid'],
|
||||
'version': instance['version'],
|
||||
@@ -174,7 +176,9 @@ def instance_info(since, include_hostnames=False):
|
||||
'memory': instance['memory'],
|
||||
'managed_by_policy': instance['managed_by_policy'],
|
||||
'last_isolated_check': _get_isolated_datetime(instance['last_isolated_check']),
|
||||
'enabled': instance['enabled']
|
||||
'enabled': instance['enabled'],
|
||||
'consumed_capacity': consumed_capacity,
|
||||
'remaining_capacity': instance['capacity'] - consumed_capacity
|
||||
}
|
||||
if include_hostnames is True:
|
||||
instance_info['hostname'] = instance['hostname']
|
||||
|
||||
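instance_info() derives two new per-node numbers: consumed_capacity is the summed task_impact of jobs currently running or waiting on that execution node, and remaining_capacity is the node's capacity minus that sum. The same arithmetic in isolation:

    def node_capacity(capacity, active_jobs):
        """active_jobs: iterable of objects with a task_impact attribute,
        already filtered to status in ('running', 'waiting') on this node."""
        consumed = sum(job.task_impact for job in active_jobs)
        return {'consumed_capacity': consumed,
                'remaining_capacity': capacity - consumed}

    # e.g. a node with capacity 57 running jobs with impacts 5 and 12:
    # node_capacity(57, jobs) -> {'consumed_capacity': 17, 'remaining_capacity': 40}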
@@ -15,6 +15,7 @@ from awx.conf.license import get_license
|
||||
from awx.main.models import Job
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.models.ha import TowerAnalyticsState
|
||||
from awx.main.utils import get_awx_http_client_headers, set_environ
|
||||
|
||||
|
||||
__all__ = ['register', 'gather', 'ship', 'table_version']
|
||||
@@ -165,11 +166,16 @@ def ship(path):
|
||||
return logger.error('REDHAT_PASSWORD is not set')
|
||||
with open(path, 'rb') as f:
|
||||
files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
|
||||
response = requests.post(url,
|
||||
files=files,
|
||||
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
|
||||
auth=(rh_user, rh_password),
|
||||
timeout=(31, 31))
|
||||
s = requests.Session()
|
||||
s.headers = get_awx_http_client_headers()
|
||||
s.headers.pop('Content-Type')
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
response = s.post(url,
|
||||
files=files,
|
||||
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
|
||||
auth=(rh_user, rh_password),
|
||||
headers=s.headers,
|
||||
timeout=(31, 31))
|
||||
if response.status_code != 202:
|
||||
return logger.exception('Upload failed with status {}, {}'.format(response.status_code,
|
||||
response.text))
|
||||
|
||||
@@ -46,6 +46,8 @@ INSTANCE_MEMORY = Gauge('awx_instance_memory', 'RAM (Kb) on each node in a Tower
|
||||
INSTANCE_INFO = Info('awx_instance', 'Info about each node in a Tower system', ['hostname', 'instance_uuid',])
|
||||
INSTANCE_LAUNCH_TYPE = Gauge('awx_instance_launch_type_total', 'Type of Job launched', ['node', 'launch_type',])
|
||||
INSTANCE_STATUS = Gauge('awx_instance_status_total', 'Status of Job launched', ['node', 'status',])
|
||||
INSTANCE_CONSUMED_CAPACITY = Gauge('awx_instance_consumed_capacity', 'Consumed capacity of each node in a Tower system', ['hostname', 'instance_uuid',])
|
||||
INSTANCE_REMAINING_CAPACITY = Gauge('awx_instance_remaining_capacity', 'Remaining capacity of each node in a Tower system', ['hostname', 'instance_uuid',])
|
||||
|
||||
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license')
|
||||
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license')
|
||||
@@ -104,6 +106,8 @@ def metrics():
|
||||
INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
|
||||
INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
|
||||
INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
|
||||
INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
|
||||
INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
|
||||
INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info({
|
||||
'enabled': str(instance_data[uuid]['enabled']),
|
||||
'last_isolated_check': getattr(instance_data[uuid], 'last_isolated_check', 'None'),
|
||||
|
||||
@@ -54,15 +54,6 @@ register(
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'TOWER_ADMIN_ALERTS',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Enable Administrator Alerts'),
|
||||
help_text=_('Email Admin users for system events that may require attention.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'TOWER_URL_BASE',
|
||||
field_class=fields.URLField,
|
||||
@@ -513,6 +504,27 @@ register(
|
||||
category_slug='jobs'
|
||||
)
|
||||
|
||||
register(
|
||||
'PUBLIC_GALAXY_ENABLED',
|
||||
field_class=fields.BooleanField,
|
||||
default=True,
|
||||
label=_('Allow Access to Public Galaxy'),
|
||||
help_text=_('Allow or deny access to the public Ansible Galaxy during project updates.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs'
|
||||
)
|
||||
|
||||
register(
|
||||
'GALAXY_IGNORE_CERTS',
|
||||
field_class=fields.BooleanField,
|
||||
default=False,
|
||||
label=_('Ignore Ansible Galaxy SSL Certificate Verification'),
|
||||
help_text=_('If set to true, certificate validation will not be done when '
            'installing content from any Galaxy server.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs'
|
||||
)
|
||||
|
||||
register(
|
||||
'STDOUT_MAX_BYTES_DISPLAY',
|
||||
field_class=fields.IntegerField,
|
||||
@@ -604,6 +616,18 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'MAX_FORKS',
|
||||
field_class=fields.IntegerField,
|
||||
allow_null=False,
|
||||
default=200,
|
||||
label=_('Maximum number of forks per job.'),
|
||||
help_text=_('Saving a Job Template with more than this number of forks will result in an error. '
|
||||
'When set to 0, no limit is applied.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'LOG_AGGREGATOR_HOST',
|
||||
field_class=fields.CharField,
|
||||
@@ -775,6 +799,28 @@ register(
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last gather date for Automation Analytics.'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system'
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
|
||||
field_class=fields.IntegerField,
|
||||
label=_('Automation Analytics Gather Interval'),
|
||||
help_text=_('Interval (in seconds) between data gathering.'),
|
||||
default=14400, # every 4 hours
|
||||
min_value=1800, # every 30 minutes
|
||||
category=_('System'),
|
||||
category_slug='system'
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or \
|
||||
not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or \
|
||||
@@ -799,10 +845,7 @@ def galaxy_validate(serializer, attrs):
|
||||
to save settings which obviously break all project updates.
|
||||
"""
|
||||
prefix = 'PRIMARY_GALAXY_'
|
||||
|
||||
from awx.main.constants import GALAXY_SERVER_FIELDS
|
||||
if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
|
||||
return attrs
|
||||
errors = {}
|
||||
|
||||
def _new_value(setting_name):
|
||||
if setting_name in attrs:
|
||||
@@ -811,10 +854,22 @@ def galaxy_validate(serializer, attrs):
|
||||
return ''
|
||||
return getattr(serializer.instance, setting_name, '')
|
||||
|
||||
if not _new_value('PRIMARY_GALAXY_URL'):
|
||||
if _new_value('PUBLIC_GALAXY_ENABLED') is False:
|
||||
msg = _('A URL for Primary Galaxy must be defined before disabling public Galaxy.')
|
||||
# put error in both keys because UI has trouble with errors in toggles
|
||||
for key in ('PRIMARY_GALAXY_URL', 'PUBLIC_GALAXY_ENABLED'):
|
||||
errors.setdefault(key, [])
|
||||
errors[key].append(msg)
|
||||
raise serializers.ValidationError(errors)
|
||||
|
||||
from awx.main.constants import GALAXY_SERVER_FIELDS
|
||||
if not any('{}{}'.format(prefix, subfield.upper()) in attrs for subfield in GALAXY_SERVER_FIELDS):
|
||||
return attrs
|
||||
|
||||
galaxy_data = {}
|
||||
for subfield in GALAXY_SERVER_FIELDS:
|
||||
galaxy_data[subfield] = _new_value('{}{}'.format(prefix, subfield.upper()))
|
||||
errors = {}
|
||||
if not galaxy_data['url']:
|
||||
for k, v in galaxy_data.items():
|
||||
if v:
|
||||
|
||||
@@ -3,6 +3,16 @@ from .plugin import CredentialPlugin
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from msrestazure import azure_cloud
|
||||
|
||||
|
||||
# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
|
||||
clouds = [
|
||||
vars(azure_cloud)[n]
|
||||
for n in dir(azure_cloud)
|
||||
if n.startswith("AZURE_") and n.endswith("_CLOUD")
|
||||
]
|
||||
default_cloud = vars(azure_cloud)["AZURE_PUBLIC_CLOUD"]
|
||||
|
||||
|
||||
azure_keyvault_inputs = {
|
||||
@@ -24,6 +34,12 @@ azure_keyvault_inputs = {
|
||||
'id': 'tenant',
|
||||
'label': _('Tenant ID'),
|
||||
'type': 'string'
|
||||
}, {
|
||||
'id': 'cloud_name',
|
||||
'label': _('Cloud Environment'),
|
||||
'help_text': _('Specify which azure cloud environment to use.'),
|
||||
'choices': list(set([default_cloud.name] + [c.name for c in clouds])),
|
||||
'default': default_cloud.name
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'secret_field',
|
||||
@@ -42,6 +58,7 @@ azure_keyvault_inputs = {
|
||||
|
||||
def azure_keyvault_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
[cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]
|
||||
|
||||
def auth_callback(server, resource, scope):
|
||||
credentials = ServicePrincipalCredentials(
|
||||
@@ -49,7 +66,7 @@ def azure_keyvault_backend(**kwargs):
|
||||
client_id = kwargs['client'],
|
||||
secret = kwargs['secret'],
|
||||
tenant = kwargs['tenant'],
|
||||
resource = "https://vault.azure.net",
|
||||
resource = f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
|
||||
)
|
||||
token = credentials.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
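The Key Vault resource URL is no longer hard-coded to the public cloud; it is rebuilt from the selected cloud's Key Vault DNS suffix, so sovereign clouds get their own endpoint. The string manipulation on its own (the suffix values below are examples):

    def keyvault_resource(keyvault_dns_suffix):
        # '.vault.azure.net' -> 'https://vault.azure.net'
        return "https://{}".format(keyvault_dns_suffix.split('.', 1).pop())

    assert keyvault_resource('.vault.azure.net') == 'https://vault.azure.net'
    assert keyvault_resource('.vault.usgovcloudapi.net') == 'https://vault.usgovcloudapi.net'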
awx/main/dispatch/periodic.py (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connections
|
||||
from schedule import Scheduler
|
||||
|
||||
from awx.main.dispatch.worker import TaskWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch.periodic')
|
||||
|
||||
|
||||
class Scheduler(Scheduler):
|
||||
|
||||
def run_continuously(self):
|
||||
cease_continuous_run = threading.Event()
|
||||
idle_seconds = max(
|
||||
1,
|
||||
min(self.jobs).period.total_seconds() / 2
|
||||
)
|
||||
|
||||
class ScheduleThread(threading.Thread):
|
||||
@classmethod
|
||||
def run(cls):
|
||||
while not cease_continuous_run.is_set():
|
||||
try:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
self.run_pending()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
'encountered an error while scheduling periodic tasks'
|
||||
)
|
||||
time.sleep(idle_seconds)
|
||||
logger.debug('periodic thread exiting...')
|
||||
|
||||
thread = ScheduleThread()
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
return cease_continuous_run
|
||||
|
||||
|
||||
def run_continuously():
|
||||
scheduler = Scheduler()
|
||||
for task in settings.CELERYBEAT_SCHEDULE.values():
|
||||
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
|
||||
total_seconds = task['schedule'].total_seconds()
|
||||
scheduler.every(total_seconds).seconds.do(apply_async)
|
||||
return scheduler.run_continuously()
|
||||
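run_continuously() wires every entry of settings.CELERYBEAT_SCHEDULE onto a schedule.Scheduler and runs it on a daemon thread; the threading.Event it returns is the off switch. Roughly how a dispatcher process would use it (names outside this new module are placeholders):

    # started once per dispatcher process
    from awx.main.dispatch import periodic

    stop_event = periodic.run_continuously()   # returns the cease_continuous_run Event
    try:
        serve_forever()                        # placeholder for the dispatcher main loop
    finally:
        stop_event.set()                       # lets the ScheduleThread exit cleanly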
@@ -72,9 +72,6 @@ class PoolWorker(object):
|
||||
if not body.get('uuid'):
|
||||
body['uuid'] = str(uuid4())
|
||||
uuid = body['uuid']
|
||||
logger.debug('delivered {} to worker[{}] qsize {}'.format(
|
||||
uuid, self.pid, self.qsize
|
||||
))
|
||||
self.managed_tasks[uuid] = body
|
||||
self.queue.put(body, block=True, timeout=5)
|
||||
self.messages_sent += 1
|
||||
@@ -123,8 +120,16 @@ class PoolWorker(object):
|
||||
# if any tasks were finished, removed them from the managed tasks for
|
||||
# this worker
|
||||
for uuid in finished:
|
||||
self.messages_finished += 1
|
||||
del self.managed_tasks[uuid]
|
||||
try:
|
||||
del self.managed_tasks[uuid]
|
||||
self.messages_finished += 1
|
||||
except KeyError:
|
||||
# ansible _sometimes_ appears to send events w/ duplicate UUIDs;
|
||||
# UUIDs for ansible events are *not* actually globally unique
|
||||
# when this occurs, it's _fine_ to ignore this KeyError because
|
||||
# the purpose of self.managed_tasks is to just track internal
|
||||
# state of which events are *currently* being processed.
|
||||
logger.warn('Event UUID {} appears to have been duplicated.'.format(uuid))
|
||||
|
||||
@property
|
||||
def current_task(self):
|
||||
@@ -269,7 +274,7 @@ class WorkerPool(object):
|
||||
logger.warn("could not write to queue %s" % preferred_queue)
|
||||
logger.warn("detail: {}".format(tb))
|
||||
write_attempt_order.append(preferred_queue)
|
||||
logger.warn("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
|
||||
logger.error("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
|
||||
return None
|
||||
|
||||
def stop(self, signum):
|
||||
|
||||
@@ -61,7 +61,7 @@ class AWXConsumer(ConsumerMixin):
|
||||
])
|
||||
|
||||
def control(self, body, message):
|
||||
logger.warn(body)
|
||||
logger.warn('Consumer received control message {}'.format(body))
|
||||
control = body.get('control')
|
||||
if control in ('status', 'running'):
|
||||
producer = Producer(
|
||||
@@ -119,6 +119,9 @@ class AWXConsumer(ConsumerMixin):
|
||||
|
||||
class BaseWorker(object):
|
||||
|
||||
def read(self, queue):
|
||||
return queue.get(block=True, timeout=1)
|
||||
|
||||
def work_loop(self, queue, finished, idx, *args):
|
||||
ppid = os.getppid()
|
||||
signal_handler = WorkerSignalHandler()
|
||||
@@ -128,7 +131,7 @@ class BaseWorker(object):
|
||||
if os.getppid() != ppid:
|
||||
break
|
||||
try:
|
||||
body = queue.get(block=True, timeout=1)
|
||||
body = self.read(queue)
|
||||
if body == 'QUIT':
|
||||
break
|
||||
except QueueEmpty:
|
||||
@@ -145,7 +148,6 @@ class BaseWorker(object):
|
||||
finally:
|
||||
if 'uuid' in body:
|
||||
uuid = body['uuid']
|
||||
logger.debug('task {} is finished'.format(uuid))
|
||||
finished.put(uuid)
|
||||
logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
|
||||
|
||||
|
||||
@@ -1,19 +1,31 @@
|
||||
import cProfile
|
||||
import logging
|
||||
import os
|
||||
import pstats
|
||||
import signal
|
||||
import tempfile
|
||||
import time
|
||||
import traceback
|
||||
from queue import Empty as QueueEmpty
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.db import DatabaseError, OperationalError, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django.db.utils import InterfaceError, InternalError, IntegrityError
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
|
||||
InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
|
||||
from awx.main.models.events import emit_event_detail
|
||||
|
||||
from .base import BaseWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
# the number of seconds to buffer events in memory before flushing
|
||||
# using JobEvent.objects.bulk_create()
|
||||
BUFFER_SECONDS = .1
|
||||
|
||||
|
||||
class CallbackBrokerWorker(BaseWorker):
|
||||
'''
|
||||
@@ -25,90 +37,134 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
'''
|
||||
|
||||
MAX_RETRIES = 2
|
||||
prof = None
|
||||
|
||||
def __init__(self):
|
||||
self.buff = {}
|
||||
|
||||
def read(self, queue):
|
||||
try:
|
||||
return queue.get(block=True, timeout=BUFFER_SECONDS)
|
||||
except QueueEmpty:
|
||||
return {'event': 'FLUSH'}
|
||||
|
||||
def toggle_profiling(self, *args):
|
||||
if self.prof:
|
||||
self.prof.disable()
|
||||
filename = f'callback-{os.getpid()}.pstats'
|
||||
filepath = os.path.join(tempfile.gettempdir(), filename)
|
||||
with open(filepath, 'w') as f:
|
||||
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
|
||||
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
|
||||
self.prof = False
|
||||
logger.error(f'profiling is disabled, wrote {filepath}')
|
||||
else:
|
||||
self.prof = cProfile.Profile()
|
||||
self.prof.enable()
|
||||
logger.error('profiling is enabled')
|
||||
|
||||
def work_loop(self, *args, **kw):
|
||||
if settings.AWX_CALLBACK_PROFILE:
|
||||
signal.signal(signal.SIGUSR1, self.toggle_profiling)
|
||||
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
|
||||
|
||||
def flush(self, force=False):
|
||||
now = tz_now()
|
||||
if (
|
||||
force or
|
||||
any([len(events) >= 1000 for events in self.buff.values()])
|
||||
):
|
||||
for cls, events in self.buff.items():
|
||||
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
|
||||
for e in events:
|
||||
if not e.created:
|
||||
e.created = now
|
||||
e.modified = now
|
||||
try:
|
||||
cls.objects.bulk_create(events)
|
||||
except Exception as exc:
|
||||
# if an exception occurs, we should re-attempt to save the
|
||||
# events one-by-one, because something in the list is
|
||||
# broken/stale (e.g., an IntegrityError on a specific event)
|
||||
for e in events:
|
||||
try:
|
||||
if (
|
||||
isinstance(exc, IntegrityError) and
|
||||
getattr(e, 'host_id', '')
|
||||
):
|
||||
# this is one potential IntegrityError we can
|
||||
# work around - if the host disappears before
|
||||
# the event can be processed
|
||||
e.host_id = None
|
||||
e.save()
|
||||
except Exception:
|
||||
logger.exception('Database Error Saving Job Event')
|
||||
for e in events:
|
||||
emit_event_detail(e)
|
||||
self.buff = {}
|
||||
|
||||
def perform_work(self, body):
|
||||
try:
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
flush = body.get('event') == 'FLUSH'
|
||||
if not flush:
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
|
||||
if not any([key in body for key in event_map]):
|
||||
raise Exception('Payload does not have a job identifier')
|
||||
|
||||
def _save_event_data():
|
||||
job_identifier = 'unknown job'
|
||||
for key, cls in event_map.items():
|
||||
if key in body:
|
||||
cls.create_from_data(**body)
|
||||
job_identifier = body[key]
|
||||
break
|
||||
|
||||
job_identifier = 'unknown job'
|
||||
job_key = 'unknown'
|
||||
for key in event_map.keys():
|
||||
if key in body:
|
||||
job_identifier = body[key]
|
||||
job_key = key
|
||||
break
|
||||
|
||||
if settings.DEBUG:
|
||||
from pygments import highlight
|
||||
from pygments.lexers import PythonLexer
|
||||
from pygments.formatters import Terminal256Formatter
|
||||
from pprint import pformat
|
||||
if body.get('event') == 'EOF':
|
||||
event_thing = 'EOF event'
|
||||
else:
|
||||
event_thing = 'event {}'.format(body.get('counter', 'unknown'))
|
||||
logger.info('Callback worker received {} for {} {}'.format(
|
||||
event_thing, job_key[:-len('_id')], job_identifier
|
||||
))
|
||||
logger.debug('Body: {}'.format(
|
||||
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
|
||||
)[:1024 * 4])
|
||||
try:
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification(
|
||||
'jobs-summary',
|
||||
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
|
||||
)
|
||||
# Additionally, when we've processed all events, we should
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
if hasattr(uj, 'send_notification_templates'):
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
break
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
return
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
try:
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification(
|
||||
'jobs-summary',
|
||||
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
|
||||
)
|
||||
# Additionally, when we've processed all events, we should
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
if hasattr(uj, 'send_notification_templates'):
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
break
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
return
|
||||
event = cls.create_from_data(**body)
|
||||
self.buff.setdefault(cls, []).append(event)
|
||||
|
||||
retries = 0
|
||||
while retries <= self.MAX_RETRIES:
|
||||
try:
|
||||
_save_event_data()
|
||||
self.flush(force=flush)
|
||||
break
|
||||
except (OperationalError, InterfaceError, InternalError):
|
||||
if retries >= self.MAX_RETRIES:
|
||||
logger.exception('Worker could not re-establish database connectivity, giving up on event for Job {}'.format(job_identifier))
|
||||
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
|
||||
return
|
||||
delay = 60 * retries
|
||||
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
|
||||
@@ -119,7 +175,7 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
time.sleep(delay)
|
||||
retries += 1
|
||||
except DatabaseError:
|
||||
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
|
||||
logger.exception('Database Error Saving Job Event')
|
||||
break
|
||||
except Exception as exc:
|
||||
tb = traceback.format_exc()
|
||||
|
||||
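Taken together, the callback worker now reads with a short timeout (BUFFER_SECONDS), treats a timeout as a synthetic FLUSH event, appends real events to a per-model buffer, and writes each buffer with bulk_create() once a flush is forced or any buffer reaches 1000 events, falling back to per-row saves when the batch insert fails. The buffering skeleton, stripped of the AWX models:

    import queue

    BUFFER_SECONDS = .1
    FLUSH_AT = 1000

    class BufferedWriter:
        """Sketch of the buffer/flush loop; write_batch/write_one stand in
        for <Model>.objects.bulk_create() and event.save()."""
        def __init__(self, write_batch, write_one):
            self.buff = []
            self.write_batch = write_batch
            self.write_one = write_one

        def read(self, q):
            try:
                return q.get(block=True, timeout=BUFFER_SECONDS)
            except queue.Empty:
                return {'event': 'FLUSH'}          # idle -> force a flush

        def handle(self, body):
            force = body.get('event') == 'FLUSH'
            if not force:
                self.buff.append(body)
            if force or len(self.buff) >= FLUSH_AT:
                try:
                    self.write_batch(self.buff)
                except Exception:
                    for item in self.buff:         # salvage what we can, row by row
                        self.write_one(item)
                self.buff = []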
@@ -4,6 +4,7 @@ import importlib
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from kubernetes.config import kube_config
|
||||
|
||||
from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown
|
||||
|
||||
@@ -107,6 +108,14 @@ class TaskWorker(BaseWorker):
|
||||
for callback in body.get('errbacks', []) or []:
|
||||
callback['uuid'] = body['uuid']
|
||||
self.perform_work(callback)
|
||||
finally:
|
||||
# It's frustrating that we have to do this, but the python k8s
|
||||
# client leaves behind cacert files in /tmp, so we must clean up
|
||||
# the tmpdir per-dispatcher process every time a new task comes in
|
||||
try:
|
||||
kube_config._cleanup_temp_files()
|
||||
except Exception:
|
||||
logger.exception('failed to cleanup k8s client tmp files')
|
||||
|
||||
for callback in body.get('callbacks', []) or []:
|
||||
callback['uuid'] = body['uuid']
|
||||
|
||||
@@ -6,6 +6,7 @@ import stat
|
||||
import tempfile
|
||||
import time
|
||||
import logging
|
||||
import yaml
|
||||
|
||||
from django.conf import settings
|
||||
import ansible_runner
|
||||
@@ -31,15 +32,14 @@ def set_pythonpath(venv_libdir, env):
|
||||
|
||||
class IsolatedManager(object):
|
||||
|
||||
def __init__(self, cancelled_callback=None, check_callback=None, pod_manager=None):
|
||||
def __init__(self, canceled_callback=None, check_callback=None, pod_manager=None):
|
||||
"""
|
||||
:param cancelled_callback: a callable - which returns `True` or `False`
|
||||
:param canceled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
cancelled
|
||||
canceled
|
||||
"""
|
||||
self.cancelled_callback = cancelled_callback
|
||||
self.canceled_callback = canceled_callback
|
||||
self.check_callback = check_callback
|
||||
self.idle_timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
|
||||
self.started_at = None
|
||||
self.captured_command_artifact = False
|
||||
self.instance = None
|
||||
@@ -48,10 +48,17 @@ class IsolatedManager(object):
|
||||
def build_inventory(self, hosts):
|
||||
if self.instance and self.instance.is_containerized:
|
||||
inventory = {'all': {'hosts': {}}}
|
||||
fd, path = tempfile.mkstemp(
|
||||
prefix='.kubeconfig', dir=self.private_data_dir
|
||||
)
|
||||
with open(path, 'wb') as temp:
|
||||
temp.write(yaml.dump(self.pod_manager.kube_config).encode())
|
||||
temp.flush()
|
||||
os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
for host in hosts:
|
||||
inventory['all']['hosts'][host] = {
|
||||
"ansible_connection": "kubectl",
|
||||
"ansible_kubectl_config": self.pod_manager.kube_config
|
||||
"ansible_kubectl_config": path,
|
||||
}
|
||||
else:
|
||||
inventory = '\n'.join([
|
||||
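For containerized (pod-based) execution, the hunk above writes the pod's kubeconfig to a `.kubeconfig` temp file and points `ansible_kubectl_config` at that path, so the connection is handed a file instead of an inline mapping. A condensed sketch of the same construction, assuming the kubeconfig is a plain dict:

    import os
    import stat
    import tempfile

    import yaml

    def containerized_inventory(hosts, kube_config_dict, private_data_dir):
        # dump the kubeconfig next to the job's private data and reference it by path
        fd, path = tempfile.mkstemp(prefix='.kubeconfig', dir=private_data_dir)
        with os.fdopen(fd, 'wb') as temp:
            temp.write(yaml.dump(kube_config_dict).encode())
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
        return {
            'all': {
                'hosts': {
                    host: {
                        'ansible_connection': 'kubectl',
                        'ansible_kubectl_config': path,
                    }
                    for host in hosts
                }
            }
        }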
@@ -98,9 +105,8 @@ class IsolatedManager(object):
|
||||
'envvars': env,
|
||||
'finished_callback': finished_callback,
|
||||
'verbosity': verbosity,
|
||||
'cancel_callback': self.cancelled_callback,
|
||||
'cancel_callback': self.canceled_callback,
|
||||
'settings': {
|
||||
'idle_timeout': self.idle_timeout,
|
||||
'job_timeout': settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
|
||||
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
'suppress_ansible_output': True,
|
||||
@@ -110,7 +116,7 @@ class IsolatedManager(object):
|
||||
def path_to(self, *args):
|
||||
return os.path.join(self.private_data_dir, *args)
|
||||
|
||||
def run_management_playbook(self, playbook, private_data_dir, **kw):
|
||||
def run_management_playbook(self, playbook, private_data_dir, idle_timeout=None, **kw):
|
||||
iso_dir = tempfile.mkdtemp(
|
||||
prefix=playbook,
|
||||
dir=private_data_dir
|
||||
@@ -118,6 +124,10 @@ class IsolatedManager(object):
|
||||
params = self.runner_params.copy()
|
||||
params['playbook'] = playbook
|
||||
params['private_data_dir'] = iso_dir
|
||||
if idle_timeout:
|
||||
params['settings']['idle_timeout'] = idle_timeout
|
||||
else:
|
||||
params['settings'].pop('idle_timeout', None)
|
||||
params.update(**kw)
|
||||
if all([
|
||||
getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
|
||||
@@ -143,6 +153,8 @@ class IsolatedManager(object):
|
||||
'- /artifacts/job_events/*-partial.json.tmp',
|
||||
# don't rsync the ssh_key FIFO
|
||||
'- /env/ssh_key',
|
||||
# don't rsync kube config files
|
||||
'- .kubeconfig*'
|
||||
]
|
||||
|
||||
for filename, data in (
|
||||
@@ -167,6 +179,7 @@ class IsolatedManager(object):
|
||||
logger.debug('Starting job {} on isolated host with `run_isolated.yml` playbook.'.format(self.instance.id))
|
||||
runner_obj = self.run_management_playbook('run_isolated.yml',
|
||||
self.private_data_dir,
|
||||
idle_timeout=max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT),
|
||||
extravars=extravars)
|
||||
|
||||
if runner_obj.status == 'failed':
|
||||
@@ -198,14 +211,14 @@ class IsolatedManager(object):
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
|
||||
while status == 'failed':
|
||||
canceled = self.cancelled_callback() if self.cancelled_callback else False
|
||||
canceled = self.canceled_callback() if self.canceled_callback else False
|
||||
if not canceled and time.time() - last_check < interval:
|
||||
# If the job isn't cancelled, but we haven't waited `interval` seconds, wait longer
|
||||
# If the job isn't canceled, but we haven't waited `interval` seconds, wait longer
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
if canceled:
|
||||
logger.warning('Isolated job {} was manually cancelled.'.format(self.instance.id))
|
||||
logger.warning('Isolated job {} was manually canceled.'.format(self.instance.id))
|
||||
|
||||
logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
|
||||
runner_obj = self.run_management_playbook('check_isolated.yml',
|
||||
@@ -357,33 +370,32 @@ class IsolatedManager(object):
|
||||
private_data_dir
|
||||
)
|
||||
|
||||
if runner_obj.status == 'successful':
|
||||
for instance in instance_qs:
|
||||
task_result = {}
|
||||
try:
|
||||
task_result = runner_obj.get_fact_cache(instance.hostname)
|
||||
except Exception:
|
||||
logger.exception('Failed to read status from isolated instances')
|
||||
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
|
||||
task_result = {
|
||||
'cpu': task_result['awx_cpu'],
|
||||
'mem': task_result['awx_mem'],
|
||||
'capacity_cpu': task_result['awx_capacity_cpu'],
|
||||
'capacity_mem': task_result['awx_capacity_mem'],
|
||||
'version': task_result['awx_capacity_version']
|
||||
}
|
||||
IsolatedManager.update_capacity(instance, task_result)
|
||||
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
|
||||
elif instance.capacity == 0:
|
||||
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
|
||||
instance.hostname))
|
||||
else:
|
||||
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
|
||||
if instance.is_lost(isolated=True):
|
||||
instance.capacity = 0
|
||||
instance.save(update_fields=['capacity'])
|
||||
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
|
||||
instance.hostname, instance.modified))
|
||||
for instance in instance_qs:
|
||||
task_result = {}
|
||||
try:
|
||||
task_result = runner_obj.get_fact_cache(instance.hostname)
|
||||
except Exception:
|
||||
logger.exception('Failed to read status from isolated instances')
|
||||
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
|
||||
task_result = {
|
||||
'cpu': task_result['awx_cpu'],
|
||||
'mem': task_result['awx_mem'],
|
||||
'capacity_cpu': task_result['awx_capacity_cpu'],
|
||||
'capacity_mem': task_result['awx_capacity_mem'],
|
||||
'version': task_result['awx_capacity_version']
|
||||
}
|
||||
IsolatedManager.update_capacity(instance, task_result)
|
||||
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
|
||||
elif instance.capacity == 0:
|
||||
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
|
||||
instance.hostname))
|
||||
else:
|
||||
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
|
||||
if instance.is_lost(isolated=True):
|
||||
instance.capacity = 0
|
||||
instance.save(update_fields=['capacity'])
|
||||
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
|
||||
instance.hostname, instance.modified))
|
||||
finally:
|
||||
if os.path.exists(private_data_dir):
|
||||
shutil.rmtree(private_data_dir)
|
||||
|
||||
40
awx/main/management/commands/callback_stats.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import time
|
||||
import sys
|
||||
|
||||
from django.db import connection
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
def handle(self, *args, **options):
|
||||
with connection.cursor() as cursor:
|
||||
start = {}
|
||||
for relation in (
|
||||
'main_jobevent', 'main_inventoryupdateevent',
|
||||
'main_projectupdateevent', 'main_adhoccommandevent'
|
||||
):
|
||||
cursor.execute(f"SELECT MAX(id) FROM {relation};")
|
||||
start[relation] = cursor.fetchone()[0] or 0
|
||||
clear = False
|
||||
while True:
|
||||
lines = []
|
||||
for relation in (
|
||||
'main_jobevent', 'main_inventoryupdateevent',
|
||||
'main_projectupdateevent', 'main_adhoccommandevent'
|
||||
):
|
||||
lines.append(relation)
|
||||
minimum = start[relation]
|
||||
cursor.execute(
|
||||
f"SELECT MAX(id) - MIN(id) FROM {relation} WHERE id > {minimum} AND modified > now() - '1 minute'::interval;"
|
||||
)
|
||||
events = cursor.fetchone()[0] or 0
|
||||
lines.append(f'↳ last minute {events}')
|
||||
lines.append('')
|
||||
if clear:
|
||||
for i in range(12):
|
||||
sys.stdout.write('\x1b[1A\x1b[2K')
|
||||
for l in lines:
|
||||
print(l)
|
||||
clear = True
|
||||
time.sleep(.25)
|
||||
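The new callback_stats command above samples MAX(id) for each event table and repaints an events-per-minute estimate in place. The in-place repaint is just two ANSI escape sequences; a tiny standalone illustration:

    import sys

    def redraw(previous_line_count, lines):
        # '\x1b[1A' moves the cursor up one line, '\x1b[2K' erases that line;
        # repeating once per previously printed line repaints the block
        # instead of scrolling the terminal
        for _ in range(previous_line_count):
            sys.stdout.write('\x1b[1A\x1b[2K')
        for line in lines:
            print(line)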
@@ -16,13 +16,10 @@ from awx.main.models import (
|
||||
Job, AdHocCommand, ProjectUpdate, InventoryUpdate,
|
||||
SystemJob, WorkflowJob, Notification
|
||||
)
|
||||
from awx.main.signals import ( # noqa
|
||||
emit_update_inventory_on_created_or_deleted,
|
||||
emit_update_inventory_computed_fields,
|
||||
from awx.main.signals import (
|
||||
disable_activity_stream,
|
||||
disable_computed_fields
|
||||
)
|
||||
from django.db.models.signals import post_save, post_delete, m2m_changed # noqa
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
@@ -28,6 +28,7 @@ from awx.main.models.inventory import (
|
||||
Host
|
||||
)
|
||||
from awx.main.utils.mem_inventory import MemInventory, dict_to_mem_data
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
|
||||
# other AWX imports
|
||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding
|
||||
@@ -795,6 +796,10 @@ class Command(BaseCommand):
|
||||
if self.instance_id_var:
|
||||
instance_id = self._get_instance_id(mem_host.variables)
|
||||
host_attrs['instance_id'] = instance_id
|
||||
try:
|
||||
sanitize_jinja(mem_host_name)
|
||||
except ValueError as e:
|
||||
raise ValueError(str(e) + ': {}'.format(mem_host_name))
|
||||
db_host = self.inventory.hosts.update_or_create(name=mem_host_name, defaults=host_attrs)[0]
|
||||
if enabled is False:
|
||||
logger.debug('Host "%s" added (disabled)', mem_host_name)
|
||||
@@ -916,11 +921,14 @@ class Command(BaseCommand):
|
||||
available_instances = license_info.get('available_instances', 0)
|
||||
free_instances = license_info.get('free_instances', 0)
|
||||
time_remaining = license_info.get('time_remaining', 0)
|
||||
hard_error = license_info.get('trial', False) is True or license_info['instance_count'] == 10
|
||||
new_count = Host.objects.active_count()
|
||||
if time_remaining <= 0 and not license_info.get('demo', False):
|
||||
logger.error(LICENSE_EXPIRED_MESSAGE)
|
||||
if license_info.get('trial', False) is True:
|
||||
if time_remaining <= 0:
|
||||
if hard_error:
|
||||
logger.error(LICENSE_EXPIRED_MESSAGE)
|
||||
raise CommandError("License has expired!")
|
||||
else:
|
||||
logger.warning(LICENSE_EXPIRED_MESSAGE)
|
||||
# special check for tower-type inventory sources
|
||||
# but only if running the plugin
|
||||
TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
|
||||
@@ -933,15 +941,11 @@ class Command(BaseCommand):
|
||||
'new_count': new_count,
|
||||
'available_instances': available_instances,
|
||||
}
|
||||
if license_info.get('demo', False):
|
||||
logger.error(DEMO_LICENSE_MESSAGE % d)
|
||||
else:
|
||||
if hard_error:
|
||||
logger.error(LICENSE_MESSAGE % d)
|
||||
if (
|
||||
license_info.get('trial', False) is True or
|
||||
license_info['instance_count'] == 10 # basic 10 license
|
||||
):
|
||||
raise CommandError('License count exceeded!')
|
||||
else:
|
||||
logger.warning(LICENSE_MESSAGE % d)
|
||||
|
||||
def check_org_host_limit(self):
|
||||
license_info = get_licenser().validate()
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from awx.main.models import Instance
|
||||
from django.conf import settings
|
||||
|
||||
@@ -22,6 +24,8 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--hostname', dest='hostname', type=str,
|
||||
help='Hostname used during provisioning')
|
||||
parser.add_argument('--is-isolated', dest='is_isolated', action='store_true',
|
||||
help='Specify whether the instance is isolated')
|
||||
|
||||
def _register_hostname(self, hostname):
|
||||
if not hostname:
|
||||
@@ -37,7 +41,10 @@ class Command(BaseCommand):
|
||||
def handle(self, **options):
|
||||
if not options.get('hostname'):
|
||||
raise CommandError("Specify `--hostname` to use this command.")
|
||||
self.uuid = settings.SYSTEM_UUID
|
||||
if options['is_isolated']:
|
||||
self.uuid = str(uuid4())
|
||||
else:
|
||||
self.uuid = settings.SYSTEM_UUID
|
||||
self.changed = False
|
||||
self._register_hostname(options.get('hostname'))
|
||||
if self.changed:
|
||||
|
||||
129
awx/main/management/commands/regenerate_secret_key.py
Normal file
@@ -0,0 +1,129 @@
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from django.db import transaction
|
||||
from django.db.models.signals import post_save
|
||||
|
||||
from awx.conf import settings_registry
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.signals import on_post_save_setting
|
||||
from awx.main.models import (
|
||||
UnifiedJob, Credential, NotificationTemplate, Job, JobTemplate, WorkflowJob,
|
||||
WorkflowJobTemplate, OAuth2Application
|
||||
)
|
||||
from awx.main.utils.encryption import (
|
||||
encrypt_field, decrypt_field, encrypt_value, decrypt_value, get_encryption_key
|
||||
)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Regenerate a new SECRET_KEY value and re-encrypt every secret in the
|
||||
Tower database.
|
||||
"""
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, **options):
|
||||
self.old_key = settings.SECRET_KEY
|
||||
self.new_key = base64.encodebytes(os.urandom(33)).decode().rstrip()
|
||||
self._notification_templates()
|
||||
self._credentials()
|
||||
self._unified_jobs()
|
||||
self._oauth2_app_secrets()
|
||||
self._settings()
|
||||
self._survey_passwords()
|
||||
return self.new_key
|
||||
|
||||
def _notification_templates(self):
|
||||
for nt in NotificationTemplate.objects.iterator():
|
||||
CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2]) for x in NotificationTemplate.NOTIFICATION_TYPES])
|
||||
notification_class = CLASS_FOR_NOTIFICATION_TYPE[nt.notification_type]
|
||||
for field in filter(lambda x: notification_class.init_parameters[x]['type'] == "password",
|
||||
notification_class.init_parameters):
|
||||
nt.notification_configuration[field] = decrypt_field(nt, 'notification_configuration', subfield=field, secret_key=self.old_key)
|
||||
nt.notification_configuration[field] = encrypt_field(nt, 'notification_configuration', subfield=field, secret_key=self.new_key)
|
||||
nt.save()
|
||||
|
||||
def _credentials(self):
|
||||
for credential in Credential.objects.iterator():
|
||||
for field_name in credential.credential_type.secret_fields:
|
||||
if field_name in credential.inputs:
|
||||
credential.inputs[field_name] = decrypt_field(
|
||||
credential,
|
||||
field_name,
|
||||
secret_key=self.old_key
|
||||
)
|
||||
credential.inputs[field_name] = encrypt_field(
|
||||
credential,
|
||||
field_name,
|
||||
secret_key=self.new_key
|
||||
)
|
||||
credential.save()
|
||||
|
||||
def _unified_jobs(self):
|
||||
for uj in UnifiedJob.objects.iterator():
|
||||
if uj.start_args:
|
||||
uj.start_args = decrypt_field(
|
||||
uj,
|
||||
'start_args',
|
||||
secret_key=self.old_key
|
||||
)
|
||||
uj.start_args = encrypt_field(uj, 'start_args', secret_key=self.new_key)
|
||||
uj.save()
|
||||
|
||||
def _oauth2_app_secrets(self):
|
||||
for app in OAuth2Application.objects.iterator():
|
||||
raw = app.client_secret
|
||||
app.client_secret = raw
|
||||
encrypted = encrypt_value(raw, secret_key=self.new_key)
|
||||
OAuth2Application.objects.filter(pk=app.pk).update(client_secret=encrypted)
|
||||
|
||||
def _settings(self):
|
||||
# don't update memcached, the *actual* value isn't changing
|
||||
post_save.disconnect(on_post_save_setting, sender=Setting)
|
||||
for setting in Setting.objects.filter().order_by('pk'):
|
||||
if settings_registry.is_setting_encrypted(setting.key):
|
||||
setting.value = decrypt_field(setting, 'value', secret_key=self.old_key)
|
||||
setting.value = encrypt_field(setting, 'value', secret_key=self.new_key)
|
||||
setting.save()
|
||||
|
||||
def _survey_passwords(self):
|
||||
for _type in (JobTemplate, WorkflowJobTemplate):
|
||||
for jt in _type.objects.exclude(survey_spec={}):
|
||||
changed = False
|
||||
if jt.survey_spec.get('spec', []):
|
||||
for field in jt.survey_spec['spec']:
|
||||
if field.get('type') == 'password' and field.get('default', ''):
|
||||
raw = decrypt_value(
|
||||
get_encryption_key('value', None, secret_key=self.old_key),
|
||||
field['default']
|
||||
)
|
||||
field['default'] = encrypt_value(
|
||||
raw,
|
||||
pk=None,
|
||||
secret_key=self.new_key
|
||||
)
|
||||
changed = True
|
||||
if changed:
|
||||
jt.save(update_fields=["survey_spec"])
|
||||
|
||||
for _type in (Job, WorkflowJob):
|
||||
for job in _type.objects.exclude(survey_passwords={}).iterator():
|
||||
changed = False
|
||||
for key in job.survey_passwords:
|
||||
if key in job.extra_vars:
|
||||
extra_vars = json.loads(job.extra_vars)
|
||||
if not extra_vars.get(key):
|
||||
continue
|
||||
raw = decrypt_value(
|
||||
get_encryption_key('value', None, secret_key=self.old_key),
|
||||
extra_vars[key]
|
||||
)
|
||||
extra_vars[key] = encrypt_value(raw, pk=None, secret_key=self.new_key)
|
||||
job.extra_vars = json.dumps(extra_vars)
|
||||
changed = True
|
||||
if changed:
|
||||
job.save(update_fields=["extra_vars"])
|
||||
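The regenerate_secret_key command above rotates SECRET_KEY by decrypting every stored secret with the old key and re-encrypting it with a freshly generated one, all inside a single transaction; the new key is returned so it can then replace SECRET_KEY in the settings. Reduced to one field, the round trip looks roughly like this (generic object and field names):

    from awx.main.utils.encryption import decrypt_field, encrypt_field

    def rekey_field(obj, field_name, old_key, new_key):
        # decrypt with the outgoing SECRET_KEY, then re-encrypt with the new one
        setattr(obj, field_name, decrypt_field(obj, field_name, secret_key=old_key))
        setattr(obj, field_name, encrypt_field(obj, field_name, secret_key=new_key))
        obj.save()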
@@ -9,6 +9,7 @@ import random
|
||||
from django.utils import timezone
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from awx.main.models.events import emit_event_detail
|
||||
from awx.main.models import (
|
||||
UnifiedJob,
|
||||
Job,
|
||||
@@ -17,14 +18,6 @@ from awx.main.models import (
|
||||
InventoryUpdate,
|
||||
SystemJob
|
||||
)
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.api.serializers import (
|
||||
JobEventWebSocketSerializer,
|
||||
AdHocCommandEventWebSocketSerializer,
|
||||
ProjectUpdateEventWebSocketSerializer,
|
||||
InventoryUpdateEventWebSocketSerializer,
|
||||
SystemJobEventWebSocketSerializer
|
||||
)
|
||||
|
||||
|
||||
class JobStatusLifeCycle():
|
||||
@@ -96,21 +89,6 @@ class ReplayJobEvents(JobStatusLifeCycle):
|
||||
raise RuntimeError("No events for job id {}".format(job.id))
|
||||
return job_events, count
|
||||
|
||||
def get_serializer(self, job):
|
||||
if type(job) is Job:
|
||||
return JobEventWebSocketSerializer
|
||||
elif type(job) is AdHocCommand:
|
||||
return AdHocCommandEventWebSocketSerializer
|
||||
elif type(job) is ProjectUpdate:
|
||||
return ProjectUpdateEventWebSocketSerializer
|
||||
elif type(job) is InventoryUpdate:
|
||||
return InventoryUpdateEventWebSocketSerializer
|
||||
elif type(job) is SystemJob:
|
||||
return SystemJobEventWebSocketSerializer
|
||||
else:
|
||||
raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
|
||||
sys.exit(1)
|
||||
|
||||
def run(self, job_id, speed=1.0, verbosity=0, skip_range=[], random_seed=0, final_status_delay=0, debug=False):
|
||||
stats = {
|
||||
'events_ontime': {
|
||||
@@ -136,7 +114,6 @@ class ReplayJobEvents(JobStatusLifeCycle):
|
||||
try:
|
||||
job = self.get_job(job_id)
|
||||
job_events, job_event_count = self.get_job_events(job)
|
||||
serializer = self.get_serializer(job)
|
||||
except RuntimeError as e:
|
||||
print("{}".format(e.message))
|
||||
sys.exit(1)
|
||||
@@ -162,8 +139,7 @@ class ReplayJobEvents(JobStatusLifeCycle):
|
||||
stats['replay_start'] = self.replay_start
|
||||
je_previous = je_current
|
||||
|
||||
je_serialized = serializer(je_current).data
|
||||
emit_channel_notification('{}-{}'.format(je_serialized['group_name'], job.id), je_serialized)
|
||||
emit_event_detail(je_current)
|
||||
|
||||
replay_offset = self.replay_offset(je_previous.created, speed)
|
||||
recording_diff = (je_current.created - je_previous.created).total_seconds() * (1.0 / speed)
|
||||
|
||||
@@ -16,6 +16,7 @@ from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
|
||||
from awx.main.dispatch import periodic
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
@@ -36,71 +37,6 @@ class Command(BaseCommand):
|
||||
help=('cause the dispatcher to recycle all of its worker processes;'
|
||||
'running jobs will run to completion first'))
|
||||
|
||||
def beat(self):
|
||||
from celery import Celery
|
||||
from celery.beat import PersistentScheduler
|
||||
from celery.apps import beat
|
||||
|
||||
class AWXScheduler(PersistentScheduler):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.ppid = os.getppid()
|
||||
super(AWXScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def setup_schedule(self):
|
||||
super(AWXScheduler, self).setup_schedule()
|
||||
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
|
||||
|
||||
def tick(self, *args, **kwargs):
|
||||
if os.getppid() != self.ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
raise SystemExit()
|
||||
return super(AWXScheduler, self).tick(*args, **kwargs)
|
||||
|
||||
def apply_async(self, entry, producer=None, advance=True, **kwargs):
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
task = TaskWorker.resolve_callable(entry.task)
|
||||
result, queue = task.apply_async()
|
||||
|
||||
class TaskResult(object):
|
||||
id = result['uuid']
|
||||
|
||||
return TaskResult()
|
||||
|
||||
sched_file = '/var/lib/awx/beat.db'
|
||||
app = Celery()
|
||||
app.conf.BROKER_URL = settings.BROKER_URL
|
||||
app.conf.CELERY_TASK_RESULT_EXPIRES = False
|
||||
|
||||
# celery in py3 seems to have a bug where the celerybeat schedule
|
||||
# shelve can become corrupted; we've _only_ seen this in Ubuntu and py36
|
||||
# it can be avoided by detecting and removing the corrupted file
|
||||
# at some point, we'll just stop using celerybeat, because it's clearly
|
||||
# buggy, too -_-
|
||||
#
|
||||
# https://github.com/celery/celery/issues/4777
|
||||
sched = AWXScheduler(schedule_filename=sched_file, app=app)
|
||||
try:
|
||||
sched.setup_schedule()
|
||||
except Exception:
|
||||
logger.exception('{} is corrupted, removing.'.format(sched_file))
|
||||
sched._remove_db()
|
||||
finally:
|
||||
try:
|
||||
sched.close()
|
||||
except Exception:
|
||||
logger.exception('{} failed to sync/close'.format(sched_file))
|
||||
|
||||
beat.Beat(
|
||||
30,
|
||||
app,
|
||||
schedule=sched_file, scheduler_cls=AWXScheduler
|
||||
).run()
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
print(Control('dispatcher').status())
|
||||
@@ -116,9 +52,10 @@ class Command(BaseCommand):
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
beat = Process(target=self.beat)
|
||||
beat.daemon = True
|
||||
beat.start()
|
||||
|
||||
# spawn a daemon thread to periodically enqueues scheduled tasks
|
||||
# (like the node heartbeat)
|
||||
cease_continuous_run = periodic.run_continuously()
|
||||
|
||||
reaper.reap()
|
||||
consumer = None
|
||||
@@ -152,6 +89,7 @@ class Command(BaseCommand):
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
cease_continuous_run.set()
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
|
||||
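The dispatcher hunks above drop the embedded celerybeat process in favour of periodic.run_continuously(), a daemon thread that enqueues due scheduled tasks and hands back an Event the main loop sets on shutdown. The general shape of such a helper, as a sketch rather than the actual AWX implementation:

    import threading

    def run_continuously(tick, interval=10):
        # tick() is whatever enqueues the periodic tasks that are currently due
        stop_event = threading.Event()

        def loop():
            while not stop_event.is_set():
                tick()
                stop_event.wait(interval)

        thread = threading.Thread(target=loop, daemon=True)
        thread.start()
        return stop_event  # caller sets this (e.g. on KeyboardInterrupt) to stop the thread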
@@ -62,6 +62,17 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
|
||||
with open(filepath, 'w') as f:
|
||||
f.write('%s %s\n' % (request.method, request.get_full_path()))
|
||||
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
|
||||
|
||||
if settings.AWX_REQUEST_PROFILE_WITH_DOT:
|
||||
from gprof2dot import main as generate_dot
|
||||
raw = os.path.join(self.dest, filename) + '.raw'
|
||||
pstats.Stats(self.prof).dump_stats(raw)
|
||||
generate_dot([
|
||||
'-n', '2.5', '-f', 'pstats', '-o',
|
||||
os.path.join( self.dest, filename).replace('.pstats', '.dot'),
|
||||
raw
|
||||
])
|
||||
os.remove(raw)
|
||||
return filepath
|
||||
|
||||
|
||||
|
||||
@@ -7,12 +7,6 @@ from django.db import migrations, models
|
||||
|
||||
# AWX
|
||||
from awx.main.migrations import ActivityStreamDisabledMigration
|
||||
from awx.main.migrations import _inventory_source as invsrc
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
from awx.main.migrations import _reencrypt as reencrypt
|
||||
from awx.main.migrations import _scan_jobs as scan_jobs
|
||||
from awx.main.migrations import _credentialtypes as credentialtypes
|
||||
from awx.main.migrations import _azure_credentials as azurecreds
|
||||
import awx.main.fields
|
||||
|
||||
|
||||
@@ -23,16 +17,8 @@ class Migration(ActivityStreamDisabledMigration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
# Inventory Refresh
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(invsrc.remove_rax_inventory_sources),
|
||||
migrations.RunPython(azurecreds.remove_azure_credentials),
|
||||
migrations.RunPython(invsrc.remove_azure_inventory_sources),
|
||||
migrations.RunPython(invsrc.remove_inventory_source_with_no_inventory_link),
|
||||
migrations.RunPython(invsrc.rename_inventory_sources),
|
||||
migrations.RunPython(reencrypt.replace_aesecb_fernet),
|
||||
migrations.RunPython(scan_jobs.migrate_scan_job_templates),
|
||||
|
||||
migrations.RunPython(credentialtypes.migrate_to_v2_credentials),
|
||||
migrations.RunPython(credentialtypes.migrate_job_credentials),
|
||||
# This list is intentionally empty.
|
||||
# Tower 3.2 included several data migrations that are no longer
|
||||
# necessary (this list is now empty because Tower 3.2 is past EOL and
|
||||
# cannot be directly upgraded to modern versions of Tower)
|
||||
]
|
||||
|
||||
@@ -15,8 +15,6 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(credentialtypes.create_rhv_tower_credtype),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
|
||||
@@ -3,8 +3,6 @@ from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
from awx.main.migrations import ActivityStreamDisabledMigration
|
||||
from awx.main.migrations import _reencrypt as reencrypt
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
|
||||
|
||||
class Migration(ActivityStreamDisabledMigration):
|
||||
@@ -14,6 +12,8 @@ class Migration(ActivityStreamDisabledMigration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(reencrypt.encrypt_survey_passwords),
|
||||
# This list is intentionally empty.
|
||||
# Tower 3.2 included several data migrations that are no longer
|
||||
# necessary (this list is now empty because Tower 3.2 is past EOL and
|
||||
# cannot be directly upgraded to modern versions of Tower)
|
||||
]
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# AWX
|
||||
from awx.main.migrations import _migration_utils as migration_utils
|
||||
from awx.main.migrations import _credentialtypes as credentialtypes
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
@@ -15,6 +11,8 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(credentialtypes.add_azure_cloud_environment_field),
|
||||
# This list is intentionally empty.
|
||||
# Tower 3.2 included several data migrations that are no longer
|
||||
# necessary (this list is now empty because Tower 3.2 is past EOL and
|
||||
# cannot be directly upgraded to modern versions of Tower)
|
||||
]
|
||||
|
||||
@@ -19,11 +19,11 @@ class Migration(migrations.Migration):
|
||||
migrations.AlterField(
|
||||
model_name='systemjob',
|
||||
name='job_type',
|
||||
field=models.CharField(blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('clearsessions', 'Removes expired browser sessions from the database'), ('cleartokens', 'Removes expired OAuth 2 access tokens and refresh tokens')], default='', max_length=32),
|
||||
field=models.CharField(blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_sessions', 'Removes expired browser sessions from the database'), ('cleanup_tokens', 'Removes expired OAuth 2 access tokens and refresh tokens')], default='', max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='systemjobtemplate',
|
||||
name='job_type',
|
||||
field=models.CharField(blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('clearsessions', 'Removes expired browser sessions from the database'), ('cleartokens', 'Removes expired OAuth 2 access tokens and refresh tokens')], default='', max_length=32),
|
||||
field=models.CharField(blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_sessions', 'Removes expired browser sessions from the database'), ('cleanup_tokens', 'Removes expired OAuth 2 access tokens and refresh tokens')], default='', max_length=32),
|
||||
),
|
||||
]
|
||||
|
||||
21
awx/main/migrations/0099_v361_license_cleanup.py
Normal file
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def _cleanup_license_setting(apps, schema_editor):
    Setting = apps.get_model('conf', 'Setting')
    for license in Setting.objects.filter(key='LICENSE').all():
        for k in ('rh_username', 'rh_password'):
            license.value.pop(k, None)
        license.save()


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0098_v360_rename_cyberark_aim_credential_type'),
    ]

    operations = [migrations.RunPython(_cleanup_license_setting)]
18
awx/main/migrations/0100_v370_projectupdate_job_tags.py
Normal file
@@ -0,0 +1,18 @@
# Generated by Django 2.2.4 on 2019-11-01 18:46

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0099_v361_license_cleanup'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectupdate',
            name='job_tags',
            field=models.CharField(blank=True, default='', help_text='Parts of the project update playbook that will be run.', max_length=1024),
        ),
    ]
@@ -0,0 +1,26 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from uuid import uuid4
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
def _generate_new_uuid_for_iso_nodes(apps, schema_editor):
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
for instance in Instance.objects.all():
|
||||
# The below code is a copy paste of instance.is_isolated()
|
||||
# We can't call is_isolated because we are using the "old" version
|
||||
# of the Instance definition.
|
||||
if instance.rampart_groups.filter(controller__isnull=False).exists():
|
||||
instance.uuid = str(uuid4())
|
||||
instance.save()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0100_v370_projectupdate_job_tags'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(_generate_new_uuid_for_iso_nodes)
|
||||
]
|
||||
18
awx/main/migrations/0102_v370_unifiedjob_canceled.py
Normal file
@@ -0,0 +1,18 @@
# Generated by Django 2.2.4 on 2019-11-25 20:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0101_v370_generate_new_uuids_for_iso_nodes'),
    ]

    operations = [
        migrations.AddField(
            model_name='unifiedjob',
            name='canceled_on',
            field=models.DateTimeField(db_index=True, default=None, editable=False, help_text='The date and time when the cancel request was sent.', null=True),
        ),
    ]
52
awx/main/migrations/0103_v370_remove_computed_fields.py
Normal file
@@ -0,0 +1,52 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.16 on 2019-02-21 17:35
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0102_v370_unifiedjob_canceled'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='groups_with_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='has_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='has_inventory_sources',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='hosts_with_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='total_groups',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='group',
|
||||
name='total_hosts',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='host',
|
||||
name='has_active_failures',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='host',
|
||||
name='has_inventory_sources',
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobhostsummary',
|
||||
name='failed',
|
||||
field=models.BooleanField(db_index=True, default=False, editable=False),
|
||||
),
|
||||
]
|
||||
24
awx/main/migrations/0104_v370_cleanup_old_scan_jts.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-15 20:01
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def cleanup_scan_jts(apps, schema_editor):
|
||||
JobTemplate = apps.get_model('main', 'JobTemplate')
|
||||
JobTemplate.objects.filter(job_type='scan').update(job_type='run')
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0103_v370_remove_computed_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(cleanup_scan_jts),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='job_type',
|
||||
field=models.CharField(choices=[('run', 'Run'), ('check', 'Check')], default='run', max_length=64),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,21 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-15 18:01
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0104_v370_cleanup_old_scan_jts'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='jobevent',
|
||||
name='parent',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='jobevent',
|
||||
name='hosts',
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,17 @@
|
||||
# Generated by Django 2.2.8 on 2020-01-27 12:39
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0105_v370_remove_jobevent_parent_and_hosts'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='inventory',
|
||||
name='groups_with_active_failures',
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,23 @@
|
||||
# Generated by Django 2.2.4 on 2020-01-08 22:11
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0106_v370_remove_inventory_groups_with_active_failures'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='workflowjobnode',
|
||||
name='all_parents_must_converge',
|
||||
field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjobtemplatenode',
|
||||
name='all_parents_must_converge',
|
||||
field=models.BooleanField(default=False, help_text='If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.8 on 2020-02-06 16:43
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0107_v370_workflow_convergence_api_toggle'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='dependencies_processed',
|
||||
field=models.BooleanField(default=False, editable=False, help_text='If True, the task manager has already processed potential dependencies for this job.'),
|
||||
),
|
||||
]
|
||||
@@ -1,15 +0,0 @@
|
||||
import logging
|
||||
|
||||
from django.db.models import Q
|
||||
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
|
||||
def remove_azure_credentials(apps, schema_editor):
|
||||
'''Azure is not supported as of 3.2 and greater. Instead, azure_rm is
|
||||
supported.
|
||||
'''
|
||||
Credential = apps.get_model('main', 'Credential')
|
||||
logger.debug("Removing all Azure Credentials from database.")
|
||||
Credential.objects.filter(kind='azure').delete()
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
from awx.main import utils
|
||||
from awx.main.models import CredentialType
|
||||
from awx.main.utils.encryption import encrypt_field, decrypt_field
|
||||
from django.db.models import Q
|
||||
|
||||
|
||||
@@ -61,16 +59,6 @@ def _disassociate_non_insights_projects(apps, cred):
|
||||
apps.get_model('main', 'Project').objects.filter(~Q(scm_type='insights') & Q(credential=cred)).update(credential=None)
|
||||
|
||||
|
||||
def migrate_to_v2_credentials(apps, schema_editor):
|
||||
# TODO: remove once legacy/EOL'd Towers no longer support this upgrade path
|
||||
pass
|
||||
|
||||
|
||||
def migrate_job_credentials(apps, schema_editor):
|
||||
# TODO: remove once legacy/EOL'd Towers no longer support this upgrade path
|
||||
pass
|
||||
|
||||
|
||||
def add_vault_id_field(apps, schema_editor):
|
||||
# this is no longer necessary; schemas are defined in code
|
||||
pass
|
||||
@@ -81,21 +69,11 @@ def remove_vault_id_field(apps, schema_editor):
|
||||
pass
|
||||
|
||||
|
||||
def create_rhv_tower_credtype(apps, schema_editor):
|
||||
# this is no longer necessary; schemas are defined in code
|
||||
pass
|
||||
|
||||
|
||||
def add_tower_verify_field(apps, schema_editor):
|
||||
# this is no longer necessary; schemas are defined in code
|
||||
pass
|
||||
|
||||
|
||||
def add_azure_cloud_environment_field(apps, schema_editor):
|
||||
# this is no longer necessary; schemas are defined in code
|
||||
pass
|
||||
|
||||
|
||||
def remove_become_methods(apps, schema_editor):
|
||||
# this is no longer necessary; schemas are defined in code
|
||||
pass
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import logging
|
||||
|
||||
from django.db.models import Q
|
||||
from django.utils.encoding import smart_text
|
||||
|
||||
from awx.main.utils.common import parse_yaml_or_json
|
||||
@@ -8,64 +7,6 @@ from awx.main.utils.common import parse_yaml_or_json
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
|
||||
def remove_manual_inventory_sources(apps, schema_editor):
|
||||
'''Previously we would automatically create inventory sources after
|
||||
Group creation and we would use the parent Group as our interface for the user.
|
||||
During that process we would create InventorySource that had a source of "manual".
|
||||
'''
|
||||
# TODO: use this in the 3.3 data migrations
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
# see models/inventory.py SOURCE_CHOICES - ('', _('Manual'))
|
||||
logger.debug("Removing all Manual InventorySource from database.")
|
||||
InventorySource.objects.filter(source='').delete()
|
||||
|
||||
|
||||
def remove_rax_inventory_sources(apps, schema_editor):
|
||||
'''Rackspace inventory sources are not supported since 3.2, remove them.
|
||||
'''
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
logger.debug("Removing all Rackspace InventorySource from database.")
|
||||
InventorySource.objects.filter(source='rax').delete()
|
||||
|
||||
|
||||
def rename_inventory_sources(apps, schema_editor):
|
||||
'''Rename existing InventorySource entries using the following format.
|
||||
{{ inventory_source.name }} - {{ inventory.module }} - {{ number }}
|
||||
The number will be incremented for each InventorySource for the organization.
|
||||
'''
|
||||
Organization = apps.get_model('main', 'Organization')
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
|
||||
for org in Organization.objects.iterator():
|
||||
for i, invsrc in enumerate(InventorySource.objects.filter(Q(inventory__organization=org) |
|
||||
Q(deprecated_group__inventory__organization=org)).distinct().all()):
|
||||
|
||||
inventory = invsrc.deprecated_group.inventory if invsrc.deprecated_group else invsrc.inventory
|
||||
name = '{0} - {1} - {2}'.format(invsrc.name, inventory.name, i)
|
||||
logger.debug("Renaming InventorySource({0}) {1} -> {2}".format(
|
||||
invsrc.pk, invsrc.name, name
|
||||
))
|
||||
invsrc.name = name
|
||||
invsrc.save()
|
||||
|
||||
|
||||
def remove_inventory_source_with_no_inventory_link(apps, schema_editor):
|
||||
'''If we cannot determine the Inventory for which an InventorySource exists
|
||||
we can safely remove it.
|
||||
'''
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
logger.debug("Removing all InventorySource that have no link to an Inventory from database.")
|
||||
InventorySource.objects.filter(Q(inventory__organization=None) & Q(deprecated_group__inventory=None)).delete()
|
||||
|
||||
|
||||
def remove_azure_inventory_sources(apps, schema_editor):
|
||||
'''Azure inventory sources are not supported since 3.2, remove them.
|
||||
'''
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
logger.debug("Removing all Azure InventorySource from database.")
|
||||
InventorySource.objects.filter(source='azure').delete()
|
||||
|
||||
|
||||
def _get_instance_id(from_dict, new_id, default=''):
|
||||
'''logic mostly duplicated with inventory_import command Command._get_instance_id
|
||||
frozen in time here, for purposes of migrations
|
||||
|
||||
@@ -1,79 +1,12 @@
|
||||
import logging
|
||||
import json
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.conf.migrations._reencrypt import (
|
||||
decrypt_field,
|
||||
should_decrypt_field,
|
||||
)
|
||||
from awx.main.utils.encryption import encrypt_field
|
||||
|
||||
from awx.main.notifications.email_backend import CustomEmailBackend
|
||||
from awx.main.notifications.slack_backend import SlackBackend
|
||||
from awx.main.notifications.twilio_backend import TwilioBackend
|
||||
from awx.main.notifications.pagerduty_backend import PagerDutyBackend
|
||||
from awx.main.notifications.hipchat_backend import HipChatBackend
|
||||
from awx.main.notifications.mattermost_backend import MattermostBackend
|
||||
from awx.main.notifications.webhook_backend import WebhookBackend
|
||||
from awx.main.notifications.irc_backend import IrcBackend
|
||||
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
__all__ = ['replace_aesecb_fernet']
|
||||
|
||||
|
||||
NOTIFICATION_TYPES = [('email', _('Email'), CustomEmailBackend),
|
||||
('slack', _('Slack'), SlackBackend),
|
||||
('twilio', _('Twilio'), TwilioBackend),
|
||||
('pagerduty', _('Pagerduty'), PagerDutyBackend),
|
||||
('hipchat', _('HipChat'), HipChatBackend),
|
||||
('mattermost', _('Mattermost'), MattermostBackend),
|
||||
('webhook', _('Webhook'), WebhookBackend),
|
||||
('irc', _('IRC'), IrcBackend)]
|
||||
|
||||
|
||||
PASSWORD_FIELDS = ('password', 'security_token', 'ssh_key_data', 'ssh_key_unlock',
|
||||
'become_password', 'vault_password', 'secret', 'authorize_password')
|
||||
|
||||
|
||||
def replace_aesecb_fernet(apps, schema_editor):
|
||||
_notification_templates(apps)
|
||||
_credentials(apps)
|
||||
_unified_jobs(apps)
|
||||
|
||||
|
||||
def _notification_templates(apps):
|
||||
NotificationTemplate = apps.get_model('main', 'NotificationTemplate')
|
||||
for nt in NotificationTemplate.objects.all():
|
||||
CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2]) for x in NOTIFICATION_TYPES])
|
||||
notification_class = CLASS_FOR_NOTIFICATION_TYPE[nt.notification_type]
|
||||
for field in filter(lambda x: notification_class.init_parameters[x]['type'] == "password",
|
||||
notification_class.init_parameters):
|
||||
if should_decrypt_field(nt.notification_configuration[field]):
|
||||
nt.notification_configuration[field] = decrypt_field(nt, 'notification_configuration', subfield=field)
|
||||
nt.notification_configuration[field] = encrypt_field(nt, 'notification_configuration', subfield=field)
|
||||
nt.save()
|
||||
|
||||
|
||||
def _credentials(apps):
|
||||
for credential in apps.get_model('main', 'Credential').objects.all():
|
||||
for field_name in PASSWORD_FIELDS:
|
||||
value = getattr(credential, field_name)
|
||||
if should_decrypt_field(value):
|
||||
value = decrypt_field(credential, field_name)
|
||||
setattr(credential, field_name, value)
|
||||
setattr(credential, field_name, encrypt_field(credential, field_name))
|
||||
credential.save()
|
||||
|
||||
|
||||
def _unified_jobs(apps):
|
||||
UnifiedJob = apps.get_model('main', 'UnifiedJob')
|
||||
for uj in UnifiedJob.objects.all():
|
||||
if uj.start_args is not None:
|
||||
if should_decrypt_field(uj.start_args):
|
||||
uj.start_args = decrypt_field(uj, 'start_args')
|
||||
uj.start_args = encrypt_field(uj, 'start_args')
|
||||
uj.save()
|
||||
__all__ = []
|
||||
|
||||
|
||||
def blank_old_start_args(apps, schema_editor):
|
||||
@@ -91,53 +24,3 @@ def blank_old_start_args(apps, schema_editor):
|
||||
logger.debug('Blanking job args for %s', uj.pk)
|
||||
uj.start_args = ''
|
||||
uj.save()
|
||||
|
||||
|
||||
def encrypt_survey_passwords(apps, schema_editor):
|
||||
_encrypt_survey_passwords(
|
||||
apps.get_model('main', 'Job'),
|
||||
apps.get_model('main', 'JobTemplate'),
|
||||
apps.get_model('main', 'WorkflowJob'),
|
||||
apps.get_model('main', 'WorkflowJobTemplate'),
|
||||
)
|
||||
|
||||
|
||||
def _encrypt_survey_passwords(Job, JobTemplate, WorkflowJob, WorkflowJobTemplate):
|
||||
from awx.main.utils.encryption import encrypt_value
|
||||
for _type in (JobTemplate, WorkflowJobTemplate):
|
||||
for jt in _type.objects.exclude(survey_spec={}):
|
||||
changed = False
|
||||
if jt.survey_spec.get('spec', []):
|
||||
for field in jt.survey_spec['spec']:
|
||||
if field.get('type') == 'password' and field.get('default', ''):
|
||||
default = field['default']
|
||||
if default.startswith('$encrypted$'):
|
||||
if default == '$encrypted$':
|
||||
# If you have a survey_spec with a literal
|
||||
# '$encrypted$' as the default, you have
|
||||
# encountered a known bug in awx/Tower
|
||||
# https://github.com/ansible/ansible-tower/issues/7800
|
||||
logger.error(
|
||||
'{}.pk={} survey_spec has ambiguous $encrypted$ default for {}, needs attention...'.format(jt, jt.pk, field['variable'])
|
||||
)
|
||||
field['default'] = ''
|
||||
changed = True
|
||||
continue
|
||||
field['default'] = encrypt_value(field['default'], pk=None)
|
||||
changed = True
|
||||
if changed:
|
||||
jt.save()
|
||||
|
||||
for _type in (Job, WorkflowJob):
|
||||
for job in _type.objects.defer('result_stdout_text').exclude(survey_passwords={}).iterator():
|
||||
changed = False
|
||||
for key in job.survey_passwords:
|
||||
if key in job.extra_vars:
|
||||
extra_vars = json.loads(job.extra_vars)
|
||||
if not extra_vars.get(key, '') or extra_vars[key].startswith('$encrypted$'):
|
||||
continue
|
||||
extra_vars[key] = encrypt_value(extra_vars[key], pk=None)
|
||||
job.extra_vars = json.dumps(extra_vars)
|
||||
changed = True
|
||||
if changed:
|
||||
job.save()
|
||||
|
||||
@@ -1,89 +1,9 @@
|
||||
import logging
|
||||
|
||||
from django.utils.timezone import now
|
||||
from django.utils.text import slugify
|
||||
|
||||
from awx.main.models.base import PERM_INVENTORY_SCAN, PERM_INVENTORY_DEPLOY
|
||||
from awx.main import utils
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
|
||||
def _create_fact_scan_project(ContentType, Project, org):
|
||||
ct = ContentType.objects.get_for_model(Project)
|
||||
name = u"Tower Fact Scan - {}".format(org.name if org else "No Organization")
|
||||
proj = Project(name=name,
|
||||
scm_url='https://github.com/ansible/awx-facts-playbooks',
|
||||
scm_type='git',
|
||||
scm_update_on_launch=True,
|
||||
scm_update_cache_timeout=86400,
|
||||
organization=org,
|
||||
created=now(),
|
||||
modified=now(),
|
||||
polymorphic_ctype=ct)
|
||||
proj.save()
|
||||
|
||||
slug_name = slugify(str(name)).replace(u'-', u'_')
|
||||
proj.local_path = u'_%d__%s' % (int(proj.pk), slug_name)
|
||||
|
||||
proj.save()
|
||||
return proj
|
||||
|
||||
|
||||
def _create_fact_scan_projects(ContentType, Project, orgs):
|
||||
return {org.id : _create_fact_scan_project(ContentType, Project, org) for org in orgs}
|
||||
|
||||
|
||||
def _get_tower_scan_job_templates(JobTemplate):
|
||||
return JobTemplate.objects.filter(job_type=PERM_INVENTORY_SCAN, project__isnull=True) \
|
||||
.prefetch_related('inventory__organization')
|
||||
|
||||
|
||||
def _get_orgs(Organization, job_template_ids):
|
||||
return Organization.objects.filter(inventories__jobtemplates__in=job_template_ids).distinct()
|
||||
|
||||
|
||||
def _migrate_scan_job_templates(apps):
|
||||
JobTemplate = apps.get_model('main', 'JobTemplate')
|
||||
Organization = apps.get_model('main', 'Organization')
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
Project = apps.get_model('main', 'Project')
|
||||
|
||||
project_no_org = None
|
||||
|
||||
# A scan job template with a custom project will retain the custom project.
|
||||
JobTemplate.objects.filter(job_type=PERM_INVENTORY_SCAN, project__isnull=False).update(use_fact_cache=True, job_type=PERM_INVENTORY_DEPLOY)
|
||||
|
||||
# Scan jobs templates using Tower's default scan playbook will now point at
|
||||
# the same playbook but in a github repo.
|
||||
jts = _get_tower_scan_job_templates(JobTemplate)
|
||||
if jts.count() == 0:
|
||||
return
|
||||
|
||||
orgs = _get_orgs(Organization, jts.values_list('id'))
|
||||
if orgs.count() == 0:
|
||||
return
|
||||
|
||||
org_proj_map = _create_fact_scan_projects(ContentType, Project, orgs)
|
||||
for jt in jts:
|
||||
if jt.inventory and jt.inventory.organization:
|
||||
jt.project_id = org_proj_map[jt.inventory.organization.id].id
|
||||
# Job Templates without an Organization; through related Inventory
|
||||
else:
|
||||
if not project_no_org:
|
||||
project_no_org = _create_fact_scan_project(ContentType, Project, None)
|
||||
jt.project_id = project_no_org.id
|
||||
jt.job_type = PERM_INVENTORY_DEPLOY
|
||||
jt.playbook = "scan_facts.yml"
|
||||
jt.use_fact_cache = True
|
||||
jt.save()
|
||||
|
||||
|
||||
def migrate_scan_job_templates(apps, schema_editor):
|
||||
_migrate_scan_job_templates(apps)
|
||||
|
||||
|
||||
def remove_scan_type_nodes(apps, schema_editor):
|
||||
WorkflowJobTemplateNode = apps.get_model('main', 'WorkflowJobTemplateNode')
|
||||
WorkflowJobNode = apps.get_model('main', 'WorkflowJobNode')
|
||||
|
||||
@@ -295,7 +295,10 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
r = super(PrimordialModel, self).__init__(*args, **kwargs)
|
||||
self._prior_values_store = self._get_fields_snapshot()
|
||||
if self.pk:
|
||||
self._prior_values_store = self._get_fields_snapshot()
|
||||
else:
|
||||
self._prior_values_store = {}
|
||||
return r
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
|
||||
@@ -86,6 +86,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
|
||||
unique_together = (('organization', 'name', 'credential_type'))
|
||||
|
||||
PASSWORD_FIELDS = ['inputs']
|
||||
FIELDS_TO_PRESERVE_AT_COPY = ['input_sources']
|
||||
|
||||
credential_type = models.ForeignKey(
|
||||
'CredentialType',
|
||||
@@ -1135,7 +1136,7 @@ ManagedCredentialType(
|
||||
'help_text': ugettext_noop('The OpenShift or Kubernetes API Endpoint to authenticate with.')
|
||||
},{
|
||||
'id': 'bearer_token',
|
||||
'label': ugettext_noop('API authentication bearer token.'),
|
||||
'label': ugettext_noop('API authentication bearer token'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
},{
|
||||
@@ -1162,6 +1163,8 @@ class CredentialInputSource(PrimordialModel):
|
||||
unique_together = (('target_credential', 'input_field_name'),)
|
||||
ordering = ('target_credential', 'source_credential', 'input_field_name',)
|
||||
|
||||
FIELDS_TO_PRESERVE_AT_COPY = ['source_credential', 'metadata', 'input_field_name']
|
||||
|
||||
target_credential = models.ForeignKey(
|
||||
'Credential',
|
||||
related_name='input_sources',
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import models, DatabaseError
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
@@ -11,9 +12,10 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.encoding import force_text
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main import consumers
|
||||
from awx.main.fields import JSONField
|
||||
from awx.main.models.base import CreatedModifiedModel
|
||||
from awx.main.utils import ignore_inventory_computed_fields
|
||||
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
||||
|
||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||
|
||||
@@ -55,6 +57,51 @@ def create_host_status_counts(event_data):
|
||||
return dict(host_status_counts)
|
||||
def emit_event_detail(event):
    cls = event.__class__
    relation = {
        JobEvent: 'job_id',
        AdHocCommandEvent: 'ad_hoc_command_id',
        ProjectUpdateEvent: 'project_update_id',
        InventoryUpdateEvent: 'inventory_update_id',
        SystemJobEvent: 'system_job_id',
    }[cls]
    url = ''
    if isinstance(event, JobEvent):
        url = '/api/v2/job_events/{}'.format(event.id)
    if isinstance(event, AdHocCommandEvent):
        url = '/api/v2/ad_hoc_command_events/{}'.format(event.id)
    group = camelcase_to_underscore(cls.__name__) + 's'
    timestamp = event.created.isoformat()
    consumers.emit_channel_notification(
        '-'.join([group, str(getattr(event, relation))]),
        {
            'id': event.id,
            relation.replace('_id', ''): getattr(event, relation),
            'created': timestamp,
            'modified': timestamp,
            'group_name': group,
            'url': url,
            'stdout': event.stdout,
            'counter': event.counter,
            'uuid': event.uuid,
            'parent_uuid': getattr(event, 'parent_uuid', ''),
            'start_line': event.start_line,
            'end_line': event.end_line,
            'event': event.event,
            'event_data': getattr(event, 'event_data', {}),
            'failed': event.failed,
            'changed': event.changed,
            'event_level': getattr(event, 'event_level', ''),
            'play': getattr(event, 'play', ''),
            'role': getattr(event, 'role', ''),
            'task': getattr(event, 'task', ''),
        }
    )

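A quick illustration of how emit_event_detail derives the channel group and payload key for a job event (a sketch only; it assumes camelcase_to_underscore('JobEvent') yields 'job_event', as the helper's name suggests):

cls_name = 'JobEvent'
relation = 'job_id'                        # looked up in the relation dict above
group = 'job_event' + 's'                  # camelcase_to_underscore(cls_name) + 's' -> 'job_events'
channel = '-'.join([group, str(123)])      # 'job_events-123' for an event whose job_id is 123
payload_key = relation.replace('_id', '')  # 'job': the key that carries the related pk in the payload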
class BasePlaybookEvent(CreatedModifiedModel):
    '''
    An event/message logged from a playbook callback for each host.
@@ -63,7 +110,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
    VALID_KEYS = [
        'event', 'event_data', 'playbook', 'play', 'role', 'task', 'created',
        'counter', 'uuid', 'stdout', 'parent_uuid', 'start_line', 'end_line',
        'verbosity'
        'host_id', 'host_name', 'verbosity',
    ]

    class Meta:
@@ -271,37 +318,66 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
|
||||
def _update_from_event_data(self):
|
||||
# Update event model fields from event data.
|
||||
updated_fields = set()
|
||||
event_data = self.event_data
|
||||
res = event_data.get('res', None)
|
||||
if self.event in self.FAILED_EVENTS and not event_data.get('ignore_errors', False):
|
||||
self.failed = True
|
||||
updated_fields.add('failed')
|
||||
if isinstance(res, dict):
|
||||
if res.get('changed', False):
|
||||
self.changed = True
|
||||
updated_fields.add('changed')
|
||||
if self.event == 'playbook_on_stats':
|
||||
try:
|
||||
failures_dict = event_data.get('failures', {})
|
||||
dark_dict = event_data.get('dark', {})
|
||||
self.failed = bool(sum(failures_dict.values()) +
|
||||
sum(dark_dict.values()))
|
||||
updated_fields.add('failed')
|
||||
changed_dict = event_data.get('changed', {})
|
||||
self.changed = bool(sum(changed_dict.values()))
|
||||
updated_fields.add('changed')
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
|
||||
if isinstance(self, JobEvent):
|
||||
hostnames = self._hostnames()
|
||||
self._update_host_summary_from_stats(hostnames)
|
||||
if self.job.inventory:
|
||||
try:
|
||||
self.job.inventory.update_computed_fields()
|
||||
except DatabaseError:
|
||||
logger.exception('Computed fields database error saving event {}'.format(self.pk))
|
||||
|
||||
# find parent links and propagate changed=True and failed=True
|
||||
changed = self.job.job_events.filter(changed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct() # noqa
|
||||
failed = self.job.job_events.filter(failed=True).exclude(parent_uuid=None).only('parent_uuid').values_list('parent_uuid', flat=True).distinct() # noqa
|
||||
|
||||
JobEvent.objects.filter(
|
||||
job_id=self.job_id, uuid__in=changed
|
||||
).update(changed=True)
|
||||
JobEvent.objects.filter(
|
||||
job_id=self.job_id, uuid__in=failed
|
||||
).update(failed=True)
|
||||
|
||||
for field in ('playbook', 'play', 'task', 'role'):
|
||||
value = force_text(event_data.get(field, '')).strip()
|
||||
if value != getattr(self, field):
|
||||
setattr(self, field, value)
|
||||
updated_fields.add(field)
|
||||
return updated_fields
|
||||
analytics_logger.info(
|
||||
'Event data saved.',
|
||||
extra=dict(python_objects=dict(job_event=self))
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from_data(cls, **kwargs):
|
||||
#
|
||||
# ⚠️ D-D-D-DANGER ZONE ⚠️
|
||||
# This function is called by the callback receiver *once* for *every
|
||||
# event* emitted by Ansible as a playbook runs. That means that
|
||||
# changes to this function are _very_ susceptible to introducing
|
||||
# performance regressions (which the user will experience as "my
|
||||
# playbook stdout takes too long to show up"), *especially* code which
|
||||
# might invoke additional database queries per event.
|
||||
#
|
||||
# Proceed with caution!
|
||||
#
|
||||
pk = None
|
||||
for key in ('job_id', 'project_update_id'):
|
||||
if key in kwargs:
|
||||
@@ -325,74 +401,16 @@ class BasePlaybookEvent(CreatedModifiedModel):

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        workflow_job_id = kwargs.pop('workflow_job_id', None)
        job_event = cls.objects.create(**kwargs)
        event = cls(**kwargs)
        if workflow_job_id:
            setattr(job_event, 'workflow_job_id', workflow_job_id)
        analytics_logger.info('Event data saved.', extra=dict(python_objects=dict(job_event=job_event)))
        return job_event
            setattr(event, 'workflow_job_id', workflow_job_id)
        event._update_from_event_data()
        return event
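Note that create_from_data now instantiates with cls(**kwargs) rather than cls.objects.create(**kwargs), so it hands back an unsaved event. A hedged sketch of how a caller could batch those instances (the batch list and the bulk save are assumptions for illustration, not code from this changeset):

# Hypothetical caller-side batching; each dict in `batch` is assumed to hold sanitized event kwargs.
events = [JobEvent.create_from_data(**data) for data in batch]
JobEvent.objects.bulk_create(events)  # one INSERT round trip instead of one per event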
@property
|
||||
def job_verbosity(self):
|
||||
return 0
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
# Update model fields and related objects unless we're only updating
|
||||
# failed/changed flags triggered from a child event.
|
||||
from_parent_update = kwargs.pop('from_parent_update', False)
|
||||
if not from_parent_update:
|
||||
# Update model fields from event data.
|
||||
updated_fields = self._update_from_event_data()
|
||||
for field in updated_fields:
|
||||
if field not in update_fields:
|
||||
update_fields.append(field)
|
||||
|
||||
# Update host related field from host_name.
|
||||
if hasattr(self, 'job') and not self.host_id and self.host_name:
|
||||
if self.job.inventory.kind == 'smart':
|
||||
# optimization to avoid calling inventory.hosts, which
|
||||
# can take a long time to run under some circumstances
|
||||
from awx.main.models.inventory import SmartInventoryMembership
|
||||
membership = SmartInventoryMembership.objects.filter(
|
||||
inventory=self.job.inventory, host__name=self.host_name
|
||||
).first()
|
||||
if membership:
|
||||
host_id = membership.host_id
|
||||
else:
|
||||
host_id = None
|
||||
else:
|
||||
host_qs = self.job.inventory.hosts.filter(name=self.host_name)
|
||||
host_id = host_qs.only('id').values_list('id', flat=True).first()
|
||||
if host_id != self.host_id:
|
||||
self.host_id = host_id
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
super(BasePlaybookEvent, self).save(*args, **kwargs)
|
||||
|
||||
# Update related objects after this event is saved.
|
||||
if hasattr(self, 'job') and not from_parent_update:
|
||||
if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
|
||||
self._update_hosts()
|
||||
if self.parent_uuid:
|
||||
kwargs = {}
|
||||
if self.changed is True:
|
||||
kwargs['changed'] = True
|
||||
if self.failed is True:
|
||||
kwargs['failed'] = True
|
||||
if kwargs:
|
||||
JobEvent.objects.filter(job_id=self.job_id, uuid=self.parent_uuid).update(**kwargs)
|
||||
|
||||
if self.event == 'playbook_on_stats':
|
||||
hostnames = self._hostnames()
|
||||
self._update_host_summary_from_stats(hostnames)
|
||||
try:
|
||||
self.job.inventory.update_computed_fields()
|
||||
except DatabaseError:
|
||||
logger.exception('Computed fields database error saving event {}'.format(self.pk))
|
||||
|
||||
|
||||
|
||||
class JobEvent(BasePlaybookEvent):
|
||||
'''
|
||||
@@ -431,19 +449,6 @@ class JobEvent(BasePlaybookEvent):
|
||||
default='',
|
||||
editable=False,
|
||||
)
|
||||
hosts = models.ManyToManyField(
|
||||
'Host',
|
||||
related_name='job_events',
|
||||
editable=False,
|
||||
)
|
||||
parent = models.ForeignKey(
|
||||
'self',
|
||||
related_name='children',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
)
|
||||
parent_uuid = models.CharField(
|
||||
max_length=1024,
|
||||
default='',
|
||||
@@ -456,38 +461,6 @@ class JobEvent(BasePlaybookEvent):
|
||||
def __str__(self):
|
||||
return u'%s @ %s' % (self.get_event_display2(), self.created.isoformat())
|
||||
|
||||
def _update_from_event_data(self):
|
||||
# Update job event hostname
|
||||
updated_fields = super(JobEvent, self)._update_from_event_data()
|
||||
value = force_text(self.event_data.get('host', '')).strip()
|
||||
if value != getattr(self, 'host_name'):
|
||||
setattr(self, 'host_name', value)
|
||||
updated_fields.add('host_name')
|
||||
return updated_fields
|
||||
|
||||
def _update_hosts(self, extra_host_pks=None):
|
||||
# Update job event hosts m2m from host_name, propagate to parent events.
|
||||
extra_host_pks = set(extra_host_pks or [])
|
||||
hostnames = set()
|
||||
if self.host_name:
|
||||
hostnames.add(self.host_name)
|
||||
if self.event == 'playbook_on_stats':
|
||||
try:
|
||||
for v in self.event_data.values():
|
||||
hostnames.update(v.keys())
|
||||
except AttributeError: # In case event_data or v isn't a dict.
|
||||
pass
|
||||
qs = self.job.inventory.hosts.all()
|
||||
qs = qs.filter(models.Q(name__in=hostnames) | models.Q(pk__in=extra_host_pks))
|
||||
qs = qs.exclude(job_events__pk=self.id).only('id')
|
||||
for host in qs:
|
||||
self.hosts.add(host)
|
||||
if self.parent_uuid:
|
||||
parent = JobEvent.objects.filter(uuid=self.parent_uuid)
|
||||
if parent.exists():
|
||||
parent = parent[0]
|
||||
parent._update_hosts(qs.values_list('id', flat=True))
|
||||
|
||||
def _hostnames(self):
|
||||
hostnames = set()
|
||||
try:
|
||||
@@ -605,6 +578,17 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
|
||||
@classmethod
|
||||
def create_from_data(cls, **kwargs):
|
||||
#
|
||||
# ⚠️ D-D-D-DANGER ZONE ⚠️
|
||||
# This function is called by the callback receiver *once* for *every
|
||||
# event* emitted by Ansible as a playbook runs. That means that
|
||||
# changes to this function are _very_ susceptible to introducing
|
||||
# performance regressions (which the user will experience as "my
|
||||
# playbook stdout takes too long to show up"), *especially* code which
|
||||
# might invoke additional database queries per event.
|
||||
#
|
||||
# Proceed with caution!
|
||||
#
|
||||
# Convert the datetime for the event's creation
|
||||
# appropriately, and include a time zone for it.
|
||||
#
|
||||
@@ -620,12 +604,8 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
|
||||
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
||||
kwargs.pop('workflow_job_id', None)
|
||||
event = cls.objects.create(**kwargs)
|
||||
if isinstance(event, AdHocCommandEvent):
|
||||
analytics_logger.info(
|
||||
'Event data saved.',
|
||||
extra=dict(python_objects=dict(job_event=event))
|
||||
)
|
||||
event = cls(**kwargs)
|
||||
event._update_from_event_data()
|
||||
return event
|
||||
|
||||
def get_event_display(self):
|
||||
@@ -640,10 +620,15 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
def get_host_status_counts(self):
|
||||
return create_host_status_counts(getattr(self, 'event_data', {}))
|
||||
|
||||
def _update_from_event_data(self):
|
||||
pass
|
||||
|
||||
|
||||
class AdHocCommandEvent(BaseCommandEvent):
|
||||
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + ['ad_hoc_command_id', 'event', 'workflow_job_id']
|
||||
VALID_KEYS = BaseCommandEvent.VALID_KEYS + [
|
||||
'ad_hoc_command_id', 'event', 'host_name', 'host_id', 'workflow_job_id'
|
||||
]
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
@@ -719,34 +704,18 @@ class AdHocCommandEvent(BaseCommandEvent):
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:ad_hoc_command_event_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
def _update_from_event_data(self):
|
||||
res = self.event_data.get('res', None)
|
||||
if self.event in self.FAILED_EVENTS:
|
||||
if not self.event_data.get('ignore_errors', False):
|
||||
self.failed = True
|
||||
if 'failed' not in update_fields:
|
||||
update_fields.append('failed')
|
||||
if isinstance(res, dict) and res.get('changed', False):
|
||||
self.changed = True
|
||||
if 'changed' not in update_fields:
|
||||
update_fields.append('changed')
|
||||
self.host_name = self.event_data.get('host', '').strip()
|
||||
if 'host_name' not in update_fields:
|
||||
update_fields.append('host_name')
|
||||
if not self.host_id and self.host_name:
|
||||
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
|
||||
try:
|
||||
host_id = host_qs.only('id').values_list('id', flat=True)
|
||||
if host_id.exists():
|
||||
self.host_id = host_id[0]
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
except (IndexError, AttributeError):
|
||||
pass
|
||||
super(AdHocCommandEvent, self).save(*args, **kwargs)
|
||||
|
||||
analytics_logger.info(
|
||||
'Event data saved.',
|
||||
extra=dict(python_objects=dict(job_event=self))
|
||||
)
|
||||
|
||||
|
||||
class InventoryUpdateEvent(BaseCommandEvent):
|
||||
|
||||
@@ -270,6 +270,11 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
|
||||
.filter(capacity__gt=0, enabled=True)
|
||||
.values_list('hostname', flat=True)))
|
||||
|
||||
def set_default_policy_fields(self):
|
||||
self.policy_instance_list = []
|
||||
self.policy_instance_minimum = 0
|
||||
self.policy_instance_percentage = 0
|
||||
|
||||
|
||||
class TowerScheduleState(SingletonModel):
|
||||
schedule_last_run = models.DateTimeField(auto_now_add=True)
|
||||
@@ -289,6 +294,8 @@ def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs
|
||||
if created or instance.has_policy_changes():
|
||||
if not instance.is_containerized:
|
||||
schedule_policy_task()
|
||||
elif created or instance.is_containerized:
|
||||
instance.set_default_policy_fields()
|
||||
|
||||
|
||||
@receiver(post_save, sender=Instance)
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
# Python
|
||||
import datetime
|
||||
import time
|
||||
import itertools
|
||||
import logging
|
||||
import re
|
||||
import copy
|
||||
@@ -61,6 +60,7 @@ from awx.main.models.notifications import (
|
||||
)
|
||||
from awx.main.models.credential.injectors import _openstack_data
|
||||
from awx.main.utils import _inventory_updates, region_sorting, get_licenser
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate',
|
||||
@@ -122,12 +122,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Total number of groups in this inventory.'),
|
||||
)
|
||||
groups_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Number of groups in this inventory with active failures.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
@@ -338,139 +332,17 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
|
||||
return data
|
||||
|
||||
def update_host_computed_fields(self):
|
||||
'''
|
||||
Update computed fields for all hosts in this inventory.
|
||||
'''
|
||||
hosts_to_update = {}
|
||||
hosts_qs = self.hosts
|
||||
# Define queryset of all hosts with active failures.
|
||||
hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
|
||||
# Find all hosts that need the has_active_failures flag set.
|
||||
hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
|
||||
for host_pk in hosts_to_set.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_active_failures'] = True
|
||||
# Find all hosts that need the has_active_failures flag cleared.
|
||||
hosts_to_clear = hosts_qs.filter(has_active_failures=True).exclude(pk__in=hosts_with_active_failures)
|
||||
for host_pk in hosts_to_clear.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_active_failures'] = False
|
||||
# Define queryset of all hosts with cloud inventory sources.
|
||||
hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
|
||||
# Find all hosts that need the has_inventory_sources flag set.
|
||||
hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
|
||||
for host_pk in hosts_to_set.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_inventory_sources'] = True
|
||||
# Find all hosts that need the has_inventory_sources flag cleared.
|
||||
hosts_to_clear = hosts_qs.filter(has_inventory_sources=True).exclude(pk__in=hosts_with_cloud_inventory)
|
||||
for host_pk in hosts_to_clear.values_list('pk', flat=True):
|
||||
host_updates = hosts_to_update.setdefault(host_pk, {})
|
||||
host_updates['has_inventory_sources'] = False
|
||||
# Now apply updates to hosts where needed (in batches).
|
||||
        all_update_pks = list(hosts_to_update.keys())

        def _chunk(items, chunk_size):
            for i, group in itertools.groupby(enumerate(items), lambda x: x[0] // chunk_size):
                yield (g[1] for g in group)

        for update_pks in _chunk(all_update_pks, 500):
            for host in hosts_qs.filter(pk__in=update_pks):
                host_updates = hosts_to_update[host.pk]
                for field, value in host_updates.items():
                    setattr(host, field, value)
                host.save(update_fields=host_updates.keys())
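For reference, the _chunk helper above (part of the code this diff removes) splits an iterable into fixed-size groups by grouping on index // chunk_size. A self-contained sketch of the same idiom:

import itertools

def chunked(items, chunk_size):
    # Group consecutive (index, item) pairs by index // chunk_size, then drop the indexes.
    for _, group in itertools.groupby(enumerate(items), lambda pair: pair[0] // chunk_size):
        yield [item for _, item in group]

# list(chunked(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]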
def update_group_computed_fields(self):
|
||||
'''
|
||||
Update computed fields for all active groups in this inventory.
|
||||
'''
|
||||
group_children_map = self.get_group_children_map()
|
||||
group_hosts_map = self.get_group_hosts_map()
|
||||
active_host_pks = set(self.hosts.values_list('pk', flat=True))
|
||||
failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
|
||||
# active_group_pks = set(self.groups.values_list('pk', flat=True))
|
||||
failed_group_pks = set() # Update below as we check each group.
|
||||
groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
|
||||
groups_to_update = {}
|
||||
|
||||
# Build list of group pks to check, starting with the groups at the
|
||||
# deepest level within the tree.
|
||||
root_group_pks = set(self.root_groups.values_list('pk', flat=True))
|
||||
group_depths = {} # pk: max_depth
|
||||
|
||||
def update_group_depths(group_pk, current_depth=0):
|
||||
max_depth = group_depths.get(group_pk, -1)
|
||||
# Arbitrarily limit depth to avoid hitting Python recursion limit (which defaults to 1000).
|
||||
if current_depth > 100:
|
||||
return
|
||||
if current_depth > max_depth:
|
||||
group_depths[group_pk] = current_depth
|
||||
for child_pk in group_children_map.get(group_pk, set()):
|
||||
update_group_depths(child_pk, current_depth + 1)
|
||||
for group_pk in root_group_pks:
|
||||
update_group_depths(group_pk)
|
||||
group_pks_to_check = [x[1] for x in sorted([(v,k) for k,v in group_depths.items()], reverse=True)]
|
||||
|
||||
for group_pk in group_pks_to_check:
|
||||
# Get all children and host pks for this group.
|
||||
parent_pks_to_check = set([group_pk])
|
||||
parent_pks_checked = set()
|
||||
child_pks = set()
|
||||
host_pks = set()
|
||||
while parent_pks_to_check:
|
||||
for parent_pk in list(parent_pks_to_check):
|
||||
c_ids = group_children_map.get(parent_pk, set())
|
||||
child_pks.update(c_ids)
|
||||
parent_pks_to_check.remove(parent_pk)
|
||||
parent_pks_checked.add(parent_pk)
|
||||
parent_pks_to_check.update(c_ids - parent_pks_checked)
|
||||
h_ids = group_hosts_map.get(parent_pk, set())
|
||||
host_pks.update(h_ids)
|
||||
# Define updates needed for this group.
|
||||
group_updates = groups_to_update.setdefault(group_pk, {})
|
||||
group_updates.update({
|
||||
'total_hosts': len(active_host_pks & host_pks),
|
||||
'has_active_failures': bool(failed_host_pks & host_pks),
|
||||
'hosts_with_active_failures': len(failed_host_pks & host_pks),
|
||||
'total_groups': len(child_pks),
|
||||
'groups_with_active_failures': len(failed_group_pks & child_pks),
|
||||
'has_inventory_sources': bool(group_pk in groups_with_cloud_pks),
|
||||
})
|
||||
if group_updates['has_active_failures']:
|
||||
failed_group_pks.add(group_pk)
|
||||
|
||||
# Now apply updates to each group as needed (in batches).
|
||||
all_update_pks = list(groups_to_update.keys())
|
||||
for offset in range(0, len(all_update_pks), 500):
|
||||
update_pks = all_update_pks[offset:(offset + 500)]
|
||||
for group in self.groups.filter(pk__in=update_pks):
|
||||
group_updates = groups_to_update[group.pk]
|
||||
for field, value in list(group_updates.items()):
|
||||
if getattr(group, field) != value:
|
||||
setattr(group, field, value)
|
||||
else:
|
||||
group_updates.pop(field)
|
||||
if group_updates:
|
||||
group.save(update_fields=group_updates.keys())
|
||||
|
||||
def update_computed_fields(self, update_groups=True, update_hosts=True):
|
||||
def update_computed_fields(self):
|
||||
'''
|
||||
Update model fields that are computed from database relationships.
|
||||
'''
|
||||
logger.debug("Going to update inventory computed fields, pk={0}".format(self.pk))
|
||||
start_time = time.time()
|
||||
if update_hosts:
|
||||
self.update_host_computed_fields()
|
||||
if update_groups:
|
||||
self.update_group_computed_fields()
|
||||
active_hosts = self.hosts
|
||||
failed_hosts = active_hosts.filter(has_active_failures=True)
|
||||
failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
|
||||
active_groups = self.groups
|
||||
if self.kind == 'smart':
|
||||
active_groups = active_groups.none()
|
||||
failed_groups = active_groups.filter(has_active_failures=True)
|
||||
if self.kind == 'smart':
|
||||
active_inventory_sources = self.inventory_sources.none()
|
||||
else:
|
||||
@@ -481,7 +353,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
'total_hosts': active_hosts.count(),
|
||||
'hosts_with_active_failures': failed_hosts.count(),
|
||||
'total_groups': active_groups.count(),
|
||||
'groups_with_active_failures': failed_groups.count(),
|
||||
'has_inventory_sources': bool(active_inventory_sources.count()),
|
||||
'total_inventory_sources': active_inventory_sources.count(),
|
||||
'inventory_sources_with_failures': failed_inventory_sources.count(),
|
||||
@@ -544,7 +415,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
if (self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and
|
||||
connection.vendor != 'sqlite'):
|
||||
# Minimal update of host_count for smart inventory host filter changes
|
||||
self.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
self.update_computed_fields()
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
@@ -630,18 +501,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
editable=False,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
has_active_failures = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether the last job failed for this host.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether this host was created/updated from any external inventory sources.'),
|
||||
)
|
||||
inventory_sources = models.ManyToManyField(
|
||||
'InventorySource',
|
||||
related_name='hosts',
|
||||
@@ -672,34 +531,6 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:host_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def update_computed_fields(self, update_inventory=True, update_groups=True):
|
||||
'''
|
||||
Update model fields that are computed from database relationships.
|
||||
'''
|
||||
has_active_failures = bool(self.last_job_host_summary and
|
||||
self.last_job_host_summary.failed)
|
||||
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
computed_fields = {
|
||||
'has_active_failures': has_active_failures,
|
||||
'has_inventory_sources': bool(active_inventory_sources.count()),
|
||||
}
|
||||
for field, value in computed_fields.items():
|
||||
if getattr(self, field) != value:
|
||||
setattr(self, field, value)
|
||||
else:
|
||||
computed_fields.pop(field)
|
||||
if computed_fields:
|
||||
self.save(update_fields=computed_fields.keys())
|
||||
# Groups and inventory may also need to be updated when host fields
|
||||
# change.
|
||||
# NOTE: I think this is no longer needed
|
||||
# if update_groups:
|
||||
# for group in self.all_groups:
|
||||
# group.update_computed_fields()
|
||||
# if update_inventory:
|
||||
# self.inventory.update_computed_fields(update_groups=False,
|
||||
# update_hosts=False)
|
||||
# Rebuild summary fields cache
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
|
||||
@property
|
||||
@@ -754,6 +585,13 @@ class Host(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
update_host_smart_inventory_memberships.delay()
|
||||
connection.on_commit(on_commit)
|
||||
|
||||
def clean_name(self):
|
||||
try:
|
||||
sanitize_jinja(self.name)
|
||||
except ValueError as e:
|
||||
raise ValidationError(str(e) + ": {}".format(self.name))
|
||||
return self.name
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
super(Host, self).save(*args, **kwargs)
|
||||
@@ -807,42 +645,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
blank=True,
|
||||
help_text=_('Hosts associated directly with this group.'),
|
||||
)
|
||||
total_hosts = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Total number of hosts directly or indirectly in this group.'),
|
||||
)
|
||||
has_active_failures = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether this group has any hosts with active failures.'),
|
||||
)
|
||||
hosts_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Number of hosts in this group with active failures.'),
|
||||
)
|
||||
total_groups = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Total number of child groups contained within this group.'),
|
||||
)
|
||||
groups_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Number of child groups within this group that have active failures.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. '
|
||||
'Flag indicating whether this group was created/updated from any external inventory sources.'),
|
||||
)
|
||||
inventory_sources = models.ManyToManyField(
|
||||
'InventorySource',
|
||||
related_name='groups',
|
||||
@@ -917,32 +719,6 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
mark_actual()
|
||||
activity_stream_delete(None, self)
|
||||
|
||||
def update_computed_fields(self):
|
||||
'''
|
||||
Update model fields that are computed from database relationships.
|
||||
'''
|
||||
active_hosts = self.all_hosts
|
||||
failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
|
||||
active_groups = self.all_children
|
||||
# FIXME: May not be accurate unless we always update groups depth-first.
|
||||
failed_groups = active_groups.filter(has_active_failures=True)
|
||||
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
computed_fields = {
|
||||
'total_hosts': active_hosts.count(),
|
||||
'has_active_failures': bool(failed_hosts.count()),
|
||||
'hosts_with_active_failures': failed_hosts.count(),
|
||||
'total_groups': active_groups.count(),
|
||||
'groups_with_active_failures': failed_groups.count(),
|
||||
'has_inventory_sources': bool(active_inventory_sources.count()),
|
||||
}
|
||||
for field, value in computed_fields.items():
|
||||
if getattr(self, field) != value:
|
||||
setattr(self, field, value)
|
||||
else:
|
||||
computed_fields.pop(field)
|
||||
if computed_fields:
|
||||
self.save(update_fields=computed_fields.keys())
|
||||
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
|
||||
def get_all_parents(self, except_pks=None):
|
||||
@@ -1548,7 +1324,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
self.update()
|
||||
if not getattr(_inventory_updates, 'is_updating', False):
|
||||
if self.inventory is not None:
|
||||
self.inventory.update_computed_fields(update_groups=False, update_hosts=False)
|
||||
self.inventory.update_computed_fields()
|
||||
|
||||
def _get_current_status(self):
|
||||
if self.source:
|
||||
@@ -2036,9 +1812,25 @@ class azure_rm(PluginFileInjector):
        for key, loc in old_filterables:
            value = source_vars.get(key, None)
            if value and isinstance(value, str):
                user_filters.append('{} not in {}'.format(
                    loc, value.split(',')
                ))
                # tags can be list of key:value pairs
                # e.g. 'Creator:jmarshall, peanutbutter:jelly'
                # or tags can be a list of keys
                # e.g. 'Creator, peanutbutter'
                if key == "tags":
                    # grab each key value pair
                    for kvpair in value.split(','):
                        # split into key and value
                        kv = kvpair.split(':')
                        # filter out any host that does not have key
                        # in their tags.keys() variable
                        user_filters.append('"{}" not in tags.keys()'.format(kv[0].strip()))
                        # if a value is provided, check that the key:value pair matches
                        if len(kv) > 1:
                            user_filters.append('tags["{}"] != "{}"'.format(kv[0].strip(), kv[1].strip()))
                else:
                    user_filters.append('{} not in {}'.format(
                        loc, value.split(',')
                    ))
        if user_filters:
            ret.setdefault('exclude_host_filters', [])
            ret['exclude_host_filters'].extend(user_filters)
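A worked example of the new tags handling (the sample value is invented; only the string munging from the block above is exercised):

user_filters = []
for kvpair in 'Creator:jmarshall, peanutbutter:jelly'.split(','):
    kv = kvpair.split(':')
    user_filters.append('"{}" not in tags.keys()'.format(kv[0].strip()))
    if len(kv) > 1:
        user_filters.append('tags["{}"] != "{}"'.format(kv[0].strip(), kv[1].strip()))
# user_filters ->
# ['"Creator" not in tags.keys()', 'tags["Creator"] != "jmarshall"',
#  '"peanutbutter" not in tags.keys()', 'tags["peanutbutter"] != "jelly"']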
@@ -2592,6 +2384,9 @@ class satellite6(PluginFileInjector):
|
||||
group_patterns = '[]'
|
||||
group_prefix = 'foreman_'
|
||||
want_hostcollections = 'False'
|
||||
want_ansible_ssh_host = 'False'
|
||||
rich_params = 'False'
|
||||
want_facts = 'True'
|
||||
foreman_opts = dict(inventory_update.source_vars_dict.items())
|
||||
foreman_opts.setdefault('ssl_verify', 'False')
|
||||
for k, v in foreman_opts.items():
|
||||
@@ -2601,6 +2396,12 @@ class satellite6(PluginFileInjector):
|
||||
group_prefix = v
|
||||
elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
|
||||
want_hostcollections = v
|
||||
elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
want_ansible_ssh_host = v
|
||||
elif k == 'satellite6_rich_params' and isinstance(v, bool):
|
||||
rich_params = v
|
||||
elif k == 'satellite6_want_facts' and isinstance(v, bool):
|
||||
want_facts = v
|
||||
else:
|
||||
cp.set(section, k, str(v))
|
||||
|
||||
@@ -2612,9 +2413,11 @@ class satellite6(PluginFileInjector):
|
||||
section = 'ansible'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'group_patterns', group_patterns)
|
||||
cp.set(section, 'want_facts', 'True')
|
||||
cp.set(section, 'want_facts', str(want_facts))
|
||||
cp.set(section, 'want_hostcollections', str(want_hostcollections))
|
||||
cp.set(section, 'group_prefix', group_prefix)
|
||||
cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
|
||||
cp.set(section, 'rich_params', str(rich_params))
|
||||
|
||||
section = 'cache'
|
||||
cp.add_section(section)
|
||||
|
||||
@@ -13,6 +13,7 @@ from urllib.parse import urljoin
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.db import models
|
||||
#from django.core.cache import cache
|
||||
from django.utils.encoding import smart_str
|
||||
@@ -28,7 +29,7 @@ from awx.api.versioning import reverse
|
||||
from awx.main.models.base import (
|
||||
BaseModel, CreatedModifiedModel,
|
||||
prevent_search, accepts_json,
|
||||
JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
|
||||
JOB_TYPE_CHOICES, NEW_JOB_TYPE_CHOICES, VERBOSITY_CHOICES,
|
||||
VarsDictProperty
|
||||
)
|
||||
from awx.main.models.events import JobEvent, SystemJobEvent
|
||||
@@ -204,6 +205,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
app_label = 'main'
|
||||
ordering = ('name',)
|
||||
|
||||
job_type = models.CharField(
|
||||
max_length=64,
|
||||
choices=NEW_JOB_TYPE_CHOICES,
|
||||
default='run',
|
||||
)
|
||||
host_config_key = prevent_search(models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
@@ -293,6 +299,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
def resources_needed_to_start(self):
|
||||
return [fd for fd in ['project', 'inventory'] if not getattr(self, '{}_id'.format(fd))]
|
||||
|
||||
def clean_forks(self):
|
||||
if settings.MAX_FORKS > 0 and self.forks > settings.MAX_FORKS:
|
||||
raise ValidationError(_(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.'))
|
||||
return self.forks
|
||||
|
||||
def create_job(self, **kwargs):
|
||||
'''
|
||||
Create a new job based on this template.
|
||||
@@ -634,7 +645,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
else:
|
||||
# If for some reason we can't count the hosts then lets assume the impact as forks
|
||||
if self.inventory is not None:
|
||||
count_hosts = self.inventory.hosts.count()
|
||||
count_hosts = self.inventory.total_hosts
|
||||
if self.job_slice_count > 1:
|
||||
# Integer division intentional
|
||||
count_hosts = (count_hosts + self.job_slice_count - self.job_slice_number) // self.job_slice_count
|
||||
@@ -900,6 +911,9 @@ class LaunchTimeConfigBase(BaseModel):
|
||||
data[prompt_name] = self.display_extra_vars()
|
||||
else:
|
||||
data[prompt_name] = self.extra_vars
|
||||
# Depending on model, field type may save and return as string
|
||||
if isinstance(data[prompt_name], str):
|
||||
data[prompt_name] = parse_yaml_or_json(data[prompt_name])
|
||||
if self.survey_passwords and not display:
|
||||
data['survey_passwords'] = self.survey_passwords
|
||||
else:
|
||||
@@ -1057,7 +1071,7 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
processed = models.PositiveIntegerField(default=0, editable=False)
|
||||
rescued = models.PositiveIntegerField(default=0, editable=False)
|
||||
skipped = models.PositiveIntegerField(default=0, editable=False)
|
||||
failed = models.BooleanField(default=False, editable=False)
|
||||
failed = models.BooleanField(default=False, editable=False, db_index=True)
|
||||
|
||||
def __str__(self):
|
||||
host = getattr_dne(self, 'host')
|
||||
@@ -1092,7 +1106,6 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
update_fields.append('last_job_host_summary_id')
|
||||
if update_fields:
|
||||
self.host.save(update_fields=update_fields)
|
||||
#self.host.update_computed_fields()
|
||||
|
||||
|
||||
class SystemJobOptions(BaseModel):
|
||||
@@ -1103,8 +1116,8 @@ class SystemJobOptions(BaseModel):
|
||||
SYSTEM_JOB_TYPE = [
|
||||
('cleanup_jobs', _('Remove jobs older than a certain number of days')),
|
||||
('cleanup_activitystream', _('Remove activity stream entries older than a certain number of days')),
|
||||
('clearsessions', _('Removes expired browser sessions from the database')),
|
||||
('cleartokens', _('Removes expired OAuth 2 access tokens and refresh tokens'))
|
||||
('cleanup_sessions', _('Removes expired browser sessions from the database')),
|
||||
('cleanup_tokens', _('Removes expired OAuth 2 access tokens and refresh tokens'))
|
||||
]
|
||||
|
||||
class Meta:
|
||||
@@ -1179,18 +1192,19 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):
|
||||
for key in unallowed_vars:
|
||||
rejected[key] = data.pop(key)
|
||||
|
||||
if 'days' in data:
|
||||
try:
|
||||
if type(data['days']) is bool:
|
||||
raise ValueError
|
||||
if float(data['days']) != int(data['days']):
|
||||
raise ValueError
|
||||
days = int(data['days'])
|
||||
if days < 0:
|
||||
raise ValueError
|
||||
except ValueError:
|
||||
errors_list.append(_("days must be a positive integer."))
|
||||
rejected['days'] = data.pop('days')
|
||||
if self.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
|
||||
if 'days' in data:
|
||||
try:
|
||||
if isinstance(data['days'], (bool, type(None))):
|
||||
raise ValueError
|
||||
if float(data['days']) != int(data['days']):
|
||||
raise ValueError
|
||||
days = int(data['days'])
|
||||
if days < 0:
|
||||
raise ValueError
|
||||
except ValueError:
|
||||
errors_list.append(_("days must be a positive integer."))
|
||||
rejected['days'] = data.pop('days')
|
||||
|
||||
if errors_list:
|
||||
errors['extra_vars'] = errors_list
|
||||
|
||||
@@ -73,7 +73,7 @@ class NotificationTemplate(CommonModelNameNotUnique):
|
||||
notification_configuration = prevent_search(JSONField(blank=False))
|
||||
|
||||
def default_messages():
|
||||
return {'started': None, 'success': None, 'error': None}
|
||||
return {'started': None, 'success': None, 'error': None, 'workflow_approval': None}
|
||||
|
||||
messages = JSONField(
|
||||
null=True,
|
||||
@@ -92,25 +92,6 @@ class NotificationTemplate(CommonModelNameNotUnique):
|
||||
def get_message(self, condition):
|
||||
return self.messages.get(condition, {})
|
||||
|
||||
def build_notification_message(self, event_type, context):
|
||||
env = sandbox.ImmutableSandboxedEnvironment()
|
||||
templates = self.get_message(event_type)
|
||||
msg_template = templates.get('message', {})
|
||||
|
||||
try:
|
||||
notification_subject = env.from_string(msg_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
notification_subject = ''
|
||||
|
||||
|
||||
msg_body = templates.get('body', {})
|
||||
try:
|
||||
notification_body = env.from_string(msg_body).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
notification_body = ''
|
||||
|
||||
return (notification_subject, notification_body)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:notification_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -128,19 +109,34 @@ class NotificationTemplate(CommonModelNameNotUnique):
|
||||
old_messages = old_nt.messages
|
||||
new_messages = self.messages
|
||||
|
||||
def merge_messages(local_old_messages, local_new_messages, local_event):
|
||||
if local_new_messages.get(local_event, {}) and local_old_messages.get(local_event, {}):
|
||||
local_old_event_msgs = local_old_messages[local_event]
|
||||
local_new_event_msgs = local_new_messages[local_event]
|
||||
for msg_type in ['message', 'body']:
|
||||
if msg_type not in local_new_event_msgs and local_old_event_msgs.get(msg_type, None):
|
||||
local_new_event_msgs[msg_type] = local_old_event_msgs[msg_type]
|
||||
if old_messages is not None and new_messages is not None:
|
||||
for event in ['started', 'success', 'error']:
|
||||
for event in ('started', 'success', 'error', 'workflow_approval'):
|
||||
if not new_messages.get(event, {}) and old_messages.get(event, {}):
|
||||
new_messages[event] = old_messages[event]
|
||||
continue
|
||||
if new_messages.get(event, {}) and old_messages.get(event, {}):
|
||||
old_event_msgs = old_messages[event]
|
||||
new_event_msgs = new_messages[event]
|
||||
for msg_type in ['message', 'body']:
|
||||
if msg_type not in new_event_msgs and old_event_msgs.get(msg_type, None):
|
||||
new_event_msgs[msg_type] = old_event_msgs[msg_type]
|
||||
|
||||
if event == 'workflow_approval' and old_messages.get('workflow_approval', None):
|
||||
new_messages.setdefault('workflow_approval', {})
|
||||
for subevent in ('running', 'approved', 'timed_out', 'denied'):
|
||||
old_wfa_messages = old_messages['workflow_approval']
|
||||
new_wfa_messages = new_messages['workflow_approval']
|
||||
if not new_wfa_messages.get(subevent, {}) and old_wfa_messages.get(subevent, {}):
|
||||
new_wfa_messages[subevent] = old_wfa_messages[subevent]
|
||||
continue
|
||||
if old_wfa_messages:
|
||||
merge_messages(old_wfa_messages, new_wfa_messages, subevent)
|
||||
else:
|
||||
merge_messages(old_messages, new_messages, event)
|
||||
new_messages.setdefault(event, None)
|
||||
|
||||
|
||||
for field in filter(lambda x: self.notification_class.init_parameters[x]['type'] == "password",
|
||||
self.notification_class.init_parameters):
|
||||
if self.notification_configuration[field].startswith("$encrypted$"):
|
||||
@@ -169,12 +165,12 @@ class NotificationTemplate(CommonModelNameNotUnique):
|
||||
def recipients(self):
|
||||
return self.notification_configuration[self.notification_class.recipient_parameter]
|
||||
|
||||
def generate_notification(self, subject, message):
|
||||
def generate_notification(self, msg, body):
|
||||
notification = Notification(notification_template=self,
|
||||
notification_type=self.notification_type,
|
||||
recipients=smart_str(self.recipients),
|
||||
subject=subject,
|
||||
body=message)
|
||||
subject=msg,
|
||||
body=body)
|
||||
notification.save()
|
||||
return notification
|
||||
|
||||
@@ -273,11 +269,12 @@ class JobNotificationMixin(object):
|
||||
'timeout', 'use_fact_cache', 'launch_type', 'status', 'failed', 'started', 'finished',
|
||||
'elapsed', 'job_explanation', 'execution_node', 'controller_node', 'allow_simultaneous',
|
||||
'scm_revision', 'diff_mode', 'job_slice_number', 'job_slice_count', 'custom_virtualenv',
|
||||
'approval_status', 'approval_node_name', 'workflow_url',
|
||||
{'host_status_counts': ['skipped', 'ok', 'changed', 'failures', 'dark']},
|
||||
{'playbook_counts': ['play_count', 'task_count']},
|
||||
{'summary_fields': [{'inventory': ['id', 'name', 'description', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'groups_with_active_failures', 'has_inventory_sources',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources', 'inventory_sources_with_failures',
|
||||
'organization_id', 'kind']},
|
||||
{'project': ['id', 'name', 'description', 'status', 'scm_type']},
|
||||
@@ -330,7 +327,6 @@ class JobNotificationMixin(object):
|
||||
'username': 'admin'},
|
||||
'instance_group': {'id': 1, 'name': 'tower'},
|
||||
'inventory': {'description': 'Sample inventory description',
|
||||
'groups_with_active_failures': 0,
|
||||
'has_active_failures': False,
|
||||
'has_inventory_sources': False,
|
||||
'hosts_with_active_failures': 0,
|
||||
@@ -370,7 +366,10 @@ class JobNotificationMixin(object):
|
||||
'verbosity': 0},
|
||||
'job_friendly_name': 'Job',
|
||||
'url': 'https://towerhost/#/jobs/playbook/1010',
|
||||
'job_summary_dict': """{'url': 'https://towerhost/$/jobs/playbook/13',
|
||||
'approval_status': 'approved',
|
||||
'approval_node_name': 'Approve Me',
|
||||
'workflow_url': 'https://towerhost/#/workflows/1010',
|
||||
'job_metadata': """{'url': 'https://towerhost/$/jobs/playbook/13',
|
||||
'traceback': '',
|
||||
'status': 'running',
|
||||
'started': '2019-08-07T21:46:38.362630+00:00',
|
||||
@@ -389,14 +388,14 @@ class JobNotificationMixin(object):
|
||||
return context
|
||||
|
||||
def context(self, serialized_job):
|
||||
"""Returns a context that can be used for rendering notification messages.
|
||||
Context contains whitelisted content retrieved from a serialized job object
|
||||
"""Returns a dictionary that can be used for rendering notification messages.
|
||||
The context will contain whitelisted content retrieved from a serialized job object
|
||||
(see JobNotificationMixin.JOB_FIELDS_WHITELIST), the job's friendly name,
|
||||
and a url to the job run."""
|
||||
context = {'job': {},
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_summary_dict': json.dumps(self.notification_data(), indent=4)}
|
||||
'job_metadata': json.dumps(self.notification_data(), indent=4)}
|
||||
|
||||
def build_context(node, fields, whitelisted_fields):
|
||||
for safe_field in whitelisted_fields:
|
||||
@@ -434,32 +433,33 @@ class JobNotificationMixin(object):
|
||||
context = self.context(job_serialization)
|
||||
|
||||
msg_template = body_template = None
|
||||
msg = body = ''
|
||||
|
||||
# Use custom template if available
|
||||
if nt.messages:
|
||||
templates = nt.messages.get(self.STATUS_TO_TEMPLATE_TYPE[status], {}) or {}
|
||||
msg_template = templates.get('message', {})
|
||||
body_template = templates.get('body', {})
|
||||
template = nt.messages.get(self.STATUS_TO_TEMPLATE_TYPE[status], {}) or {}
|
||||
msg_template = template.get('message', None)
|
||||
body_template = template.get('body', None)
|
||||
# If custom template not provided, look up default template
|
||||
default_template = nt.notification_class.default_messages[self.STATUS_TO_TEMPLATE_TYPE[status]]
|
||||
if not msg_template:
|
||||
msg_template = default_template.get('message', None)
|
||||
if not body_template:
|
||||
body_template = default_template.get('body', None)
|
||||
|
||||
if msg_template:
|
||||
try:
|
||||
notification_subject = env.from_string(msg_template).render(**context)
|
||||
msg = env.from_string(msg_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
notification_subject = ''
|
||||
else:
|
||||
notification_subject = u"{} #{} '{}' {}: {}".format(self.get_notification_friendly_name(),
|
||||
self.id,
|
||||
self.name,
|
||||
status,
|
||||
self.get_ui_url())
|
||||
notification_body = self.notification_data()
|
||||
notification_body['friendly_name'] = self.get_notification_friendly_name()
|
||||
msg = ''
|
||||
|
||||
if body_template:
|
||||
try:
|
||||
notification_body['body'] = env.from_string(body_template).render(**context)
|
||||
body = env.from_string(body_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
notification_body['body'] = ''
|
||||
body = ''
|
||||
|
||||
return (notification_subject, notification_body)
|
||||
return (msg, body)
|
||||
|
||||
def send_notification_templates(self, status):
|
||||
from awx.main.tasks import send_notifications # avoid circular import
|
||||
@@ -475,16 +475,13 @@ class JobNotificationMixin(object):
|
||||
return
|
||||
|
||||
for nt in set(notification_templates.get(self.STATUS_TO_TEMPLATE_TYPE[status], [])):
|
||||
try:
|
||||
(notification_subject, notification_body) = self.build_notification_message(nt, status)
|
||||
except AttributeError:
|
||||
raise NotImplementedError("build_notification_message() does not exist" % status)
|
||||
(msg, body) = self.build_notification_message(nt, status)
|
||||
|
||||
# Use kwargs to force late-binding
|
||||
# https://stackoverflow.com/a/3431699/10669572
|
||||
def send_it(local_nt=nt, local_subject=notification_subject, local_body=notification_body):
|
||||
def send_it(local_nt=nt, local_msg=msg, local_body=body):
|
||||
def _func():
|
||||
send_notifications.delay([local_nt.generate_notification(local_subject, local_body).id],
|
||||
send_notifications.delay([local_nt.generate_notification(local_msg, local_body).id],
|
||||
job_id=self.id)
|
||||
return _func
|
||||
connection.on_commit(send_it())
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# Python
|
||||
import logging
|
||||
import re
|
||||
|
||||
# Django
|
||||
@@ -22,6 +23,9 @@ DATA_URI_RE = re.compile(r'.*') # FIXME
|
||||
__all__ = ['OAuth2AccessToken', 'OAuth2Application']
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.models.oauth')
|
||||
|
||||
|
||||
class OAuth2Application(AbstractApplication):
|
||||
|
||||
class Meta:
|
||||
@@ -121,14 +125,22 @@ class OAuth2AccessToken(AbstractAccessToken):
        valid = super(OAuth2AccessToken, self).is_valid(scopes)
        if valid:
            self.last_used = now()
            connection.on_commit(lambda: self.save(update_fields=['last_used']))

            def _update_last_used():
                if OAuth2AccessToken.objects.filter(pk=self.pk).exists():
                    self.save(update_fields=['last_used'])
            connection.on_commit(_update_last_used)
        return valid

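The replacement callback re-checks that the token row still exists before writing last_used, so the deferred save can no longer resurrect a token deleted earlier in the same transaction. A generic sketch of the pattern (model and field names are placeholders, not AWX code):

from django.db import connection

def touch_last_used(instance, field='last_used'):
    def _update():
        # Skip the write if the row disappeared before the transaction committed.
        if type(instance).objects.filter(pk=instance.pk).exists():
            instance.save(update_fields=[field])
    connection.on_commit(_update)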
def save(self, *args, **kwargs):
|
||||
def validate_external_users(self):
|
||||
if self.user and settings.ALLOW_OAUTH2_FOR_EXTERNAL_USERS is False:
|
||||
external_account = get_external_account(self.user)
|
||||
if external_account is not None:
|
||||
raise oauth2.AccessDeniedError(_(
|
||||
'OAuth2 Tokens cannot be created by users associated with an external authentication provider ({})'
|
||||
).format(external_account))
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
if not self.pk:
|
||||
self.validate_external_users()
|
||||
super(OAuth2AccessToken, self).save(*args, **kwargs)
|
||||
|
||||
@@ -483,6 +483,12 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
choices=PROJECT_UPDATE_JOB_TYPE_CHOICES,
|
||||
default='check',
|
||||
)
|
||||
job_tags = models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
default='',
|
||||
help_text=_('Parts of the project update playbook that will be run.'),
|
||||
)
|
||||
scm_revision = models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
@@ -587,3 +593,24 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
if not selected_groups:
|
||||
return self.global_instance_groups
|
||||
return selected_groups
|
||||
|
||||
    def save(self, *args, **kwargs):
        added_update_fields = []
        if not self.job_tags:
            job_tags = ['update_{}'.format(self.scm_type)]
            if self.job_type == 'run':
                job_tags.append('install_roles')
                job_tags.append('install_collections')
            self.job_tags = ','.join(job_tags)
            added_update_fields.append('job_tags')
        if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':
            self.job_tags = ','.join([self.job_tags, 'delete'])
            added_update_fields.append('job_tags')
        elif (not self.scm_delete_on_update) and 'delete' in self.job_tags:
            job_tags = self.job_tags.split(',')
            job_tags.remove('delete')
            self.job_tags = ','.join(job_tags)
            added_update_fields.append('job_tags')
        if 'update_fields' in kwargs:
            kwargs['update_fields'].extend(added_update_fields)
        return super(ProjectUpdate, self).save(*args, **kwargs)

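To make the tag composition concrete, a small sketch that mirrors the branch for an initially empty job_tags field (inputs are made-up examples; the pre-existing job_tags cases are omitted):

def compose_job_tags(scm_type, job_type, scm_delete_on_update):
    tags = ['update_{}'.format(scm_type)]
    if job_type == 'run':
        tags += ['install_roles', 'install_collections']
    if scm_delete_on_update and job_type == 'check':
        tags.append('delete')
    return ','.join(tags)

# compose_job_tags('git', 'run', False)  -> 'update_git,install_roles,install_collections'
# compose_job_tags('git', 'check', True) -> 'update_git,delete'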
@@ -623,6 +623,11 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
editable=False,
|
||||
help_text=_("The date and time the job was queued for starting."),
|
||||
)
|
||||
dependencies_processed = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_("If True, the task manager has already processed potential dependencies for this job.")
|
||||
)
|
||||
finished = models.DateTimeField(
|
||||
null=True,
|
||||
default=None,
|
||||
@@ -630,6 +635,13 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
help_text=_("The date and time the job finished execution."),
|
||||
db_index=True,
|
||||
)
|
||||
canceled_on = models.DateTimeField(
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
help_text=_("The date and time when the cancel request was sent."),
|
||||
db_index=True,
|
||||
)
|
||||
elapsed = models.DecimalField(
|
||||
max_digits=12,
|
||||
decimal_places=3,
|
||||
@@ -833,7 +845,12 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
self.unified_job_template = self._get_parent_instance()
|
||||
if 'unified_job_template' not in update_fields:
|
||||
update_fields.append('unified_job_template')
|
||||
|
||||
|
||||
if self.cancel_flag and not self.canceled_on:
|
||||
# Record the 'canceled' time.
|
||||
self.canceled_on = now()
|
||||
if 'canceled_on' not in update_fields:
|
||||
update_fields.append('canceled_on')
|
||||
# Okay; we're done. Perform the actual save.
|
||||
result = super(UnifiedJob, self).save(*args, **kwargs)
|
||||
|
||||
@@ -997,6 +1014,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
dir=settings.JOBOUTPUT_ROOT,
|
||||
encoding='utf-8'
|
||||
)
|
||||
from awx.main.tasks import purge_old_stdout_files # circular import
|
||||
purge_old_stdout_files.apply_async()
|
||||
|
||||
# Before the addition of event-based stdout, older versions of
|
||||
# awx stored stdout as raw text blobs in a certain database column
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import json
|
||||
import logging
|
||||
from copy import copy
|
||||
from urllib.parse import urljoin
|
||||
@@ -16,6 +17,9 @@ from django.core.exceptions import ObjectDoesNotExist
|
||||
# Django-CRUM
|
||||
from crum import get_current_user
|
||||
|
||||
from jinja2 import sandbox
|
||||
from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import (prevent_search, accepts_json, UnifiedJobTemplate,
|
||||
@@ -75,6 +79,11 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
|
||||
symmetrical=False,
|
||||
related_name='%(class)ss_always',
|
||||
)
|
||||
all_parents_must_converge = models.BooleanField(
|
||||
default=False,
|
||||
help_text=_("If enabled then the node will only run if all of the parent nodes "
|
||||
"have met the criteria to reach this node")
|
||||
)
|
||||
unified_job_template = models.ForeignKey(
|
||||
'UnifiedJobTemplate',
|
||||
related_name='%(class)ss',
|
||||
@@ -98,7 +107,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
|
||||
'''
|
||||
return ['workflow_job', 'unified_job_template',
|
||||
'extra_data', 'survey_passwords',
|
||||
'inventory', 'credentials', 'char_prompts']
|
||||
'inventory', 'credentials', 'char_prompts', 'all_parents_must_converge']
|
||||
|
||||
def create_workflow_job_node(self, **kwargs):
|
||||
'''
|
||||
@@ -126,7 +135,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
|
||||
FIELDS_TO_PRESERVE_AT_COPY = [
|
||||
'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
|
||||
'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
|
||||
'char_prompts'
|
||||
'char_prompts', 'all_parents_must_converge'
|
||||
]
|
||||
REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']
|
||||
|
||||
@@ -763,22 +772,45 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
|
||||
connection.on_commit(send_it())
|
||||
|
||||
def build_approval_notification_message(self, nt, approval_status):
|
||||
subject = []
|
||||
workflow_url = urljoin(settings.TOWER_URL_BASE, '/#/workflows/{}'.format(self.workflow_job.id))
|
||||
subject.append(('The approval node "{}"').format(self.workflow_approval_template.name))
|
||||
if approval_status == 'running':
|
||||
subject.append(('needs review. This node can be viewed at: {}').format(workflow_url))
|
||||
if approval_status == 'approved':
|
||||
subject.append(('was approved. {}').format(workflow_url))
|
||||
if approval_status == 'timed_out':
|
||||
subject.append(('has timed out. {}').format(workflow_url))
|
||||
elif approval_status == 'denied':
|
||||
subject.append(('was denied. {}').format(workflow_url))
|
||||
subject = " ".join(subject)
|
||||
body = self.notification_data()
|
||||
body['body'] = subject
|
||||
env = sandbox.ImmutableSandboxedEnvironment()
|
||||
|
||||
return subject, body
|
||||
context = self.context(approval_status)
|
||||
|
||||
msg_template = body_template = None
|
||||
msg = body = ''
|
||||
|
||||
# Use custom template if available
|
||||
if nt.messages and nt.messages.get('workflow_approval', None):
|
||||
template = nt.messages['workflow_approval'].get(approval_status, {})
|
||||
msg_template = template.get('message', None)
|
||||
body_template = template.get('body', None)
|
||||
# If custom template not provided, look up default template
|
||||
default_template = nt.notification_class.default_messages['workflow_approval'][approval_status]
|
||||
if not msg_template:
|
||||
msg_template = default_template.get('message', None)
|
||||
if not body_template:
|
||||
body_template = default_template.get('body', None)
|
||||
|
||||
if msg_template:
|
||||
try:
|
||||
msg = env.from_string(msg_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
msg = ''
|
||||
|
||||
if body_template:
|
||||
try:
|
||||
body = env.from_string(body_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
body = ''
|
||||
|
||||
return (msg, body)
|
||||
|
||||
def context(self, approval_status):
|
||||
workflow_url = urljoin(settings.TOWER_URL_BASE, '/#/workflows/{}'.format(self.workflow_job.id))
|
||||
return {'approval_status': approval_status,
|
||||
'approval_node_name': self.workflow_approval_template.name,
|
||||
'workflow_url': workflow_url,
|
||||
'job_metadata': json.dumps(self.notification_data(), indent=4)}
|
||||
|
||||
@property
|
||||
def workflow_job_template(self):
|
||||
|
||||
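The rewritten build_approval_notification_message renders the message and body templates in a Jinja2 ImmutableSandboxedEnvironment and swallows template errors rather than failing the notification. A standalone sketch of that rendering step, assuming only that jinja2 is installed; the template string and context values below are illustrative, mirroring the keys shown above:

from jinja2 import sandbox
from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError

def render_notification(msg_template, context):
    # The sandboxed environment keeps templates from reaching unsafe attributes.
    env = sandbox.ImmutableSandboxedEnvironment()
    try:
        return env.from_string(msg_template).render(**context)
    except (TemplateSyntaxError, UndefinedError, SecurityError):
        # A broken custom template degrades to an empty message instead of an error.
        return ''

context = {
    'approval_status': 'approved',
    'approval_node_name': 'Deploy gate',
    'workflow_url': 'https://tower.example.com/#/workflows/42',
    'job_metadata': '{}',
}
print(render_notification(
    'The approval node "{{ approval_node_name }}" was approved. {{ workflow_url }}',
    context))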
@@ -1,21 +1,10 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import json
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.core.mail.backends.base import BaseEmailBackend
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
|
||||
class AWXBaseEmailBackend(BaseEmailBackend):
|
||||
|
||||
def format_body(self, body):
|
||||
if "body" in body:
|
||||
body_actual = body['body']
|
||||
else:
|
||||
body_actual = smart_text(_("{} #{} had status {}, view details at {}\n\n").format(
|
||||
body['friendly_name'], body['id'], body['status'], body['url'])
|
||||
)
|
||||
body_actual += json.dumps(body, indent=4)
|
||||
return body_actual
|
||||
return body
|
||||
|
||||
awx/main/notifications/custom_notification_base.py (new file, 29 lines)
@@ -0,0 +1,29 @@
# Copyright (c) 2019 Ansible, Inc.
# All Rights Reserved.


class CustomNotificationBase(object):
    DEFAULT_MSG = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
    DEFAULT_BODY = "{{ job_friendly_name }} #{{ job.id }} had status {{ job.status }}, view details at {{ url }}\n\n{{ job_metadata }}"

    DEFAULT_APPROVAL_RUNNING_MSG = 'The approval node "{{ approval_node_name }}" needs review. This node can be viewed at: {{ workflow_url }}'
    DEFAULT_APPROVAL_RUNNING_BODY = ('The approval node "{{ approval_node_name }}" needs review. '
                                     'This approval node can be viewed at: {{ workflow_url }}\n\n{{ job_metadata }}')

    DEFAULT_APPROVAL_APPROVED_MSG = 'The approval node "{{ approval_node_name }}" was approved. {{ workflow_url }}'
    DEFAULT_APPROVAL_APPROVED_BODY = 'The approval node "{{ approval_node_name }}" was approved. {{ workflow_url }}\n\n{{ job_metadata }}'

    DEFAULT_APPROVAL_TIMEOUT_MSG = 'The approval node "{{ approval_node_name }}" has timed out. {{ workflow_url }}'
    DEFAULT_APPROVAL_TIMEOUT_BODY = 'The approval node "{{ approval_node_name }}" has timed out. {{ workflow_url }}\n\n{{ job_metadata }}'

    DEFAULT_APPROVAL_DENIED_MSG = 'The approval node "{{ approval_node_name }}" was denied. {{ workflow_url }}'
    DEFAULT_APPROVAL_DENIED_BODY = 'The approval node "{{ approval_node_name }}" was denied. {{ workflow_url }}\n\n{{ job_metadata }}'

    default_messages = {"started": {"message": DEFAULT_MSG, "body": None},
                        "success": {"message": DEFAULT_MSG, "body": None},
                        "error": {"message": DEFAULT_MSG, "body": None},
                        "workflow_approval": {"running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": None},
                                              "approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG, "body": None},
                                              "timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": None},
                                              "denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": None}}}
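Each backend now inherits this nested default_messages structure, and the workflow-approval code looks messages up by event type and then by approval status, preferring a user-supplied template and falling back to the default. A small sketch of that lookup, using plain dicts in place of the notification template model (names here are illustrative only):

DEFAULT_APPROVAL_APPROVED_MSG = ('The approval node "{{ approval_node_name }}" '
                                 'was approved. {{ workflow_url }}')

default_messages = {
    "workflow_approval": {
        "approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG, "body": None},
    },
}

def resolve_message(custom_messages, approval_status):
    # Prefer the user's custom template for this status, if one was saved.
    template = (custom_messages or {}).get('workflow_approval', {}).get(approval_status, {})
    msg_template = template.get('message')
    # Otherwise fall back to the backend's built-in default.
    if not msg_template:
        msg_template = default_messages['workflow_approval'][approval_status]['message']
    return msg_template

print(resolve_message(None, 'approved'))
print(resolve_message({'workflow_approval': {'approved': {'message': 'Custom: {{ workflow_url }}'}}},
                      'approved'))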
@@ -1,14 +1,27 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import json
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.core.mail.backends.smtp import EmailBackend
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
|
||||
DEFAULT_BODY = CustomNotificationBase.DEFAULT_BODY
|
||||
|
||||
DEFAULT_APPROVAL_RUNNING_MSG = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG
|
||||
DEFAULT_APPROVAL_RUNNING_BODY = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_BODY
|
||||
|
||||
DEFAULT_APPROVAL_APPROVED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG
|
||||
DEFAULT_APPROVAL_APPROVED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_BODY
|
||||
|
||||
DEFAULT_APPROVAL_TIMEOUT_MSG = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG
|
||||
DEFAULT_APPROVAL_TIMEOUT_BODY = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_BODY
|
||||
|
||||
DEFAULT_APPROVAL_DENIED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG
|
||||
DEFAULT_APPROVAL_DENIED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_BODY
|
||||
|
||||
|
||||
class CustomEmailBackend(EmailBackend):
|
||||
class CustomEmailBackend(EmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"host": {"label": "Host", "type": "string"},
|
||||
"port": {"label": "Port", "type": "int"},
|
||||
@@ -19,22 +32,17 @@ class CustomEmailBackend(EmailBackend):
|
||||
"sender": {"label": "Sender Email", "type": "string"},
|
||||
"recipients": {"label": "Recipient List", "type": "list"},
|
||||
"timeout": {"label": "Timeout", "type": "int", "default": 30}}
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
DEFAULT_BODY = smart_text(_("{{ job_friendly_name }} #{{ job.id }} had status {{ job.status }}, view details at {{ url }}\n\n{{ job_summary_dict }}"))
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT, "body": DEFAULT_BODY},
|
||||
"success": {"message": DEFAULT_SUBJECT, "body": DEFAULT_BODY},
|
||||
"error": {"message": DEFAULT_SUBJECT, "body": DEFAULT_BODY}}
|
||||
recipient_parameter = "recipients"
|
||||
sender_parameter = "sender"
|
||||
|
||||
default_messages = {"started": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"success": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"error": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"workflow_approval": {"running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": DEFAULT_APPROVAL_RUNNING_BODY},
|
||||
"approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG, "body": DEFAULT_APPROVAL_APPROVED_BODY},
|
||||
"timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": DEFAULT_APPROVAL_TIMEOUT_BODY},
|
||||
"denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": DEFAULT_APPROVAL_DENIED_BODY}}}
|
||||
|
||||
def format_body(self, body):
|
||||
if "body" in body:
|
||||
body_actual = body['body']
|
||||
else:
|
||||
body_actual = smart_text(_("{} #{} had status {}, view details at {}\n\n").format(
|
||||
body['friendly_name'], body['id'], body['status'], body['url'])
|
||||
)
|
||||
body_actual += json.dumps(body, indent=4)
|
||||
return body_actual
|
||||
# leave body unchanged (body is expected to be a string)
|
||||
return body
|
||||
|
||||
@@ -8,24 +8,21 @@ import dateutil.parser as dp
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.grafana_backend')
|
||||
|
||||
|
||||
class GrafanaBackend(AWXBaseEmailBackend):
|
||||
class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"grafana_url": {"label": "Grafana URL", "type": "string"},
|
||||
"grafana_key": {"label": "Grafana API Key", "type": "password"}}
|
||||
recipient_parameter = "grafana_url"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, grafana_key,dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True,
|
||||
fail_silently=False, **kwargs):
|
||||
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
||||
|
||||
@@ -7,12 +7,14 @@ import requests
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.hipchat_backend')
|
||||
|
||||
|
||||
class HipChatBackend(AWXBaseEmailBackend):
|
||||
class HipChatBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"token": {"label": "Token", "type": "password"},
|
||||
"rooms": {"label": "Destination Rooms", "type": "list"},
|
||||
@@ -23,11 +25,6 @@ class HipChatBackend(AWXBaseEmailBackend):
|
||||
recipient_parameter = "rooms"
|
||||
sender_parameter = "message_from"
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, token, color, api_url, notify, fail_silently=False, **kwargs):
|
||||
super(HipChatBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.token = token
|
||||
|
||||
@@ -9,12 +9,14 @@ import irc.client
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.irc_backend')
|
||||
|
||||
|
||||
class IrcBackend(AWXBaseEmailBackend):
|
||||
class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"server": {"label": "IRC Server Address", "type": "string"},
|
||||
"port": {"label": "IRC Server Port", "type": "int"},
|
||||
@@ -25,11 +27,6 @@ class IrcBackend(AWXBaseEmailBackend):
|
||||
recipient_parameter = "targets"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, server, port, nickname, password, use_ssl, fail_silently=False, **kwargs):
|
||||
super(IrcBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.server = server
|
||||
|
||||
@@ -7,23 +7,20 @@ import json
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.mattermost_backend')
|
||||
|
||||
|
||||
class MattermostBackend(AWXBaseEmailBackend):
|
||||
class MattermostBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"mattermost_url": {"label": "Target URL", "type": "string"},
|
||||
"mattermost_no_verify_ssl": {"label": "Verify SSL", "type": "bool"}}
|
||||
recipient_parameter = "mattermost_url"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, mattermost_no_verify_ssl=False, mattermost_channel=None, mattermost_username=None,
|
||||
mattermost_icon_url=None, fail_silently=False, **kwargs):
|
||||
super(MattermostBackend, self).__init__(fail_silently=fail_silently)
|
||||
|
||||
@@ -1,17 +1,23 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import json
|
||||
import logging
|
||||
import pygerduty
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
DEFAULT_BODY = CustomNotificationBase.DEFAULT_BODY
|
||||
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.pagerduty_backend')
|
||||
|
||||
|
||||
class PagerDutyBackend(AWXBaseEmailBackend):
|
||||
class PagerDutyBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"subdomain": {"label": "Pagerduty subdomain", "type": "string"},
|
||||
"token": {"label": "API Token", "type": "password"},
|
||||
@@ -20,11 +26,14 @@ class PagerDutyBackend(AWXBaseEmailBackend):
|
||||
recipient_parameter = "service_key"
|
||||
sender_parameter = "client_name"
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
DEFAULT_BODY = "{{ job_summary_dict }}"
|
||||
default_messages = {"started": { "message": DEFAULT_SUBJECT, "body": DEFAULT_BODY},
|
||||
"success": { "message": DEFAULT_SUBJECT, "body": DEFAULT_BODY},
|
||||
"error": { "message": DEFAULT_SUBJECT, "body": DEFAULT_BODY}}
|
||||
DEFAULT_BODY = "{{ job_metadata }}"
|
||||
default_messages = {"started": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"success": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"error": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"workflow_approval": {"running": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"approved": {"message": DEFAULT_MSG,"body": DEFAULT_BODY},
|
||||
"timed_out": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"denied": {"message": DEFAULT_MSG, "body": DEFAULT_BODY}}}
|
||||
|
||||
def __init__(self, subdomain, token, fail_silently=False, **kwargs):
|
||||
super(PagerDutyBackend, self).__init__(fail_silently=fail_silently)
|
||||
@@ -32,6 +41,16 @@ class PagerDutyBackend(AWXBaseEmailBackend):
|
||||
self.token = token
|
||||
|
||||
def format_body(self, body):
|
||||
# cast to dict if possible # TODO: is it true that this can be a dict or str?
|
||||
try:
|
||||
potential_body = json.loads(body)
|
||||
if isinstance(potential_body, dict):
|
||||
body = potential_body
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# but it's okay if this is also just a string
|
||||
|
||||
return body
|
||||
|
||||
def send_messages(self, messages):
|
||||
|
||||
@@ -7,22 +7,20 @@ import json
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.rocketchat_backend')
|
||||
|
||||
|
||||
class RocketChatBackend(AWXBaseEmailBackend):
|
||||
class RocketChatBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"rocketchat_url": {"label": "Target URL", "type": "string"},
|
||||
"rocketchat_no_verify_ssl": {"label": "Verify SSL", "type": "bool"}}
|
||||
recipient_parameter = "rocketchat_url"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, rocketchat_no_verify_ssl=False, rocketchat_username=None, rocketchat_icon_url=None, fail_silently=False, **kwargs):
|
||||
super(RocketChatBackend, self).__init__(fail_silently=fail_silently)
|
||||
|
||||
@@ -6,24 +6,21 @@ from slackclient import SlackClient
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.slack_backend')
|
||||
WEBSOCKET_TIMEOUT = 30
|
||||
|
||||
|
||||
class SlackBackend(AWXBaseEmailBackend):
|
||||
class SlackBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"token": {"label": "Token", "type": "password"},
|
||||
"channels": {"label": "Destination Channels", "type": "list"}}
|
||||
recipient_parameter = "channels"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, token, hex_color="", fail_silently=False, **kwargs):
|
||||
super(SlackBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.token = token
|
||||
@@ -50,6 +47,7 @@ class SlackBackend(AWXBaseEmailBackend):
|
||||
else:
|
||||
ret = connection.api_call("chat.postMessage",
|
||||
channel=r,
|
||||
as_user=True,
|
||||
text=m.subject)
|
||||
logger.debug(ret)
|
||||
if ret['ok']:
|
||||
|
||||
@@ -7,12 +7,14 @@ from twilio.rest import Client
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.twilio_backend')
|
||||
|
||||
|
||||
class TwilioBackend(AWXBaseEmailBackend):
|
||||
class TwilioBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"account_sid": {"label": "Account SID", "type": "string"},
|
||||
"account_token": {"label": "Account Token", "type": "password"},
|
||||
@@ -21,11 +23,6 @@ class TwilioBackend(AWXBaseEmailBackend):
|
||||
recipient_parameter = "to_numbers"
|
||||
sender_parameter = "from_number"
|
||||
|
||||
DEFAULT_SUBJECT = "{{ job_friendly_name }} #{{ job.id }} '{{ job.name }}' {{ job.status }}: {{ url }}"
|
||||
default_messages = {"started": {"message": DEFAULT_SUBJECT},
|
||||
"success": {"message": DEFAULT_SUBJECT},
|
||||
"error": {"message": DEFAULT_SUBJECT}}
|
||||
|
||||
def __init__(self, account_sid, account_token, fail_silently=False, **kwargs):
|
||||
super(TwilioBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.account_sid = account_sid
|
||||
|
||||
@@ -7,13 +7,15 @@ import requests
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.utils import get_awx_version
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.webhook_backend')
|
||||
|
||||
|
||||
class WebhookBackend(AWXBaseEmailBackend):
|
||||
class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"url": {"label": "Target URL", "type": "string"},
|
||||
"http_method": {"label": "HTTP Method", "type": "string", "default": "POST"},
|
||||
@@ -24,10 +26,16 @@ class WebhookBackend(AWXBaseEmailBackend):
|
||||
recipient_parameter = "url"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_BODY = "{{ job_summary_dict }}"
|
||||
DEFAULT_BODY = "{{ job_metadata }}"
|
||||
default_messages = {"started": {"body": DEFAULT_BODY},
|
||||
"success": {"body": DEFAULT_BODY},
|
||||
"error": {"body": DEFAULT_BODY}}
|
||||
"error": {"body": DEFAULT_BODY},
|
||||
"workflow_approval": {
|
||||
"running": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" needs review. '
|
||||
'This node can be viewed at: {{ workflow_url }}"}'},
|
||||
"approved": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" was approved. {{ workflow_url }}"}'},
|
||||
"timed_out": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" has timed out. {{ workflow_url }}"}'},
|
||||
"denied": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" was denied. {{ workflow_url }}"}'}}}
|
||||
|
||||
def __init__(self, http_method, headers, disable_ssl_verification=False, fail_silently=False, username=None, password=None, **kwargs):
|
||||
self.http_method = http_method
|
||||
@@ -38,15 +46,13 @@ class WebhookBackend(AWXBaseEmailBackend):
|
||||
super(WebhookBackend, self).__init__(fail_silently=fail_silently)
|
||||
|
||||
def format_body(self, body):
|
||||
# If `body` has body field, attempt to use this as the main body,
|
||||
# otherwise, leave it as a sub-field
|
||||
if isinstance(body, dict) and 'body' in body and isinstance(body['body'], str):
|
||||
try:
|
||||
potential_body = json.loads(body['body'])
|
||||
if isinstance(potential_body, dict):
|
||||
body = potential_body
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
# expect body to be a string representing a dict
|
||||
try:
|
||||
potential_body = json.loads(body)
|
||||
if isinstance(potential_body, dict):
|
||||
body = potential_body
|
||||
except json.JSONDecodeError:
|
||||
body = {}
|
||||
return body
|
||||
|
||||
def send_messages(self, messages):
|
||||
|
||||
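WebhookBackend.format_body now expects the rendered body to be a string containing JSON for a dict; it parses the string when possible and otherwise falls back to an empty dict so the POST payload is always a mapping. A standalone sketch of that parsing rule:

import json

def format_webhook_body(body):
    # Expect `body` to be a string representing a dict (the rendered template).
    try:
        potential_body = json.loads(body)
        if isinstance(potential_body, dict):
            body = potential_body
    except json.JSONDecodeError:
        # Invalid JSON degrades to an empty payload rather than an error.
        body = {}
    return body

print(format_webhook_body('{"body": "node approved"}'))  # {'body': 'node approved'}
print(format_webhook_body('not json'))                    # {}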
@@ -12,10 +12,12 @@ class UriCleaner(object):
|
||||
|
||||
@staticmethod
|
||||
def remove_sensitive(cleartext):
|
||||
# exclude_list contains the items that will _not_ be redacted
|
||||
exclude_list = [settings.PUBLIC_GALAXY_SERVER['url']]
|
||||
if settings.PRIMARY_GALAXY_URL:
|
||||
exclude_list = [settings.PRIMARY_GALAXY_URL] + [server['url'] for server in settings.FALLBACK_GALAXY_SERVERS]
|
||||
else:
|
||||
exclude_list = [server['url'] for server in settings.FALLBACK_GALAXY_SERVERS]
|
||||
exclude_list += [settings.PRIMARY_GALAXY_URL]
|
||||
if settings.FALLBACK_GALAXY_SERVERS:
|
||||
exclude_list += [server['url'] for server in settings.FALLBACK_GALAXY_SERVERS]
|
||||
redactedtext = cleartext
|
||||
text_index = 0
|
||||
while True:
|
||||
|
||||
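The updated remove_sensitive builds its exclude_list (URLs that are never redacted) from the public Galaxy server plus any configured primary and fallback Galaxy servers. A small sketch of that assembly, using module-level constants in place of django.conf.settings; the values are made up, the setting names mirror the hunk above:

PUBLIC_GALAXY_SERVER = {'url': 'https://galaxy.ansible.com'}
PRIMARY_GALAXY_URL = 'https://galaxy.example.com'
FALLBACK_GALAXY_SERVERS = [{'url': 'https://galaxy-mirror.example.com'}]

def build_exclude_list():
    # These URLs are intentionally left readable when credentials are scrubbed.
    exclude_list = [PUBLIC_GALAXY_SERVER['url']]
    if PRIMARY_GALAXY_URL:
        exclude_list += [PRIMARY_GALAXY_URL]
    if FALLBACK_GALAXY_SERVERS:
        exclude_list += [server['url'] for server in FALLBACK_GALAXY_SERVERS]
    return exclude_list

print(build_exclude_list())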
@@ -89,8 +89,8 @@ class SimpleDAG(object):
|
||||
run_status(n['node_object']),
|
||||
color
|
||||
)
|
||||
for label, edges in self.node_from_edges_by_label.iteritems():
|
||||
for from_node, to_nodes in edges.iteritems():
|
||||
for label, edges in self.node_from_edges_by_label.items():
|
||||
for from_node, to_nodes in edges.items():
|
||||
for to_node in to_nodes:
|
||||
doc += "%s -> %s [ label=\"%s\" ];\n" % (
|
||||
run_status(self.nodes[from_node]['node_object']),
|
||||
@@ -140,36 +140,36 @@ class SimpleDAG(object):
|
||||
def find_ord(self, obj):
|
||||
return self.node_obj_to_node_index.get(obj, None)
|
||||
|
||||
def _get_dependencies_by_label(self, node_index, label):
|
||||
def _get_children_by_label(self, node_index, label):
|
||||
return [self.nodes[index] for index in
|
||||
self.node_from_edges_by_label.get(label, {})
|
||||
.get(node_index, [])]
|
||||
|
||||
def get_dependencies(self, obj, label=None):
|
||||
def get_children(self, obj, label=None):
|
||||
this_ord = self.find_ord(obj)
|
||||
nodes = []
|
||||
if label:
|
||||
return self._get_dependencies_by_label(this_ord, label)
|
||||
return self._get_children_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
for l in self.node_from_edges_by_label.keys():
|
||||
nodes.extend(self._get_dependencies_by_label(this_ord, l))
|
||||
nodes.extend(self._get_children_by_label(this_ord, l))
|
||||
return nodes
|
||||
|
||||
def _get_dependents_by_label(self, node_index, label):
|
||||
def _get_parents_by_label(self, node_index, label):
|
||||
return [self.nodes[index] for index in
|
||||
self.node_to_edges_by_label.get(label, {})
|
||||
.get(node_index, [])]
|
||||
|
||||
def get_dependents(self, obj, label=None):
|
||||
def get_parents(self, obj, label=None):
|
||||
this_ord = self.find_ord(obj)
|
||||
nodes = []
|
||||
if label:
|
||||
return self._get_dependents_by_label(this_ord, label)
|
||||
return self._get_parents_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
for l in self.node_to_edges_by_label.keys():
|
||||
nodes.extend(self._get_dependents_by_label(this_ord, l))
|
||||
nodes.extend(self._get_parents_by_label(this_ord, l))
|
||||
return nodes
|
||||
|
||||
def get_root_nodes(self):
|
||||
@@ -188,7 +188,7 @@ class SimpleDAG(object):
|
||||
while stack:
|
||||
node_obj = stack.pop()
|
||||
|
||||
children = [node['node_object'] for node in self.get_dependencies(node_obj)]
|
||||
children = [node['node_object'] for node in self.get_children(node_obj)]
|
||||
children_to_add = list(filter(lambda node_obj: node_obj not in node_objs_visited, children))
|
||||
|
||||
if children_to_add:
|
||||
@@ -212,7 +212,7 @@ class SimpleDAG(object):
|
||||
if obj.id in obj_ids_processed:
|
||||
return
|
||||
|
||||
for child in self.get_dependencies(obj):
|
||||
for child in self.get_children(obj):
|
||||
visit(child)
|
||||
obj_ids_processed.add(obj.id)
|
||||
nodes_sorted.appendleft(node)
|
||||
|
||||
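The renames above make SimpleDAG read in plain graph terms: get_children(obj, label) follows outgoing edges and get_parents(obj, label) follows incoming ones, grouped by edge label. A toy illustration of the same parent/child bookkeeping, independent of the AWX class (this is a simplified stand-in, not the real SimpleDAG API):

class TinyDAG:
    def __init__(self):
        self.children_by_label = {}   # label -> {parent: [children]}
        self.parents_by_label = {}    # label -> {child: [parents]}

    def add_edge(self, parent, child, label):
        self.children_by_label.setdefault(label, {}).setdefault(parent, []).append(child)
        self.parents_by_label.setdefault(label, {}).setdefault(child, []).append(parent)

    def get_children(self, obj, label=None):
        labels = [label] if label else self.children_by_label.keys()
        return [c for l in labels for c in self.children_by_label.get(l, {}).get(obj, [])]

    def get_parents(self, obj, label=None):
        labels = [label] if label else self.parents_by_label.keys()
        return [p for l in labels for p in self.parents_by_label.get(l, {}).get(obj, [])]

dag = TinyDAG()
dag.add_edge('sync', 'deploy', 'success_nodes')
dag.add_edge('sync', 'notify', 'always_nodes')
print(dag.get_children('sync'))                    # ['deploy', 'notify']
print(dag.get_parents('deploy', 'success_nodes'))  # ['sync']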
@@ -55,7 +55,7 @@ class WorkflowDAG(SimpleDAG):
|
||||
|
||||
def _are_relevant_parents_finished(self, node):
|
||||
obj = node['node_object']
|
||||
parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
|
||||
parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
|
||||
for p in parent_nodes:
|
||||
if p.do_not_run is True:
|
||||
continue
|
||||
@@ -69,33 +69,55 @@ class WorkflowDAG(SimpleDAG):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _all_parents_met_convergence_criteria(self, node):
|
||||
# This function takes any node and checks that all of its parents have met their criteria to run the child.
# It returns a boolean and is really only useful if the node is an ALL convergence node, and it is
# intended to be used in conjunction with the node property `all_parents_must_converge`
|
||||
obj = node['node_object']
|
||||
parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
|
||||
for p in parent_nodes:
|
||||
#node has a status
|
||||
if p.job and p.job.status in ["successful", "failed"]:
|
||||
if p.job and p.job.status == "successful":
|
||||
status = "success_nodes"
|
||||
elif p.job and p.job.status == "failed":
|
||||
status = "failure_nodes"
|
||||
# check that the node's status matches either a pathway of the same status or an always path.
|
||||
if (p not in [node['node_object'] for node in self.get_parents(obj, status)]
|
||||
and p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
|
||||
return False
|
||||
return True
|
||||
|
||||
def bfs_nodes_to_run(self):
|
||||
nodes = self.get_root_nodes()
|
||||
nodes_found = []
|
||||
node_ids_visited = set()
|
||||
|
||||
for index, n in enumerate(nodes):
|
||||
obj = n['node_object']
|
||||
if obj.id in node_ids_visited:
|
||||
continue
|
||||
node_ids_visited.add(obj.id)
|
||||
|
||||
if obj.do_not_run is True:
|
||||
continue
|
||||
|
||||
if obj.job:
|
||||
elif obj.job:
|
||||
if obj.job.status in ['failed', 'error', 'canceled']:
|
||||
nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
nodes.extend(self.get_children(obj, 'failure_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
elif obj.job.status == 'successful':
|
||||
nodes.extend(self.get_dependencies(obj, 'success_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
nodes.extend(self.get_children(obj, 'success_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
elif obj.unified_job_template is None:
|
||||
nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
nodes.extend(self.get_children(obj, 'failure_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
else:
|
||||
if self._are_relevant_parents_finished(n):
|
||||
# This catches root nodes or ANY convergence nodes
|
||||
if not obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
|
||||
nodes_found.append(n)
|
||||
# This catches ALL convergence nodes
|
||||
elif obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
|
||||
if self._all_parents_met_convergence_criteria(n):
|
||||
nodes_found.append(n)
|
||||
|
||||
return [n['node_object'] for n in nodes_found]
|
||||
|
||||
def cancel_node_jobs(self):
|
||||
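The new _all_parents_met_convergence_criteria check means an ALL-convergence node (all_parents_must_converge=True) runs only if every finished parent reached it along a matching edge: a successful parent via success_nodes or always_nodes, a failed parent via failure_nodes or always_nodes. A simplified standalone sketch of that rule, with parents described as plain tuples rather than AWX workflow nodes:

def all_parents_met_criteria(parents):
    """parents: iterable of (job_status, edge_label) tuples, one per parent edge."""
    for status, edge in parents:
        if status == 'successful' and edge not in ('success_nodes', 'always_nodes'):
            return False
        if status == 'failed' and edge not in ('failure_nodes', 'always_nodes'):
            return False
    return True

# Both parents reached the node along an edge matching their outcome: run it.
print(all_parents_met_criteria([('successful', 'success_nodes'),
                                ('failed', 'failure_nodes')]))   # True
# One parent failed but only a success path leads here: hold the node back.
print(all_parents_met_criteria([('successful', 'success_nodes'),
                                ('failed', 'success_nodes')]))   # False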
@@ -135,8 +157,8 @@ class WorkflowDAG(SimpleDAG):
|
||||
|
||||
for node in failed_nodes:
|
||||
obj = node['node_object']
|
||||
if (len(self.get_dependencies(obj, 'failure_nodes')) +
|
||||
len(self.get_dependencies(obj, 'always_nodes'))) == 0:
|
||||
if (len(self.get_children(obj, 'failure_nodes')) +
|
||||
len(self.get_children(obj, 'always_nodes'))) == 0:
|
||||
if obj.unified_job_template is None:
|
||||
res = True
|
||||
failed_unified_job_template_node_ids.append(str(obj.id))
|
||||
@@ -190,35 +212,48 @@ class WorkflowDAG(SimpleDAG):
|
||||
pass
|
||||
elif p.job:
|
||||
if p.job.status == 'successful':
|
||||
if node in (self.get_dependencies(p, 'success_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
if node in (self.get_children(p, 'success_nodes') +
|
||||
self.get_children(p, 'always_nodes')):
|
||||
return False
|
||||
elif p.job.status in ['failed', 'error', 'canceled']:
|
||||
if node in (self.get_dependencies(p, 'failure_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
if node in (self.get_children(p, 'failure_nodes') +
|
||||
self.get_children(p, 'always_nodes')):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
elif p.do_not_run is False and p.unified_job_template is None:
|
||||
if node in (self.get_dependencies(p, 'failure_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
elif not p.do_not_run and p.unified_job_template is None:
|
||||
if node in (self.get_children(p, 'failure_nodes') +
|
||||
self.get_children(p, 'always_nodes')):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
r'''
Determine whether the current node is a convergence node by checking that all of its
parents are finished and then checking whether every parent meets the needed
path criteria to run the convergence child
(i.e. parent must fail, parent must succeed, etc. to proceed).

Return a list object
'''
|
||||
def mark_dnr_nodes(self):
|
||||
root_nodes = self.get_root_nodes()
|
||||
nodes_marked_do_not_run = []
|
||||
|
||||
for node in self.sort_nodes_topological():
|
||||
obj = node['node_object']
|
||||
|
||||
if obj.do_not_run is False and not obj.job and node not in root_nodes:
|
||||
parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
|
||||
if self._are_all_nodes_dnr_decided(parent_nodes):
|
||||
if self._should_mark_node_dnr(node, parent_nodes):
|
||||
parent_nodes = [p['node_object'] for p in self.get_parents(obj)]
|
||||
if not obj.do_not_run and not obj.job and node not in root_nodes:
|
||||
if obj.all_parents_must_converge:
|
||||
if any(p.do_not_run for p in parent_nodes) or not self._all_parents_met_convergence_criteria(node):
|
||||
obj.do_not_run = True
|
||||
nodes_marked_do_not_run.append(node)
|
||||
else:
|
||||
if self._are_all_nodes_dnr_decided(parent_nodes):
|
||||
if self._should_mark_node_dnr(node, parent_nodes):
|
||||
obj.do_not_run = True
|
||||
nodes_marked_do_not_run.append(node)
|
||||
|
||||
return [n['node_object'] for n in nodes_marked_do_not_run]
|
||||
|
||||
@@ -15,8 +15,6 @@ class DependencyGraph(object):
|
||||
INVENTORY_UPDATES = 'inventory_updates'
|
||||
|
||||
JOB_TEMPLATE_JOBS = 'job_template_jobs'
|
||||
JOB_PROJECT_IDS = 'job_project_ids'
|
||||
JOB_INVENTORY_IDS = 'job_inventory_ids'
|
||||
|
||||
SYSTEM_JOB = 'system_job'
|
||||
INVENTORY_SOURCE_UPDATES = 'inventory_source_updates'
|
||||
@@ -41,10 +39,6 @@ class DependencyGraph(object):
|
||||
Track runnable job related project and inventory to ensure updates
|
||||
don't run while a job needing those resources is running.
|
||||
'''
|
||||
# project_id -> True / False
|
||||
self.data[self.JOB_PROJECT_IDS] = {}
|
||||
# inventory_id -> True / False
|
||||
self.data[self.JOB_INVENTORY_IDS] = {}
|
||||
|
||||
# inventory_source_id -> True / False
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES] = {}
|
||||
@@ -66,7 +60,7 @@ class DependencyGraph(object):
|
||||
|
||||
def get_now(self):
|
||||
return tz_now()
|
||||
|
||||
|
||||
def mark_system_job(self):
|
||||
self.data[self.SYSTEM_JOB] = False
|
||||
|
||||
@@ -80,20 +74,16 @@ class DependencyGraph(object):
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False
|
||||
|
||||
def mark_job_template_job(self, job):
|
||||
self.data[self.JOB_INVENTORY_IDS][job.inventory_id] = False
|
||||
self.data[self.JOB_PROJECT_IDS][job.project_id] = False
|
||||
self.data[self.JOB_TEMPLATE_JOBS][job.job_template_id] = False
|
||||
|
||||
def mark_workflow_job(self, job):
|
||||
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS][job.workflow_job_template_id] = False
|
||||
|
||||
def can_project_update_run(self, job):
|
||||
return self.data[self.JOB_PROJECT_IDS].get(job.project_id, True) and \
|
||||
self.data[self.PROJECT_UPDATES].get(job.project_id, True)
|
||||
return self.data[self.PROJECT_UPDATES].get(job.project_id, True)
|
||||
|
||||
def can_inventory_update_run(self, job):
|
||||
return self.data[self.JOB_INVENTORY_IDS].get(job.inventory_source.inventory_id, True) and \
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)
|
||||
return self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)
|
||||
|
||||
def can_job_run(self, job):
|
||||
if self.data[self.PROJECT_UPDATES].get(job.project_id, True) is True and \
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
import collections
|
||||
import os
|
||||
import stat
|
||||
import time
|
||||
import yaml
|
||||
import tempfile
|
||||
import logging
|
||||
from base64 import b64encode
|
||||
|
||||
@@ -88,8 +84,17 @@ class PodManager(object):
|
||||
|
||||
@cached_property
|
||||
def kube_api(self):
|
||||
my_client = config.new_client_from_config(config_file=self.kube_config)
|
||||
return client.CoreV1Api(api_client=my_client)
|
||||
# this feels a little janky, but it's what k8s' own code does
|
||||
# internally when it reads kube config files from disk:
|
||||
# https://github.com/kubernetes-client/python-base/blob/0b208334ef0247aad9afcaae8003954423b61a0d/config/kube_config.py#L643
|
||||
loader = config.kube_config.KubeConfigLoader(
|
||||
config_dict=self.kube_config
|
||||
)
|
||||
cfg = type.__call__(client.Configuration)
|
||||
loader.load_and_set(cfg)
|
||||
return client.CoreV1Api(api_client=client.ApiClient(
|
||||
configuration=cfg
|
||||
))
|
||||
|
||||
@property
|
||||
def pod_name(self):
|
||||
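kube_api now builds its API client from an in-memory config dict via KubeConfigLoader instead of writing a kubeconfig file to disk. A sketch of that construction, assuming the kubernetes Python client is installed and that kube_config_dict is a mapping shaped like a kubeconfig file (the dict contents indicated in the comment are placeholders):

from kubernetes import client, config

def core_v1_api_from_dict(kube_config_dict):
    # Load cluster/credential info straight from a dict, mirroring what the
    # k8s client does internally when it reads kubeconfig files from disk.
    loader = config.kube_config.KubeConfigLoader(config_dict=kube_config_dict)
    cfg = client.Configuration()
    loader.load_and_set(cfg)
    return client.CoreV1Api(api_client=client.ApiClient(configuration=cfg))

# kube_config_dict would be the structure produced by generate_tmp_kube_config:
# {'apiVersion': 'v1', 'kind': 'Config', 'clusters': [...], 'users': [...],
#  'contexts': [...], 'current-context': ...}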
@@ -168,16 +173,10 @@ def generate_tmp_kube_config(credential, namespace):
|
||||
"current-context": host_input
|
||||
}
|
||||
|
||||
if credential.get_input('verify_ssl'):
|
||||
if credential.get_input('verify_ssl') and 'ssl_ca_cert' in credential.inputs:
|
||||
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
|
||||
credential.get_input('ssl_ca_cert').encode() # encode to bytes
|
||||
).decode() # decode the base64 data into a str
|
||||
else:
|
||||
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
|
||||
|
||||
fd, path = tempfile.mkstemp(prefix='kubeconfig')
|
||||
with open(path, 'wb') as temp:
|
||||
temp.write(yaml.dump(config).encode())
|
||||
temp.flush()
|
||||
os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
return path
|
||||
return config
|
||||
|
||||
@@ -23,6 +23,7 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
SystemJob,
|
||||
UnifiedJob,
|
||||
WorkflowApproval,
|
||||
WorkflowJob,
|
||||
WorkflowJobTemplate
|
||||
@@ -74,21 +75,6 @@ class TaskManager():
|
||||
key=lambda task: task.created)
|
||||
return all_tasks
|
||||
|
||||
|
||||
def get_latest_project_update_tasks(self, all_sorted_tasks):
|
||||
project_ids = set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
project_ids.add(task.project_id)
|
||||
return ProjectUpdate.objects.filter(id__in=project_ids)
|
||||
|
||||
def get_latest_inventory_update_tasks(self, all_sorted_tasks):
|
||||
inventory_ids = set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return InventoryUpdate.objects.filter(id__in=inventory_ids)
|
||||
|
||||
def get_running_workflow_jobs(self):
|
||||
graph_workflow_jobs = [wf for wf in
|
||||
WorkflowJob.objects.filter(status='running')]
|
||||
@@ -200,9 +186,6 @@ class TaskManager():
|
||||
schedule_task_manager()
|
||||
return result
|
||||
|
||||
def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
|
||||
return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]
|
||||
|
||||
def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
|
||||
from awx.main.tasks import handle_work_error, handle_work_success
|
||||
|
||||
@@ -252,19 +235,30 @@ class TaskManager():
|
||||
logger.debug('Submitting isolated {} to queue {} controlled by {}.'.format(
|
||||
task.log_format, task.execution_node, controller_node))
|
||||
elif rampart_group.is_containerized:
|
||||
# find one real, non-containerized instance with capacity to
|
||||
# act as the controller for k8s API interaction
|
||||
match = None
|
||||
for group in InstanceGroup.objects.all():
|
||||
if group.is_containerized or group.controller_id:
|
||||
continue
|
||||
match = group.fit_task_to_most_remaining_capacity_instance(task)
|
||||
if match:
|
||||
break
|
||||
task.instance_group = rampart_group
|
||||
if not task.supports_isolation():
|
||||
# project updates and inventory updates don't *actually* run in pods,
|
||||
# so just pick *any* non-isolated, non-containerized host and use it
|
||||
for group in InstanceGroup.objects.all():
|
||||
if group.is_containerized or group.controller_id:
|
||||
continue
|
||||
match = group.find_largest_idle_instance()
|
||||
if match:
|
||||
task.execution_node = match.hostname
|
||||
logger.debug('Submitting containerized {} to queue {}.'.format(
|
||||
task.log_format, task.execution_node))
|
||||
break
|
||||
if match is None:
|
||||
logger.warn(
|
||||
'No available capacity to run containerized <{}>.'.format(task.log_format)
|
||||
)
|
||||
else:
|
||||
if task.supports_isolation():
|
||||
task.controller_node = match.hostname
|
||||
else:
|
||||
# project updates and inventory updates don't *actually* run in pods,
|
||||
# so just pick *any* non-isolated, non-containerized host and use it
|
||||
# as the execution node
|
||||
task.execution_node = match.hostname
|
||||
logger.debug('Submitting containerized {} to queue {}.'.format(
|
||||
task.log_format, task.execution_node))
|
||||
else:
|
||||
task.instance_group = rampart_group
|
||||
if instance is not None:
|
||||
@@ -353,10 +347,6 @@ class TaskManager():
|
||||
def should_update_inventory_source(self, job, latest_inventory_update):
|
||||
now = tz_now()
|
||||
|
||||
# Already processed dependencies for this job
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_inventory_update is None:
|
||||
return True
|
||||
'''
|
||||
@@ -382,8 +372,6 @@ class TaskManager():
|
||||
|
||||
def should_update_related_project(self, job, latest_project_update):
|
||||
now = tz_now()
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_project_update is None:
|
||||
return True
|
||||
@@ -415,18 +403,21 @@ class TaskManager():
|
||||
return True
|
||||
return False
|
||||
|
||||
def generate_dependencies(self, task):
|
||||
dependencies = []
|
||||
if type(task) is Job:
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
for task in undeped_tasks:
|
||||
dependencies = []
|
||||
if not type(task) is Job:
|
||||
continue
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
project_task = self.create_project_update(task)
|
||||
created_dependencies.append(project_task)
|
||||
dependencies.append(project_task)
|
||||
else:
|
||||
if latest_project_update.status in ['waiting', 'pending', 'running']:
|
||||
dependencies.append(latest_project_update)
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
@@ -441,56 +432,20 @@ class TaskManager():
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
created_dependencies.append(inventory_task)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
|
||||
dependencies.append(latest_inventory_update)
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if len(dependencies) > 0:
|
||||
self.capture_chain_failure_dependencies(task, dependencies)
|
||||
return dependencies
|
||||
|
||||
def process_dependencies(self, dependent_task, dependency_tasks):
|
||||
for task in dependency_tasks:
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("Dependent {} is blocked from running".format(task.log_format))
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
idle_instance_that_fits = None
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if idle_instance_that_fits is None:
|
||||
idle_instance_that_fits = rampart_group.find_largest_idle_instance()
|
||||
if not rampart_group.is_containerized and self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group {} capacity <= 0".format(rampart_group.name))
|
||||
continue
|
||||
|
||||
execution_instance = rampart_group.fit_task_to_most_remaining_capacity_instance(task)
|
||||
if execution_instance:
|
||||
logger.debug("Starting dependent {} in group {} instance {}".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname))
|
||||
elif not execution_instance and idle_instance_that_fits:
|
||||
if not rampart_group.is_containerized:
|
||||
execution_instance = idle_instance_that_fits
|
||||
logger.debug("Starting dependent {} in group {} on idle instance {}".format(
|
||||
task.log_format, rampart_group.name, execution_instance.hostname))
|
||||
if execution_instance or rampart_group.is_containerized:
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
tasks_to_fail = [t for t in dependency_tasks if t != task]
|
||||
tasks_to_fail += [dependent_task]
|
||||
self.start_task(task, rampart_group, tasks_to_fail, execution_instance)
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
else:
|
||||
logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
|
||||
rampart_group.name, task.log_format, task.task_impact))
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("Dependent {} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
|
||||
UnifiedJob.objects.filter(pk__in = [task.pk for task in undeped_tasks]).update(dependencies_processed=True)
|
||||
return created_dependencies
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("{} is blocked from running".format(task.log_format))
|
||||
continue
|
||||
@@ -563,13 +518,6 @@ class TaskManager():
|
||||
def calculate_capacity_consumed(self, tasks):
|
||||
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
|
||||
|
||||
def would_exceed_capacity(self, task, instance_group):
|
||||
current_capacity = self.graph[instance_group]['consumed_capacity']
|
||||
capacity_total = self.graph[instance_group]['capacity_total']
|
||||
if current_capacity == 0:
|
||||
return False
|
||||
return (task.task_impact + current_capacity > capacity_total)
|
||||
|
||||
def consume_capacity(self, task, instance_group):
|
||||
logger.debug('{} consumed {} capacity units from {} with prior total of {}'.format(
|
||||
task.log_format, task.task_impact, instance_group,
|
||||
@@ -587,6 +535,9 @@ class TaskManager():
|
||||
self.process_running_tasks(running_tasks)
|
||||
|
||||
pending_tasks = [t for t in all_sorted_tasks if t.status == 'pending']
|
||||
undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
|
||||
dependencies = self.generate_dependencies(undeped_tasks)
|
||||
self.process_pending_tasks(dependencies)
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
|
||||
def _schedule(self):
|
||||
|
||||
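With this change the scheduler splits pending work into tasks whose dependencies have never been generated (dependencies_processed is False), creates and schedules those dependencies first, marks the whole batch processed in one bulk UPDATE, and only then schedules the remaining pending tasks. A reduced sketch of that flow with plain objects standing in for Django models (the names here are illustrative, not the real TaskManager API):

class FakeTask:
    def __init__(self, name, dependencies_processed=False):
        self.name = name
        self.dependencies_processed = dependencies_processed

def schedule(pending_tasks, generate_dependencies, process_pending_tasks):
    # Only tasks that have never had dependencies generated get them generated now.
    undeped_tasks = [t for t in pending_tasks if not t.dependencies_processed]
    dependencies = generate_dependencies(undeped_tasks)
    for t in undeped_tasks:
        t.dependencies_processed = True   # the real code does this in one bulk UPDATE
    # Dependencies (project/inventory updates) are considered before the jobs themselves.
    process_pending_tasks(dependencies)
    process_pending_tasks(pending_tasks)

schedule([FakeTask('job-1'), FakeTask('job-2', dependencies_processed=True)],
         generate_dependencies=lambda tasks: [FakeTask('dep-of-' + t.name) for t in tasks],
         process_pending_tasks=lambda tasks: print([t.name for t in tasks]))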
@@ -10,6 +10,7 @@ import pkg_resources
|
||||
import sys
|
||||
|
||||
# Django
|
||||
from django.db import connection
|
||||
from django.conf import settings
|
||||
from django.db.models.signals import (
|
||||
pre_save,
|
||||
@@ -30,12 +31,11 @@ from crum.signals import current_user_getter
|
||||
|
||||
# AWX
|
||||
from awx.main.models import (
|
||||
ActivityStream, AdHocCommandEvent, Group, Host, InstanceGroup, Inventory,
|
||||
InventorySource, InventoryUpdateEvent, Job, JobEvent, JobHostSummary,
|
||||
JobTemplate, OAuth2AccessToken, Organization, Project, ProjectUpdateEvent,
|
||||
Role, SystemJob, SystemJobEvent, SystemJobTemplate, UnifiedJob,
|
||||
UnifiedJobTemplate, User, UserSessionMembership, WorkflowJobTemplateNode,
|
||||
WorkflowApproval, WorkflowApprovalTemplate, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
|
||||
ActivityStream, Group, Host, InstanceGroup, Inventory, InventorySource,
|
||||
Job, JobHostSummary, JobTemplate, OAuth2AccessToken, Organization, Project,
|
||||
Role, SystemJob, SystemJobTemplate, UnifiedJob, UnifiedJobTemplate, User,
|
||||
UserSessionMembership, WorkflowJobTemplateNode, WorkflowApproval,
|
||||
WorkflowApprovalTemplate, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
|
||||
)
|
||||
from awx.main.constants import CENSOR_VALUE
|
||||
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
|
||||
@@ -72,77 +72,6 @@ def get_current_user_or_none():
|
||||
return u
|
||||
|
||||
|
||||
def emit_event_detail(serializer, relation, **kwargs):
|
||||
instance = kwargs['instance']
|
||||
created = kwargs['created']
|
||||
if created:
|
||||
event_serializer = serializer(instance)
|
||||
consumers.emit_channel_notification(
|
||||
'-'.join([event_serializer.get_group_name(instance), str(getattr(instance, relation))]),
|
||||
event_serializer.data
|
||||
)
|
||||
|
||||
|
||||
def emit_job_event_detail(sender, **kwargs):
|
||||
from awx.api import serializers
|
||||
emit_event_detail(serializers.JobEventWebSocketSerializer, 'job_id', **kwargs)
|
||||
|
||||
|
||||
def emit_ad_hoc_command_event_detail(sender, **kwargs):
|
||||
from awx.api import serializers
|
||||
emit_event_detail(serializers.AdHocCommandEventWebSocketSerializer, 'ad_hoc_command_id', **kwargs)
|
||||
|
||||
|
||||
def emit_project_update_event_detail(sender, **kwargs):
|
||||
from awx.api import serializers
|
||||
emit_event_detail(serializers.ProjectUpdateEventWebSocketSerializer, 'project_update_id', **kwargs)
|
||||
|
||||
|
||||
def emit_inventory_update_event_detail(sender, **kwargs):
|
||||
from awx.api import serializers
|
||||
emit_event_detail(serializers.InventoryUpdateEventWebSocketSerializer, 'inventory_update_id', **kwargs)
|
||||
|
||||
|
||||
def emit_system_job_event_detail(sender, **kwargs):
|
||||
from awx.api import serializers
|
||||
emit_event_detail(serializers.SystemJobEventWebSocketSerializer, 'system_job_id', **kwargs)
|
||||
|
||||
|
||||
def emit_update_inventory_computed_fields(sender, **kwargs):
|
||||
logger.debug("In update inventory computed fields")
|
||||
if getattr(_inventory_updates, 'is_updating', False):
|
||||
return
|
||||
instance = kwargs['instance']
|
||||
if sender == Group.hosts.through:
|
||||
sender_name = 'group.hosts'
|
||||
elif sender == Group.parents.through:
|
||||
sender_name = 'group.parents'
|
||||
elif sender == Host.inventory_sources.through:
|
||||
sender_name = 'host.inventory_sources'
|
||||
elif sender == Group.inventory_sources.through:
|
||||
sender_name = 'group.inventory_sources'
|
||||
else:
|
||||
sender_name = str(sender._meta.verbose_name)
|
||||
if kwargs['signal'] == post_save:
|
||||
if sender == Job:
|
||||
return
|
||||
sender_action = 'saved'
|
||||
elif kwargs['signal'] == post_delete:
|
||||
sender_action = 'deleted'
|
||||
elif kwargs['signal'] == m2m_changed and kwargs['action'] in ('post_add', 'post_remove', 'post_clear'):
|
||||
sender_action = 'changed'
|
||||
else:
|
||||
return
|
||||
logger.debug('%s %s, updating inventory computed fields: %r %r',
|
||||
sender_name, sender_action, sender, kwargs)
|
||||
try:
|
||||
inventory = instance.inventory
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
|
||||
|
||||
def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
|
||||
if getattr(_inventory_updates, 'is_updating', False):
|
||||
return
|
||||
@@ -161,7 +90,9 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
|
||||
pass
|
||||
else:
|
||||
if inventory is not None:
|
||||
update_inventory_computed_fields.delay(inventory.id, True)
|
||||
connection.on_commit(
|
||||
lambda: update_inventory_computed_fields.delay(inventory.id)
|
||||
)
|
||||
|
||||
|
||||
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs):
|
||||
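The inventory handlers above now queue update_inventory_computed_fields through connection.on_commit, so the background task only fires after the surrounding transaction actually commits and is guaranteed to see the new rows. A minimal Django-flavored sketch of that pattern (the model and task names here are generic placeholders, not AWX's):

from django.db import connection, transaction

def save_host_and_refresh(host, refresh_task):
    # Anything registered via on_commit inside the atomic block only runs after
    # COMMIT, so the worker sees the row written below rather than stale data.
    with transaction.atomic():
        host.save()
        connection.on_commit(
            lambda: refresh_task.delay(host.inventory_id)
        )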
@@ -244,10 +175,6 @@ def connect_computed_field_signals():
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
|
||||
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
|
||||
@@ -258,11 +185,6 @@ connect_computed_field_signals()
|
||||
|
||||
post_save.connect(save_related_job_templates, sender=Project)
|
||||
post_save.connect(save_related_job_templates, sender=Inventory)
|
||||
post_save.connect(emit_job_event_detail, sender=JobEvent)
|
||||
post_save.connect(emit_ad_hoc_command_event_detail, sender=AdHocCommandEvent)
|
||||
post_save.connect(emit_project_update_event_detail, sender=ProjectUpdateEvent)
|
||||
post_save.connect(emit_inventory_update_event_detail, sender=InventoryUpdateEvent)
|
||||
post_save.connect(emit_system_job_event_detail, sender=SystemJobEvent)
|
||||
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
|
||||
m2m_changed.connect(rbac_activity_stream, Role.members.through)
|
||||
m2m_changed.connect(rbac_activity_stream, Role.parents.through)
|
||||
@@ -389,10 +311,6 @@ def disable_computed_fields():
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.parents.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
|
||||
m2m_changed.disconnect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
|
||||
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
|
||||
|
||||
@@ -22,10 +22,6 @@ import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
try:
import psutil
except Exception:
psutil = None
import urllib.parse as urlparse

# Django
@@ -34,7 +30,6 @@ from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now, timedelta
from django.utils.encoding import smart_str
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache

@@ -57,6 +52,7 @@ import ansible_runner
from awx import __version__ as awx_application_version
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule, TowerScheduleState, Instance, InstanceGroup,
UnifiedJob, Notification,

@@ -72,12 +68,11 @@ from awx.main.isolated import manager as isolated_manager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (get_ssh_version, update_scm_url,
get_licenser,
ignore_inventory_computed_fields,
ignore_inventory_group_removal, extract_ansible_vars, schedule_task_manager,
get_awx_version)
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.common import get_ansible_version, _get_ansible_version, get_custom_venv_choices
from awx.main.utils.common import _get_ansible_version, get_custom_venv_choices
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock

@@ -92,7 +87,7 @@ from rest_framework.exceptions import PermissionDenied
__all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate',
'RunAdHocCommand', 'handle_work_error', 'handle_work_success', 'apply_cluster_membership_policies',
'update_inventory_computed_fields', 'update_host_smart_inventory_memberships',
'send_notifications', 'run_administrative_checks', 'purge_old_stdout_files']
'send_notifications', 'purge_old_stdout_files']

HIDDEN_PASSWORD = '**********'
@@ -343,39 +338,31 @@ def send_notifications(notification_list, job_id=None):

@task()
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
if not settings.INSIGHTS_TRACKING_STATE:
return
try:
tgz = analytics.gather()
if not tgz:
return
logger.debug('gathered analytics: {}'.format(tgz))
analytics.ship(tgz)
finally:
if os.path.exists(tgz):
os.remove(tgz)

@task()
def run_administrative_checks():
logger.warn("Running administrative checks.")
if not settings.TOWER_ADMIN_ALERTS:
return
validation_info = get_licenser().validate()
if validation_info['license_type'] != 'open' and validation_info.get('instance_count', 0) < 1:
return
used_percentage = float(validation_info.get('current_instances', 0)) / float(validation_info.get('instance_count', 100))
tower_admin_emails = User.objects.filter(is_superuser=True).values_list('email', flat=True)
if (used_percentage * 100) > 90:
send_mail("Ansible Tower host usage over 90%",
_("Ansible Tower host usage over 90%"),
tower_admin_emails,
fail_silently=True)
if validation_info.get('date_warning', False):
send_mail("Ansible Tower license will expire soon",
_("Ansible Tower license will expire soon"),
tower_admin_emails,
fail_silently=True)
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
if last_gather:
last_time = DateTimeField().to_internal_value(last_gather.value)
else:
last_time = None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
with advisory_lock('gather_analytics_lock', wait=False) as acquired:
if acquired is False:
logger.debug('Not gathering analytics, another task holds lock')
return
try:
tgz = analytics.gather()
if not tgz:
return
logger.info('gathered analytics: {}'.format(tgz))
analytics.ship(tgz)
settings.AUTOMATION_ANALYTICS_LAST_GATHER = gather_time
finally:
if os.path.exists(tgz):
os.remove(tgz)

@task(queue=get_local_queuename)
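The reworked gather path above relies on a non-blocking PostgreSQL advisory lock so that only one cluster node ships analytics per interval. A minimal sketch of that pattern, using the same advisory_lock helper imported from awx.main.utils.pglock; gather_once and its gather/ship callables are stand-ins for illustration, not part of the patch:

import os
from awx.main.utils.pglock import advisory_lock

def gather_once(gather, ship):
    # wait=False makes the lock non-blocking: a second node skips instead of queueing
    with advisory_lock('gather_analytics_lock', wait=False) as acquired:
        if acquired is False:
            return  # another node is already gathering
        tgz = gather()
        if not tgz:
            return
        try:
            ship(tgz)
        finally:
            # always clean up the tarball, even if shipping fails
            if os.path.exists(tgz):
                os.remove(tgz)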
@@ -527,7 +514,7 @@ def awx_periodic_scheduler():

invalid_license = False
try:
access_registry[Job](None).check_license()
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e

@@ -616,7 +603,7 @@ def handle_work_error(task_id, *args, **kwargs):

@task()
def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
def update_inventory_computed_fields(inventory_id):
'''
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.

@@ -627,7 +614,7 @@ def update_inventory_computed_fields(inventory_id, should_update_hosts=True):
return
i = i[0]
try:
i.update_computed_fields(update_hosts=should_update_hosts)
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')

@@ -670,7 +657,7 @@ def update_host_smart_inventory_memberships():
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields(update_groups=False, update_hosts=False)
smart_inventory.update_computed_fields()

@task()

@@ -731,6 +718,7 @@ class BaseTask(object):
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}

def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
@@ -1029,11 +1017,17 @@ class BaseTask(object):
return False

def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True)
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {
hostname: hv.pop('remote_tower_id', '')
for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()
}
json_data = json.dumps(script_data)
handle, path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')

@@ -1142,6 +1136,32 @@ class BaseTask(object):
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''

if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
try:
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass

should_write_event = False
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
@@ -1364,7 +1384,7 @@ class BaseTask(object):

ansible_runner.utils.dump_artifacts(params)
isolated_manager_instance = isolated_manager.IsolatedManager(
cancelled_callback=lambda: self.update_model(self.instance.pk).cancel_flag,
canceled_callback=lambda: self.update_model(self.instance.pk).cancel_flag,
check_callback=self.check_handler,
pod_manager=pod_manager
)

@@ -1423,7 +1443,6 @@ class BaseTask(object):
def deploy_container_group_pod(self, task):
from awx.main.scheduler.kubernetes import PodManager # Avoid circular import
pod_manager = PodManager(self.instance)
self.cleanup_paths.append(pod_manager.kube_config)
try:
log_name = task.log_format
logger.debug(f"Launching pod for {log_name}.")

@@ -1452,7 +1471,7 @@ class BaseTask(object):
self.update_model(task.pk, execution_node=pod_manager.pod_name)
return pod_manager
@@ -1669,8 +1688,12 @@ class RunJob(BaseTask):
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))

if job.forks: # FIXME: Max limit?
args.append('--forks=%d' % job.forks)
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
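The forks change above caps a user-requested fork count at settings.MAX_FORKS, with zero meaning no limit. A rough standalone sketch of that clamping rule; clamp_forks is a hypothetical helper for illustration, not part of the patch:

def clamp_forks(requested, max_forks):
    # max_forks <= 0 is treated as "no limit", mirroring the MAX_FORKS check above
    if max_forks > 0 and requested > max_forks:
        return max_forks
    return requested

# e.g. clamp_forks(500, 200) == 200, while clamp_forks(500, 0) == 500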
@@ -1763,14 +1786,16 @@ class RunJob(BaseTask):

project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
needs_sync = True
sync_needs = []
all_sync_needs = ['update_{}'.format(job.project.scm_type), 'install_roles', 'install_collections']
if not job.project.scm_type:
# manual projects are not synced, user has responsibility for that
needs_sync = False
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs = all_sync_needs
elif not job.project.scm_revision:
logger.debug('Revision not known for {}, will sync with remote'.format(job.project))
sync_needs = all_sync_needs
elif job.project.scm_type == 'git':
git_repo = git.Repo(project_path)
try:

@@ -1781,23 +1806,27 @@ class RunJob(BaseTask):
if desired_revision == current_revision:
job_revision = desired_revision
logger.info('Skipping project sync for {} because commit is locally available'.format(job.log_format))
needs_sync = False
else:
sync_needs = all_sync_needs
except (ValueError, BadGitName):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs = all_sync_needs
else:
sync_needs = all_sync_needs
# Galaxy requirements are not supported for manual projects
if not needs_sync and job.project.scm_type:
if not sync_needs and job.project.scm_type:
# see if we need a sync because of presence of roles
galaxy_req_path = os.path.join(project_path, 'roles', 'requirements.yml')
if os.path.exists(galaxy_req_path):
logger.debug('Running project sync for {} because of galaxy role requirements.'.format(job.log_format))
needs_sync = True
sync_needs.append('install_roles')

galaxy_collections_req_path = os.path.join(project_path, 'collections', 'requirements.yml')
if os.path.exists(galaxy_collections_req_path):
logger.debug('Running project sync for {} because of galaxy collections requirements.'.format(job.log_format))
needs_sync = True
sync_needs.append('install_collections')

if needs_sync:
if sync_needs:
pu_ig = job.instance_group
pu_en = job.execution_node
if job.is_isolated() is True:

@@ -1807,6 +1836,7 @@ class RunJob(BaseTask):
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group = pu_ig,
execution_node=pu_en,

@@ -1814,6 +1844,8 @@ class RunJob(BaseTask):
)
if job.scm_branch and job.scm_branch != job.project.scm_branch:
sync_metafields['scm_branch'] = job.scm_branch
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update

@@ -1872,7 +1904,7 @@ class RunJob(BaseTask):
except Inventory.DoesNotExist:
pass
else:
update_inventory_computed_fields.delay(inventory.id, True)
update_inventory_computed_fields.delay(inventory.id)

@task()
@@ -1958,10 +1990,17 @@ class RunProjectUpdate(BaseTask):
env['TMP'] = settings.AWX_PROOT_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
env['ANSIBLE_CALLBACK_PLUGINS'] = self.get_path_to('..', 'plugins', 'callback')
env['ANSIBLE_GALAXY_IGNORE'] = True
# Set up the fallback server, which is the normal Ansible Galaxy by default
galaxy_servers = list(settings.FALLBACK_GALAXY_SERVERS)
# If private galaxy URL is non-blank, that means this feature is enabled
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# Set up the public Galaxy server, if enabled
if settings.PUBLIC_GALAXY_ENABLED:
galaxy_servers = [settings.PUBLIC_GALAXY_SERVER]
else:
galaxy_servers = []
# Set up fallback Galaxy servers, if configured
if settings.FALLBACK_GALAXY_SERVERS:
galaxy_servers = settings.FALLBACK_GALAXY_SERVERS + galaxy_servers
# Set up the primary Galaxy server, if configured
if settings.PRIMARY_GALAXY_URL:
galaxy_servers = [{'id': 'primary_galaxy'}] + galaxy_servers
for key in GALAXY_SERVER_FIELDS:

@@ -1974,8 +2013,9 @@ class RunProjectUpdate(BaseTask):
continue
env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
env[env_key] = server[key]
# now set the precedence of galaxy servers
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
if galaxy_servers:
# now set the precedence of galaxy servers
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
return env

def _build_scm_url_extra_vars(self, project_update):
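The two hunks above build an ordered list of Galaxy server definitions (primary, then public or fallback servers) and translate it into ansible-galaxy environment variables, with ANSIBLE_GALAXY_SERVER_LIST fixing install precedence. A simplified sketch of that translation; galaxy_env and the default field tuple are illustrative assumptions, while the real code iterates GALAXY_SERVER_FIELDS from awx.main.constants:

def galaxy_env(servers, fields=('url', 'username', 'password', 'token', 'auth_url')):
    env = {}
    for server in servers:
        for key in fields:
            if server.get(key):
                env_key = 'ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key).upper()
                env[env_key] = server[key]
    if servers:
        # the order of the list defines which server ansible-galaxy tries first
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(s.get('id', 'unnamed') for s in servers)
    return env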
@@ -2031,8 +2071,8 @@ class RunProjectUpdate(BaseTask):
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
else:
args.append('-v')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args

def build_extra_vars_file(self, project_update, private_data_dir):

@@ -2046,28 +2086,16 @@ class RunProjectUpdate(BaseTask):
scm_branch = project_update.project.scm_revision
elif not scm_branch:
scm_branch = {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')
if project_update.job_type == 'check':
roles_enabled = False
collections_enabled = False
else:
roles_enabled = getattr(settings, 'AWX_ROLES_ENABLED', True)
collections_enabled = getattr(settings, 'AWX_COLLECTIONS_ENABLED', True)
# collections were introduced in Ansible version 2.8
if Version(get_ansible_version()) <= Version('2.8'):
collections_enabled = False
extra_vars.update({
'project_path': project_update.get_project_path(check_if_exists=False),
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license(show_key=False).get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_type': project_update.scm_type,
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_delete_on_update': project_update.scm_delete_on_update if project_update.job_type == 'check' else False,
'scm_full_checkout': True if project_update.job_type == 'run' else False,
'roles_enabled': roles_enabled,
'collections_enabled': collections_enabled,
'roles_enabled': settings.AWX_ROLES_ENABLED,
'collections_enabled': settings.AWX_COLLECTIONS_ENABLED,
})
if project_update.job_type != 'check' and self.job_private_data_dir:
extra_vars['collections_destination'] = os.path.join(self.job_private_data_dir, 'requirements_collections')

@@ -2179,7 +2207,7 @@ class RunProjectUpdate(BaseTask):
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was cancelled".format(instance.pk))
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break

@@ -2208,7 +2236,10 @@ class RunProjectUpdate(BaseTask):
project_path = instance.project.get_project_path(check_if_exists=False)
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
self.original_branch = git_repo.active_branch
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch

@staticmethod
def make_local_copy(project_path, destination_folder, scm_type, scm_revision):

@@ -2240,26 +2271,29 @@ class RunProjectUpdate(BaseTask):
copy_tree(project_path, destination_folder)

def post_run_hook(self, instance, status):
if self.job_private_data_dir:
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(
instance.get_project_path(check_if_exists=False), os.path.join(self.job_private_data_dir, 'project'),
instance.scm_type, self.playbook_new_revision
)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
self.release_lock(instance)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
if self.job_private_data_dir:
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(
instance.get_project_path(check_if_exists=False), os.path.join(self.job_private_data_dir, 'project'),
instance.scm_type, instance.scm_revision
)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
if instance.job_type == 'check' and status not in ('failed', 'canceled',):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
@@ -2354,6 +2388,27 @@ class RunInventoryUpdate(BaseTask):
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')

if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'

config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])

paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(private_data_dir, folder)] + paths
env[env_key] = os.pathsep.join(paths)

return env

def write_args_file(self, private_data_dir, args):
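The hunk above prepends the per-job requirements_collections directory to ANSIBLE_COLLECTIONS_PATHS so collections installed by the project sync take precedence over user- and system-level paths, while any existing entries are preserved. A condensed sketch of that precedence logic; collections_paths is a hypothetical helper, and unlike the real code it does not consult collections_paths from ansible.cfg via read_ansible_config:

import os

def collections_paths(private_data_dir, existing=None):
    default = '~/.ansible/collections:/usr/share/ansible/collections'
    paths = default.split(':')
    if existing and existing not in paths:
        # keep whatever the environment already configured, after the job dir
        paths = [existing] + paths
    # the per-job collections directory always comes first
    paths = [os.path.join(private_data_dir, 'requirements_collections')] + paths
    return os.pathsep.join(paths)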
@@ -2452,7 +2507,7 @@ class RunInventoryUpdate(BaseTask):
# Use the vendored script path
inventory_path = self.get_path_to('..', 'plugins', 'inventory', injector.script_name)
elif src == 'scm':
inventory_path = inventory_update.get_actual_source_path()
inventory_path = os.path.join(private_data_dir, 'project', inventory_update.source_path)
elif src == 'custom':
handle, inventory_path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')

@@ -2473,7 +2528,7 @@ class RunInventoryUpdate(BaseTask):
'''
src = inventory_update.source
if src == 'scm' and inventory_update.source_project_update:
return inventory_update.source_project_update.get_project_path(check_if_exists=False)
return os.path.join(private_data_dir, 'project')
if src in CLOUD_PROVIDERS:
injector = None
if src in InventorySource.injectors:

@@ -2499,6 +2554,7 @@ class RunInventoryUpdate(BaseTask):
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags='update_{},install_collections'.format(source_project.scm_type), # roles are never valid for inventory
status='running',
execution_node=inventory_update.execution_node,
instance_group = inventory_update.instance_group,

@@ -2509,8 +2565,10 @@ class RunInventoryUpdate(BaseTask):

project_update_task = local_project_sync._get_task_class()
try:
project_update_task().run(local_project_sync.id)
inventory_update.inventory_source.scm_last_revision = local_project_sync.project.scm_revision
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(

@@ -2518,6 +2576,13 @@ class RunInventoryUpdate(BaseTask):
job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' %
('project_update', local_project_sync.name, local_project_sync.id)))
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
project_path = source_project.get_project_path(check_if_exists=False)
RunProjectUpdate.make_local_copy(
project_path, os.path.join(private_data_dir, 'project'),
source_project.scm_type, source_project.scm_revision
)

@task()
@@ -2725,10 +2790,11 @@ class RunSystemJob(BaseTask):
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(['--jobs', '--project-updates', '--inventory-updates',
'--management-jobs', '--ad-hoc-commands', '--workflow-jobs',

@@ -2822,4 +2888,4 @@ def deep_copy_model_obj(
), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id, True)
update_inventory_computed_fields.delay(new_obj.id)

@@ -1,7 +1,7 @@
---
- name: Hello World Sample
hosts: all
tasks:
- name: Hello Message
debug:
msg: "Hello World!"

@@ -1,7 +1,7 @@
---
- name: Hello World Sample
hosts: all
tasks:
- name: Hello Message
debug:
msg: "Hello World!"
Some files were not shown because too many files have changed in this diff.