mirror of
https://github.com/ansible/awx.git
synced 2026-02-07 20:44:45 -03:30
Compare commits
1144 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fa1c33da7e | ||
|
|
8ed5964871 | ||
|
|
a989c624c7 | ||
|
|
7f01de26a1 | ||
|
|
e3b5d64aa7 | ||
|
|
eba0e4fd77 | ||
|
|
d3c80eef4d | ||
|
|
3683dfab37 | ||
|
|
8e3931de37 | ||
|
|
be0a7a2aa9 | ||
|
|
d0d8d1c66c | ||
|
|
8a8a48a4ff | ||
|
|
b0aa795b10 | ||
|
|
017064aecf | ||
|
|
7311ddf722 | ||
|
|
69835e9895 | ||
|
|
85960d9035 | ||
|
|
c8ceb62269 | ||
|
|
1acca459ef | ||
|
|
ee6fda9f8a | ||
|
|
a95632c349 | ||
|
|
ed3b6385f1 | ||
|
|
3518fb0c17 | ||
|
|
1289f141d6 | ||
|
|
8464ec5c49 | ||
|
|
3bc5975b90 | ||
|
|
af7e9cb533 | ||
|
|
af2a8f9831 | ||
|
|
f99a43ffa6 | ||
|
|
262d99fde6 | ||
|
|
63f56d33aa | ||
|
|
9cabf3ef4d | ||
|
|
2855be9d26 | ||
|
|
2a4912df3e | ||
|
|
daa312d7ee | ||
|
|
e95938715a | ||
|
|
f5d4f7858a | ||
|
|
25e0efd0b7 | ||
|
|
47a007caee | ||
|
|
cd6d2ed53a | ||
|
|
4de61204c4 | ||
|
|
6b21f2042b | ||
|
|
7820517734 | ||
|
|
2ba1288284 | ||
|
|
149f8a21a6 | ||
|
|
602f2951b9 | ||
|
|
b003f42e22 | ||
|
|
2ee2cd0bd9 | ||
|
|
a79f2ff07a | ||
|
|
75bb7cce22 | ||
|
|
52a253ad18 | ||
|
|
0f74a05fea | ||
|
|
440691387b | ||
|
|
27e6c2d47d | ||
|
|
8b69b08991 | ||
|
|
8714bde1b4 | ||
|
|
28b84d0d71 | ||
|
|
c6111fface | ||
|
|
98e8a09ad3 | ||
|
|
3f9af8fe69 | ||
|
|
dbe949a2c2 | ||
|
|
a296f64696 | ||
|
|
ee18400a33 | ||
|
|
98a4e85db4 | ||
|
|
f7f1bdf9c9 | ||
|
|
69cf915a20 | ||
|
|
9440785bdd | ||
|
|
ca7c840d8c | ||
|
|
f85bcae89f | ||
|
|
a0e31b9c01 | ||
|
|
c414fd68a0 | ||
|
|
2830cdfdeb | ||
|
|
07e9b46643 | ||
|
|
1f01521213 | ||
|
|
8587461ac9 | ||
|
|
e54e5280f2 | ||
|
|
516a44ce73 | ||
|
|
e52cebc28e | ||
|
|
bb5136cdae | ||
|
|
b0db2b7bec | ||
|
|
1000dc10fb | ||
|
|
2a4b009f04 | ||
|
|
8cdd42307c | ||
|
|
269558876e | ||
|
|
bba680671b | ||
|
|
f70a76109c | ||
|
|
5d54877183 | ||
|
|
f7dac8e68d | ||
|
|
39648b4f0b | ||
|
|
b942fde59a | ||
|
|
ce82b87d9f | ||
|
|
70391f96ae | ||
|
|
2329c1b797 | ||
|
|
470159b4d7 | ||
|
|
e740340793 | ||
|
|
4d5507d344 | ||
|
|
d350551547 | ||
|
|
7fd79b8e54 | ||
|
|
eb12f45e8e | ||
|
|
fb047b1267 | ||
|
|
d31c528257 | ||
|
|
996d7ce054 | ||
|
|
7040fcfd88 | ||
|
|
88ca4b63e6 | ||
|
|
c0af3c537b | ||
|
|
f8afae308a | ||
|
|
4cd0d60711 | ||
|
|
955d57bce6 | ||
|
|
589d27c88c | ||
|
|
eafb751ecc | ||
|
|
30ea66023f | ||
|
|
9843e21632 | ||
|
|
6002beb231 | ||
|
|
9c6e42fd1b | ||
|
|
eeab4b90a5 | ||
|
|
7827a2aedd | ||
|
|
a7f1a36ed8 | ||
|
|
d651786206 | ||
|
|
19e4758be1 | ||
|
|
fe9de0d4cc | ||
|
|
80147acc1c | ||
|
|
4acdf8584b | ||
|
|
cf607691ac | ||
|
|
d7adcfb119 | ||
|
|
97d26728e4 | ||
|
|
6403895eae | ||
|
|
8b26ff1fe6 | ||
|
|
9ddd020348 | ||
|
|
a2d1c32da3 | ||
|
|
af18aa8456 | ||
|
|
188b23e88f | ||
|
|
63bed7a30d | ||
|
|
fd93964953 | ||
|
|
1f9f86974a | ||
|
|
6a86af5b43 | ||
|
|
6a503e152a | ||
|
|
b7227113be | ||
|
|
907da2ae61 | ||
|
|
6f76b15d92 | ||
|
|
9d6fbd6c78 | ||
|
|
edb4dac652 | ||
|
|
42898b94e2 | ||
|
|
943543354a | ||
|
|
2da22ccd8a | ||
|
|
9cab5a5046 | ||
|
|
e270a692b7 | ||
|
|
677a8dae7b | ||
|
|
6eeb32a447 | ||
|
|
e57991d498 | ||
|
|
4242bd55c2 | ||
|
|
e8fb466f0f | ||
|
|
869fcbf483 | ||
|
|
6abeaf2c55 | ||
|
|
f734918d3e | ||
|
|
91f2e0c32b | ||
|
|
88d6dd96fa | ||
|
|
7feac5ecd6 | ||
|
|
193ec21149 | ||
|
|
14e62057da | ||
|
|
a26c0dfb8a | ||
|
|
6b4219badb | ||
|
|
1f598e1b12 | ||
|
|
7ddd4d74c0 | ||
|
|
6ad6f48ff0 | ||
|
|
d736adbedc | ||
|
|
c881762c97 | ||
|
|
be5d067148 | ||
|
|
189a10e35a | ||
|
|
285e9c2f62 | ||
|
|
054de87f8e | ||
|
|
7de8a8700c | ||
|
|
4f7669dec1 | ||
|
|
25a1bc7a33 | ||
|
|
955ef3e9cb | ||
|
|
0e8f2307fc | ||
|
|
bcfd2d6aa4 | ||
|
|
7e52f4682c | ||
|
|
9c218fa5f5 | ||
|
|
508aed67de | ||
|
|
0bf1116ef8 | ||
|
|
45df5ba9c4 | ||
|
|
b90a296d41 | ||
|
|
d40143a63d | ||
|
|
db40d550be | ||
|
|
da661e45ae | ||
|
|
58160b9eb4 | ||
|
|
05b28efd9c | ||
|
|
0b433ebb1c | ||
|
|
5b3f5bf37d | ||
|
|
397c0092a0 | ||
|
|
362fdaeecc | ||
|
|
606c3c3595 | ||
|
|
42705c9eb0 | ||
|
|
c2ba495824 | ||
|
|
85a1c88653 | ||
|
|
c4d704bee1 | ||
|
|
60d499e11c | ||
|
|
bb48ef40be | ||
|
|
771ca2400a | ||
|
|
735d44816b | ||
|
|
e346493921 | ||
|
|
bd39fab17a | ||
|
|
ce30594b30 | ||
|
|
2021c2a596 | ||
|
|
ecd1d09c9a | ||
|
|
7dbde8d82c | ||
|
|
4e64b17712 | ||
|
|
cc4c514103 | ||
|
|
ab8726dafa | ||
|
|
2cefba6f96 | ||
|
|
592043fa70 | ||
|
|
59477aa221 | ||
|
|
279fe53837 | ||
|
|
bb319136e4 | ||
|
|
b0f68d97da | ||
|
|
a46462eede | ||
|
|
646e403fbd | ||
|
|
64c846cfc1 | ||
|
|
8e07269738 | ||
|
|
6fc815937b | ||
|
|
014c995a8f | ||
|
|
c1bb62cc36 | ||
|
|
f5cf7c204f | ||
|
|
6d08e21511 | ||
|
|
8b881d195d | ||
|
|
5c9ff51248 | ||
|
|
3f64768ba8 | ||
|
|
fd24918ba8 | ||
|
|
f04e7067e8 | ||
|
|
9a91c0bfb2 | ||
|
|
c06188da56 | ||
|
|
7433aab258 | ||
|
|
37a715c680 | ||
|
|
3d9eb3b600 | ||
|
|
99511de728 | ||
|
|
82b1b85fa4 | ||
|
|
2aa29420ee | ||
|
|
9e331fe029 | ||
|
|
591cdb6015 | ||
|
|
bc244b3600 | ||
|
|
dbe3863b04 | ||
|
|
ae021c37e3 | ||
|
|
8baa9d8458 | ||
|
|
3c888475a5 | ||
|
|
29b567d6e1 | ||
|
|
00aa1ad295 | ||
|
|
4f3213715e | ||
|
|
0389e72197 | ||
|
|
0732795ecc | ||
|
|
a26df3135b | ||
|
|
a904aea519 | ||
|
|
6bd5053ae8 | ||
|
|
8b00b8c9c2 | ||
|
|
2b9acd78c8 | ||
|
|
d7f0642f48 | ||
|
|
8bbae0cc3a | ||
|
|
c00f1505d7 | ||
|
|
a08e6691fb | ||
|
|
98bc499498 | ||
|
|
6d0c42a91a | ||
|
|
79c5a62279 | ||
|
|
3bb671f3f2 | ||
|
|
0b9c5c410a | ||
|
|
d77d5a7734 | ||
|
|
0a00a3104a | ||
|
|
ab36129395 | ||
|
|
e99500cf16 | ||
|
|
299497ea12 | ||
|
|
843c22c6b1 | ||
|
|
86b49b6fe2 | ||
|
|
9489f00ca4 | ||
|
|
6d60e7dadc | ||
|
|
346b9b9e3e | ||
|
|
99384b1db9 | ||
|
|
d1b5a60bb9 | ||
|
|
d57258878d | ||
|
|
48414f6dab | ||
|
|
ff0186f72b | ||
|
|
a682565758 | ||
|
|
0dee2e5973 | ||
|
|
929f4bfb81 | ||
|
|
ac474e2108 | ||
|
|
d6722c2106 | ||
|
|
6eef0b82bd | ||
|
|
fb4343d75e | ||
|
|
a867a32b4e | ||
|
|
3060505110 | ||
|
|
5d68f796aa | ||
|
|
15036ff970 | ||
|
|
32783f7aaf | ||
|
|
8699a8fbc2 | ||
|
|
b4cde80fa9 | ||
|
|
eb4db4ed43 | ||
|
|
649aafb454 | ||
|
|
b6c272e946 | ||
|
|
9fe2211f82 | ||
|
|
4704e24c24 | ||
|
|
e5f293ce52 | ||
|
|
d64b898390 | ||
|
|
498c525b34 | ||
|
|
bb184f8ffb | ||
|
|
7f537dbedf | ||
|
|
f9b8a69f7b | ||
|
|
bc228b8d77 | ||
|
|
7710ad2e57 | ||
|
|
9f2c9b13d7 | ||
|
|
6940704deb | ||
|
|
6b9cacb85f | ||
|
|
cfa0fdaa12 | ||
|
|
4423e6edae | ||
|
|
13faa0ed2e | ||
|
|
42336355bb | ||
|
|
c18aa90534 | ||
|
|
39460fb3d3 | ||
|
|
4f51c1d2c9 | ||
|
|
04ccff0e3f | ||
|
|
2242119182 | ||
|
|
5cba34c34d | ||
|
|
33a699b8ae | ||
|
|
344a4bb238 | ||
|
|
0beda08cf9 | ||
|
|
2264a98c04 | ||
|
|
d19a9db523 | ||
|
|
4b76332daf | ||
|
|
db38339179 | ||
|
|
5eddcdd5f5 | ||
|
|
3480d2da59 | ||
|
|
e60e6c7d08 | ||
|
|
55356ebb51 | ||
|
|
7f4bbbe5c5 | ||
|
|
49b1ce6e8c | ||
|
|
caaefef900 | ||
|
|
96576b0e3d | ||
|
|
288ce123ca | ||
|
|
140dbbaa7d | ||
|
|
e9d11be680 | ||
|
|
d7f117e83f | ||
|
|
eef1246e0b | ||
|
|
65e38aa37d | ||
|
|
c7b23aac9b | ||
|
|
b4ea60eb79 | ||
|
|
24c738c6d8 | ||
|
|
0c26734d7d | ||
|
|
d9b613ccb3 | ||
|
|
831bf9124f | ||
|
|
0b31cad2db | ||
|
|
059e744774 | ||
|
|
827adbce76 | ||
|
|
849a64f20a | ||
|
|
3bbd03732b | ||
|
|
32627ce51a | ||
|
|
4a8f1d41fa | ||
|
|
2b3c57755c | ||
|
|
508c9b3102 | ||
|
|
f8be1f4110 | ||
|
|
d727e69a00 | ||
|
|
04dd1352c9 | ||
|
|
ea54815e6b | ||
|
|
78db965797 | ||
|
|
3326979806 | ||
|
|
230949c43c | ||
|
|
a862a00d24 | ||
|
|
2e8f9185ab | ||
|
|
6d6322ae4d | ||
|
|
914ea54925 | ||
|
|
b9b62e3771 | ||
|
|
e03911d378 | ||
|
|
61287f6b36 | ||
|
|
f6bfdef34d | ||
|
|
7494ba7b9c | ||
|
|
5f62426684 | ||
|
|
6914213aa0 | ||
|
|
83721ff9a8 | ||
|
|
4998c7bf21 | ||
|
|
155a1d9a32 | ||
|
|
6f582b5688 | ||
|
|
579648a017 | ||
|
|
c4ed9a14ef | ||
|
|
21872e7101 | ||
|
|
f2e9a8d1b2 | ||
|
|
301d6ff616 | ||
|
|
d24271849d | ||
|
|
a50b03da17 | ||
|
|
27b5b534a1 | ||
|
|
6bc97158fe | ||
|
|
9ce2a9240a | ||
|
|
6b3befcb94 | ||
|
|
c8044b4755 | ||
|
|
0eb526919f | ||
|
|
3045511401 | ||
|
|
24f334085e | ||
|
|
90d35f07f3 | ||
|
|
e334f33d13 | ||
|
|
464db28be5 | ||
|
|
a8f56f78e9 | ||
|
|
f7ad3d78eb | ||
|
|
61a0d1f77b | ||
|
|
77e99ad355 | ||
|
|
9f4afe6972 | ||
|
|
b99a04dd8d | ||
|
|
357e22eb51 | ||
|
|
9dbf75f2a9 | ||
|
|
eab74cac07 | ||
|
|
979f549d90 | ||
|
|
ca82f48c18 | ||
|
|
5a45a62cb3 | ||
|
|
090349a49b | ||
|
|
c38d13c5ab | ||
|
|
f490a940cf | ||
|
|
42c24419d4 | ||
|
|
e17c93ecd7 | ||
|
|
67d48a87f8 | ||
|
|
b755fa6777 | ||
|
|
ee4dcd2055 | ||
|
|
0f7a4b384b | ||
|
|
02415db881 | ||
|
|
703345e9d8 | ||
|
|
d102b06474 | ||
|
|
55c18fa76c | ||
|
|
d37039a18a | ||
|
|
6335004c94 | ||
|
|
177867de5a | ||
|
|
08bd445caf | ||
|
|
b5776c8eb3 | ||
|
|
8f1db173c1 | ||
|
|
62e93d5c57 | ||
|
|
abfeb735f0 | ||
|
|
68b0b40e91 | ||
|
|
910d926ac3 | ||
|
|
c84ab9f1dc | ||
|
|
65cafa37c7 | ||
|
|
551fd088f5 | ||
|
|
a72e885274 | ||
|
|
bd7c048113 | ||
|
|
91135f638f | ||
|
|
cbc02dd607 | ||
|
|
de09deff66 | ||
|
|
5272d088ed | ||
|
|
22a593f30f | ||
|
|
b56812018a | ||
|
|
ab755134b3 | ||
|
|
ebb0f31b0b | ||
|
|
51ef57188c | ||
|
|
653850fa6d | ||
|
|
8ba4388014 | ||
|
|
f3e8623a21 | ||
|
|
077461a3ef | ||
|
|
795c989a49 | ||
|
|
5e595caf5e | ||
|
|
d941f11ccd | ||
|
|
c4e50cbf7d | ||
|
|
6f3527ed15 | ||
|
|
fe2ebeb872 | ||
|
|
ad7498c3fc | ||
|
|
cb7257f9e6 | ||
|
|
e3ea4e2398 | ||
|
|
e4e2d48f53 | ||
|
|
5bfe89be6e | ||
|
|
47661fad51 | ||
|
|
4b497b8cdc | ||
|
|
31fabad3e5 | ||
|
|
1e48d773ae | ||
|
|
4529429e99 | ||
|
|
ec4a471e7a | ||
|
|
77915544d2 | ||
|
|
5ba90b629e | ||
|
|
e9021bd173 | ||
|
|
49356236ac | ||
|
|
c9015fc0c8 | ||
|
|
4ea1101477 | ||
|
|
27948aa4e1 | ||
|
|
5263d5aced | ||
|
|
8832f667e4 | ||
|
|
f4e56b219d | ||
|
|
abdcdbca76 | ||
|
|
362016c91b | ||
|
|
f1634f092d | ||
|
|
8cd4e9b488 | ||
|
|
1fce77054a | ||
|
|
53c8c80f7f | ||
|
|
3bf7d41bf3 | ||
|
|
34259e24c0 | ||
|
|
1daeec356f | ||
|
|
5573e1c7ce | ||
|
|
1cba98e4a7 | ||
|
|
56d31fec77 | ||
|
|
564012b2c8 | ||
|
|
cfe0607b6a | ||
|
|
7f24d0c0c2 | ||
|
|
3f4e7465a7 | ||
|
|
9c32cb30d4 | ||
|
|
782d465c78 | ||
|
|
1412bf6232 | ||
|
|
e92acce4eb | ||
|
|
ac68e8c4fe | ||
|
|
97a4bb39b6 | ||
|
|
9e00337bc1 | ||
|
|
72672d6bbe | ||
|
|
51f52f6332 | ||
|
|
11b2b17d08 | ||
|
|
e17ff3e03a | ||
|
|
b998d93bfb | ||
|
|
b8ec94a0ae | ||
|
|
a02b332b10 | ||
|
|
56919c5d32 | ||
|
|
47f5c17b56 | ||
|
|
0fb800f5d0 | ||
|
|
d6e94f9c6f | ||
|
|
d5bdfa908a | ||
|
|
0a5acb6520 | ||
|
|
debc339f75 | ||
|
|
06f065766f | ||
|
|
16e672dd38 | ||
|
|
3d7420959e | ||
|
|
4bec46a910 | ||
|
|
0e70543d54 | ||
|
|
88fb30e0da | ||
|
|
558814ef3b | ||
|
|
ace5a0a2b3 | ||
|
|
8a917a5b70 | ||
|
|
1bd74a96d6 | ||
|
|
74ebb0ae59 | ||
|
|
fd56b7c590 | ||
|
|
7f02e64a0d | ||
|
|
d40a5dec8f | ||
|
|
5e481341bc | ||
|
|
0a1070834d | ||
|
|
c7de3b0528 | ||
|
|
a725778b17 | ||
|
|
3b520a8ee8 | ||
|
|
9a38971d47 | ||
|
|
06b3e54fb1 | ||
|
|
7f2e1d46bc | ||
|
|
12158bdcba | ||
|
|
f858eda6b1 | ||
|
|
c5297b0b86 | ||
|
|
e0633c9122 | ||
|
|
3a208a0be2 | ||
|
|
cfdfd96793 | ||
|
|
c4e697879d | ||
|
|
db7f0f9421 | ||
|
|
f1ee963bd0 | ||
|
|
7c3cbe6e58 | ||
|
|
87de0cf0b3 | ||
|
|
18f5dd6e04 | ||
|
|
89163f2915 | ||
|
|
59c9de2761 | ||
|
|
b58c71bb74 | ||
|
|
1caa2e0287 | ||
|
|
770b457430 | ||
|
|
d58df0f34a | ||
|
|
3f5e2a3cd3 | ||
|
|
2b59af3808 | ||
|
|
9e5fe7f5c6 | ||
|
|
093d204d19 | ||
|
|
e25bd931a1 | ||
|
|
8350bb3371 | ||
|
|
d6594ab602 | ||
|
|
b6b9802f9e | ||
|
|
0da94ada2b | ||
|
|
3b9e67ed1b | ||
|
|
14320bc8e6 | ||
|
|
3c5c9c6fde | ||
|
|
f5193e5ea5 | ||
|
|
03b73027e8 | ||
|
|
c06b6306ab | ||
|
|
45ce6d794e | ||
|
|
e94bb44082 | ||
|
|
be58906aed | ||
|
|
403e9bbfb5 | ||
|
|
ea29f4b91f | ||
|
|
3f2d757f4e | ||
|
|
feac93fd24 | ||
|
|
088373963b | ||
|
|
5818dcc980 | ||
|
|
dc6c353ecd | ||
|
|
50b56aa8cb | ||
|
|
3fec69799c | ||
|
|
2a2c34f567 | ||
|
|
558e92806b | ||
|
|
355fb125cb | ||
|
|
c8eeacacca | ||
|
|
d0a3c5a42b | ||
|
|
64139f960f | ||
|
|
eda494be63 | ||
|
|
4a0c371014 | ||
|
|
6b43da35e1 | ||
|
|
afa3b500d3 | ||
|
|
c3efb13020 | ||
|
|
eb28800082 | ||
|
|
3219b9b4ac | ||
|
|
e9a48cceba | ||
|
|
9a7fa1f3a6 | ||
|
|
a03d74d828 | ||
|
|
2274b4b4e4 | ||
|
|
c054d7c3d7 | ||
|
|
26d5d7afdc | ||
|
|
6b51b41897 | ||
|
|
ca3cf244fd | ||
|
|
88d7b24f55 | ||
|
|
ecdb353f6f | ||
|
|
d9932eaf6a | ||
|
|
cbc52fa19f | ||
|
|
cc77b31d4e | ||
|
|
b875c03f4a | ||
|
|
e87f804c92 | ||
|
|
f86cbf33aa | ||
|
|
6db6fe90fd | ||
|
|
bcbe9691e5 | ||
|
|
c850148ee3 | ||
|
|
b260a88810 | ||
|
|
a0937a8b7c | ||
|
|
c4c0cace88 | ||
|
|
a55bcafa3a | ||
|
|
d0c510563f | ||
|
|
d23fb17cd9 | ||
|
|
8668f2ad46 | ||
|
|
e210ee4077 | ||
|
|
47ff56c411 | ||
|
|
1e780aad38 | ||
|
|
80234c5600 | ||
|
|
c8510f7d75 | ||
|
|
6431050b36 | ||
|
|
5c360aeff3 | ||
|
|
44e043d75f | ||
|
|
ef223b0afb | ||
|
|
e9e8283f16 | ||
|
|
b73e8d8a56 | ||
|
|
6db6c6c5ba | ||
|
|
2b5ff9a6f9 | ||
|
|
97c169780d | ||
|
|
88c46b4573 | ||
|
|
53d27c933e | ||
|
|
c340fff643 | ||
|
|
61600a8252 | ||
|
|
521cda878e | ||
|
|
9ecd6ad0fb | ||
|
|
349af22d0f | ||
|
|
ad316fc2a3 | ||
|
|
e4abf634f0 | ||
|
|
bb144acee3 | ||
|
|
16d5456d2b | ||
|
|
abe8153358 | ||
|
|
86aabb297e | ||
|
|
65a7613c26 | ||
|
|
4d1790290e | ||
|
|
dca335d17c | ||
|
|
da48cffa12 | ||
|
|
a2eeb6e7b5 | ||
|
|
f8f6fff21e | ||
|
|
3e616f2770 | ||
|
|
7c6bef15ba | ||
|
|
27b48fe55b | ||
|
|
6b20ffbfdd | ||
|
|
43abeec3d7 | ||
|
|
bd8886a867 | ||
|
|
bd68317cfd | ||
|
|
f8818730d4 | ||
|
|
b41c9e5ba3 | ||
|
|
401be0c265 | ||
|
|
35be571eed | ||
|
|
8e7faa853e | ||
|
|
1ee46ab98a | ||
|
|
ac9f526cf0 | ||
|
|
7120e92078 | ||
|
|
7e6def8acc | ||
|
|
aa4842aea5 | ||
|
|
7547793792 | ||
|
|
7d0b207571 | ||
|
|
daa9282790 | ||
|
|
bdd0b9e4d9 | ||
|
|
1876849d89 | ||
|
|
e4dd2728ef | ||
|
|
88571f6dcb | ||
|
|
a1cdc07944 | ||
|
|
eea80c45d1 | ||
|
|
beb8021580 | ||
|
|
07565b5efc | ||
|
|
3755151cc5 | ||
|
|
36078651d3 | ||
|
|
8b768bcb01 | ||
|
|
16d9e1cfc7 | ||
|
|
2584f7359e | ||
|
|
286cec8bc3 | ||
|
|
64b1aa43b1 | ||
|
|
0fd9153cf7 | ||
|
|
c95624e27f | ||
|
|
5cf33f57a4 | ||
|
|
5c331934e2 | ||
|
|
7ac21b4922 | ||
|
|
04fe18d840 | ||
|
|
b9829e2bde | ||
|
|
a99d4a8419 | ||
|
|
676b29346c | ||
|
|
6c7ab97159 | ||
|
|
8077c910b0 | ||
|
|
feef39c5cc | ||
|
|
e80843846e | ||
|
|
208dbc1f92 | ||
|
|
2cb5046ec6 | ||
|
|
ecc68c1003 | ||
|
|
356b674a49 | ||
|
|
185c581007 | ||
|
|
789397d56c | ||
|
|
e816f73ecf | ||
|
|
bbe5789e70 | ||
|
|
ad1a7fc9c9 | ||
|
|
dd5f25186b | ||
|
|
ecb7147614 | ||
|
|
87396f968c | ||
|
|
87bfb82907 | ||
|
|
65e988b44c | ||
|
|
4381a7d75c | ||
|
|
3a6528dc0d | ||
|
|
c113c2db52 | ||
|
|
b7d2d6ad89 | ||
|
|
01d77d5407 | ||
|
|
87c6ed52cd | ||
|
|
0d5a46a6e1 | ||
|
|
f3ab3de1be | ||
|
|
871695ea5e | ||
|
|
0f5b694514 | ||
|
|
9567dc9e17 | ||
|
|
24d35e9593 | ||
|
|
b6be8ca445 | ||
|
|
beb10feb0c | ||
|
|
6ec9c45341 | ||
|
|
9a394a5726 | ||
|
|
25f4aa19b7 | ||
|
|
7ff5bacce5 | ||
|
|
3e820a88e1 | ||
|
|
c1ab118481 | ||
|
|
3952be9429 | ||
|
|
35f414ccf2 | ||
|
|
304bf6805b | ||
|
|
e733506477 | ||
|
|
f4366be419 | ||
|
|
64018a71bb | ||
|
|
0c9c349fb9 | ||
|
|
6dd4d04bf5 | ||
|
|
21b4455ee6 | ||
|
|
314e345825 | ||
|
|
90e047821d | ||
|
|
01fe89e43c | ||
|
|
1f2edd81ab | ||
|
|
862de0b6f3 | ||
|
|
d75c2d9b44 | ||
|
|
1b8ff1f846 | ||
|
|
a93b1aa339 | ||
|
|
4c6191041c | ||
|
|
edb3f6df55 | ||
|
|
7a3ece7fd2 | ||
|
|
73e867b6f5 | ||
|
|
acc34c1393 | ||
|
|
3d5a002676 | ||
|
|
bb6d9af90b | ||
|
|
da94b2dc9e | ||
|
|
a1c2db3db5 | ||
|
|
d849e81891 | ||
|
|
a5afac62ca | ||
|
|
66c98ca9bc | ||
|
|
a00e7c7050 | ||
|
|
cd1ff6b16a | ||
|
|
b560a21ca3 | ||
|
|
63fa367e9d | ||
|
|
d33daeee91 | ||
|
|
9d449c419b | ||
|
|
e34e88549f | ||
|
|
c073e39c69 | ||
|
|
4fcd2c594c | ||
|
|
457dc956f1 | ||
|
|
3e5428131c | ||
|
|
d08f59272e | ||
|
|
8b95d7be94 | ||
|
|
6c22ddf608 | ||
|
|
8227054d11 | ||
|
|
73b33e1435 | ||
|
|
deaf2028ad | ||
|
|
d812972d3c | ||
|
|
54b553b78c | ||
|
|
3e08bbeb93 | ||
|
|
22524589e3 | ||
|
|
85ec73bf4b | ||
|
|
ccd36b9560 | ||
|
|
61755e2838 | ||
|
|
be56a1d3df | ||
|
|
46c86ea6c0 | ||
|
|
401c7c3da2 | ||
|
|
f1120d39db | ||
|
|
80617df22d | ||
|
|
b5e5fea117 | ||
|
|
e3ec63e8e5 | ||
|
|
e232cd392c | ||
|
|
8301254f57 | ||
|
|
9cdfc19215 | ||
|
|
c50705a2dc | ||
|
|
9f948e90d9 | ||
|
|
e7f36eb2ea | ||
|
|
c261d6acf0 | ||
|
|
32ef805e23 | ||
|
|
d009ce49f5 | ||
|
|
d14bf00f6c | ||
|
|
5dc4e30820 | ||
|
|
afbeacf499 | ||
|
|
fc80cf5241 | ||
|
|
4a6db13daa | ||
|
|
d5372dae36 | ||
|
|
0b702ede4e | ||
|
|
3c7f596288 | ||
|
|
6207dad226 | ||
|
|
2b48e43946 | ||
|
|
4709f57f46 | ||
|
|
b055aad641 | ||
|
|
acfa6d056f | ||
|
|
51a069fcc4 | ||
|
|
fc89b627d7 | ||
|
|
e90ee5113d | ||
|
|
4ccca08cda | ||
|
|
b757fdebf8 | ||
|
|
3ee6f1f3c7 | ||
|
|
d4ba32d0c5 | ||
|
|
d97f516c3a | ||
|
|
52a8935b20 | ||
|
|
07752f48f6 | ||
|
|
10b5a10728 | ||
|
|
e11ff69f3e | ||
|
|
d3fa34c665 | ||
|
|
48a615231b | ||
|
|
b09ac71647 | ||
|
|
d5dd3c521f | ||
|
|
db43341f96 | ||
|
|
3234f246db | ||
|
|
6d6d99bcf8 | ||
|
|
a6cd32522f | ||
|
|
1fe28463da | ||
|
|
51a6194b8d | ||
|
|
e75f7b0beb | ||
|
|
179c62e2f3 | ||
|
|
98f5525d28 | ||
|
|
60a137225a | ||
|
|
c1bfcd73fb | ||
|
|
322b4ee1e4 | ||
|
|
98dc6179f5 | ||
|
|
07807c2dec | ||
|
|
16ecf17c69 | ||
|
|
1f0acef844 | ||
|
|
5a164cae15 | ||
|
|
b57405b696 | ||
|
|
5fdf6cf60f | ||
|
|
c1c382a941 | ||
|
|
a997b40852 | ||
|
|
99cd2e601d | ||
|
|
fc402aff29 | ||
|
|
2ec035f918 | ||
|
|
fe046b47b5 | ||
|
|
3e0e4b6c8f | ||
|
|
7fe57268f6 | ||
|
|
9eecb24c32 | ||
|
|
a8a45fca84 | ||
|
|
33df6f8ad2 | ||
|
|
44223003aa | ||
|
|
a60e7a7855 | ||
|
|
e971ec993b | ||
|
|
989ef3538e | ||
|
|
4db3e823bf | ||
|
|
c374316648 | ||
|
|
5dba49a7bc | ||
|
|
7b880c6552 | ||
|
|
5574cf0595 | ||
|
|
e706e0a2e2 | ||
|
|
5364e78397 | ||
|
|
f93ca814ac | ||
|
|
3bf1ad3028 | ||
|
|
e096ad18cb | ||
|
|
5ca73f1101 | ||
|
|
7e8fb29658 | ||
|
|
258689a9ed | ||
|
|
e80e3f7410 | ||
|
|
154b9c36ac | ||
|
|
deced917cf | ||
|
|
88b7256e96 | ||
|
|
033848a605 | ||
|
|
0e663921d6 | ||
|
|
0582079606 | ||
|
|
6536f5a453 | ||
|
|
c5079607aa | ||
|
|
26dcb000f6 | ||
|
|
8ba4f728c0 | ||
|
|
ee090d34fa | ||
|
|
bd30951a4f | ||
|
|
43cce83ba1 | ||
|
|
946d643795 | ||
|
|
1a6ea99d37 | ||
|
|
350046d495 | ||
|
|
b532012748 | ||
|
|
1c4042340c | ||
|
|
787c4af222 | ||
|
|
768280c9ba | ||
|
|
2e4e687d69 | ||
|
|
d8513a4e86 | ||
|
|
badd667efa | ||
|
|
7908f25747 | ||
|
|
0eef67713f | ||
|
|
6591efc160 | ||
|
|
fcc679489e | ||
|
|
94df58a55b | ||
|
|
0685b2fa35 | ||
|
|
232ea1e50c | ||
|
|
3423db6ed0 | ||
|
|
c32452d6b6 | ||
|
|
018dd4c1c3 | ||
|
|
4fc2c58ae7 | ||
|
|
b4014ebabf | ||
|
|
9955ee6548 | ||
|
|
c08d402e66 | ||
|
|
1c505beba6 | ||
|
|
8a0432efb7 | ||
|
|
320276f8ca | ||
|
|
f89061da41 | ||
|
|
c23d605a7a | ||
|
|
6d90cac3f9 | ||
|
|
89e92bd337 | ||
|
|
9271127c53 | ||
|
|
9fa5942791 | ||
|
|
e028ed878e | ||
|
|
838b2b7d1e | ||
|
|
7c0ad461a5 | ||
|
|
68926dad27 | ||
|
|
ceb6f6c47d | ||
|
|
167e99fce9 | ||
|
|
c930011616 | ||
|
|
aaaca63f83 | ||
|
|
d8a9f663b1 | ||
|
|
b0d0ccf44f | ||
|
|
c57754a29b | ||
|
|
65057c1fb7 | ||
|
|
d8be6490c2 | ||
|
|
b34208d1b6 | ||
|
|
0d5a9e9c8c | ||
|
|
22d4e60028 | ||
|
|
eaa766df77 | ||
|
|
7e5776c66f | ||
|
|
8b1806d4ca | ||
|
|
07232f3694 | ||
|
|
37a33f931a | ||
|
|
4912cbd2da | ||
|
|
4c40819791 | ||
|
|
a65fd497c6 | ||
|
|
d824209485 | ||
|
|
7ae1c7c3d2 | ||
|
|
341c6ae767 | ||
|
|
e6a94ed0cf | ||
|
|
3e6b6c05a6 | ||
|
|
544d4cd3b0 | ||
|
|
e0df2f511e | ||
|
|
255fd0a9cb | ||
|
|
f31adf8a85 | ||
|
|
a2b169626a | ||
|
|
6ffc78bcb0 | ||
|
|
8e9fc550f6 | ||
|
|
779d190855 | ||
|
|
89a4b03d45 | ||
|
|
ccd4cdd71a | ||
|
|
31dbf38a35 | ||
|
|
d0bec97bbb | ||
|
|
22307bba97 | ||
|
|
b4f5d44f65 | ||
|
|
d469870686 | ||
|
|
f561bf5754 | ||
|
|
2e3547d5cf | ||
|
|
ce8897d3e8 | ||
|
|
df77147d65 | ||
|
|
9b11df04b3 | ||
|
|
58c06d5aea | ||
|
|
1d3bb97b07 | ||
|
|
ba3253e2e2 | ||
|
|
e6f0c01aa6 | ||
|
|
9310d59e0a | ||
|
|
f2e1e71302 | ||
|
|
e6e31a9fc6 | ||
|
|
801aaf9323 | ||
|
|
2a8679234a | ||
|
|
54ab671512 | ||
|
|
866dd6b259 | ||
|
|
eba893c99b | ||
|
|
fd3f410cc6 | ||
|
|
03aaf93cef | ||
|
|
9aef57003a | ||
|
|
6065eb0e65 | ||
|
|
7e4634c81f | ||
|
|
a03d73776f | ||
|
|
f14eb4327d | ||
|
|
4ebd721cc5 | ||
|
|
21a92176b9 | ||
|
|
ad04b02e24 | ||
|
|
bc0511fe66 | ||
|
|
1accb9f939 | ||
|
|
9253f16e36 | ||
|
|
42387166bf | ||
|
|
0b5f892193 | ||
|
|
1a0d36a6fd | ||
|
|
cf3ed0dc88 | ||
|
|
8d26d7861e | ||
|
|
8e0ad2ef6e | ||
|
|
0aba4c36af | ||
|
|
44cd199078 | ||
|
|
ce909093c0 | ||
|
|
df13a8fea9 | ||
|
|
ff823c9fdb | ||
|
|
a42ff9865b | ||
|
|
7e13f78567 | ||
|
|
e2fb83db98 | ||
|
|
06eb1b6683 | ||
|
|
d62994ec02 | ||
|
|
f20859c85f | ||
|
|
b5b8adb451 | ||
|
|
70b287490b | ||
|
|
0976e9e569 | ||
|
|
83a96757db | ||
|
|
9013dcfea7 | ||
|
|
4ebc2573a3 | ||
|
|
fe9b03a189 | ||
|
|
d2f6c367f0 | ||
|
|
34b717d00c | ||
|
|
0d31b05f98 | ||
|
|
87a0e40331 | ||
|
|
764c0b2e15 | ||
|
|
23677b4963 | ||
|
|
96d9d41f19 | ||
|
|
a737f35653 | ||
|
|
ed8133be2d | ||
|
|
7c8c6b5333 | ||
|
|
46fceb03a5 | ||
|
|
4dee5eddeb | ||
|
|
709482bdac | ||
|
|
62ef1baace | ||
|
|
1fc3d2e914 | ||
|
|
d271a8c9fa | ||
|
|
3bd7b3b0f8 | ||
|
|
8075cda34c | ||
|
|
09d6da117a | ||
|
|
8f6b679c85 | ||
|
|
32e017bd03 | ||
|
|
74a31224e0 | ||
|
|
667b27fe78 | ||
|
|
4c8a4013cc | ||
|
|
5e4d73b6a3 | ||
|
|
da486d7788 | ||
|
|
30d97e2fa8 | ||
|
|
3a95114c3a | ||
|
|
1f3ad85403 | ||
|
|
90cb02e0bf | ||
|
|
6e2bd828a1 | ||
|
|
fbbf5046ac | ||
|
|
47abb6f85f | ||
|
|
717698b659 | ||
|
|
6a29a0898a | ||
|
|
1833872be9 | ||
|
|
4d06c812e6 | ||
|
|
3b71d2a37b | ||
|
|
0c0cacb0d6 | ||
|
|
f57fff732e | ||
|
|
54ddeaf046 | ||
|
|
69a1a02c70 | ||
|
|
c824f0d590 | ||
|
|
c336c989e7 | ||
|
|
f6523ab5a0 | ||
|
|
47c783da37 | ||
|
|
74afc7b424 | ||
|
|
4ac5a1e15a | ||
|
|
48eeeea7f3 | ||
|
|
aa6857fd38 | ||
|
|
25fe2a2ce6 | ||
|
|
3d1e3741cd | ||
|
|
2ef57e0221 | ||
|
|
bc08c02b89 | ||
|
|
50c74a2ec8 | ||
|
|
887469d73e | ||
|
|
f9debb8f94 | ||
|
|
b3929d1177 | ||
|
|
e3cfdb74ba | ||
|
|
1d0e752989 | ||
|
|
05a3bb0622 | ||
|
|
bc7fd26af6 | ||
|
|
048d4dbd95 | ||
|
|
c70e5357d3 | ||
|
|
7576ba2ade | ||
|
|
877e630a90 | ||
|
|
ef854aabb7 | ||
|
|
fc3f19bd2b | ||
|
|
2bbcd2d663 | ||
|
|
a786118415 | ||
|
|
65429e581a | ||
|
|
eb6f4dca55 | ||
|
|
ce09c4b3cd | ||
|
|
c971e9d61c | ||
|
|
e34bf90ca7 | ||
|
|
700296a558 | ||
|
|
492ea0616e | ||
|
|
eddb6e1faf | ||
|
|
f98b274177 | ||
|
|
662ff41fe9 | ||
|
|
fd146dde30 | ||
|
|
e394d0a6f6 | ||
|
|
5a1a47b7aa | ||
|
|
3d5c32c354 | ||
|
|
01cc0ac8f1 | ||
|
|
5a9248e619 | ||
|
|
1d84d03566 | ||
|
|
50ba4f9759 | ||
|
|
de55af6ae6 | ||
|
|
ca478ac880 | ||
|
|
78ea643460 | ||
|
|
0db0f81e53 | ||
|
|
c94680eaba | ||
|
|
5b4ed6dd59 | ||
|
|
4e811c744a | ||
|
|
cd6d2299a9 | ||
|
|
590199baff | ||
|
|
3b9dd3ba8c | ||
|
|
446021cf22 | ||
|
|
ef3ab29649 | ||
|
|
f4e09eee80 | ||
|
|
af4e4b4064 | ||
|
|
10c6297706 | ||
|
|
73a9541e39 | ||
|
|
3a2a61af82 | ||
|
|
774e7fb248 | ||
|
|
a5e3d9558f | ||
|
|
1ae86ae752 | ||
|
|
1a30a0e397 | ||
|
|
490b76b141 | ||
|
|
3831efb3be | ||
|
|
a8fa816165 | ||
|
|
11ccfd8449 | ||
|
|
c33cc82d53 | ||
|
|
c7516ec50e | ||
|
|
92cc597e84 | ||
|
|
7402ac29a8 | ||
|
|
5c3fe51982 | ||
|
|
f61af39f08 | ||
|
|
1ad7e663a1 | ||
|
|
ca85020b26 | ||
|
|
5d2912605f |
5
.github/ISSUE_TEMPLATE.md
vendored
5
.github/ISSUE_TEMPLATE.md
vendored
@@ -30,8 +30,9 @@ https://www.ansible.com/security
|
||||
|
||||
##### STEPS TO REPRODUCE
|
||||
|
||||
<!-- For bugs, please show exactly how to reproduce the problem. For new
|
||||
features, show how the feature would be used. -->
|
||||
<!-- For new features, show how the feature would be used. For bugs, please show
|
||||
exactly how to reproduce the problem. Ideally, provide all steps and data needed
|
||||
to recreate the bug from a new awx install. -->
|
||||
|
||||
##### EXPECTED RESULTS
|
||||
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -31,6 +31,7 @@ awx/ui/templates/ui/installing.html
|
||||
awx/ui_next/node_modules/
|
||||
awx/ui_next/coverage/
|
||||
awx/ui_next/build/locales/_build
|
||||
rsyslog.pid
|
||||
/tower-license
|
||||
/tower-license/**
|
||||
tools/prometheus/data
|
||||
|
||||
53
CHANGELOG.md
53
CHANGELOG.md
@@ -2,6 +2,57 @@
|
||||
|
||||
This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.
|
||||
|
||||
## 11.0.0 (Apr 16, 2020)
|
||||
- As of AWX 11.0.0, Kubernetes-based deployments use a Deployment rather than a StatefulSet.
|
||||
- Reimplemented external logging support using rsyslogd to improve reliability and address a number of issues (https://github.com/ansible/awx/issues/5155)
|
||||
- Changed activity stream logs to include summary fields for related objects (https://github.com/ansible/awx/issues/1761)
|
||||
- Added code to more gracefully attempt to reconnect to redis if it restarts/becomes unavailable (https://github.com/ansible/awx/pull/6670)
|
||||
- Fixed a bug that caused REFRESH_TOKEN_EXPIRE_SECONDS to not properly be respected for OAuth2.0 refresh tokens generated by AWX (https://github.com/ansible/awx/issues/6630)
|
||||
- Fixed a bug that broke schedules containing RRULES with very old DTSTART dates (https://github.com/ansible/awx/pull/6550)
|
||||
- Fixed a bug that broke installs on older versions of Ansible packaged with certain Linux distributions (https://github.com/ansible/awx/issues/5501)
|
||||
- Fixed a bug that caused the activity stream to sometimes report the incorrect actor when associating user membership on SAML login (https://github.com/ansible/awx/pull/6525)
|
||||
- Fixed a bug in AWX's Grafana notification support when annotation tags are omitted (https://github.com/ansible/awx/issues/6580)
|
||||
- Fixed a bug that prevented some users from searching for Source Control credentials in the AWX user interface (https://github.com/ansible/awx/issues/6600)
|
||||
- Fixed a bug that prevented disassociating orphaned users from credentials (https://github.com/ansible/awx/pull/6554)
|
||||
- Updated Twisted to address CVE-2020-10108 and CVE-2020-10109.
|
||||
|
||||
## 10.0.0 (Mar 30, 2020)
|
||||
- As of AWX 10.0.0, the official AWX CLI no longer supports Python 2 (it requires at least Python 3.6) (https://github.com/ansible/awx/pull/6327)
|
||||
- AWX no longer relies on RabbitMQ; Redis is added as a new dependency (https://github.com/ansible/awx/issues/5443)
|
||||
- Altered AWX's event tables to allow more than ~2 billion total events (https://github.com/ansible/awx/issues/6010)
|
||||
- Improved the performance (time to execute, and memory consumption) of the periodic job cleanup system job (https://github.com/ansible/awx/pull/6166)
|
||||
- Updated Job Templates so they now have an explicit Organization field (it is no longer inferred from the associated Project) (https://github.com/ansible/awx/issues/3903)
|
||||
- Updated social-auth-core to address an upcoming GitHub API deprecation (https://github.com/ansible/awx/issues/5970)
|
||||
- Updated to ansible-runner 1.4.6 to address various bugs.
|
||||
- Updated Django to address CVE-2020-9402
|
||||
- Updated pyyaml version to address CVE-2017-18342
|
||||
- Fixed a bug which prevented the new `scm_branch` field from being used in custom notification templates (https://github.com/ansible/awx/issues/6258)
|
||||
- Fixed a race condition that sometimes causes success/failure notifications to include an incomplete list of hosts (https://github.com/ansible/awx/pull/6290)
|
||||
- Fixed a bug that can cause certain setting pages to lose unsaved form edits when a playbook is launched (https://github.com/ansible/awx/issues/5265)
|
||||
- Fixed a bug that can prevent the "Use TLS/SSL" field from properly saving when editing email notification templates (https://github.com/ansible/awx/issues/6383)
|
||||
- Fixed a race condition that sometimes broke event/stdout processing for jobs launched in container groups (https://github.com/ansible/awx/issues/6280)
|
||||
|
||||
## 9.3.0 (Mar 12, 2020)
|
||||
- Added the ability to specify an OAuth2 token description in the AWX CLI (https://github.com/ansible/awx/issues/6122)
|
||||
- Added support for K8S service account annotations to the installer (https://github.com/ansible/awx/pull/6007)
|
||||
- Added support for K8S imagePullSecrets to the installer (https://github.com/ansible/awx/pull/5989)
|
||||
- Launching jobs (and workflows) using the --monitor flag in the AWX CLI now returns a non-zero exit code on job failure (https://github.com/ansible/awx/issues/5920)
|
||||
- Improved UI performance for various job views when many simultaneous users are logged into AWX (https://github.com/ansible/awx/issues/5883)
|
||||
- Updated to the latest version of Django to address a few open CVEs (https://github.com/ansible/awx/pull/6080)
|
||||
- Fixed a critical bug which can cause AWX to hang and stop launching playbooks after a periodic of time (https://github.com/ansible/awx/issues/5617)
|
||||
- Fixed a bug which caused delays in project update stdout for certain large SCM clones (as of Ansible 2.9+) (https://github.com/ansible/awx/pull/6254)
|
||||
- Fixed a bug which caused certain smart inventory filters to mistakenly return duplicate hosts (https://github.com/ansible/awx/pull/5972)
|
||||
- Fixed an unclear server error when creating smart inventories with the AWX collection (https://github.com/ansible/awx/issues/6250)
|
||||
- Fixed a bug that broke Grafana notification support (https://github.com/ansible/awx/issues/6137)
|
||||
- Fixed a UI bug which prevent users with read access to an organization from editing credentials for that organization (https://github.com/ansible/awx/pull/6241)
|
||||
- Fixed a bug which prevent workflow approval records from recording a `started` and `elapsed` date (https://github.com/ansible/awx/issues/6202)
|
||||
- Fixed a bug which caused workflow nodes to have a confusing option for `verbosity` (https://github.com/ansible/awx/issues/6196)
|
||||
- Fixed an RBAC bug which prevented projects and inventory schedules from being created by certain users in certain contexts (https://github.com/ansible/awx/issues/5717)
|
||||
- Fixed a bug that caused `role_path` in a project's config to not be respected due to an error processing `/etc/ansible/ansible.cfg` (https://github.com/ansible/awx/pull/6038)
|
||||
- Fixed a bug that broke inventory updates for installs with custom home directories for the awx user (https://github.com/ansible/awx/pull/6152)
|
||||
- Fixed a bug that broke fact data collection when AWX encounters invalid/unexpected fact data (https://github.com/ansible/awx/issues/5935)
|
||||
|
||||
|
||||
## 9.2.0 (Feb 12, 2020)
|
||||
- Added the ability to configure the convergence behavior of workflow nodes https://github.com/ansible/awx/issues/3054
|
||||
- AWX now allows for a configurable global limit for fork count (per-job run). The default maximum is 200. https://github.com/ansible/awx/pull/5604
|
||||
@@ -58,7 +109,7 @@ This is a list of high-level changes for each release of AWX. A full list of com
|
||||
- Fixed a bug in the CLI which incorrectly parsed launch time arguments for `awx job_templates launch` and `awx workflow_job_templates launch` (https://github.com/ansible/awx/issues/5093).
|
||||
- Fixed a bug that caused inventory updates using "sourced from a project" to stop working (https://github.com/ansible/awx/issues/4750).
|
||||
- Fixed a bug that caused Slack notifications to sometimes show the wrong bot avatar (https://github.com/ansible/awx/pull/5125).
|
||||
- Fixed a bug that prevented the use of digits in Tower's URL settings (https://github.com/ansible/awx/issues/5081).
|
||||
- Fixed a bug that prevented the use of digits in AWX's URL settings (https://github.com/ansible/awx/issues/5081).
|
||||
|
||||
## 8.0.0 (Oct 21, 2019)
|
||||
|
||||
|
||||
@@ -155,12 +155,11 @@ If you start a second terminal session, you can take a look at the running conta
|
||||
(host)$ docker ps
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
aa4a75d6d77b gcr.io/ansible-tower-engineering/awx_devel:devel "/tini -- /bin/sh ..." 23 seconds ago Up 15 seconds 0.0.0.0:5555->5555/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 22/tcp, 0.0.0.0:8080->8080/tcp tools_awx_1
|
||||
e4c0afeb548c postgres:10 "docker-entrypoint..." 26 seconds ago Up 23 seconds 5432/tcp tools_postgres_1
|
||||
0089699d5afd tools_logstash "/docker-entrypoin..." 26 seconds ago Up 25 seconds tools_logstash_1
|
||||
4d4ff0ced266 memcached:alpine "docker-entrypoint..." 26 seconds ago Up 25 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
|
||||
92842acd64cd rabbitmq:3-management "docker-entrypoint..." 26 seconds ago Up 24 seconds 4369/tcp, 5671-5672/tcp, 15671/tcp, 25672/tcp, 0.0.0.0:15672->15672/tcp tools_rabbitmq_1
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
44251b476f98 gcr.io/ansible-tower-engineering/awx_devel:devel "/entrypoint.sh /bin…" 27 seconds ago Up 23 seconds 0.0.0.0:6899->6899/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 0.0.0.0:8080->8080/tcp, 22/tcp, 0.0.0.0:8888->8888/tcp tools_awx_run_9e820694d57e
|
||||
b049a43817b4 memcached:alpine "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
|
||||
40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:6379->6379/tcp tools_redis_1
|
||||
b66a506d3007 postgres:10 "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:5432->5432/tcp tools_postgres_1
|
||||
```
|
||||
**NOTE**
|
||||
|
||||
@@ -216,18 +215,23 @@ Using `docker exec`, this will create a session in the running *awx* container,
|
||||
If you want to start and use the development environment, you'll first need to bootstrap it by running the following command:
|
||||
|
||||
```bash
|
||||
(container)# /bootstrap_development.sh
|
||||
(container)# /usr/bin/bootstrap_development.sh
|
||||
```
|
||||
|
||||
The above will do all the setup tasks, including running database migrations, so it may take a couple minutes.
|
||||
The above will do all the setup tasks, including running database migrations, so it may take a couple minutes. Once it's done it
|
||||
will drop you back to the shell.
|
||||
|
||||
Now you can start each service individually, or start all services in a pre-configured tmux session like so:
|
||||
In order to launch all developer services:
|
||||
|
||||
```bash
|
||||
(container)# cd /awx_devel
|
||||
(container)# make server
|
||||
(container)# /usr/bin/launch_awx.sh
|
||||
```
|
||||
|
||||
`launch_awx.sh` also calls `bootstrap_development.sh` so if all you are doing is launching the supervisor to start all services, you don't
|
||||
need to call `bootstrap_development.sh` first.
|
||||
|
||||
|
||||
|
||||
### Post Build Steps
|
||||
|
||||
Before you can log in and use the system, you will need to create an admin user. Optionally, you may also want to load some demo data.
|
||||
|
||||
51
INSTALL.md
51
INSTALL.md
@@ -41,6 +41,8 @@ This document provides a guide for installing AWX.
|
||||
+ [Run the installer](#run-the-installer-2)
|
||||
+ [Post-install](#post-install-2)
|
||||
+ [Accessing AWX](#accessing-awx-2)
|
||||
- [Installing the AWX CLI](#installing-the-awx-cli)
|
||||
* [Building the CLI Documentation](#building-the-cli-documentation)
|
||||
|
||||
|
||||
## Getting started
|
||||
@@ -128,7 +130,6 @@ For convenience, you can create a file called `vars.yml`:
|
||||
```
|
||||
admin_password: 'adminpass'
|
||||
pg_password: 'pgpass'
|
||||
rabbitmq_password: 'rabbitpass'
|
||||
secret_key: 'mysupersecret'
|
||||
```
|
||||
|
||||
@@ -476,7 +477,7 @@ Before starting the install process, review the [inventory](./installer/inventor
|
||||
|
||||
*ssl_certificate*
|
||||
|
||||
> Optionally, provide the path to a file that contains a certificate and its private key.
|
||||
> Optionally, provide the path to a file that contains a certificate and its private key. This needs to be a .pem-file
|
||||
|
||||
*docker_compose_dir*
|
||||
|
||||
@@ -506,10 +507,6 @@ If you wish to tag and push built images to a Docker registry, set the following
|
||||
|
||||
> Username of the user that will push images to the registry. Defaults to *developer*.
|
||||
|
||||
*docker_remove_local_images*
|
||||
|
||||
> Due to the way that the docker_image module behaves, images will not be pushed to a remote repository if they are present locally. Set this to delete local versions of the images that will be pushed to the remote. This will fail if containers are currently running from those images.
|
||||
|
||||
**Note**
|
||||
|
||||
> These settings are ignored if using official images
|
||||
@@ -559,16 +556,7 @@ $ ansible-playbook -i inventory -e docker_registry_password=password install.yml
|
||||
|
||||
### Post-install
|
||||
|
||||
After the playbook run completes, Docker will report up to 5 running containers. If you chose to use an existing PostgresSQL database, then it will report 4. You can view the running containers using the `docker ps` command, as follows:
|
||||
|
||||
```bash
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
e240ed8209cd awx_task:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 8052/tcp awx_task
|
||||
1cfd02601690 awx_web:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 0.0.0.0:443->8052/tcp awx_web
|
||||
55a552142bcd memcached:alpine "docker-entrypoint..." 2 minutes ago Up 2 minutes 11211/tcp memcached
|
||||
84011c072aad rabbitmq:3 "docker-entrypoint..." 2 minutes ago Up 2 minutes 4369/tcp, 5671-5672/tcp, 25672/tcp rabbitmq
|
||||
97e196120ab3 postgres:9.6 "docker-entrypoint..." 2 minutes ago Up 2 minutes 5432/tcp postgres
|
||||
```
|
||||
After the playbook run completes, Docker starts a series of containers that provide the services that make up AWX. You can view the running containers using the `docker ps` command.
|
||||
|
||||
If you're deploying using Docker Compose, container names will be prefixed by the name of the folder where the docker-compose.yml file is created (by default, `awx`).
|
||||
|
||||
@@ -634,3 +622,34 @@ Added instance awx to tower
|
||||
The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).
|
||||
|
||||
You will prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
|
||||
|
||||
|
||||
# Installing the AWX CLI
|
||||
|
||||
`awx` is the official command-line client for AWX. It:
|
||||
|
||||
* Uses naming and structure consistent with the AWX HTTP API
|
||||
* Provides consistent output formats with optional machine-parsable formats
|
||||
* To the extent possible, auto-detects API versions, available endpoints, and
|
||||
feature support across multiple versions of AWX.
|
||||
|
||||
Potential uses include:
|
||||
|
||||
* Configuring and launching jobs/playbooks
|
||||
* Checking on the status and output of job runs
|
||||
* Managing objects like organizations, users, teams, etc...
|
||||
|
||||
The preferred way to install the AWX CLI is through pip directly from GitHub:
|
||||
|
||||
pip install "https://github.com/ansible/awx/archive/$VERSION.tar.gz#egg=awxkit&subdirectory=awxkit"
|
||||
awx --help
|
||||
|
||||
...where ``$VERSION`` is the version of AWX you're running. To see a list of all available releases, visit: https://github.com/ansible/awx/releases
|
||||
|
||||
## Building the CLI Documentation
|
||||
|
||||
To build the docs, spin up a real AWX server, `pip install sphinx sphinxcontrib-autoprogram`, and run:
|
||||
|
||||
~ TOWER_HOST=https://awx.example.org TOWER_USERNAME=example TOWER_PASSWORD=secret make clean html
|
||||
~ cd build/html/ && python -m http.server
|
||||
Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) ..
|
||||
|
||||
80
Makefile
80
Makefile
@@ -18,7 +18,6 @@ COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
COMPOSE_HOST ?= $(shell hostname)
|
||||
|
||||
VENV_BASE ?= /venv
|
||||
COLLECTION_VENV ?= /awx_devel/awx_collection_test_venv
|
||||
SCL_PREFIX ?=
|
||||
CELERY_SCHEDULE_FILE ?= /var/lib/awx/beat.db
|
||||
|
||||
@@ -167,8 +166,7 @@ virtualenv_awx:
|
||||
fi; \
|
||||
if [ ! -d "$(VENV_BASE)/awx" ]; then \
|
||||
virtualenv -p $(PYTHON) $(VENV_BASE)/awx; \
|
||||
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP) && \
|
||||
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) flit; \
|
||||
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
|
||||
fi; \
|
||||
fi
|
||||
|
||||
@@ -266,28 +264,6 @@ migrate:
|
||||
dbchange:
|
||||
$(MANAGEMENT_COMMAND) makemigrations
|
||||
|
||||
server_noattach:
|
||||
tmux new-session -d -s awx 'exec make uwsgi'
|
||||
tmux rename-window 'AWX'
|
||||
tmux select-window -t awx:0
|
||||
tmux split-window -v 'exec make dispatcher'
|
||||
tmux new-window 'exec make daphne'
|
||||
tmux select-window -t awx:1
|
||||
tmux rename-window 'WebSockets'
|
||||
tmux split-window -h 'exec make runworker'
|
||||
tmux split-window -v 'exec make nginx'
|
||||
tmux new-window 'exec make receiver'
|
||||
tmux select-window -t awx:2
|
||||
tmux rename-window 'Extra Services'
|
||||
tmux select-window -t awx:0
|
||||
|
||||
server: server_noattach
|
||||
tmux -2 attach-session -t awx
|
||||
|
||||
# Use with iterm2's native tmux protocol support
|
||||
servercc: server_noattach
|
||||
tmux -2 -CC attach-session -t awx
|
||||
|
||||
supervisor:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
@@ -312,18 +288,11 @@ daphne:
|
||||
fi; \
|
||||
daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer
|
||||
|
||||
runworker:
|
||||
wsbroadcast:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py runworker --only-channels websocket.*
|
||||
|
||||
# Run the built-in development webserver (by default on http://localhost:8013).
|
||||
runserver:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py runserver
|
||||
$(PYTHON) manage.py run_wsbroadcast
|
||||
|
||||
# Run to start the background task dispatcher for development.
|
||||
dispatcher:
|
||||
@@ -395,40 +364,43 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py2,py3
|
||||
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'
|
||||
|
||||
prepare_collection_venv:
|
||||
rm -rf $(COLLECTION_VENV)
|
||||
mkdir $(COLLECTION_VENV)
|
||||
$(VENV_BASE)/awx/bin/pip install --target=$(COLLECTION_VENV) git+https://github.com/ansible/tower-cli.git
|
||||
|
||||
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
|
||||
COLLECTION_TEST_TARGET ?=
|
||||
COLLECTION_PACKAGE ?= awx
|
||||
COLLECTION_NAMESPACE ?= awx
|
||||
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
|
||||
|
||||
test_collection:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONPATH=$(COLLECTION_VENV):/awx_devel/awx_collection:$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
|
||||
PYTHONPATH=$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
|
||||
|
||||
flake8_collection:
|
||||
flake8 awx_collection/ # Different settings, in main exclude list
|
||||
|
||||
test_collection_all: prepare_collection_venv test_collection flake8_collection
|
||||
test_collection_all: test_collection flake8_collection
|
||||
|
||||
test_collection_sanity:
|
||||
rm -rf sanity
|
||||
mkdir -p sanity/ansible_collections/awx
|
||||
cp -Ra awx_collection sanity/ansible_collections/awx/awx # symlinks do not work
|
||||
cd sanity/ansible_collections/awx/awx && git init && git add . # requires both this file structure and a git repo, so there you go
|
||||
cd sanity/ansible_collections/awx/awx && ansible-test sanity
|
||||
# WARNING: symlinking a collection is fundamentally unstable
|
||||
# this is for rapid development iteration with playbooks, do not use with other test targets
|
||||
symlink_collection:
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
mkdir -p ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE) # in case it does not exist
|
||||
ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)
|
||||
|
||||
build_collection:
|
||||
ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
|
||||
ansible-galaxy collection build awx_collection --force --output-path=awx_collection
|
||||
|
||||
install_collection: build_collection
|
||||
rm -rf ~/.ansible/collections/ansible_collections/awx/awx
|
||||
ansible-galaxy collection install awx_collection/awx-awx-$(VERSION).tar.gz
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
ansible-galaxy collection install awx_collection/$(COLLECTION_NAMESPACE)-$(COLLECTION_PACKAGE)-$(VERSION).tar.gz
|
||||
|
||||
test_collection_sanity: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity
|
||||
|
||||
test_collection_integration: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
||||
|
||||
test_unit:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -690,11 +662,12 @@ docker-compose-isolated-build: awx-devel-build
|
||||
docker tag ansible/awx_isolated $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
|
||||
#docker push $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
|
||||
|
||||
MACHINE?=default
|
||||
docker-clean:
|
||||
eval $$(docker-machine env $(MACHINE))
|
||||
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq),docker stop $(container_id); docker rm -f $(container_id);)
|
||||
-docker images | grep "awx_devel" | awk '{print $$1 ":" $$2}' | xargs docker rmi
|
||||
docker images | grep "awx_devel" | awk '{print $$1 ":" $$2}' | xargs docker rmi
|
||||
|
||||
docker-clean-volumes:
|
||||
docker volume rm tools_awx_db
|
||||
|
||||
docker-refresh: docker-clean docker-compose
|
||||
|
||||
@@ -708,9 +681,6 @@ docker-compose-cluster-elk: docker-auth awx/projects
|
||||
prometheus:
|
||||
docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml
|
||||
|
||||
minishift-dev:
|
||||
ansible-playbook -i localhost, -e devtree_directory=$(CURDIR) tools/clusterdevel/start_minishift_dev.yml
|
||||
|
||||
clean-elk:
|
||||
docker stop tools_kibana_1
|
||||
docker stop tools_logstash_1
|
||||
|
||||
@@ -5,10 +5,12 @@
|
||||
import inspect
|
||||
import logging
|
||||
import time
|
||||
import uuid
|
||||
import urllib.parse
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.db import connection
|
||||
from django.db.models.fields import FieldDoesNotExist
|
||||
from django.db.models.fields.related import OneToOneRel
|
||||
@@ -192,7 +194,7 @@ class APIView(views.APIView):
|
||||
response.data['detail'] += ' To establish a login session, visit /api/login/.'
|
||||
logger.info(status_msg)
|
||||
else:
|
||||
logger.warn(status_msg)
|
||||
logger.warning(status_msg)
|
||||
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
|
||||
time_started = getattr(self, 'time_started', None)
|
||||
response['X-API-Node'] = settings.CLUSTER_HOST_ID
|
||||
@@ -548,6 +550,15 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
|
||||
})
|
||||
return d
|
||||
|
||||
def get_queryset(self):
|
||||
if hasattr(self, 'parent_key'):
|
||||
# Prefer this filtering because ForeignKey allows us more assumptions
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return super(SubListCreateAPIView, self).get_queryset()
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
# If the object ID was not specified, it probably doesn't exist in the
|
||||
# DB yet. We want to see if we can create it. The URL may choose to
|
||||
@@ -964,6 +975,11 @@ class CopyAPIView(GenericAPIView):
|
||||
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
|
||||
new_obj.admin_role.members.add(request.user)
|
||||
if sub_objs:
|
||||
# store the copied object dict into memcached, because it's
|
||||
# often too large for postgres' notification bus
|
||||
# (which has a default maximum message size of 8k)
|
||||
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
|
||||
cache.set(key, sub_objs, timeout=3600)
|
||||
permission_check_func = None
|
||||
if hasattr(type(self), 'deep_copy_permission_check_func'):
|
||||
permission_check_func = (
|
||||
@@ -971,7 +987,7 @@ class CopyAPIView(GenericAPIView):
|
||||
)
|
||||
trigger_delayed_deep_copy(
|
||||
self.model.__module__, self.model.__name__,
|
||||
obj.pk, new_obj.pk, request.user.pk, sub_objs,
|
||||
obj.pk, new_obj.pk, request.user.pk, key,
|
||||
permission_check_func=permission_check_func
|
||||
)
|
||||
serializer = self._get_copy_return_serializer(new_obj)
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
from collections import OrderedDict
|
||||
from uuid import UUID
|
||||
|
||||
# Django
|
||||
from django.core.exceptions import PermissionDenied
|
||||
@@ -20,6 +21,7 @@ from rest_framework.fields import JSONField as DRFJSONField
|
||||
from rest_framework.request import clone_request
|
||||
|
||||
# AWX
|
||||
from awx.api.fields import ChoiceNullField
|
||||
from awx.main.fields import JSONField, ImplicitRoleField
|
||||
from awx.main.models import InventorySource, NotificationTemplate
|
||||
from awx.main.scheduler.kubernetes import PodManager
|
||||
@@ -59,7 +61,8 @@ class Metadata(metadata.SimpleMetadata):
|
||||
'type': _('Data type for this {}.'),
|
||||
'url': _('URL for this {}.'),
|
||||
'related': _('Data structure with URLs of related resources.'),
|
||||
'summary_fields': _('Data structure with name/description for related resources.'),
|
||||
'summary_fields': _('Data structure with name/description for related resources. '
|
||||
'The output for some objects may be limited for performance reasons.'),
|
||||
'created': _('Timestamp when this {} was created.'),
|
||||
'modified': _('Timestamp when this {} was last modified.'),
|
||||
}
|
||||
@@ -84,6 +87,8 @@ class Metadata(metadata.SimpleMetadata):
|
||||
# FIXME: Still isn't showing all default values?
|
||||
try:
|
||||
default = field.get_default()
|
||||
if type(default) is UUID:
|
||||
default = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
|
||||
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
|
||||
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
|
||||
field_info['default'] = default
|
||||
@@ -96,7 +101,15 @@ class Metadata(metadata.SimpleMetadata):
|
||||
field_info['children'] = self.get_serializer_info(field)
|
||||
|
||||
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
|
||||
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
|
||||
choices = [
|
||||
(choice_value, choice_name) for choice_value, choice_name in field.choices.items()
|
||||
]
|
||||
if not any(choice in ('', None) for choice, _ in choices):
|
||||
if field.allow_blank:
|
||||
choices = [("", "---------")] + choices
|
||||
if field.allow_null and not isinstance(field, ChoiceNullField):
|
||||
choices = [(None, "---------")] + choices
|
||||
field_info['choices'] = choices
|
||||
|
||||
# Indicate if a field is write-only.
|
||||
if getattr(field, 'write_only', False):
|
||||
|
||||
@@ -72,6 +72,7 @@ from awx.main.utils import (
|
||||
prefetch_page_capabilities, get_external_account, truncate_stdout,
|
||||
)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.named_url_graph import reset_counters
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
|
||||
from awx.main.validators import vars_validate_or_raise
|
||||
@@ -347,6 +348,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
|
||||
|
||||
def _generate_named_url(self, url_path, obj, node):
|
||||
url_units = url_path.split('/')
|
||||
reset_counters()
|
||||
named_url = node.generate_named_url(obj)
|
||||
url_units[4] = named_url
|
||||
return '/'.join(url_units)
|
||||
@@ -642,7 +644,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
_capabilities_prefetch = [
|
||||
'admin', 'execute',
|
||||
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
|
||||
'workflowjobtemplate.organization.workflow_admin']}
|
||||
'organization.workflow_admin']}
|
||||
]
|
||||
|
||||
class Meta:
|
||||
@@ -884,6 +886,9 @@ class UserSerializer(BaseSerializer):
|
||||
fields = ('*', '-name', '-description', '-modified',
|
||||
'username', 'first_name', 'last_name',
|
||||
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
|
||||
extra_kwargs = {
|
||||
'last_login': {'read_only': True}
|
||||
}
|
||||
|
||||
def to_representation(self, obj):
|
||||
ret = super(UserSerializer, self).to_representation(obj)
|
||||
@@ -1246,6 +1251,7 @@ class OrganizationSerializer(BaseSerializer):
|
||||
res.update(dict(
|
||||
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
|
||||
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
|
||||
job_templates = self.reverse('api:organization_job_templates_list', kwargs={'pk': obj.pk}),
|
||||
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
|
||||
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
|
||||
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
|
||||
@@ -1274,6 +1280,14 @@ class OrganizationSerializer(BaseSerializer):
|
||||
'job_templates': 0, 'admins': 0, 'projects': 0}
|
||||
else:
|
||||
summary_dict['related_field_counts'] = counts_dict[obj.id]
|
||||
|
||||
# Organization participation roles (admin, member) can't be assigned
|
||||
# to a team. This provides a hint to the ui so it can know to not
|
||||
# display these roles for team role selection.
|
||||
for key in ('admin_role', 'member_role',):
|
||||
if key in summary_dict.get('object_roles', {}):
|
||||
summary_dict['object_roles'][key]['user_only'] = True
|
||||
|
||||
return summary_dict
|
||||
|
||||
def validate(self, attrs):
|
||||
@@ -1387,12 +1401,6 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
def get_field_from_model_or_attrs(fd):
|
||||
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
|
||||
|
||||
organization = None
|
||||
if 'organization' in attrs:
|
||||
organization = attrs['organization']
|
||||
elif self.instance:
|
||||
organization = self.instance.organization
|
||||
|
||||
if 'allow_override' in attrs and self.instance:
|
||||
# case where user is turning off this project setting
|
||||
if self.instance.allow_override and not attrs['allow_override']:
|
||||
@@ -1408,11 +1416,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
' '.join([str(pk) for pk in used_by])
|
||||
)})
|
||||
|
||||
view = self.context.get('view', None)
|
||||
if not organization and not view.request.user.is_superuser:
|
||||
# Only allow super users to create orgless projects
|
||||
raise serializers.ValidationError(_('Organization is missing'))
|
||||
elif get_field_from_model_or_attrs('scm_type') == '':
|
||||
if get_field_from_model_or_attrs('scm_type') == '':
|
||||
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
|
||||
if get_field_from_model_or_attrs(fd):
|
||||
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
|
||||
@@ -1600,7 +1604,7 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
})
|
||||
SmartFilter().query_from_string(host_filter)
|
||||
except RuntimeError as e:
|
||||
raise models.base.ValidationError(e)
|
||||
raise models.base.ValidationError(str(e))
|
||||
return host_filter
|
||||
|
||||
def validate(self, attrs):
|
||||
@@ -2030,11 +2034,6 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
|
||||
return res
|
||||
|
||||
def get_group(self, obj): # TODO: remove in 3.3
|
||||
if obj.deprecated_group:
|
||||
return obj.deprecated_group.id
|
||||
return None
|
||||
|
||||
def build_relational_field(self, field_name, relation_info):
|
||||
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
|
||||
# SCM Project and inventory are read-only unless creating a new inventory.
|
||||
@@ -2115,7 +2114,13 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
def get_field_from_model_or_attrs(fd):
|
||||
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
|
||||
|
||||
if get_field_from_model_or_attrs('source') != 'scm':
|
||||
if get_field_from_model_or_attrs('source') == 'scm':
|
||||
if (('source' in attrs or 'source_project' in attrs) and
|
||||
get_field_from_model_or_attrs('source_project') is None):
|
||||
raise serializers.ValidationError(
|
||||
{"source_project": _("Project required for scm type sources.")}
|
||||
)
|
||||
else:
|
||||
redundant_scm_fields = list(filter(
|
||||
lambda x: attrs.get(x, None),
|
||||
['source_project', 'source_path', 'update_on_project_update']
|
||||
@@ -2732,7 +2737,8 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
|
||||
fields = ('*', 'job_type', 'inventory', 'project', 'playbook', 'scm_branch',
|
||||
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
|
||||
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
|
||||
'use_fact_cache',)
|
||||
'use_fact_cache', 'organization',)
|
||||
read_only_fields = ('organization',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobOptionsSerializer, self).get_related(obj)
|
||||
@@ -2747,6 +2753,8 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
|
||||
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
|
||||
except ObjectDoesNotExist:
|
||||
setattr(obj, 'project', None)
|
||||
if obj.organization_id:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization_id})
|
||||
if isinstance(obj, UnifiedJobTemplate):
|
||||
res['extra_credentials'] = self.reverse(
|
||||
'api:job_template_extra_credentials_list',
|
||||
@@ -2893,6 +2901,10 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
)
|
||||
if obj.host_config_key:
|
||||
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
|
||||
if obj.organization_id:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization_id})
|
||||
if obj.webhook_credential_id:
|
||||
res['webhook_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.webhook_credential_id})
|
||||
return res
|
||||
|
||||
def validate(self, attrs):
|
||||
@@ -3198,7 +3210,7 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
|
||||
field_kwargs['choices'] = module_name_choices
|
||||
field_kwargs['required'] = bool(not module_name_default)
|
||||
field_kwargs['default'] = module_name_default or serializers.empty
|
||||
field_kwargs['allow_blank'] = bool(module_name_default)
|
||||
field_kwargs['allow_blank'] = False
|
||||
field_kwargs.pop('max_length', None)
|
||||
return field_class, field_kwargs
|
||||
|
||||
@@ -3383,6 +3395,8 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
)
|
||||
if obj.organization:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
|
||||
if obj.webhook_credential_id:
|
||||
res['webhook_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.webhook_credential_id})
|
||||
return res
|
||||
|
||||
def validate_extra_vars(self, value):
|
||||
@@ -3597,9 +3611,11 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
elif self.instance:
|
||||
ujt = self.instance.unified_job_template
|
||||
if ujt is None:
|
||||
if 'workflow_job_template' in attrs:
|
||||
return {'workflow_job_template': attrs['workflow_job_template']}
|
||||
return {}
|
||||
ret = {}
|
||||
for fd in ('workflow_job_template', 'identifier'):
|
||||
if fd in attrs:
|
||||
ret[fd] = attrs[fd]
|
||||
return ret
|
||||
|
||||
# build additional field survey_passwords to track redacted variables
|
||||
password_dict = {}
|
||||
@@ -3677,7 +3693,8 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJobTemplateNode
|
||||
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',)
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',
|
||||
'identifier',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
|
||||
@@ -3716,8 +3733,8 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJobNode
|
||||
fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'all_parents_must_converge', 'do_not_run',)
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'all_parents_must_converge', 'do_not_run', 'identifier')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
|
||||
@@ -4048,6 +4065,13 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
**attrs)
|
||||
self._ignored_fields = rejected
|
||||
|
||||
# Basic validation - cannot run a playbook without a playbook
|
||||
if not template.project:
|
||||
errors['project'] = _("A project is required to run a job.")
|
||||
elif template.project.status in ('error', 'failed'):
|
||||
errors['playbook'] = _("Missing a revision to run due to failed project update.")
|
||||
|
||||
# cannot run a playbook without an inventory
|
||||
if template.inventory and template.inventory.pending_deletion is True:
|
||||
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
|
||||
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
|
||||
@@ -4512,6 +4536,8 @@ class SchedulePreviewSerializer(BaseSerializer):
|
||||
try:
|
||||
Schedule.rrulestr(rrule_value)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
|
||||
return value
|
||||
|
||||
|
||||
@@ -1 +1,2 @@
|
||||
# Test Logging Configuration
|
||||
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
# Copyright (c) 2017 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
from datetime import timedelta
|
||||
|
||||
from django.utils.timezone import now
|
||||
from django.conf import settings
|
||||
from django.conf.urls import url
|
||||
|
||||
from oauthlib import oauth2
|
||||
from oauth2_provider import views
|
||||
|
||||
from awx.main.models import RefreshToken
|
||||
from awx.api.views import (
|
||||
ApiOAuthAuthorizationRootView,
|
||||
)
|
||||
@@ -14,6 +18,21 @@ from awx.api.views import (
|
||||
class TokenView(views.TokenView):
|
||||
|
||||
def create_token_response(self, request):
|
||||
# Django OAuth2 Toolkit has a bug whereby refresh tokens are *never*
|
||||
# properly expired (ugh):
|
||||
#
|
||||
# https://github.com/jazzband/django-oauth-toolkit/issues/746
|
||||
#
|
||||
# This code detects and auto-expires them on refresh grant
|
||||
# requests.
|
||||
if request.POST.get('grant_type') == 'refresh_token' and 'refresh_token' in request.POST:
|
||||
refresh_token = RefreshToken.objects.filter(
|
||||
token=request.POST['refresh_token']
|
||||
).first()
|
||||
if refresh_token:
|
||||
expire_seconds = settings.OAUTH2_PROVIDER.get('REFRESH_TOKEN_EXPIRE_SECONDS', 0)
|
||||
if refresh_token.created + timedelta(seconds=expire_seconds) < now():
|
||||
return request.build_absolute_uri(), {}, 'The refresh token has expired.', '403'
|
||||
try:
|
||||
return super(TokenView, self).create_token_response(request)
|
||||
except oauth2.AccessDeniedError as e:
|
||||
|
||||
@@ -10,6 +10,7 @@ from awx.api.views import (
|
||||
OrganizationAdminsList,
|
||||
OrganizationInventoriesList,
|
||||
OrganizationProjectsList,
|
||||
OrganizationJobTemplatesList,
|
||||
OrganizationWorkflowJobTemplatesList,
|
||||
OrganizationTeamsList,
|
||||
OrganizationCredentialList,
|
||||
@@ -33,6 +34,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/admins/$', OrganizationAdminsList.as_view(), name='organization_admins_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/inventories/$', OrganizationInventoriesList.as_view(), name='organization_inventories_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/projects/$', OrganizationProjectsList.as_view(), name='organization_projects_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/job_templates/$', OrganizationJobTemplatesList.as_view(), name='organization_job_templates_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/workflow_job_templates/$', OrganizationWorkflowJobTemplatesList.as_view(), name='organization_workflow_job_templates_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/teams/$', OrganizationTeamsList.as_view(), name='organization_teams_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/credentials/$', OrganizationCredentialList.as_view(), name='organization_credential_list'),
|
||||
|
||||
@@ -34,7 +34,9 @@ from awx.api.views import (
|
||||
OAuth2ApplicationDetail,
|
||||
)
|
||||
|
||||
from awx.api.views.metrics import MetricsView
|
||||
from awx.api.views.metrics import (
|
||||
MetricsView,
|
||||
)
|
||||
|
||||
from .organization import urls as organization_urls
|
||||
from .user import urls as user_urls
|
||||
|
||||
@@ -111,6 +111,7 @@ from awx.api.views.organization import ( # noqa
|
||||
OrganizationUsersList,
|
||||
OrganizationAdminsList,
|
||||
OrganizationProjectsList,
|
||||
OrganizationJobTemplatesList,
|
||||
OrganizationWorkflowJobTemplatesList,
|
||||
OrganizationTeamsList,
|
||||
OrganizationActivityStreamList,
|
||||
@@ -1091,7 +1092,7 @@ class UserRolesList(SubListAttachDetachAPIView):
|
||||
|
||||
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
||||
if role.content_type == credential_content_type:
|
||||
if role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||
data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
|
||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
@@ -4303,7 +4304,7 @@ class NotificationTemplateTest(GenericAPIView):
|
||||
msg = "Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE)
|
||||
if obj.notification_type in ('email', 'pagerduty'):
|
||||
body = "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)
|
||||
elif obj.notification_type == 'webhook':
|
||||
elif obj.notification_type in ('webhook', 'grafana'):
|
||||
body = '{{"body": "Ansible Tower Test Notification {} {}"}}'.format(obj.id, settings.TOWER_URL_BASE)
|
||||
else:
|
||||
body = {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}
|
||||
@@ -4414,7 +4415,7 @@ class RoleUsersList(SubListAttachDetachAPIView):
|
||||
|
||||
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
||||
if role.content_type == credential_content_type:
|
||||
if role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||
data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
|
||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
@@ -39,3 +39,4 @@ class MetricsView(APIView):
|
||||
if (request.user.is_superuser or request.user.is_system_auditor):
|
||||
return Response(metrics().decode('UTF-8'))
|
||||
raise PermissionDenied()
|
||||
|
||||
|
||||
@@ -4,10 +4,7 @@
|
||||
import dateutil
|
||||
import logging
|
||||
|
||||
from django.db.models import (
|
||||
Count,
|
||||
F,
|
||||
)
|
||||
from django.db.models import Count
|
||||
from django.db import transaction
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.utils.timezone import now
|
||||
@@ -175,28 +172,18 @@ class OrganizationCountsMixin(object):
|
||||
|
||||
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
|
||||
project_qs = Project.accessible_objects(self.request.user, 'read_role')
|
||||
jt_qs = JobTemplate.accessible_objects(self.request.user, 'read_role')
|
||||
|
||||
# Produce counts of Foreign Key relationships
|
||||
db_results['inventories'] = inv_qs\
|
||||
.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
db_results['inventories'] = inv_qs.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
db_results['teams'] = Team.accessible_objects(
|
||||
self.request.user, 'read_role').values('organization').annotate(
|
||||
Count('organization')).order_by('organization')
|
||||
|
||||
JT_project_reference = 'project__organization'
|
||||
JT_inventory_reference = 'inventory__organization'
|
||||
db_results['job_templates_project'] = JobTemplate.accessible_objects(
|
||||
self.request.user, 'read_role').exclude(
|
||||
project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
|
||||
Count(JT_project_reference)).order_by(JT_project_reference)
|
||||
db_results['job_templates'] = jt_qs.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
|
||||
self.request.user, 'read_role').values(JT_inventory_reference).annotate(
|
||||
Count(JT_inventory_reference)).order_by(JT_inventory_reference)
|
||||
|
||||
db_results['projects'] = project_qs\
|
||||
.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
db_results['projects'] = project_qs.values('organization').annotate(Count('organization')).order_by('organization')
|
||||
|
||||
# Other members and admins of organization are always viewable
|
||||
db_results['users'] = org_qs.annotate(
|
||||
@@ -212,11 +199,7 @@ class OrganizationCountsMixin(object):
|
||||
'admins': 0, 'projects': 0}
|
||||
|
||||
for res, count_qs in db_results.items():
|
||||
if res == 'job_templates_project':
|
||||
org_reference = JT_project_reference
|
||||
elif res == 'job_templates_inventory':
|
||||
org_reference = JT_inventory_reference
|
||||
elif res == 'users':
|
||||
if res == 'users':
|
||||
org_reference = 'id'
|
||||
else:
|
||||
org_reference = 'organization'
|
||||
@@ -229,14 +212,6 @@ class OrganizationCountsMixin(object):
|
||||
continue
|
||||
count_context[org_id][res] = entry['%s__count' % org_reference]
|
||||
|
||||
# Combine the counts for job templates by project and inventory
|
||||
for org in org_id_list:
|
||||
org_id = org['id']
|
||||
count_context[org_id]['job_templates'] = 0
|
||||
for related_path in ['job_templates_project', 'job_templates_inventory']:
|
||||
if related_path in count_context[org_id]:
|
||||
count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)
|
||||
|
||||
full_context['related_field_counts'] = count_context
|
||||
|
||||
return full_context
|
||||
|
||||
@@ -20,7 +20,7 @@ from awx.main.models import (
|
||||
Role,
|
||||
User,
|
||||
Team,
|
||||
InstanceGroup,
|
||||
InstanceGroup
|
||||
)
|
||||
from awx.api.generics import (
|
||||
ListCreateAPIView,
|
||||
@@ -28,6 +28,7 @@ from awx.api.generics import (
|
||||
SubListAPIView,
|
||||
SubListCreateAttachDetachAPIView,
|
||||
SubListAttachDetachAPIView,
|
||||
SubListCreateAPIView,
|
||||
ResourceAccessList,
|
||||
BaseUsersList,
|
||||
)
|
||||
@@ -35,14 +36,13 @@ from awx.api.generics import (
|
||||
from awx.api.serializers import (
|
||||
OrganizationSerializer,
|
||||
InventorySerializer,
|
||||
ProjectSerializer,
|
||||
UserSerializer,
|
||||
TeamSerializer,
|
||||
ActivityStreamSerializer,
|
||||
RoleSerializer,
|
||||
NotificationTemplateSerializer,
|
||||
WorkflowJobTemplateSerializer,
|
||||
InstanceGroupSerializer,
|
||||
ProjectSerializer, JobTemplateSerializer, WorkflowJobTemplateSerializer
|
||||
)
|
||||
from awx.api.views.mixin import (
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
@@ -94,7 +94,7 @@ class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPI
|
||||
org_counts['projects'] = Project.accessible_objects(**access_kwargs).filter(
|
||||
organization__id=org_id).count()
|
||||
org_counts['job_templates'] = JobTemplate.accessible_objects(**access_kwargs).filter(
|
||||
project__organization__id=org_id).count()
|
||||
organization__id=org_id).count()
|
||||
|
||||
full_context['related_field_counts'] = {}
|
||||
full_context['related_field_counts'][org_id] = org_counts
|
||||
@@ -128,21 +128,27 @@ class OrganizationAdminsList(BaseUsersList):
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
class OrganizationProjectsList(SubListCreateAttachDetachAPIView):
|
||||
class OrganizationProjectsList(SubListCreateAPIView):
|
||||
|
||||
model = Project
|
||||
serializer_class = ProjectSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'projects'
|
||||
parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationWorkflowJobTemplatesList(SubListCreateAttachDetachAPIView):
|
||||
class OrganizationJobTemplatesList(SubListCreateAPIView):
|
||||
|
||||
model = JobTemplate
|
||||
serializer_class = JobTemplateSerializer
|
||||
parent_model = Organization
|
||||
parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
|
||||
|
||||
model = WorkflowJobTemplate
|
||||
serializer_class = WorkflowJobTemplateSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'workflows'
|
||||
parent_key = 'organization'
|
||||
|
||||
|
||||
|
||||
10
awx/asgi.py
10
awx/asgi.py
@@ -2,14 +2,15 @@
|
||||
# All Rights Reserved.
|
||||
import os
|
||||
import logging
|
||||
import django
|
||||
from awx import __version__ as tower_version
|
||||
|
||||
# Prepare the AWX environment.
|
||||
from awx import prepare_env, MODE
|
||||
prepare_env() # NOQA
|
||||
|
||||
from django.core.wsgi import get_wsgi_application # NOQA
|
||||
from channels.asgi import get_channel_layer
|
||||
from channels.routing import get_default_application
|
||||
|
||||
|
||||
"""
|
||||
ASGI config for AWX project.
|
||||
@@ -32,6 +33,5 @@ if MODE == 'production':
|
||||
|
||||
|
||||
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings")
|
||||
|
||||
|
||||
channel_layer = get_channel_layer()
|
||||
django.setup()
|
||||
channel_layer = get_default_application()
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
# Python
|
||||
import contextlib
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import urllib.parse
|
||||
|
||||
# Django
|
||||
from django.conf import LazySettings
|
||||
@@ -57,15 +55,6 @@ SETTING_CACHE_DEFAULTS = True
|
||||
__all__ = ['SettingsWrapper', 'get_settings_to_cache', 'SETTING_CACHE_NOTSET']
|
||||
|
||||
|
||||
def normalize_broker_url(value):
|
||||
parts = value.rsplit('@', 1)
|
||||
match = re.search('(amqp://[^:]+:)(.*)', parts[0])
|
||||
if match:
|
||||
prefix, password = match.group(1), match.group(2)
|
||||
parts[0] = prefix + urllib.parse.quote(password)
|
||||
return '@'.join(parts)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _ctit_db_wrapper(trans_safe=False):
|
||||
'''
|
||||
@@ -415,16 +404,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
value = self._get_local(name)
|
||||
if value is not empty:
|
||||
return value
|
||||
value = self._get_default(name)
|
||||
# sometimes users specify RabbitMQ passwords that contain
|
||||
# unescaped : and @ characters that confused urlparse, e.g.,
|
||||
# amqp://guest:a@ns:ibl3#@localhost:5672//
|
||||
#
|
||||
# detect these scenarios, and automatically escape the user's
|
||||
# password so it just works
|
||||
if name == 'BROKER_URL':
|
||||
value = normalize_broker_url(value)
|
||||
return value
|
||||
return self._get_default(name)
|
||||
|
||||
def _set_local(self, name, value):
|
||||
field = self.registry.get_setting_field(name)
|
||||
|
||||
@@ -325,17 +325,3 @@ def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 23
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_logging_test(api_request):
|
||||
with mock.patch('awx.conf.views.AWXProxyHandler.perform_test') as mock_func:
|
||||
api_request(
|
||||
'post',
|
||||
reverse('api:setting_logging_test'),
|
||||
data={'LOG_AGGREGATOR_HOST': 'http://foobar', 'LOG_AGGREGATOR_TYPE': 'logstash'}
|
||||
)
|
||||
call = mock_func.call_args_list[0]
|
||||
args, kwargs = call
|
||||
given_settings = kwargs['custom_settings']
|
||||
assert given_settings.LOG_AGGREGATOR_HOST == 'http://foobar'
|
||||
assert given_settings.LOG_AGGREGATOR_TYPE == 'logstash'
|
||||
|
||||
@@ -3,7 +3,11 @@
|
||||
|
||||
# Python
|
||||
import collections
|
||||
import logging
|
||||
import subprocess
|
||||
import sys
|
||||
import socket
|
||||
from socket import SHUT_RDWR
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -11,7 +15,7 @@ from django.http import Http404
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import PermissionDenied, ValidationError
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import serializers
|
||||
from rest_framework import status
|
||||
@@ -26,7 +30,6 @@ from awx.api.generics import (
|
||||
from awx.api.permissions import IsSuperUser
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import camelcase_to_underscore
|
||||
from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException
|
||||
from awx.main.tasks import handle_setting_changes
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
|
||||
@@ -161,40 +164,47 @@ class SettingLoggingTest(GenericAPIView):
|
||||
filter_backends = []
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
defaults = dict()
|
||||
for key in settings_registry.get_registered_settings(category_slug='logging'):
|
||||
try:
|
||||
defaults[key] = settings_registry.get_setting_field(key).get_default()
|
||||
except serializers.SkipField:
|
||||
defaults[key] = None
|
||||
obj = type('Settings', (object,), defaults)()
|
||||
serializer = self.get_serializer(obj, data=request.data)
|
||||
serializer.is_valid(raise_exception=True)
|
||||
# Special validation specific to logging test.
|
||||
errors = {}
|
||||
for key in ['LOG_AGGREGATOR_TYPE', 'LOG_AGGREGATOR_HOST']:
|
||||
if not request.data.get(key, ''):
|
||||
errors[key] = 'This field is required.'
|
||||
if errors:
|
||||
raise ValidationError(errors)
|
||||
|
||||
if request.data.get('LOG_AGGREGATOR_PASSWORD', '').startswith('$encrypted$'):
|
||||
serializer.validated_data['LOG_AGGREGATOR_PASSWORD'] = getattr(
|
||||
settings, 'LOG_AGGREGATOR_PASSWORD', ''
|
||||
)
|
||||
# Error if logging is not enabled
|
||||
enabled = getattr(settings, 'LOG_AGGREGATOR_ENABLED', False)
|
||||
if not enabled:
|
||||
return Response({'error': 'Logging not enabled'}, status=status.HTTP_409_CONFLICT)
|
||||
|
||||
# Send test message to configured logger based on db settings
|
||||
logging.getLogger('awx').error('AWX Connection Test Message')
|
||||
|
||||
hostname = getattr(settings, 'LOG_AGGREGATOR_HOST', None)
|
||||
protocol = getattr(settings, 'LOG_AGGREGATOR_PROTOCOL', None)
|
||||
|
||||
try:
|
||||
class MockSettings:
|
||||
pass
|
||||
mock_settings = MockSettings()
|
||||
for k, v in serializer.validated_data.items():
|
||||
setattr(mock_settings, k, v)
|
||||
AWXProxyHandler().perform_test(custom_settings=mock_settings)
|
||||
if mock_settings.LOG_AGGREGATOR_PROTOCOL.upper() == 'UDP':
|
||||
return Response(status=status.HTTP_201_CREATED)
|
||||
except LoggingConnectivityException as e:
|
||||
return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
return Response(status=status.HTTP_200_OK)
|
||||
subprocess.check_output(
|
||||
['rsyslogd', '-N1', '-f', '/var/lib/awx/rsyslog/rsyslog.conf'],
|
||||
stderr=subprocess.STDOUT
|
||||
)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
return Response({'error': exc.output}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
# Check to ensure port is open at host
|
||||
if protocol in ['udp', 'tcp']:
|
||||
port = getattr(settings, 'LOG_AGGREGATOR_PORT', None)
|
||||
# Error if port is not set when using UDP/TCP
|
||||
if not port:
|
||||
return Response({'error': 'Port required for ' + protocol}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if http/https by this point, domain is reacheable
|
||||
return Response(status=status.HTTP_202_ACCEPTED)
|
||||
|
||||
if protocol == 'udp':
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
else:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
try:
|
||||
s.settimeout(.5)
|
||||
s.connect((hostname, int(port)))
|
||||
s.shutdown(SHUT_RDWR)
|
||||
s.close()
|
||||
return Response(status=status.HTTP_202_ACCEPTED)
|
||||
except Exception as e:
|
||||
return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
# Create view functions for all of the class-based views to simplify inclusion
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
6083
awx/locale/zh/LC_MESSAGES/django.po
Normal file
6083
awx/locale/zh/LC_MESSAGES/django.po
Normal file
File diff suppressed because it is too large
Load Diff
@@ -11,7 +11,6 @@ from functools import reduce
|
||||
from django.conf import settings
|
||||
from django.db.models import Q, Prefetch
|
||||
from django.contrib.auth.models import User
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
|
||||
@@ -405,14 +404,6 @@ class BaseAccess(object):
|
||||
# Cannot copy manual project without errors
|
||||
user_capabilities[display_method] = False
|
||||
continue
|
||||
elif display_method in ['start', 'schedule'] and isinstance(obj, Group): # TODO: remove in 3.3
|
||||
try:
|
||||
if obj.deprecated_inventory_source and not obj.deprecated_inventory_source._can_update():
|
||||
user_capabilities[display_method] = False
|
||||
continue
|
||||
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
|
||||
user_capabilities[display_method] = False
|
||||
continue
|
||||
elif display_method in ['start', 'schedule'] and isinstance(obj, (Project)):
|
||||
if obj.scm_type == '':
|
||||
user_capabilities[display_method] = False
|
||||
@@ -650,8 +641,8 @@ class UserAccess(BaseAccess):
|
||||
# in these cases only superusers can modify orphan users
|
||||
return False
|
||||
return not obj.roles.all().exclude(
|
||||
content_type=ContentType.objects.get_for_model(User)
|
||||
).filter(ancestors__in=self.user.roles.all()).exists()
|
||||
ancestors__in=self.user.roles.all()
|
||||
).exists()
|
||||
else:
|
||||
return self.is_all_org_admin(obj)
|
||||
|
||||
@@ -789,7 +780,6 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
|
||||
return self.user in obj.admin_role
|
||||
|
||||
def can_delete(self, obj):
|
||||
self.check_license(check_expiration=False)
|
||||
is_change_possible = self.can_change(obj, None)
|
||||
if not is_change_possible:
|
||||
return False
|
||||
@@ -1411,7 +1401,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
|
||||
model = JobTemplate
|
||||
select_related = ('created_by', 'modified_by', 'inventory', 'project',
|
||||
select_related = ('created_by', 'modified_by', 'inventory', 'project', 'organization',
|
||||
'next_schedule',)
|
||||
prefetch_related = (
|
||||
'instance_groups',
|
||||
@@ -1435,16 +1425,11 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
Users who are able to create deploy jobs can also run normal and check (dry run) jobs.
|
||||
'''
|
||||
if not data: # So the browseable API will work
|
||||
return (
|
||||
Project.accessible_objects(self.user, 'use_role').exists() or
|
||||
Inventory.accessible_objects(self.user, 'use_role').exists())
|
||||
return Project.accessible_objects(self.user, 'use_role').exists()
|
||||
|
||||
# if reference_obj is provided, determine if it can be copied
|
||||
reference_obj = data.get('reference_obj', None)
|
||||
|
||||
if 'survey_enabled' in data and data['survey_enabled']:
|
||||
self.check_license(feature='surveys')
|
||||
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
@@ -1504,22 +1489,23 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
return self.user in obj.execute_role
|
||||
|
||||
def can_change(self, obj, data):
|
||||
data_for_change = data
|
||||
if self.user not in obj.admin_role and not self.user.is_superuser:
|
||||
return False
|
||||
if data is not None:
|
||||
data = dict(data)
|
||||
if data is None:
|
||||
return True
|
||||
|
||||
if self.changes_are_non_sensitive(obj, data):
|
||||
if 'survey_enabled' in data and obj.survey_enabled != data['survey_enabled'] and data['survey_enabled']:
|
||||
self.check_license(feature='surveys')
|
||||
return True
|
||||
data = dict(data)
|
||||
|
||||
for required_field in ('inventory', 'project'):
|
||||
required_obj = getattr(obj, required_field, None)
|
||||
if required_field not in data_for_change and required_obj is not None:
|
||||
data_for_change[required_field] = required_obj.pk
|
||||
return self.can_read(obj) and (self.can_add(data_for_change) if data is not None else True)
|
||||
if self.changes_are_non_sensitive(obj, data):
|
||||
return True
|
||||
|
||||
for required_field, cls in (('inventory', Inventory), ('project', Project)):
|
||||
is_mandatory = True
|
||||
if not getattr(obj, '{}_id'.format(required_field)):
|
||||
is_mandatory = False
|
||||
if not self.check_related(required_field, cls, data, obj=obj, role_field='use_role', mandatory=is_mandatory):
|
||||
return False
|
||||
return True
|
||||
|
||||
def changes_are_non_sensitive(self, obj, data):
|
||||
'''
|
||||
@@ -1554,9 +1540,9 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
@check_superuser
|
||||
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
|
||||
if relationship == "instance_groups":
|
||||
if not obj.project.organization:
|
||||
if not obj.organization:
|
||||
return False
|
||||
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.project.organization.admin_role
|
||||
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
|
||||
if relationship == 'credentials' and isinstance(sub_obj, Credential):
|
||||
return self.user in obj.admin_role and self.user in sub_obj.use_role
|
||||
return super(JobTemplateAccess, self).can_attach(
|
||||
@@ -1587,6 +1573,7 @@ class JobAccess(BaseAccess):
|
||||
select_related = ('created_by', 'modified_by', 'job_template', 'inventory',
|
||||
'project', 'project_update',)
|
||||
prefetch_related = (
|
||||
'organization',
|
||||
'unified_job_template',
|
||||
'instance_group',
|
||||
'credentials__credential_type',
|
||||
@@ -1607,42 +1594,19 @@ class JobAccess(BaseAccess):
|
||||
|
||||
return qs.filter(
|
||||
Q(job_template__in=JobTemplate.accessible_objects(self.user, 'read_role')) |
|
||||
Q(inventory__organization__in=org_access_qs) |
|
||||
Q(project__organization__in=org_access_qs)).distinct()
|
||||
|
||||
def related_orgs(self, obj):
|
||||
orgs = []
|
||||
if obj.inventory and obj.inventory.organization:
|
||||
orgs.append(obj.inventory.organization)
|
||||
if obj.project and obj.project.organization and obj.project.organization not in orgs:
|
||||
orgs.append(obj.project.organization)
|
||||
return orgs
|
||||
|
||||
def org_access(self, obj, role_types=['admin_role']):
|
||||
orgs = self.related_orgs(obj)
|
||||
for org in orgs:
|
||||
for role_type in role_types:
|
||||
role = getattr(org, role_type)
|
||||
if self.user in role:
|
||||
return True
|
||||
return False
|
||||
Q(organization__in=org_access_qs)).distinct()
|
||||
|
||||
def can_add(self, data, validate_license=True):
|
||||
if validate_license:
|
||||
self.check_license()
|
||||
|
||||
if not data: # So the browseable API will work
|
||||
return True
|
||||
return self.user.is_superuser
|
||||
raise NotImplementedError('Direct job creation not possible in v2 API')
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return (obj.status == 'new' and
|
||||
self.can_read(obj) and
|
||||
self.can_add(data, validate_license=False))
|
||||
raise NotImplementedError('Direct job editing not supported in v2 API')
|
||||
|
||||
@check_superuser
|
||||
def can_delete(self, obj):
|
||||
return self.org_access(obj)
|
||||
if not obj.organization:
|
||||
return False
|
||||
return self.user in obj.organization.admin_role
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
if validate_license:
|
||||
@@ -1662,6 +1626,7 @@ class JobAccess(BaseAccess):
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
config = None
|
||||
|
||||
# Standard permissions model
|
||||
if obj.job_template and (self.user not in obj.job_template.execute_role):
|
||||
return False
|
||||
|
||||
@@ -1676,24 +1641,17 @@ class JobAccess(BaseAccess):
|
||||
if JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}):
|
||||
return True
|
||||
|
||||
org_access = bool(obj.inventory) and self.user in obj.inventory.organization.inventory_admin_role
|
||||
project_access = obj.project is None or self.user in obj.project.admin_role
|
||||
credential_access = all([self.user in cred.use_role for cred in obj.credentials.all()])
|
||||
# Standard permissions model without job template involved
|
||||
if obj.organization and self.user in obj.organization.execute_role:
|
||||
return True
|
||||
elif not (obj.job_template or obj.organization):
|
||||
raise PermissionDenied(_('Job has been orphaned from its job template and organization.'))
|
||||
elif obj.job_template and config is not None:
|
||||
raise PermissionDenied(_('Job was launched with prompted fields you do not have access to.'))
|
||||
elif obj.job_template and config is None:
|
||||
raise PermissionDenied(_('Job was launched with unknown prompted fields. Organization admin permissions required.'))
|
||||
|
||||
# job can be relaunched if user could make an equivalent JT
|
||||
ret = org_access and credential_access and project_access
|
||||
if not ret and self.save_messages and not self.messages:
|
||||
if not obj.job_template:
|
||||
pretext = _('Job has been orphaned from its job template.')
|
||||
elif config is None:
|
||||
pretext = _('Job was launched with unknown prompted fields.')
|
||||
else:
|
||||
pretext = _('Job was launched with prompted fields.')
|
||||
if credential_access:
|
||||
self.messages['detail'] = '{} {}'.format(pretext, _(' Organization level permissions required.'))
|
||||
else:
|
||||
self.messages['detail'] = '{} {}'.format(pretext, _(' You do not have permission to related resources.'))
|
||||
return ret
|
||||
return False
|
||||
|
||||
def get_method_capability(self, method, obj, parent_obj):
|
||||
if method == 'start':
|
||||
@@ -1706,10 +1664,16 @@ class JobAccess(BaseAccess):
|
||||
def can_cancel(self, obj):
|
||||
if not obj.can_cancel:
|
||||
return False
|
||||
# Delete access allows org admins to stop running jobs
|
||||
if self.user == obj.created_by or self.can_delete(obj):
|
||||
# Users may always cancel their own jobs
|
||||
if self.user == obj.created_by:
|
||||
return True
|
||||
return obj.job_template is not None and self.user in obj.job_template.admin_role
|
||||
# Users with direct admin to JT may cancel jobs started by anyone
|
||||
if obj.job_template and self.user in obj.job_template.admin_role:
|
||||
return True
|
||||
# If orphaned, allow org JT admins to stop running jobs
|
||||
if not obj.job_template and obj.organization and self.user in obj.organization.job_template_admin_role:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class SystemJobTemplateAccess(BaseAccess):
|
||||
@@ -1944,11 +1908,11 @@ class WorkflowJobNodeAccess(BaseAccess):
|
||||
# TODO: notification attachments?
|
||||
class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can only see/manage Workflow Job Templates if I'm a super user
|
||||
I can see/manage Workflow Job Templates based on object roles
|
||||
'''
|
||||
|
||||
model = WorkflowJobTemplate
|
||||
select_related = ('created_by', 'modified_by', 'next_schedule',
|
||||
select_related = ('created_by', 'modified_by', 'organization', 'next_schedule',
|
||||
'admin_role', 'execute_role', 'read_role',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
@@ -1966,10 +1930,6 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
if not data: # So the browseable API will work
|
||||
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
|
||||
|
||||
# will check this if surveys are added to WFJT
|
||||
if 'survey_enabled' in data and data['survey_enabled']:
|
||||
self.check_license(feature='surveys')
|
||||
|
||||
return (
|
||||
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and
|
||||
self.check_related('inventory', Inventory, data, role_field='use_role')
|
||||
@@ -2038,7 +1998,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
I can also cancel it if I started it
|
||||
'''
|
||||
model = WorkflowJob
|
||||
select_related = ('created_by', 'modified_by',)
|
||||
select_related = ('created_by', 'modified_by', 'organization',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return WorkflowJob.objects.filter(
|
||||
@@ -2332,6 +2292,7 @@ class UnifiedJobTemplateAccess(BaseAccess):
|
||||
prefetch_related = (
|
||||
'last_job',
|
||||
'current_job',
|
||||
'organization',
|
||||
'credentials__credential_type',
|
||||
Prefetch('labels', queryset=Label.objects.all().order_by('name')),
|
||||
)
|
||||
@@ -2371,6 +2332,7 @@ class UnifiedJobAccess(BaseAccess):
|
||||
prefetch_related = (
|
||||
'created_by',
|
||||
'modified_by',
|
||||
'organization',
|
||||
'unified_job_node__workflow_job',
|
||||
'unified_job_template',
|
||||
'instance_group',
|
||||
@@ -2401,8 +2363,7 @@ class UnifiedJobAccess(BaseAccess):
|
||||
Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role')) |
|
||||
Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs) |
|
||||
Q(adhoccommand__inventory__id__in=inv_pk_qs) |
|
||||
Q(job__inventory__organization__in=org_auditor_qs) |
|
||||
Q(job__project__organization__in=org_auditor_qs)
|
||||
Q(organization__in=org_auditor_qs)
|
||||
)
|
||||
return qs
|
||||
|
||||
@@ -2429,6 +2390,9 @@ class ScheduleAccess(BaseAccess):
|
||||
def can_add(self, data):
|
||||
if not JobLaunchConfigAccess(self.user).can_add(data):
|
||||
return False
|
||||
if not data:
|
||||
return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists()
|
||||
|
||||
return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True)
|
||||
|
||||
@check_superuser
|
||||
|
||||
170
awx/main/analytics/broadcast_websocket.py
Normal file
170
awx/main/analytics/broadcast_websocket.py
Normal file
@@ -0,0 +1,170 @@
|
||||
import datetime
|
||||
import asyncio
|
||||
import logging
|
||||
import aioredis
|
||||
import redis
|
||||
import re
|
||||
|
||||
from prometheus_client import (
|
||||
generate_latest,
|
||||
Gauge,
|
||||
Counter,
|
||||
Enum,
|
||||
CollectorRegistry,
|
||||
parser,
|
||||
)
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
BROADCAST_WEBSOCKET_REDIS_KEY_NAME = 'broadcast_websocket_stats'
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.analytics.broadcast_websocket')
|
||||
|
||||
|
||||
def dt_to_seconds(dt):
|
||||
return int((dt - datetime.datetime(1970,1,1)).total_seconds())
|
||||
|
||||
|
||||
def now_seconds():
|
||||
return dt_to_seconds(datetime.datetime.now())
|
||||
|
||||
|
||||
def safe_name(s):
|
||||
# Replace all non alpha-numeric characters with _
|
||||
return re.sub('[^0-9a-zA-Z]+', '_', s)
|
||||
|
||||
|
||||
# Second granularity; Per-minute
|
||||
class FixedSlidingWindow():
|
||||
def __init__(self, start_time=None):
|
||||
self.buckets = dict()
|
||||
self.start_time = start_time or now_seconds()
|
||||
|
||||
def cleanup(self, now_bucket=None):
|
||||
now_bucket = now_bucket or now_seconds()
|
||||
if self.start_time + 60 <= now_bucket:
|
||||
self.start_time = now_bucket + 60 + 1
|
||||
|
||||
# Delete old entries
|
||||
for k in list(self.buckets.keys()):
|
||||
if k < self.start_time:
|
||||
del self.buckets[k]
|
||||
|
||||
def record(self, ts=None):
|
||||
ts = ts or datetime.datetime.now()
|
||||
now_bucket = int((ts - datetime.datetime(1970,1,1)).total_seconds())
|
||||
|
||||
val = self.buckets.get(now_bucket, 0)
|
||||
self.buckets[now_bucket] = val + 1
|
||||
|
||||
self.cleanup(now_bucket)
|
||||
|
||||
def render(self):
|
||||
self.cleanup()
|
||||
return sum(self.buckets.values()) or 0
|
||||
|
||||
|
||||
class BroadcastWebsocketStatsManager():
|
||||
def __init__(self, event_loop, local_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
|
||||
self._event_loop = event_loop
|
||||
self._stats = dict()
|
||||
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
|
||||
|
||||
def new_remote_host_stats(self, remote_hostname):
|
||||
self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname,
|
||||
remote_hostname)
|
||||
return self._stats[remote_hostname]
|
||||
|
||||
def delete_remote_host_stats(self, remote_hostname):
|
||||
del self._stats[remote_hostname]
|
||||
|
||||
async def run_loop(self):
|
||||
try:
|
||||
redis_conn = await aioredis.create_redis_pool(settings.BROKER_URL)
|
||||
while True:
|
||||
stats_data_str = ''.join(stat.serialize() for stat in self._stats.values())
|
||||
await redis_conn.set(self._redis_key, stats_data_str)
|
||||
|
||||
await asyncio.sleep(settings.BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS)
|
||||
except Exception as e:
|
||||
logger.warn(e)
|
||||
await asyncio.sleep(settings.BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS)
|
||||
self.start()
|
||||
|
||||
def start(self):
|
||||
self.async_task = self._event_loop.create_task(self.run_loop())
|
||||
return self.async_task
|
||||
|
||||
@classmethod
|
||||
def get_stats_sync(cls):
|
||||
'''
|
||||
Stringified verion of all the stats
|
||||
'''
|
||||
redis_conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
stats_str = redis_conn.get(BROADCAST_WEBSOCKET_REDIS_KEY_NAME) or b''
|
||||
return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
|
||||
|
||||
|
||||
class BroadcastWebsocketStats():
|
||||
def __init__(self, local_hostname, remote_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
self._remote_hostname = remote_hostname
|
||||
self._registry = CollectorRegistry()
|
||||
|
||||
# TODO: More robust replacement
|
||||
self.name = safe_name(self._local_hostname)
|
||||
self.remote_name = safe_name(self._remote_hostname)
|
||||
|
||||
self._messages_received_total = Counter(f'awx_{self.remote_name}_messages_received_total',
|
||||
'Number of messages received, to be forwarded, by the broadcast websocket system',
|
||||
registry=self._registry)
|
||||
self._messages_received = Gauge(f'awx_{self.remote_name}_messages_received',
|
||||
'Number forwarded messages received by the broadcast websocket system, for the duration of the current connection',
|
||||
registry=self._registry)
|
||||
self._connection = Enum(f'awx_{self.remote_name}_connection',
|
||||
'Websocket broadcast connection',
|
||||
states=['disconnected', 'connected'],
|
||||
registry=self._registry)
|
||||
self._connection.state('disconnected')
|
||||
self._connection_start = Gauge(f'awx_{self.remote_name}_connection_start',
|
||||
'Time the connection was established',
|
||||
registry=self._registry)
|
||||
|
||||
self._messages_received_per_minute = Gauge(f'awx_{self.remote_name}_messages_received_per_minute',
|
||||
'Messages received per minute',
|
||||
registry=self._registry)
|
||||
self._internal_messages_received_per_minute = FixedSlidingWindow()
|
||||
|
||||
def unregister(self):
|
||||
self._registry.unregister(f'awx_{self.remote_name}_messages_received')
|
||||
self._registry.unregister(f'awx_{self.remote_name}_connection')
|
||||
|
||||
def record_message_received(self):
|
||||
self._internal_messages_received_per_minute.record()
|
||||
self._messages_received.inc()
|
||||
self._messages_received_total.inc()
|
||||
|
||||
def record_connection_established(self):
|
||||
self._connection.state('connected')
|
||||
self._connection_start.set_to_current_time()
|
||||
self._messages_received.set(0)
|
||||
|
||||
def record_connection_lost(self):
|
||||
self._connection.state('disconnected')
|
||||
|
||||
def get_connection_duration(self):
|
||||
return (datetime.datetime.now() - self._connection_established_ts).total_seconds()
|
||||
|
||||
def render(self):
|
||||
msgs_per_min = self._internal_messages_received_per_minute.render()
|
||||
self._messages_received_per_minute.set(msgs_per_min)
|
||||
|
||||
def serialize(self):
|
||||
self.render()
|
||||
|
||||
registry_data = generate_latest(self._registry).decode('UTF-8')
|
||||
return registry_data
|
||||
@@ -31,7 +31,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
|
||||
'''
|
||||
|
||||
|
||||
@register('config', '1.0')
|
||||
@register('config', '1.1')
|
||||
def config(since):
|
||||
license_info = get_license(show_key=False)
|
||||
install_type = 'traditional'
|
||||
@@ -53,6 +53,7 @@ def config(since):
|
||||
'ansible_version': get_ansible_version(),
|
||||
'license_type': license_info.get('license_type', 'UNLICENSED'),
|
||||
'free_instances': license_info.get('free_instances', 0),
|
||||
'total_licensed_instances': license_info.get('instance_count', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
'pendo_tracking': settings.PENDO_TRACKING_STATE,
|
||||
'authentication_backends': settings.AUTHENTICATION_BACKENDS,
|
||||
@@ -121,22 +122,27 @@ def cred_type_counts(since):
|
||||
return counts
|
||||
|
||||
|
||||
@register('inventory_counts', '1.0')
|
||||
@register('inventory_counts', '1.2')
|
||||
def inventory_counts(since):
|
||||
counts = {}
|
||||
for inv in models.Inventory.objects.filter(kind='').annotate(num_sources=Count('inventory_sources', distinct=True),
|
||||
num_hosts=Count('hosts', distinct=True)).only('id', 'name', 'kind'):
|
||||
source_list = []
|
||||
for source in inv.inventory_sources.filter().annotate(num_hosts=Count('hosts', distinct=True)).values('name','source', 'num_hosts'):
|
||||
source_list.append(source)
|
||||
counts[inv.id] = {'name': inv.name,
|
||||
'kind': inv.kind,
|
||||
'hosts': inv.num_hosts,
|
||||
'sources': inv.num_sources
|
||||
'sources': inv.num_sources,
|
||||
'source_list': source_list
|
||||
}
|
||||
|
||||
for smart_inv in models.Inventory.objects.filter(kind='smart'):
|
||||
counts[smart_inv.id] = {'name': smart_inv.name,
|
||||
'kind': smart_inv.kind,
|
||||
'num_hosts': smart_inv.hosts.count(),
|
||||
'num_sources': smart_inv.inventory_sources.count()
|
||||
'hosts': smart_inv.hosts.count(),
|
||||
'sources': 0,
|
||||
'source_list': []
|
||||
}
|
||||
return counts
|
||||
|
||||
@@ -221,7 +227,7 @@ def query_info(since, collection_type):
|
||||
|
||||
|
||||
# Copies Job Events from db to a .csv to be shipped
|
||||
@table_version('events_table.csv', '1.0')
|
||||
@table_version('events_table.csv', '1.1')
|
||||
@table_version('unified_jobs_table.csv', '1.0')
|
||||
@table_version('unified_job_template_table.csv', '1.0')
|
||||
def copy_tables(since, full_path):
|
||||
@@ -248,6 +254,11 @@ def copy_tables(since, full_path):
|
||||
main_jobevent.job_id,
|
||||
main_jobevent.host_id,
|
||||
main_jobevent.host_name
|
||||
, CAST(main_jobevent.event_data::json->>'start' AS TIMESTAMP WITH TIME ZONE) AS start,
|
||||
CAST(main_jobevent.event_data::json->>'end' AS TIMESTAMP WITH TIME ZONE) AS end,
|
||||
main_jobevent.event_data::json->'duration' AS duration,
|
||||
main_jobevent.event_data::json->'res'->'warnings' AS warnings,
|
||||
main_jobevent.event_data::json->'res'->'deprecations' AS deprecations
|
||||
FROM main_jobevent
|
||||
WHERE main_jobevent.created > {}
|
||||
ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
|
||||
@@ -256,7 +267,7 @@ def copy_tables(since, full_path):
|
||||
unified_job_query = '''COPY (SELECT main_unifiedjob.id,
|
||||
main_unifiedjob.polymorphic_ctype_id,
|
||||
django_content_type.model,
|
||||
main_project.organization_id,
|
||||
main_unifiedjob.organization_id,
|
||||
main_organization.name as organization_name,
|
||||
main_unifiedjob.created,
|
||||
main_unifiedjob.name,
|
||||
@@ -274,12 +285,10 @@ def copy_tables(since, full_path):
|
||||
main_unifiedjob.job_explanation,
|
||||
main_unifiedjob.instance_group_id
|
||||
FROM main_unifiedjob
|
||||
JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
|
||||
JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
|
||||
JOIN main_project ON main_project.unifiedjobtemplate_ptr_id = main_job.project_id
|
||||
JOIN main_organization ON main_organization.id = main_project.organization_id
|
||||
WHERE main_unifiedjob.created > {}
|
||||
AND main_unifiedjob.launch_type != 'sync'
|
||||
LEFT JOIN main_organization ON main_organization.id = main_unifiedjob.organization_id
|
||||
WHERE (main_unifiedjob.created > {0} OR main_unifiedjob.finished > {0})
|
||||
AND main_unifiedjob.launch_type != 'sync'
|
||||
ORDER BY main_unifiedjob.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
|
||||
_copy_table(table='unified_jobs', query=unified_job_query, path=full_path)
|
||||
|
||||
|
||||
@@ -134,13 +134,17 @@ def gather(dest=None, module=None, collection_type='scheduled'):
|
||||
settings.SYSTEM_UUID,
|
||||
run_now.strftime('%Y-%m-%d-%H%M%S%z')
|
||||
])
|
||||
tgz = shutil.make_archive(
|
||||
os.path.join(os.path.dirname(dest), tarname),
|
||||
'gztar',
|
||||
dest
|
||||
)
|
||||
shutil.rmtree(dest)
|
||||
return tgz
|
||||
try:
|
||||
tgz = shutil.make_archive(
|
||||
os.path.join(os.path.dirname(dest), tarname),
|
||||
'gztar',
|
||||
dest
|
||||
)
|
||||
return tgz
|
||||
except Exception:
|
||||
logger.exception("Failed to write analytics archive file")
|
||||
finally:
|
||||
shutil.rmtree(dest)
|
||||
|
||||
|
||||
def ship(path):
|
||||
|
||||
@@ -667,7 +667,7 @@ register(
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('Logging Aggregator Username'),
|
||||
help_text=_('Username for external log aggregator (if required).'),
|
||||
help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
required=False,
|
||||
@@ -679,7 +679,7 @@ register(
|
||||
default='',
|
||||
encrypted=True,
|
||||
label=_('Logging Aggregator Password/Token'),
|
||||
help_text=_('Password or authentication token for external log aggregator (if required).'),
|
||||
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
required=False,
|
||||
|
||||
@@ -38,7 +38,7 @@ ENV_BLACKLIST = frozenset((
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS',
|
||||
'AWX_HOST', 'PROJECT_REVISION'
|
||||
'AWX_HOST', 'PROJECT_REVISION', 'SUPERVISOR_WEB_CONFIG_PATH'
|
||||
))
|
||||
|
||||
# loggers that may be called in process of emitting a log
|
||||
|
||||
@@ -1,97 +1,241 @@
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
import hmac
|
||||
import asyncio
|
||||
|
||||
from channels import Group
|
||||
from channels.auth import channel_session_user_from_http, channel_session_user
|
||||
|
||||
from django.utils.encoding import smart_str
|
||||
from django.http.cookie import parse_cookie
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import force_bytes
|
||||
from django.contrib.auth.models import User
|
||||
|
||||
from channels.generic.websocket import AsyncJsonWebsocketConsumer
|
||||
from channels.layers import get_channel_layer
|
||||
from channels.db import database_sync_to_async
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.consumers')
|
||||
XRF_KEY = '_auth_user_xrf'
|
||||
|
||||
|
||||
def discard_groups(message):
|
||||
if 'groups' in message.channel_session:
|
||||
for group in message.channel_session['groups']:
|
||||
Group(group).discard(message.reply_channel)
|
||||
class WebsocketSecretAuthHelper:
|
||||
"""
|
||||
Middlewareish for websockets to verify node websocket broadcast interconnect.
|
||||
|
||||
Note: The "ish" is due to the channels routing interface. Routing occurs
|
||||
_after_ authentication; making it hard to apply this auth to _only_ a subset of
|
||||
websocket endpoints.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def construct_secret(cls):
|
||||
nonce_serialized = f"{int(time.time())}"
|
||||
payload_dict = {
|
||||
'secret': settings.BROADCAST_WEBSOCKET_SECRET,
|
||||
'nonce': nonce_serialized
|
||||
}
|
||||
payload_serialized = json.dumps(payload_dict)
|
||||
|
||||
secret_serialized = hmac.new(force_bytes(settings.BROADCAST_WEBSOCKET_SECRET),
|
||||
msg=force_bytes(payload_serialized),
|
||||
digestmod='sha256').hexdigest()
|
||||
|
||||
return 'HMAC-SHA256 {}:{}'.format(nonce_serialized, secret_serialized)
|
||||
|
||||
|
||||
@channel_session_user_from_http
|
||||
def ws_connect(message):
|
||||
headers = dict(message.content.get('headers', ''))
|
||||
message.reply_channel.send({"accept": True})
|
||||
message.content['method'] = 'FAKE'
|
||||
if message.user.is_authenticated:
|
||||
message.reply_channel.send(
|
||||
{"text": json.dumps({"accept": True, "user": message.user.id})}
|
||||
)
|
||||
# store the valid CSRF token from the cookie so we can compare it later
|
||||
# on ws_receive
|
||||
cookie_token = parse_cookie(
|
||||
smart_str(headers.get(b'cookie'))
|
||||
).get('csrftoken')
|
||||
if cookie_token:
|
||||
message.channel_session[XRF_KEY] = cookie_token
|
||||
else:
|
||||
logger.error("Request user is not authenticated to use websocket.")
|
||||
message.reply_channel.send({"close": True})
|
||||
return None
|
||||
@classmethod
|
||||
def verify_secret(cls, s, nonce_tolerance=300):
|
||||
try:
|
||||
(prefix, payload) = s.split(' ')
|
||||
if prefix != 'HMAC-SHA256':
|
||||
raise ValueError('Unsupported encryption algorithm')
|
||||
(nonce_parsed, secret_parsed) = payload.split(':')
|
||||
except Exception:
|
||||
raise ValueError("Failed to parse secret")
|
||||
|
||||
try:
|
||||
payload_expected = {
|
||||
'secret': settings.BROADCAST_WEBSOCKET_SECRET,
|
||||
'nonce': nonce_parsed,
|
||||
}
|
||||
payload_serialized = json.dumps(payload_expected)
|
||||
except Exception:
|
||||
raise ValueError("Failed to create hash to compare to secret.")
|
||||
|
||||
secret_serialized = hmac.new(force_bytes(settings.BROADCAST_WEBSOCKET_SECRET),
|
||||
msg=force_bytes(payload_serialized),
|
||||
digestmod='sha256').hexdigest()
|
||||
|
||||
if secret_serialized != secret_parsed:
|
||||
raise ValueError("Invalid secret")
|
||||
|
||||
# Avoid timing attack and check the nonce after all the heavy lifting
|
||||
now = int(time.time())
|
||||
nonce_parsed = int(nonce_parsed)
|
||||
nonce_diff = now - nonce_parsed
|
||||
if abs(nonce_diff) > nonce_tolerance:
|
||||
logger.warn(f"Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")
|
||||
raise ValueError("Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")
|
||||
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def is_authorized(cls, scope):
|
||||
secret = ''
|
||||
for k, v in scope['headers']:
|
||||
if k.decode("utf-8") == 'secret':
|
||||
secret = v.decode("utf-8")
|
||||
break
|
||||
WebsocketSecretAuthHelper.verify_secret(secret)
|
||||
|
||||
|
||||
@channel_session_user
|
||||
def ws_disconnect(message):
|
||||
discard_groups(message)
|
||||
class BroadcastConsumer(AsyncJsonWebsocketConsumer):
|
||||
|
||||
async def connect(self):
|
||||
try:
|
||||
WebsocketSecretAuthHelper.is_authorized(self.scope)
|
||||
except Exception:
|
||||
# TODO: log ip of connected client
|
||||
logger.warn("Broadcast client failed to authorize for reason.")
|
||||
await self.close()
|
||||
return
|
||||
|
||||
# TODO: log ip of connected client
|
||||
logger.info(f"Broadcast client connected.")
|
||||
await self.accept()
|
||||
await self.channel_layer.group_add(settings.BROADCAST_WEBSOCKET_GROUP_NAME, self.channel_name)
|
||||
|
||||
async def disconnect(self, code):
|
||||
# TODO: log ip of disconnected client
|
||||
logger.info("Client disconnected")
|
||||
|
||||
async def internal_message(self, event):
|
||||
await self.send(event['text'])
|
||||
|
||||
|
||||
@channel_session_user
|
||||
def ws_receive(message):
|
||||
from awx.main.access import consumer_access
|
||||
user = message.user
|
||||
raw_data = message.content['text']
|
||||
data = json.loads(raw_data)
|
||||
class EventConsumer(AsyncJsonWebsocketConsumer):
|
||||
async def connect(self):
|
||||
user = self.scope['user']
|
||||
if user and not user.is_anonymous:
|
||||
await self.accept()
|
||||
await self.send_json({"accept": True, "user": user.id})
|
||||
# store the valid CSRF token from the cookie so we can compare it later
|
||||
# on ws_receive
|
||||
cookie_token = self.scope['cookies'].get('csrftoken')
|
||||
if cookie_token:
|
||||
self.scope['session'][XRF_KEY] = cookie_token
|
||||
else:
|
||||
logger.error("Request user is not authenticated to use websocket.")
|
||||
# TODO: Carry over from channels 1 implementation
|
||||
# We should never .accept() the client and close without sending a close message
|
||||
await self.accept()
|
||||
await self.send_json({"close": True})
|
||||
await self.close()
|
||||
|
||||
xrftoken = data.get('xrftoken')
|
||||
if (
|
||||
not xrftoken or
|
||||
XRF_KEY not in message.channel_session or
|
||||
xrftoken != message.channel_session[XRF_KEY]
|
||||
):
|
||||
logger.error(
|
||||
"access denied to channel, XRF mismatch for {}".format(user.username)
|
||||
)
|
||||
message.reply_channel.send({
|
||||
"text": json.dumps({"error": "access denied to channel"})
|
||||
})
|
||||
return
|
||||
@database_sync_to_async
|
||||
def user_can_see_object_id(self, user_access, oid):
|
||||
# At this point user is a channels.auth.UserLazyObject object
|
||||
# This causes problems with our generic role permissions checking.
|
||||
# Specifically, type(user) != User
|
||||
# Therefore, get the "real" User objects from the database before
|
||||
# calling the access permission methods
|
||||
user_access.user = User.objects.get(id=user_access.user.id)
|
||||
res = user_access.get_queryset().filter(pk=oid).exists()
|
||||
return res
|
||||
|
||||
if 'groups' in data:
|
||||
discard_groups(message)
|
||||
groups = data['groups']
|
||||
current_groups = set(message.channel_session.pop('groups') if 'groups' in message.channel_session else [])
|
||||
for group_name,v in groups.items():
|
||||
if type(v) is list:
|
||||
for oid in v:
|
||||
name = '{}-{}'.format(group_name, oid)
|
||||
access_cls = consumer_access(group_name)
|
||||
if access_cls is not None:
|
||||
user_access = access_cls(user)
|
||||
if not user_access.get_queryset().filter(pk=oid).exists():
|
||||
message.reply_channel.send({"text": json.dumps(
|
||||
{"error": "access denied to channel {0} for resource id {1}".format(group_name, oid)})})
|
||||
continue
|
||||
current_groups.add(name)
|
||||
Group(name).add(message.reply_channel)
|
||||
else:
|
||||
current_groups.add(group_name)
|
||||
Group(group_name).add(message.reply_channel)
|
||||
message.channel_session['groups'] = list(current_groups)
|
||||
async def receive_json(self, data):
|
||||
from awx.main.access import consumer_access
|
||||
user = self.scope['user']
|
||||
xrftoken = data.get('xrftoken')
|
||||
if (
|
||||
not xrftoken or
|
||||
XRF_KEY not in self.scope["session"] or
|
||||
xrftoken != self.scope["session"][XRF_KEY]
|
||||
):
|
||||
logger.error(f"access denied to channel, XRF mismatch for {user.username}")
|
||||
await self.send_json({"error": "access denied to channel"})
|
||||
return
|
||||
|
||||
if 'groups' in data:
|
||||
groups = data['groups']
|
||||
new_groups = set()
|
||||
current_groups = set(self.scope['session'].pop('groups') if 'groups' in self.scope['session'] else [])
|
||||
for group_name,v in groups.items():
|
||||
if type(v) is list:
|
||||
for oid in v:
|
||||
name = '{}-{}'.format(group_name, oid)
|
||||
access_cls = consumer_access(group_name)
|
||||
if access_cls is not None:
|
||||
user_access = access_cls(user)
|
||||
if not await self.user_can_see_object_id(user_access, oid):
|
||||
await self.send_json({"error": "access denied to channel {0} for resource id {1}".format(group_name, oid)})
|
||||
continue
|
||||
new_groups.add(name)
|
||||
else:
|
||||
await self.send_json({"error": "access denied to channel"})
|
||||
logger.error(f"groups must be a list, not {groups}")
|
||||
return
|
||||
|
||||
old_groups = current_groups - new_groups
|
||||
for group_name in old_groups:
|
||||
await self.channel_layer.group_discard(
|
||||
group_name,
|
||||
self.channel_name,
|
||||
)
|
||||
|
||||
new_groups_exclusive = new_groups - current_groups
|
||||
for group_name in new_groups_exclusive:
|
||||
await self.channel_layer.group_add(
|
||||
group_name,
|
||||
self.channel_name
|
||||
)
|
||||
logger.debug(f"Channel {self.channel_name} left groups {old_groups} and joined {new_groups_exclusive}")
|
||||
self.scope['session']['groups'] = new_groups
|
||||
await self.send_json({
|
||||
"groups_current": list(new_groups),
|
||||
"groups_left": list(old_groups),
|
||||
"groups_joined": list(new_groups_exclusive)
|
||||
})
|
||||
|
||||
async def internal_message(self, event):
|
||||
await self.send(event['text'])
|
||||
|
||||
|
||||
def run_sync(func):
|
||||
event_loop = asyncio.new_event_loop()
|
||||
event_loop.run_until_complete(func)
|
||||
event_loop.close()
|
||||
|
||||
|
||||
def _dump_payload(payload):
|
||||
try:
|
||||
return json.dumps(payload, cls=DjangoJSONEncoder)
|
||||
except ValueError:
|
||||
logger.error("Invalid payload to emit")
|
||||
return None
|
||||
|
||||
|
||||
def emit_channel_notification(group, payload):
|
||||
try:
|
||||
Group(group).send({"text": json.dumps(payload, cls=DjangoJSONEncoder)})
|
||||
except ValueError:
|
||||
logger.error("Invalid payload emitting channel {} on topic: {}".format(group, payload))
|
||||
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa
|
||||
|
||||
payload_dumped = _dump_payload(payload)
|
||||
if payload_dumped is None:
|
||||
return
|
||||
|
||||
channel_layer = get_channel_layer()
|
||||
|
||||
run_sync(channel_layer.group_send(
|
||||
group,
|
||||
{
|
||||
"type": "internal.message",
|
||||
"text": payload_dumped
|
||||
},
|
||||
))
|
||||
|
||||
run_sync(channel_layer.group_send(
|
||||
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
|
||||
{
|
||||
"type": "internal.message",
|
||||
"text": wrap_broadcast_msg(group, payload_dumped),
|
||||
},
|
||||
))
|
||||
|
||||
@@ -43,7 +43,7 @@ aim_inputs = {
|
||||
'id': 'object_query',
|
||||
'label': _('Object Query'),
|
||||
'type': 'string',
|
||||
'help_text': _('Lookup query for the object. Ex: "Safe=TestSafe;Object=testAccountName123"'),
|
||||
'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
|
||||
}, {
|
||||
'id': 'object_query_format',
|
||||
'label': _('Object Query Format'),
|
||||
|
||||
@@ -64,7 +64,7 @@ class RecordedQueryLog(object):
|
||||
if not os.path.isdir(self.dest):
|
||||
os.makedirs(self.dest)
|
||||
progname = ' '.join(sys.argv)
|
||||
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'runworker'):
|
||||
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
|
||||
if match in progname:
|
||||
progname = match
|
||||
break
|
||||
|
||||
@@ -1,5 +1,62 @@
|
||||
import psycopg2
|
||||
import select
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
NOT_READY = ([], [], [])
|
||||
|
||||
|
||||
def get_local_queuename():
|
||||
return settings.CLUSTER_HOST_ID
|
||||
|
||||
|
||||
class PubSub(object):
|
||||
def __init__(self, conn):
|
||||
assert conn.autocommit, "Connection must be in autocommit mode."
|
||||
self.conn = conn
|
||||
|
||||
def listen(self, channel):
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute('LISTEN "%s";' % channel)
|
||||
|
||||
def unlisten(self, channel):
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute('UNLISTEN "%s";' % channel)
|
||||
|
||||
def notify(self, channel, payload):
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))
|
||||
|
||||
def events(self, select_timeout=5, yield_timeouts=False):
|
||||
while True:
|
||||
if select.select([self.conn], [], [], select_timeout) == NOT_READY:
|
||||
if yield_timeouts:
|
||||
yield None
|
||||
else:
|
||||
self.conn.poll()
|
||||
while self.conn.notifies:
|
||||
yield self.conn.notifies.pop(0)
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def pg_bus_conn():
|
||||
conf = settings.DATABASES['default']
|
||||
conn = psycopg2.connect(dbname=conf['NAME'],
|
||||
host=conf['HOST'],
|
||||
user=conf['USER'],
|
||||
password=conf['PASSWORD'],
|
||||
port=conf['PORT'],
|
||||
**conf.get("OPTIONS", {}))
|
||||
# Django connection.cursor().connection doesn't have autocommit=True on
|
||||
conn.set_session(autocommit=True)
|
||||
pubsub = PubSub(conn)
|
||||
yield pubsub
|
||||
conn.close()
|
||||
|
||||
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import logging
|
||||
import socket
|
||||
|
||||
from django.conf import settings
|
||||
import uuid
|
||||
import json
|
||||
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from kombu import Queue, Exchange, Producer, Consumer
|
||||
|
||||
from . import pg_bus_conn
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
@@ -20,15 +19,6 @@ class Control(object):
|
||||
raise RuntimeError('{} must be in {}'.format(service, self.services))
|
||||
self.service = service
|
||||
self.queuename = host or get_local_queuename()
|
||||
self.queue = Queue(self.queuename, Exchange(self.queuename), routing_key=self.queuename)
|
||||
|
||||
def publish(self, msg, conn, **kwargs):
|
||||
producer = Producer(
|
||||
exchange=self.queue.exchange,
|
||||
channel=conn,
|
||||
routing_key=self.queuename
|
||||
)
|
||||
producer.publish(msg, expiration=5, **kwargs)
|
||||
|
||||
def status(self, *args, **kwargs):
|
||||
return self.control_with_reply('status', *args, **kwargs)
|
||||
@@ -36,24 +26,28 @@ class Control(object):
|
||||
def running(self, *args, **kwargs):
|
||||
return self.control_with_reply('running', *args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def generate_reply_queue_name(cls):
|
||||
return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"
|
||||
|
||||
def control_with_reply(self, command, timeout=5):
|
||||
logger.warn('checking {} {} for {}'.format(self.service, command, self.queuename))
|
||||
reply_queue = Queue(name="amq.rabbitmq.reply-to")
|
||||
reply_queue = Control.generate_reply_queue_name()
|
||||
self.result = None
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
with Consumer(conn, reply_queue, callbacks=[self.process_message], no_ack=True):
|
||||
self.publish({'control': command}, conn, reply_to='amq.rabbitmq.reply-to')
|
||||
try:
|
||||
conn.drain_events(timeout=timeout)
|
||||
except socket.timeout:
|
||||
logger.error('{} did not reply within {}s'.format(self.service, timeout))
|
||||
raise
|
||||
return self.result
|
||||
|
||||
with pg_bus_conn() as conn:
|
||||
conn.listen(reply_queue)
|
||||
conn.notify(self.queuename,
|
||||
json.dumps({'control': command, 'reply_to': reply_queue}))
|
||||
|
||||
for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
|
||||
if reply is None:
|
||||
logger.error(f'{self.service} did not reply within {timeout}s')
|
||||
raise RuntimeError("{self.service} did not reply within {timeout}s")
|
||||
break
|
||||
|
||||
return json.loads(reply.payload)
|
||||
|
||||
def control(self, msg, **kwargs):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
self.publish(msg, conn)
|
||||
|
||||
def process_message(self, body, message):
|
||||
self.result = body
|
||||
message.ack()
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(self.queuename, json.dumps(msg))
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
from amqp.exceptions import PreconditionFailed
|
||||
from django.conf import settings
|
||||
from kombu.connection import Connection as KombuConnection
|
||||
from kombu.transport import pyamqp
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
__all__ = ['Connection']
|
||||
|
||||
|
||||
class Connection(KombuConnection):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Connection, self).__init__(*args, **kwargs)
|
||||
class _Channel(pyamqp.Channel):
|
||||
|
||||
def queue_declare(self, queue, *args, **kwargs):
|
||||
kwargs['durable'] = settings.BROKER_DURABILITY
|
||||
try:
|
||||
return super(_Channel, self).queue_declare(queue, *args, **kwargs)
|
||||
except PreconditionFailed as e:
|
||||
if "inequivalent arg 'durable'" in getattr(e, 'reply_text', None):
|
||||
logger.error(
|
||||
'queue {} durability is not {}, deleting and recreating'.format(
|
||||
|
||||
queue,
|
||||
kwargs['durable']
|
||||
)
|
||||
)
|
||||
self.queue_delete(queue)
|
||||
return super(_Channel, self).queue_declare(queue, *args, **kwargs)
|
||||
|
||||
class _Connection(pyamqp.Connection):
|
||||
Channel = _Channel
|
||||
|
||||
class _Transport(pyamqp.Transport):
|
||||
Connection = _Connection
|
||||
|
||||
self.transport_cls = _Transport
|
||||
@@ -1,6 +1,7 @@
|
||||
import logging
|
||||
import threading
|
||||
import os
|
||||
import time
|
||||
from multiprocessing import Process
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connections
|
||||
@@ -14,33 +15,36 @@ logger = logging.getLogger('awx.main.dispatch.periodic')
|
||||
class Scheduler(Scheduler):
|
||||
|
||||
def run_continuously(self):
|
||||
cease_continuous_run = threading.Event()
|
||||
idle_seconds = max(
|
||||
1,
|
||||
min(self.jobs).period.total_seconds() / 2
|
||||
)
|
||||
|
||||
class ScheduleThread(threading.Thread):
|
||||
@classmethod
|
||||
def run(cls):
|
||||
while not cease_continuous_run.is_set():
|
||||
try:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
self.run_pending()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
'encountered an error while scheduling periodic tasks'
|
||||
)
|
||||
time.sleep(idle_seconds)
|
||||
logger.debug('periodic thread exiting...')
|
||||
def run():
|
||||
ppid = os.getppid()
|
||||
logger.warn(f'periodic beat started')
|
||||
while True:
|
||||
if os.getppid() != ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
pid = os.getpid()
|
||||
logger.warn(f'periodic beat exiting gracefully pid:{pid}')
|
||||
raise SystemExit()
|
||||
try:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
self.run_pending()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
'encountered an error while scheduling periodic tasks'
|
||||
)
|
||||
time.sleep(idle_seconds)
|
||||
|
||||
thread = ScheduleThread()
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
return cease_continuous_run
|
||||
process = Process(target=run)
|
||||
process.daemon = True
|
||||
process.start()
|
||||
|
||||
|
||||
def run_continuously():
|
||||
@@ -49,4 +53,4 @@ def run_continuously():
|
||||
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
|
||||
total_seconds = task['schedule'].total_seconds()
|
||||
scheduler.every(total_seconds).seconds.do(apply_async)
|
||||
return scheduler.run_continuously()
|
||||
scheduler.run_continuously()
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
@@ -244,7 +246,7 @@ class WorkerPool(object):
|
||||
' qsize={{ w.managed_tasks|length }}'
|
||||
' rss={{ w.mb }}MB'
|
||||
'{% for task in w.managed_tasks.values() %}'
|
||||
'\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
|
||||
'\n - {% if loop.index0 == 0 %}running {% if "age" in task %}for: {{ "%.1f" % task["age"] }}s {% endif %}{% else %}queued {% endif %}'
|
||||
'{{ task["uuid"] }} '
|
||||
'{% if "task" in task %}'
|
||||
'{{ task["task"].rsplit(".", 1)[-1] }}'
|
||||
@@ -365,6 +367,26 @@ class AutoscalePool(WorkerPool):
|
||||
logger.warn('scaling down worker pid:{}'.format(w.pid))
|
||||
w.quit()
|
||||
self.workers.remove(w)
|
||||
if w.alive:
|
||||
# if we discover a task manager invocation that's been running
|
||||
# too long, reap it (because otherwise it'll just hold the postgres
|
||||
# advisory lock forever); the goal of this code is to discover
|
||||
# deadlocks or other serious issues in the task manager that cause
|
||||
# the task manager to never do more work
|
||||
current_task = w.current_task
|
||||
if current_task and isinstance(current_task, dict):
|
||||
if current_task.get('task', '').endswith('tasks.run_task_manager'):
|
||||
if 'started' not in current_task:
|
||||
w.managed_tasks[
|
||||
current_task['uuid']
|
||||
]['started'] = time.time()
|
||||
age = time.time() - current_task['started']
|
||||
w.managed_tasks[current_task['uuid']]['age'] = age
|
||||
if age > (60 * 5):
|
||||
logger.error(
|
||||
f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}'
|
||||
) # noqa
|
||||
os.kill(w.pid, signal.SIGTERM)
|
||||
|
||||
for m in orphaned:
|
||||
# if all the workers are dead, spawn at least one
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import inspect
|
||||
import logging
|
||||
import sys
|
||||
import json
|
||||
from uuid import uuid4
|
||||
|
||||
from django.conf import settings
|
||||
from kombu import Exchange, Producer
|
||||
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from . import pg_bus_conn
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
@@ -39,24 +39,22 @@ class task:
|
||||
add.apply_async([1, 1])
|
||||
Adder.apply_async([1, 1])
|
||||
|
||||
# Tasks can also define a specific target queue or exchange type:
|
||||
# Tasks can also define a specific target queue or use the special fan-out queue tower_broadcast:
|
||||
|
||||
@task(queue='slow-tasks')
|
||||
def snooze():
|
||||
time.sleep(10)
|
||||
|
||||
@task(queue='tower_broadcast', exchange_type='fanout')
|
||||
@task(queue='tower_broadcast')
|
||||
def announce():
|
||||
print("Run this everywhere!")
|
||||
"""
|
||||
|
||||
def __init__(self, queue=None, exchange_type=None):
|
||||
def __init__(self, queue=None):
|
||||
self.queue = queue
|
||||
self.exchange_type = exchange_type
|
||||
|
||||
def __call__(self, fn=None):
|
||||
queue = self.queue
|
||||
exchange_type = self.exchange_type
|
||||
|
||||
class PublisherMixin(object):
|
||||
|
||||
@@ -73,9 +71,12 @@ class task:
|
||||
kwargs = kwargs or {}
|
||||
queue = (
|
||||
queue or
|
||||
getattr(cls.queue, 'im_func', cls.queue) or
|
||||
settings.CELERY_DEFAULT_QUEUE
|
||||
getattr(cls.queue, 'im_func', cls.queue)
|
||||
)
|
||||
if not queue:
|
||||
msg = f'{cls.name}: Queue value required and may not be None'
|
||||
logger.error(msg)
|
||||
raise ValueError(msg)
|
||||
obj = {
|
||||
'uuid': task_id,
|
||||
'args': args,
|
||||
@@ -86,21 +87,8 @@ class task:
|
||||
if callable(queue):
|
||||
queue = queue()
|
||||
if not settings.IS_TESTING(sys.argv):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
exchange = Exchange(queue, type=exchange_type or 'direct')
|
||||
producer = Producer(conn)
|
||||
logger.debug('publish {}({}, queue={})'.format(
|
||||
cls.name,
|
||||
task_id,
|
||||
queue
|
||||
))
|
||||
producer.publish(obj,
|
||||
serializer='json',
|
||||
compression='bzip2',
|
||||
exchange=exchange,
|
||||
declare=[exchange],
|
||||
delivery_mode="persistent",
|
||||
routing_key=queue)
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(queue, json.dumps(obj))
|
||||
return (obj, queue)
|
||||
|
||||
# If the object we're wrapping *is* a class (e.g., RunJob), return
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
from .base import AWXConsumer, BaseWorker # noqa
|
||||
from .base import AWXConsumerRedis, AWXConsumerPG, BaseWorker # noqa
|
||||
from .callback import CallbackBrokerWorker # noqa
|
||||
from .task import TaskWorker # noqa
|
||||
|
||||
@@ -5,14 +5,17 @@ import os
|
||||
import logging
|
||||
import signal
|
||||
import sys
|
||||
import redis
|
||||
import json
|
||||
import psycopg2
|
||||
from uuid import UUID
|
||||
from queue import Empty as QueueEmpty
|
||||
|
||||
from django import db
|
||||
from kombu import Producer
|
||||
from kombu.mixins import ConsumerMixin
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.dispatch.pool import WorkerPool
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
@@ -37,10 +40,11 @@ class WorkerSignalHandler:
|
||||
self.kill_now = True
|
||||
|
||||
|
||||
class AWXConsumer(ConsumerMixin):
|
||||
class AWXConsumerBase(object):
|
||||
def __init__(self, name, worker, queues=[], pool=None):
|
||||
self.should_stop = False
|
||||
|
||||
def __init__(self, name, connection, worker, queues=[], pool=None):
|
||||
self.connection = connection
|
||||
self.name = name
|
||||
self.total_messages = 0
|
||||
self.queues = queues
|
||||
self.worker = worker
|
||||
@@ -49,25 +53,15 @@ class AWXConsumer(ConsumerMixin):
|
||||
self.pool = WorkerPool()
|
||||
self.pool.init_workers(self.worker.work_loop)
|
||||
|
||||
def get_consumers(self, Consumer, channel):
|
||||
logger.debug(self.listening_on)
|
||||
return [Consumer(queues=self.queues, accept=['json'],
|
||||
callbacks=[self.process_task])]
|
||||
|
||||
@property
|
||||
def listening_on(self):
|
||||
return 'listening on {}'.format([
|
||||
'{} [{}]'.format(q.name, q.exchange.type) for q in self.queues
|
||||
])
|
||||
return f'listening on {self.queues}'
|
||||
|
||||
def control(self, body, message):
|
||||
logger.warn('Consumer received control message {}'.format(body))
|
||||
def control(self, body):
|
||||
logger.warn(body)
|
||||
control = body.get('control')
|
||||
if control in ('status', 'running'):
|
||||
producer = Producer(
|
||||
channel=self.connection,
|
||||
routing_key=message.properties['reply_to']
|
||||
)
|
||||
reply_queue = body['reply_to']
|
||||
if control == 'status':
|
||||
msg = '\n'.join([self.listening_on, self.pool.debug()])
|
||||
elif control == 'running':
|
||||
@@ -75,20 +69,21 @@ class AWXConsumer(ConsumerMixin):
|
||||
for worker in self.pool.workers:
|
||||
worker.calculate_managed_tasks()
|
||||
msg.extend(worker.managed_tasks.keys())
|
||||
producer.publish(msg)
|
||||
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(reply_queue, json.dumps(msg))
|
||||
elif control == 'reload':
|
||||
for worker in self.pool.workers:
|
||||
worker.quit()
|
||||
else:
|
||||
logger.error('unrecognized control message: {}'.format(control))
|
||||
message.ack()
|
||||
|
||||
def process_task(self, body, message):
|
||||
def process_task(self, body):
|
||||
if 'control' in body:
|
||||
try:
|
||||
return self.control(body, message)
|
||||
return self.control(body)
|
||||
except Exception:
|
||||
logger.exception("Exception handling control message:")
|
||||
logger.exception(f"Exception handling control message: {body}")
|
||||
return
|
||||
if len(self.pool):
|
||||
if "uuid" in body and body['uuid']:
|
||||
@@ -102,21 +97,63 @@ class AWXConsumer(ConsumerMixin):
|
||||
queue = 0
|
||||
self.pool.write(queue, body)
|
||||
self.total_messages += 1
|
||||
message.ack()
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
signal.signal(signal.SIGINT, self.stop)
|
||||
signal.signal(signal.SIGTERM, self.stop)
|
||||
self.worker.on_start()
|
||||
super(AWXConsumer, self).run(*args, **kwargs)
|
||||
|
||||
# Child should implement other things here
|
||||
|
||||
def stop(self, signum, frame):
|
||||
self.should_stop = True # this makes the kombu mixin stop consuming
|
||||
self.should_stop = True
|
||||
logger.warn('received {}, stopping'.format(signame(signum)))
|
||||
self.worker.on_stop()
|
||||
raise SystemExit()
|
||||
|
||||
|
||||
class AWXConsumerRedis(AWXConsumerBase):
|
||||
def run(self, *args, **kwargs):
|
||||
super(AWXConsumerRedis, self).run(*args, **kwargs)
|
||||
self.worker.on_start()
|
||||
|
||||
queue = redis.Redis.from_url(settings.BROKER_URL)
|
||||
while True:
|
||||
try:
|
||||
res = queue.blpop(self.queues)
|
||||
res = json.loads(res[1])
|
||||
self.process_task(res)
|
||||
except redis.exceptions.RedisError:
|
||||
logger.exception(f"encountered an error communicating with redis")
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
logger.exception(f"failed to decode JSON message from redis")
|
||||
if self.should_stop:
|
||||
return
|
||||
|
||||
|
||||
class AWXConsumerPG(AWXConsumerBase):
|
||||
def run(self, *args, **kwargs):
|
||||
super(AWXConsumerPG, self).run(*args, **kwargs)
|
||||
|
||||
logger.warn(f"Running worker {self.name} listening to queues {self.queues}")
|
||||
init = False
|
||||
|
||||
while True:
|
||||
try:
|
||||
with pg_bus_conn() as conn:
|
||||
for queue in self.queues:
|
||||
conn.listen(queue)
|
||||
if init is False:
|
||||
self.worker.on_start()
|
||||
init = True
|
||||
for e in conn.events():
|
||||
self.process_task(json.loads(e.payload))
|
||||
if self.should_stop:
|
||||
return
|
||||
except psycopg2.InterfaceError:
|
||||
logger.warn("Stale Postgres message bus connection, reconnecting")
|
||||
continue
|
||||
|
||||
|
||||
class BaseWorker(object):
|
||||
|
||||
def read(self, queue):
|
||||
|
||||
@@ -15,7 +15,9 @@ from django.db.utils import InterfaceError, InternalError, IntegrityError
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
|
||||
InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
|
||||
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
|
||||
Job)
|
||||
from awx.main.tasks import handle_success_and_failure_notifications
|
||||
from awx.main.models.events import emit_event_detail
|
||||
|
||||
from .base import BaseWorker
|
||||
@@ -137,19 +139,14 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
if hasattr(uj, 'send_notification_templates'):
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
break
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
|
||||
if isinstance(uj, Job):
|
||||
# *actual playbooks* send their success/failure
|
||||
# notifications in response to the playbook_on_stats
|
||||
# event handling code in main.models.events
|
||||
pass
|
||||
elif hasattr(uj, 'send_notification_templates'):
|
||||
handle_success_and_failure_notifications.apply_async([uj.id])
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
return
|
||||
|
||||
@@ -56,7 +56,8 @@ from awx.main import utils
|
||||
|
||||
__all__ = ['AutoOneToOneField', 'ImplicitRoleField', 'JSONField',
|
||||
'SmartFilterField', 'OrderedManyToManyField',
|
||||
'update_role_parentage_for_instance', 'is_implicit_parent']
|
||||
'update_role_parentage_for_instance',
|
||||
'is_implicit_parent']
|
||||
|
||||
|
||||
# Provide a (better) custom error message for enum jsonschema validation
|
||||
@@ -140,8 +141,9 @@ def resolve_role_field(obj, field):
|
||||
return []
|
||||
|
||||
if len(field_components) == 1:
|
||||
role_cls = str(utils.get_current_apps().get_model('main', 'Role'))
|
||||
if not str(type(obj)) == role_cls:
|
||||
# use extremely generous duck typing to accomidate all possible forms
|
||||
# of the model that may be used during various migrations
|
||||
if obj._meta.model_name != 'role' or obj._meta.app_label != 'main':
|
||||
raise Exception(smart_text('{} refers to a {}, not a Role'.format(field, type(obj))))
|
||||
ret.append(obj.id)
|
||||
else:
|
||||
@@ -197,18 +199,27 @@ def update_role_parentage_for_instance(instance):
|
||||
updates the parents listing for all the roles
|
||||
of a given instance if they have changed
|
||||
'''
|
||||
parents_removed = set()
|
||||
parents_added = set()
|
||||
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
|
||||
cur_role = getattr(instance, implicit_role_field.name)
|
||||
original_parents = set(json.loads(cur_role.implicit_parents))
|
||||
new_parents = implicit_role_field._resolve_parent_roles(instance)
|
||||
cur_role.parents.remove(*list(original_parents - new_parents))
|
||||
cur_role.parents.add(*list(new_parents - original_parents))
|
||||
removals = original_parents - new_parents
|
||||
if removals:
|
||||
cur_role.parents.remove(*list(removals))
|
||||
parents_removed.add(cur_role.pk)
|
||||
additions = new_parents - original_parents
|
||||
if additions:
|
||||
cur_role.parents.add(*list(additions))
|
||||
parents_added.add(cur_role.pk)
|
||||
new_parents_list = list(new_parents)
|
||||
new_parents_list.sort()
|
||||
new_parents_json = json.dumps(new_parents_list)
|
||||
if cur_role.implicit_parents != new_parents_json:
|
||||
cur_role.implicit_parents = new_parents_json
|
||||
cur_role.save()
|
||||
cur_role.save(update_fields=['implicit_parents'])
|
||||
return (parents_added, parents_removed)
|
||||
|
||||
|
||||
class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
|
||||
@@ -256,20 +267,18 @@ class ImplicitRoleField(models.ForeignKey):
|
||||
field_names = [field_names]
|
||||
|
||||
for field_name in field_names:
|
||||
# Handle the OR syntax for role parents
|
||||
if type(field_name) == tuple:
|
||||
continue
|
||||
|
||||
if type(field_name) == bytes:
|
||||
field_name = field_name.decode('utf-8')
|
||||
|
||||
if field_name.startswith('singleton:'):
|
||||
continue
|
||||
|
||||
field_name, sep, field_attr = field_name.partition('.')
|
||||
field = getattr(cls, field_name)
|
||||
# Non existent fields will occur if ever a parent model is
|
||||
# moved inside a migration, needed for job_template_organization_field
|
||||
# migration in particular
|
||||
# consistency is assured by unit test awx.main.tests.functional
|
||||
field = getattr(cls, field_name, None)
|
||||
|
||||
if type(field) is ReverseManyToOneDescriptor or \
|
||||
if field and type(field) is ReverseManyToOneDescriptor or \
|
||||
type(field) is ManyToManyDescriptor:
|
||||
|
||||
if '.' in field_attr:
|
||||
|
||||
@@ -15,7 +15,6 @@ import awx
|
||||
from awx.main.utils import (
|
||||
get_system_task_capacity
|
||||
)
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
|
||||
logger = logging.getLogger('awx.isolated.manager')
|
||||
playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')
|
||||
@@ -32,12 +31,14 @@ def set_pythonpath(venv_libdir, env):
|
||||
|
||||
class IsolatedManager(object):
|
||||
|
||||
def __init__(self, canceled_callback=None, check_callback=None, pod_manager=None):
|
||||
def __init__(self, event_handler, canceled_callback=None, check_callback=None, pod_manager=None):
|
||||
"""
|
||||
:param event_handler: a callable used to persist event data from isolated nodes
|
||||
:param canceled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
canceled
|
||||
"""
|
||||
self.event_handler = event_handler
|
||||
self.canceled_callback = canceled_callback
|
||||
self.check_callback = check_callback
|
||||
self.started_at = None
|
||||
@@ -208,7 +209,6 @@ class IsolatedManager(object):
|
||||
status = 'failed'
|
||||
rc = None
|
||||
last_check = time.time()
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
|
||||
while status == 'failed':
|
||||
canceled = self.canceled_callback() if self.canceled_callback else False
|
||||
@@ -238,7 +238,7 @@ class IsolatedManager(object):
|
||||
except json.decoder.JSONDecodeError: # Just in case it's not fully here yet.
|
||||
pass
|
||||
|
||||
self.consume_events(dispatcher)
|
||||
self.consume_events()
|
||||
|
||||
last_check = time.time()
|
||||
|
||||
@@ -266,19 +266,11 @@ class IsolatedManager(object):
|
||||
|
||||
# consume events one last time just to be sure we didn't miss anything
|
||||
# in the final sync
|
||||
self.consume_events(dispatcher)
|
||||
|
||||
# emit an EOF event
|
||||
event_data = {
|
||||
'event': 'EOF',
|
||||
'final_counter': len(self.handled_events)
|
||||
}
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
dispatcher.dispatch(event_data)
|
||||
self.consume_events()
|
||||
|
||||
return status, rc
|
||||
|
||||
def consume_events(self, dispatcher):
|
||||
def consume_events(self):
|
||||
# discover new events and ingest them
|
||||
events_path = self.path_to('artifacts', self.ident, 'job_events')
|
||||
|
||||
@@ -288,7 +280,7 @@ class IsolatedManager(object):
|
||||
if os.path.exists(events_path):
|
||||
for event in set(os.listdir(events_path)) - self.handled_events:
|
||||
path = os.path.join(events_path, event)
|
||||
if os.path.exists(path):
|
||||
if os.path.exists(path) and os.path.isfile(path):
|
||||
try:
|
||||
event_data = json.load(
|
||||
open(os.path.join(events_path, event), 'r')
|
||||
@@ -302,16 +294,10 @@ class IsolatedManager(object):
|
||||
# practice
|
||||
# in this scenario, just ignore this event and try it
|
||||
# again on the next sync
|
||||
pass
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
dispatcher.dispatch(event_data)
|
||||
continue
|
||||
self.event_handler(event_data)
|
||||
self.handled_events.add(event)
|
||||
|
||||
# handle artifacts
|
||||
if event_data.get('event_data', {}).get('artifact_data', {}):
|
||||
self.instance.artifacts = event_data['event_data']['artifact_data']
|
||||
self.instance.save(update_fields=['artifacts'])
|
||||
|
||||
|
||||
def cleanup(self):
|
||||
extravars = {
|
||||
@@ -400,8 +386,7 @@ class IsolatedManager(object):
|
||||
if os.path.exists(private_data_dir):
|
||||
shutil.rmtree(private_data_dir)
|
||||
|
||||
def run(self, instance, private_data_dir, playbook, module, module_args,
|
||||
event_data_key, ident=None):
|
||||
def run(self, instance, private_data_dir, playbook, module, module_args, ident=None):
|
||||
"""
|
||||
Run a job on an isolated host.
|
||||
|
||||
@@ -412,14 +397,12 @@ class IsolatedManager(object):
|
||||
:param playbook: the playbook to run
|
||||
:param module: the module to run
|
||||
:param module_args: the module args to use
|
||||
:param event_data_key: e.g., job_id, inventory_id, ...
|
||||
|
||||
For a completed job run, this function returns (status, rc),
|
||||
representing the status and return code of the isolated
|
||||
`ansible-playbook` run.
|
||||
"""
|
||||
self.ident = ident
|
||||
self.event_data_key = event_data_key
|
||||
self.instance = instance
|
||||
self.private_data_dir = private_data_dir
|
||||
self.runner_params = self.build_runner_params(
|
||||
@@ -430,9 +413,4 @@ class IsolatedManager(object):
|
||||
status, rc = self.dispatch(playbook, module, module_args)
|
||||
if status == 'successful':
|
||||
status, rc = self.check()
|
||||
else:
|
||||
# emit an EOF event
|
||||
event_data = {'event': 'EOF', 'final_counter': 0}
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
CallbackQueueDispatcher().dispatch(event_data)
|
||||
return status, rc
|
||||
|
||||
@@ -21,6 +21,8 @@ from awx.main.signals import (
|
||||
disable_computed_fields
|
||||
)
|
||||
|
||||
from awx.main.management.commands.deletion import AWXCollector, pre_delete
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
'''
|
||||
@@ -57,27 +59,37 @@ class Command(BaseCommand):
|
||||
action='store_true', dest='only_workflow_jobs',
|
||||
help='Remove workflow jobs')
|
||||
|
||||
def cleanup_jobs(self):
|
||||
#jobs_qs = Job.objects.exclude(status__in=('pending', 'running'))
|
||||
#jobs_qs = jobs_qs.filter(created__lte=self.cutoff)
|
||||
skipped, deleted = 0, 0
|
||||
jobs = Job.objects.filter(created__lt=self.cutoff)
|
||||
for job in jobs.iterator():
|
||||
job_display = '"%s" (%d host summaries, %d events)' % \
|
||||
(str(job),
|
||||
job.job_host_summaries.count(), job.job_events.count())
|
||||
if job.status in ('pending', 'waiting', 'running'):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s job %s', action_text, job.status, job_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, job_display)
|
||||
if not self.dry_run:
|
||||
job.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += Job.objects.filter(created__gte=self.cutoff).count()
|
||||
def cleanup_jobs(self):
|
||||
skipped, deleted = 0, 0
|
||||
|
||||
batch_size = 1000000
|
||||
|
||||
while True:
|
||||
# get queryset for available jobs to remove
|
||||
qs = Job.objects.filter(created__lt=self.cutoff).exclude(status__in=['pending', 'waiting', 'running'])
|
||||
# get pk list for the first N (batch_size) objects
|
||||
pk_list = qs[0:batch_size].values_list('pk')
|
||||
# You cannot delete queries with sql LIMIT set, so we must
|
||||
# create a new query from this pk_list
|
||||
qs_batch = Job.objects.filter(pk__in=pk_list)
|
||||
just_deleted = 0
|
||||
if not self.dry_run:
|
||||
del_query = pre_delete(qs_batch)
|
||||
collector = AWXCollector(del_query.db)
|
||||
collector.collect(del_query)
|
||||
_, models_deleted = collector.delete()
|
||||
if models_deleted:
|
||||
just_deleted = models_deleted['main.Job']
|
||||
deleted += just_deleted
|
||||
else:
|
||||
just_deleted = 0 # break from loop, this is dry run
|
||||
deleted = qs.count()
|
||||
|
||||
if just_deleted == 0:
|
||||
break
|
||||
|
||||
skipped += (Job.objects.filter(created__gte=self.cutoff) | Job.objects.filter(status__in=['pending', 'waiting', 'running'])).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_ad_hoc_commands(self):
|
||||
|
||||
177
awx/main/management/commands/deletion.py
Normal file
177
awx/main/management/commands/deletion.py
Normal file
@@ -0,0 +1,177 @@
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.db.models.deletion import (
|
||||
DO_NOTHING, Collector, get_candidate_relations_to_delete,
|
||||
)
|
||||
from collections import Counter, OrderedDict
|
||||
from django.db import transaction
|
||||
from django.db.models import sql
|
||||
|
||||
|
||||
def bulk_related_objects(field, objs, using):
|
||||
# This overrides the method in django.contrib.contenttypes.fields.py
|
||||
"""
|
||||
Return all objects related to ``objs`` via this ``GenericRelation``.
|
||||
"""
|
||||
return field.remote_field.model._base_manager.db_manager(using).filter(**{
|
||||
"%s__pk" % field.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
|
||||
field.model, for_concrete_model=field.for_concrete_model).pk,
|
||||
"%s__in" % field.object_id_field_name: list(objs.values_list('pk', flat=True))
|
||||
})
|
||||
|
||||
|
||||
def pre_delete(qs):
|
||||
# taken from .delete method in django.db.models.query.py
|
||||
assert qs.query.can_filter(), \
|
||||
"Cannot use 'limit' or 'offset' with delete."
|
||||
|
||||
if qs._fields is not None:
|
||||
raise TypeError("Cannot call delete() after .values() or .values_list()")
|
||||
|
||||
del_query = qs._chain()
|
||||
|
||||
# The delete is actually 2 queries - one to find related objects,
|
||||
# and one to delete. Make sure that the discovery of related
|
||||
# objects is performed on the same database as the deletion.
|
||||
del_query._for_write = True
|
||||
|
||||
# Disable non-supported fields.
|
||||
del_query.query.select_for_update = False
|
||||
del_query.query.select_related = False
|
||||
del_query.query.clear_ordering(force_empty=True)
|
||||
return del_query
|
||||
|
||||
|
||||
class AWXCollector(Collector):
|
||||
|
||||
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
|
||||
"""
|
||||
Add 'objs' to the collection of objects to be deleted. If the call is
|
||||
the result of a cascade, 'source' should be the model that caused it,
|
||||
and 'nullable' should be set to True if the relation can be null.
|
||||
|
||||
Return a list of all objects that were not already collected.
|
||||
"""
|
||||
if not objs.exists():
|
||||
return objs
|
||||
model = objs.model
|
||||
self.data.setdefault(model, [])
|
||||
self.data[model].append(objs)
|
||||
# Nullable relationships can be ignored -- they are nulled out before
|
||||
# deleting, and therefore do not affect the order in which objects have
|
||||
# to be deleted.
|
||||
if source is not None and not nullable:
|
||||
if reverse_dependency:
|
||||
source, model = model, source
|
||||
self.dependencies.setdefault(
|
||||
source._meta.concrete_model, set()).add(model._meta.concrete_model)
|
||||
return objs
|
||||
|
||||
def add_field_update(self, field, value, objs):
|
||||
"""
|
||||
Schedule a field update. 'objs' must be a homogeneous iterable
|
||||
collection of model instances (e.g. a QuerySet).
|
||||
"""
|
||||
if not objs.exists():
|
||||
return
|
||||
model = objs.model
|
||||
self.field_updates.setdefault(model, {})
|
||||
self.field_updates[model].setdefault((field, value), [])
|
||||
self.field_updates[model][(field, value)].append(objs)
|
||||
|
||||
def collect(self, objs, source=None, nullable=False, collect_related=True,
|
||||
source_attr=None, reverse_dependency=False, keep_parents=False):
|
||||
"""
|
||||
Add 'objs' to the collection of objects to be deleted as well as all
|
||||
parent instances. 'objs' must be a homogeneous iterable collection of
|
||||
model instances (e.g. a QuerySet). If 'collect_related' is True,
|
||||
related objects will be handled by their respective on_delete handler.
|
||||
|
||||
If the call is the result of a cascade, 'source' should be the model
|
||||
that caused it and 'nullable' should be set to True, if the relation
|
||||
can be null.
|
||||
|
||||
If 'reverse_dependency' is True, 'source' will be deleted before the
|
||||
current model, rather than after. (Needed for cascading to parent
|
||||
models, the one case in which the cascade follows the forwards
|
||||
direction of an FK rather than the reverse direction.)
|
||||
|
||||
If 'keep_parents' is True, data of parent model's will be not deleted.
|
||||
"""
|
||||
|
||||
if hasattr(objs, 'polymorphic_disabled'):
|
||||
objs.polymorphic_disabled = True
|
||||
|
||||
if self.can_fast_delete(objs):
|
||||
self.fast_deletes.append(objs)
|
||||
return
|
||||
new_objs = self.add(objs, source, nullable,
|
||||
reverse_dependency=reverse_dependency)
|
||||
if not new_objs.exists():
|
||||
return
|
||||
|
||||
model = new_objs.model
|
||||
|
||||
if not keep_parents:
|
||||
# Recursively collect concrete model's parent models, but not their
|
||||
# related objects. These will be found by meta.get_fields()
|
||||
concrete_model = model._meta.concrete_model
|
||||
for ptr in concrete_model._meta.parents.keys():
|
||||
if ptr:
|
||||
parent_objs = ptr.objects.filter(pk__in = new_objs.values_list('pk', flat=True))
|
||||
self.collect(parent_objs, source=model,
|
||||
collect_related=False,
|
||||
reverse_dependency=True)
|
||||
if collect_related:
|
||||
parents = model._meta.parents
|
||||
for related in get_candidate_relations_to_delete(model._meta):
|
||||
# Preserve parent reverse relationships if keep_parents=True.
|
||||
if keep_parents and related.model in parents:
|
||||
continue
|
||||
field = related.field
|
||||
if field.remote_field.on_delete == DO_NOTHING:
|
||||
continue
|
||||
related_qs = self.related_objects(related, new_objs)
|
||||
if self.can_fast_delete(related_qs, from_field=field):
|
||||
self.fast_deletes.append(related_qs)
|
||||
elif related_qs:
|
||||
field.remote_field.on_delete(self, field, related_qs, self.using)
|
||||
for field in model._meta.private_fields:
|
||||
if hasattr(field, 'bulk_related_objects'):
|
||||
# It's something like generic foreign key.
|
||||
sub_objs = bulk_related_objects(field, new_objs, self.using)
|
||||
self.collect(sub_objs, source=model, nullable=True)
|
||||
|
||||
def delete(self):
|
||||
self.sort()
|
||||
|
||||
# collect pk_list before deletion (once things start to delete
|
||||
# queries might not be able to retreive pk list)
|
||||
del_dict = OrderedDict()
|
||||
for model, instances in self.data.items():
|
||||
del_dict.setdefault(model, [])
|
||||
for inst in instances:
|
||||
del_dict[model] += list(inst.values_list('pk', flat=True))
|
||||
|
||||
deleted_counter = Counter()
|
||||
|
||||
with transaction.atomic(using=self.using, savepoint=False):
|
||||
|
||||
# update fields
|
||||
for model, instances_for_fieldvalues in self.field_updates.items():
|
||||
for (field, value), instances in instances_for_fieldvalues.items():
|
||||
for inst in instances:
|
||||
query = sql.UpdateQuery(model)
|
||||
query.update_batch(inst.values_list('pk', flat=True),
|
||||
{field.name: value}, self.using)
|
||||
# fast deletes
|
||||
for qs in self.fast_deletes:
|
||||
count = qs._raw_delete(using=self.using)
|
||||
deleted_counter[qs.model._meta.label] += count
|
||||
|
||||
# delete instances
|
||||
for model, pk_list in del_dict.items():
|
||||
query = sql.DeleteQuery(model)
|
||||
count = query.delete_batch(pk_list, self.using)
|
||||
deleted_counter[model._meta.label] += count
|
||||
|
||||
return sum(deleted_counter.values()), dict(deleted_counter)
|
||||
@@ -1,8 +1,6 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
import subprocess
|
||||
|
||||
from django.db import transaction
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
|
||||
@@ -33,18 +31,9 @@ class Command(BaseCommand):
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
instance = Instance.objects.filter(hostname=hostname)
|
||||
if instance.exists():
|
||||
isolated = instance.first().is_isolated()
|
||||
instance.delete()
|
||||
print("Instance Removed")
|
||||
if isolated:
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
else:
|
||||
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
|
||||
if result != 0:
|
||||
print("Node deprovisioning may have failed when attempting to "
|
||||
"remove the RabbitMQ instance {} from the cluster".format(hostname))
|
||||
else:
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
print('(changed: True)')
|
||||
else:
|
||||
print('No instance found matching name {}'.format(hostname))
|
||||
|
||||
@@ -496,12 +496,6 @@ class Command(BaseCommand):
|
||||
group_names = all_group_names[offset:(offset + self._batch_size)]
|
||||
for group_pk in groups_qs.filter(name__in=group_names).values_list('pk', flat=True):
|
||||
del_group_pks.discard(group_pk)
|
||||
if self.inventory_source.deprecated_group_id in del_group_pks: # TODO: remove in 3.3
|
||||
logger.warning(
|
||||
'Group "%s" from v1 API is not deleted by overwrite',
|
||||
self.inventory_source.deprecated_group.name
|
||||
)
|
||||
del_group_pks.discard(self.inventory_source.deprecated_group_id)
|
||||
# Now delete all remaining groups in batches.
|
||||
all_del_pks = sorted(list(del_group_pks))
|
||||
for offset in range(0, len(all_del_pks), self._batch_size):
|
||||
@@ -534,12 +528,6 @@ class Command(BaseCommand):
|
||||
# Set of all host pks managed by this inventory source
|
||||
all_source_host_pks = self._existing_host_pks()
|
||||
for db_group in db_groups.all():
|
||||
if self.inventory_source.deprecated_group_id == db_group.id: # TODO: remove in 3.3
|
||||
logger.debug(
|
||||
'Group "%s" from v1 API child group/host connections preserved',
|
||||
db_group.name
|
||||
)
|
||||
continue
|
||||
# Delete child group relationships not present in imported data.
|
||||
db_children = db_group.children
|
||||
db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
|
||||
@@ -1006,12 +994,6 @@ class Command(BaseCommand):
|
||||
except re.error:
|
||||
raise CommandError('invalid regular expression for --host-filter')
|
||||
|
||||
'''
|
||||
TODO: Remove this deprecation when we remove support for rax.py
|
||||
'''
|
||||
if self.source == "rax.py":
|
||||
logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")
|
||||
|
||||
begin = time.time()
|
||||
self.load_inventory_from_database()
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.models import Instance, InstanceGroup
|
||||
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction
|
||||
|
||||
|
||||
class InstanceNotFound(Exception):
|
||||
@@ -31,7 +32,6 @@ class Command(BaseCommand):
|
||||
|
||||
|
||||
def get_create_update_instance_group(self, queuename, instance_percent, instance_min):
|
||||
ig = InstanceGroup.objects.filter(name=queuename)
|
||||
created = False
|
||||
changed = False
|
||||
|
||||
@@ -98,26 +98,27 @@ class Command(BaseCommand):
|
||||
if options.get('hostnames'):
|
||||
hostname_list = options.get('hostnames').split(",")
|
||||
|
||||
with advisory_lock('instance_group_registration_{}'.format(queuename)):
|
||||
changed2 = False
|
||||
changed3 = False
|
||||
(ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
|
||||
if created:
|
||||
print("Creating instance group {}".format(ig.name))
|
||||
elif not created:
|
||||
print("Instance Group already registered {}".format(ig.name))
|
||||
with advisory_lock('cluster_policy_lock'):
|
||||
with transaction.atomic():
|
||||
changed2 = False
|
||||
changed3 = False
|
||||
(ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
|
||||
if created:
|
||||
print("Creating instance group {}".format(ig.name))
|
||||
elif not created:
|
||||
print("Instance Group already registered {}".format(ig.name))
|
||||
|
||||
if ctrl:
|
||||
(ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
|
||||
if changed2:
|
||||
print("Set controller group {} on {}.".format(ctrl, queuename))
|
||||
if ctrl:
|
||||
(ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
|
||||
if changed2:
|
||||
print("Set controller group {} on {}.".format(ctrl, queuename))
|
||||
|
||||
try:
|
||||
(instances, changed3) = self.add_instances_to_group(ig, hostname_list)
|
||||
for i in instances:
|
||||
print("Added instance {} to {}".format(i.hostname, ig.name))
|
||||
except InstanceNotFound as e:
|
||||
instance_not_found_err = e
|
||||
try:
|
||||
(instances, changed3) = self.add_instances_to_group(ig, hostname_list)
|
||||
for i in instances:
|
||||
print("Added instance {} to {}".format(i.hostname, ig.name))
|
||||
except InstanceNotFound as e:
|
||||
instance_not_found_err = e
|
||||
|
||||
if any([changed1, changed2, changed3]):
|
||||
print('(changed: True)')
|
||||
|
||||
@@ -3,10 +3,8 @@
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from kombu import Exchange, Queue
|
||||
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from awx.main.dispatch.worker import AWXConsumer, CallbackBrokerWorker
|
||||
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -18,23 +16,15 @@ class Command(BaseCommand):
|
||||
help = 'Launch the job callback receiver'
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
consumer = None
|
||||
try:
|
||||
consumer = AWXConsumer(
|
||||
'callback_receiver',
|
||||
conn,
|
||||
CallbackBrokerWorker(),
|
||||
[
|
||||
Queue(
|
||||
settings.CALLBACK_QUEUE,
|
||||
Exchange(settings.CALLBACK_QUEUE, type='direct'),
|
||||
routing_key=settings.CALLBACK_QUEUE
|
||||
)
|
||||
]
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
print('Terminating Callback Receiver')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
consumer = None
|
||||
try:
|
||||
consumer = AWXConsumerRedis(
|
||||
'callback_receiver',
|
||||
CallbackBrokerWorker(),
|
||||
queues=[getattr(settings, 'CALLBACK_QUEUE', '')],
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
print('Terminating Callback Receiver')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
|
||||
@@ -1,21 +1,16 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
import os
|
||||
import logging
|
||||
from multiprocessing import Process
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection, connections
|
||||
from kombu import Exchange, Queue
|
||||
from django.db import connection as django_connection
|
||||
|
||||
from awx.main.utils.handlers import AWXProxyHandler
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
|
||||
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
||||
from awx.main.dispatch import periodic
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
@@ -55,41 +50,21 @@ class Command(BaseCommand):
|
||||
|
||||
# spawn a daemon thread to periodically enqueues scheduled tasks
|
||||
# (like the node heartbeat)
|
||||
cease_continuous_run = periodic.run_continuously()
|
||||
periodic.run_continuously()
|
||||
|
||||
reaper.reap()
|
||||
consumer = None
|
||||
|
||||
# don't ship external logs inside the dispatcher's parent process
|
||||
# this exists to work around a race condition + deadlock bug on fork
|
||||
# in cpython itself:
|
||||
# https://bugs.python.org/issue37429
|
||||
AWXProxyHandler.disable()
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
try:
|
||||
bcast = 'tower_broadcast_all'
|
||||
queues = [
|
||||
Queue(q, Exchange(q), routing_key=q)
|
||||
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
|
||||
]
|
||||
queues.append(
|
||||
Queue(
|
||||
construct_bcast_queue_name(bcast),
|
||||
exchange=Exchange(bcast, type='fanout'),
|
||||
routing_key=bcast,
|
||||
reply=True
|
||||
)
|
||||
)
|
||||
consumer = AWXConsumer(
|
||||
'dispatcher',
|
||||
conn,
|
||||
TaskWorker(),
|
||||
queues,
|
||||
AutoscalePool(min_workers=4)
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
cease_continuous_run.set()
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
try:
|
||||
queues = ['tower_broadcast_all', get_local_queuename()]
|
||||
consumer = AWXConsumerPG(
|
||||
'dispatcher',
|
||||
TaskWorker(),
|
||||
queues,
|
||||
AutoscalePool(min_workers=4)
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
|
||||
134
awx/main/management/commands/run_wsbroadcast.py
Normal file
134
awx/main/management/commands/run_wsbroadcast.py
Normal file
@@ -0,0 +1,134 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
import logging
|
||||
import asyncio
|
||||
import datetime
|
||||
import re
|
||||
import redis
|
||||
from datetime import datetime as dt
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db.models import Q
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
BroadcastWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.wsbroadcast import BroadcastWebsocketManager
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Launch the websocket broadcaster'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--status', dest='status', action='store_true',
|
||||
help='print the internal state of any running broadcast websocket')
|
||||
|
||||
@classmethod
|
||||
def display_len(cls, s):
|
||||
return len(re.sub('\x1b.*?m', '', s))
|
||||
|
||||
@classmethod
|
||||
def _format_lines(cls, host_stats, padding=5):
|
||||
widths = [0 for i in host_stats[0]]
|
||||
for entry in host_stats:
|
||||
for i, e in enumerate(entry):
|
||||
if Command.display_len(e) > widths[i]:
|
||||
widths[i] = Command.display_len(e)
|
||||
paddings = [padding for i in widths]
|
||||
|
||||
lines = []
|
||||
for entry in host_stats:
|
||||
line = ""
|
||||
for pad, width, value in zip(paddings, widths, entry):
|
||||
if len(value) > Command.display_len(value):
|
||||
width += len(value) - Command.display_len(value)
|
||||
total_width = width + pad
|
||||
line += f'{value:{total_width}}'
|
||||
lines.append(line)
|
||||
return lines
|
||||
|
||||
@classmethod
|
||||
def get_connection_status(cls, me, hostnames, data):
|
||||
host_stats = [('hostname', 'state', 'start time', 'duration (sec)')]
|
||||
for h in hostnames:
|
||||
connection_color = '91' # red
|
||||
h_safe = safe_name(h)
|
||||
prefix = f'awx_{h_safe}'
|
||||
connection_state = data.get(f'{prefix}_connection', 'N/A')
|
||||
connection_started = 'N/A'
|
||||
connection_duration = 'N/A'
|
||||
if connection_state is None:
|
||||
connection_state = 'unknown'
|
||||
if connection_state == 'connected':
|
||||
connection_color = '92' # green
|
||||
connection_started = data.get(f'{prefix}_connection_start', 'Error')
|
||||
if connection_started != 'Error':
|
||||
connection_started = datetime.datetime.fromtimestamp(connection_started)
|
||||
connection_duration = int((dt.now() - connection_started).total_seconds())
|
||||
|
||||
connection_state = f'\033[{connection_color}m{connection_state}\033[0m'
|
||||
|
||||
host_stats.append((h, connection_state, str(connection_started), str(connection_duration)))
|
||||
|
||||
return host_stats
|
||||
|
||||
@classmethod
|
||||
def get_connection_stats(cls, me, hostnames, data):
|
||||
host_stats = [('hostname', 'total', 'per minute')]
|
||||
for h in hostnames:
|
||||
h_safe = safe_name(h)
|
||||
prefix = f'awx_{h_safe}'
|
||||
messages_total = data.get(f'{prefix}_messages_received', '0')
|
||||
messages_per_minute = data.get(f'{prefix}_messages_received_per_minute', '0')
|
||||
|
||||
host_stats.append((h, str(int(messages_total)), str(int(messages_per_minute))))
|
||||
|
||||
return host_stats
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
try:
|
||||
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
|
||||
except redis.exceptions.ConnectionError as e:
|
||||
print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
|
||||
return
|
||||
|
||||
data = {}
|
||||
for family in stats_all:
|
||||
if family.type == 'gauge' and len(family.samples) > 1:
|
||||
for sample in family.samples:
|
||||
if sample.value >= 1:
|
||||
data[family.name] = sample.labels[family.name]
|
||||
break
|
||||
else:
|
||||
data[family.name] = family.samples[0].value
|
||||
me = Instance.objects.me()
|
||||
hostnames = [i.hostname for i in Instance.objects.exclude(Q(hostname=me.hostname) | Q(rampart_groups__controller__isnull=False))]
|
||||
|
||||
host_stats = Command.get_connection_status(me, hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'Broadcast websocket connection status from "{me.hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
host_stats = Command.get_connection_stats(me, hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'\nBroadcast websocket connection stats from "{me.hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
broadcast_websocket_mgr = BroadcastWebsocketManager()
|
||||
task = broadcast_websocket_mgr.start()
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(task)
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Websocket Broadcaster')
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import os
|
||||
|
||||
from django.db import models
|
||||
from django.conf import settings
|
||||
@@ -78,8 +79,7 @@ class HostManager(models.Manager):
|
||||
self.core_filters = {}
|
||||
|
||||
qs = qs & q
|
||||
unique_by_name = qs.order_by('name', 'pk').distinct('name')
|
||||
return qs.filter(pk__in=unique_by_name)
|
||||
return qs.order_by('name', 'pk').distinct('name')
|
||||
return qs
|
||||
|
||||
|
||||
@@ -115,7 +115,7 @@ class InstanceManager(models.Manager):
|
||||
return node[0]
|
||||
raise RuntimeError("No instance found with the current cluster host id")
|
||||
|
||||
def register(self, uuid=None, hostname=None):
|
||||
def register(self, uuid=None, hostname=None, ip_address=None):
|
||||
if not uuid:
|
||||
uuid = settings.SYSTEM_UUID
|
||||
if not hostname:
|
||||
@@ -123,13 +123,23 @@ class InstanceManager(models.Manager):
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
instance = self.filter(hostname=hostname)
|
||||
if instance.exists():
|
||||
return (False, instance[0])
|
||||
instance = self.create(uuid=uuid, hostname=hostname, capacity=0)
|
||||
instance = instance.get()
|
||||
if instance.ip_address != ip_address:
|
||||
instance.ip_address = ip_address
|
||||
instance.save(update_fields=['ip_address'])
|
||||
return (True, instance)
|
||||
else:
|
||||
return (False, instance)
|
||||
instance = self.create(uuid=uuid,
|
||||
hostname=hostname,
|
||||
ip_address=ip_address,
|
||||
capacity=0)
|
||||
return (True, instance)
|
||||
|
||||
def get_or_register(self):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
return self.register()
|
||||
pod_ip = os.environ.get('MY_POD_IP')
|
||||
return self.register(ip_address=pod_ip)
|
||||
else:
|
||||
return (False, self.me())
|
||||
|
||||
|
||||
@@ -12,23 +12,19 @@ import urllib.parse
|
||||
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User
|
||||
from django.db.models.signals import post_save
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
from django.db import IntegrityError, connection
|
||||
from django.utils.functional import curry
|
||||
from django.db import connection
|
||||
from django.shortcuts import get_object_or_404, redirect
|
||||
from django.apps import apps
|
||||
from django.utils.deprecation import MiddlewareMixin
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.urls import reverse, resolve
|
||||
|
||||
from awx.main.models import ActivityStream
|
||||
from awx.main.utils.named_url_graph import generate_graph, GraphNode
|
||||
from awx.conf import fields, register
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.middleware')
|
||||
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
|
||||
perf_logger = logging.getLogger('awx.analytics.performance')
|
||||
|
||||
|
||||
@@ -76,61 +72,6 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
|
||||
return filepath
|
||||
|
||||
|
||||
class ActivityStreamMiddleware(threading.local, MiddlewareMixin):
|
||||
|
||||
def __init__(self, get_response=None):
|
||||
self.disp_uid = None
|
||||
self.instance_ids = []
|
||||
super().__init__(get_response)
|
||||
|
||||
def process_request(self, request):
|
||||
if hasattr(request, 'user') and request.user.is_authenticated:
|
||||
user = request.user
|
||||
else:
|
||||
user = None
|
||||
|
||||
set_actor = curry(self.set_actor, user)
|
||||
self.disp_uid = str(uuid.uuid1())
|
||||
self.instance_ids = []
|
||||
post_save.connect(set_actor, sender=ActivityStream, dispatch_uid=self.disp_uid, weak=False)
|
||||
|
||||
def process_response(self, request, response):
|
||||
drf_request = getattr(request, 'drf_request', None)
|
||||
drf_user = getattr(drf_request, 'user', None)
|
||||
if self.disp_uid is not None:
|
||||
post_save.disconnect(dispatch_uid=self.disp_uid)
|
||||
|
||||
for instance in ActivityStream.objects.filter(id__in=self.instance_ids):
|
||||
if drf_user and drf_user.id:
|
||||
instance.actor = drf_user
|
||||
try:
|
||||
instance.save(update_fields=['actor'])
|
||||
analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
|
||||
extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
|
||||
actor=drf_user.username, operation=instance.operation,
|
||||
object1=instance.object1, object2=instance.object2))
|
||||
except IntegrityError:
|
||||
logger.debug("Integrity Error saving Activity Stream instance for id : " + str(instance.id))
|
||||
# else:
|
||||
# obj1_type_actual = instance.object1_type.split(".")[-1]
|
||||
# if obj1_type_actual in ("InventoryUpdate", "ProjectUpdate", "Job") and instance.id is not None:
|
||||
# instance.delete()
|
||||
|
||||
self.instance_ids = []
|
||||
return response
|
||||
|
||||
def set_actor(self, user, sender, instance, **kwargs):
|
||||
if sender == ActivityStream:
|
||||
if isinstance(user, User) and instance.actor is None:
|
||||
user = User.objects.filter(id=user.id)
|
||||
if user.exists():
|
||||
user = user[0]
|
||||
instance.actor = user
|
||||
else:
|
||||
if instance.id not in self.instance_ids:
|
||||
self.instance_ids.append(instance.id)
|
||||
|
||||
|
||||
class SessionTimeoutMiddleware(MiddlewareMixin):
|
||||
"""
|
||||
Resets the session timeout for both the UI and the actual session for the API
|
||||
@@ -192,21 +133,41 @@ class URLModificationMiddleware(MiddlewareMixin):
|
||||
)
|
||||
super().__init__(get_response)
|
||||
|
||||
def _named_url_to_pk(self, node, named_url):
|
||||
kwargs = {}
|
||||
if not node.populate_named_url_query_kwargs(kwargs, named_url):
|
||||
return named_url
|
||||
return str(get_object_or_404(node.model, **kwargs).pk)
|
||||
@staticmethod
|
||||
def _hijack_for_old_jt_name(node, kwargs, named_url):
|
||||
try:
|
||||
int(named_url)
|
||||
return False
|
||||
except ValueError:
|
||||
pass
|
||||
JobTemplate = node.model
|
||||
name = urllib.parse.unquote(named_url)
|
||||
return JobTemplate.objects.filter(name=name).order_by('organization__created').first()
|
||||
|
||||
def _convert_named_url(self, url_path):
|
||||
@classmethod
|
||||
def _named_url_to_pk(cls, node, resource, named_url):
|
||||
kwargs = {}
|
||||
if node.populate_named_url_query_kwargs(kwargs, named_url):
|
||||
return str(get_object_or_404(node.model, **kwargs).pk)
|
||||
if resource == 'job_templates' and '++' not in named_url:
|
||||
# special case for deprecated job template case
|
||||
# will not raise a 404 on its own
|
||||
jt = cls._hijack_for_old_jt_name(node, kwargs, named_url)
|
||||
if jt:
|
||||
return str(jt.pk)
|
||||
return named_url
|
||||
|
||||
@classmethod
|
||||
def _convert_named_url(cls, url_path):
|
||||
url_units = url_path.split('/')
|
||||
# If the identifier is an empty string, it is always invalid.
|
||||
if len(url_units) < 6 or url_units[1] != 'api' or url_units[2] not in ['v2'] or not url_units[4]:
|
||||
return url_path
|
||||
resource = url_units[3]
|
||||
if resource in settings.NAMED_URL_MAPPINGS:
|
||||
url_units[4] = self._named_url_to_pk(settings.NAMED_URL_GRAPH[settings.NAMED_URL_MAPPINGS[resource]],
|
||||
url_units[4])
|
||||
url_units[4] = cls._named_url_to_pk(
|
||||
settings.NAMED_URL_GRAPH[settings.NAMED_URL_MAPPINGS[resource]],
|
||||
resource, url_units[4])
|
||||
return '/'.join(url_units)
|
||||
|
||||
def process_request(self, request):
|
||||
|
||||
@@ -464,7 +464,7 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='instance_group',
|
||||
field=models.ForeignKey(on_delete=models.SET_NULL, default=None, blank=True, to='main.InstanceGroup', help_text='The Rampart/Instance group the job was run under', null=True),
|
||||
field=models.ForeignKey(on_delete=models.SET_NULL, default=None, blank=True, to='main.InstanceGroup', help_text='The Instance group the job was run under', null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjobtemplate',
|
||||
|
||||
@@ -16,6 +16,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='instance_group',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The Rampart/Instance group the job was run under', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, to='main.InstanceGroup'),
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The Instance group the job was run under', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, to='main.InstanceGroup'),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
# Generated by Django 2.2.4 on 2019-08-07 19:56
|
||||
|
||||
import awx.main.utils.polymorphic
|
||||
import awx.main.fields
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
from awx.main.migrations._rbac import (
|
||||
rebuild_role_parentage, rebuild_role_hierarchy,
|
||||
migrate_ujt_organization, migrate_ujt_organization_backward,
|
||||
restore_inventory_admins, restore_inventory_admins_backward
|
||||
)
|
||||
|
||||
|
||||
def rebuild_jt_parents(apps, schema_editor):
|
||||
rebuild_role_parentage(apps, schema_editor, models=('jobtemplate',))
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0108_v370_unifiedjob_dependencies_processed'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# backwards parents and ancestors caching
|
||||
migrations.RunPython(migrations.RunPython.noop, rebuild_jt_parents),
|
||||
# add new organization field for JT and all other unified jobs
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='tmp_organization',
|
||||
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this unified job.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.Organization'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='tmp_organization',
|
||||
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this template.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.Organization'),
|
||||
),
|
||||
# while new and old fields exist, copy the organization fields
|
||||
migrations.RunPython(migrate_ujt_organization, migrate_ujt_organization_backward),
|
||||
# with data saved, remove old fields
|
||||
migrations.RemoveField(
|
||||
model_name='project',
|
||||
name='organization',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjobtemplate',
|
||||
name='organization',
|
||||
),
|
||||
# now, without safely rename the new field without conflicts from old field
|
||||
migrations.RenameField(
|
||||
model_name='unifiedjobtemplate',
|
||||
old_name='tmp_organization',
|
||||
new_name='organization',
|
||||
),
|
||||
migrations.RenameField(
|
||||
model_name='unifiedjob',
|
||||
old_name='tmp_organization',
|
||||
new_name='organization',
|
||||
),
|
||||
# parentage of job template roles has genuinely changed at this point
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='admin_role',
|
||||
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.job_template_admin_role'], related_name='+', to='main.Role'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='execute_role',
|
||||
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role', 'organization.execute_role'], related_name='+', to='main.Role'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='jobtemplate',
|
||||
name='read_role',
|
||||
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], related_name='+', to='main.Role'),
|
||||
),
|
||||
# Re-compute the role parents and ancestors caching
|
||||
migrations.RunPython(rebuild_jt_parents, migrations.RunPython.noop),
|
||||
# for all permissions that will be removed, make them explicit
|
||||
migrations.RunPython(restore_inventory_admins, restore_inventory_admins_backward),
|
||||
]
|
||||
18
awx/main/migrations/0110_v370_instance_ip_address.py
Normal file
18
awx/main/migrations/0110_v370_instance_ip_address.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.8 on 2020-02-12 17:55
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0109_v370_job_template_organization_field'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='instance',
|
||||
name='ip_address',
|
||||
field=models.CharField(blank=True, default=None, max_length=50, null=True, unique=True),
|
||||
),
|
||||
]
|
||||
16
awx/main/migrations/0111_v370_delete_channelgroup.py
Normal file
16
awx/main/migrations/0111_v370_delete_channelgroup.py
Normal file
@@ -0,0 +1,16 @@
|
||||
# Generated by Django 2.2.8 on 2020-02-17 14:50
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0110_v370_instance_ip_address'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.DeleteModel(
|
||||
name='ChannelGroup',
|
||||
),
|
||||
]
|
||||
61
awx/main/migrations/0112_v370_workflow_node_identifier.py
Normal file
61
awx/main/migrations/0112_v370_workflow_node_identifier.py
Normal file
@@ -0,0 +1,61 @@
|
||||
# Generated by Django 2.2.8 on 2020-03-14 02:29
|
||||
|
||||
from django.db import migrations, models
|
||||
import uuid
|
||||
import logging
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
|
||||
def create_uuid(apps, schema_editor):
|
||||
WorkflowJobTemplateNode = apps.get_model('main', 'WorkflowJobTemplateNode')
|
||||
ct = 0
|
||||
for node in WorkflowJobTemplateNode.objects.iterator():
|
||||
node.identifier = uuid.uuid4()
|
||||
node.save(update_fields=['identifier'])
|
||||
ct += 1
|
||||
if ct:
|
||||
logger.info(f'Automatically created uuid4 identifier for {ct} workflow nodes')
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0111_v370_delete_channelgroup'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='workflowjobnode',
|
||||
name='identifier',
|
||||
field=models.CharField(blank=True, help_text='An identifier coresponding to the workflow job template node that this node was created from.', max_length=512),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjobtemplatenode',
|
||||
name='identifier',
|
||||
field=models.CharField(blank=True, null=True, help_text='An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node.', max_length=512),
|
||||
),
|
||||
migrations.RunPython(create_uuid, migrations.RunPython.noop), # this fixes the uuid4 issue
|
||||
migrations.AlterField(
|
||||
model_name='workflowjobtemplatenode',
|
||||
name='identifier',
|
||||
field=models.CharField(default=uuid.uuid4, help_text='An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node.', max_length=512),
|
||||
),
|
||||
migrations.AlterUniqueTogether(
|
||||
name='workflowjobtemplatenode',
|
||||
unique_together={('identifier', 'workflow_job_template')},
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name='workflowjobnode',
|
||||
index=models.Index(fields=['identifier', 'workflow_job'], name='main_workfl_identif_87b752_idx'),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name='workflowjobnode',
|
||||
index=models.Index(fields=['identifier'], name='main_workfl_identif_efdfe8_idx'),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name='workflowjobtemplatenode',
|
||||
index=models.Index(fields=['identifier'], name='main_workfl_identif_0cc025_idx'),
|
||||
),
|
||||
]
|
||||
118
awx/main/migrations/0113_v370_event_bigint.py
Normal file
118
awx/main/migrations/0113_v370_event_bigint.py
Normal file
@@ -0,0 +1,118 @@
|
||||
# Generated by Django 2.2.8 on 2020-02-21 16:31
|
||||
|
||||
from django.db import migrations, models, connection
|
||||
|
||||
|
||||
def migrate_event_data(apps, schema_editor):
    """Swap each event table for an empty clone whose ``id`` is a bigint.

    see: https://github.com/ansible/awx/issues/6010

    the goal of this function is to end with event tables (e.g., main_jobevent)
    that have a bigint primary key (because the old usage of an integer
    numeric isn't enough, as its range is about 2.1B, see:
    https://www.postgresql.org/docs/9.1/datatype-numeric.html)

    unfortunately, we can't do this with a simple ALTER TABLE, because
    for tables with hundreds of millions or billions of rows, the ALTER TABLE
    can take *hours* on modest hardware.

    the approach in this migration means that post-migration, event data will
    *not* immediately show up, but will be repopulated over time progressively;
    the trade-off here is not having to wait hours for the full data migration
    before you can start and run AWX again (including new playbook runs).

    NOTE(review): the f-string SQL below interpolates only the fixed table
    names from the tuple, so it is not an injection vector here.
    """
    for tblname in (
        'main_jobevent', 'main_inventoryupdateevent',
        'main_projectupdateevent', 'main_adhoccommandevent',
        'main_systemjobevent'
    ):
        with connection.cursor() as cursor:
            # rename the current event table
            cursor.execute(
                f'ALTER TABLE {tblname} RENAME TO _old_{tblname};'
            )
            # create a *new* table with the same schema
            cursor.execute(
                f'CREATE TABLE {tblname} (LIKE _old_{tblname} INCLUDING ALL);'
            )
            # alter the *new* table so that the primary key is a big int
            cursor.execute(
                f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;'
            )

            # recreate counter for the new table's primary key to
            # start where the *old* table left off (we have to do this because the
            # counter changed from an int to a bigint)
            cursor.execute(f'DROP SEQUENCE IF EXISTS "{tblname}_id_seq" CASCADE;')
            cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
            cursor.execute(
                f'ALTER TABLE "{tblname}" ALTER COLUMN "id" '
                f"SET DEFAULT nextval('{tblname}_id_seq');"
            )
            cursor.execute(
                f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);"
            )

            # replace the BTREE index on main_jobevent.job_id with
            # a BRIN index to drastically improve per-UJ lookup performance
            # see: https://info.crunchydata.com/blog/postgresql-brin-indexes-big-data-performance-with-minimal-storage
            if tblname == 'main_jobevent':
                # NOTE(review): fetchone()[0] assumes the btree index exists;
                # if it were ever absent this raises TypeError — confirm the
                # index is guaranteed by prior migrations.
                cursor.execute("SELECT indexname FROM pg_indexes WHERE tablename='main_jobevent' AND indexdef LIKE '%USING btree (job_id)';")
                old_index = cursor.fetchone()[0]
                cursor.execute(f'DROP INDEX {old_index}')
                cursor.execute('CREATE INDEX main_jobevent_job_id_brin_idx ON main_jobevent USING brin (job_id);')

            # remove all of the indexes and constraints from the old table
            # (they just slow down the data migration)
            cursor.execute(f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename='_old_{tblname}' AND indexname != '{tblname}_pkey';")
            indexes = cursor.fetchall()

            cursor.execute(f"SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = '_old_{tblname}'::regclass AND conname != '{tblname}_pkey';")
            constraints = cursor.fetchall()

            for indexname, indexdef in indexes:
                cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
            for conname, contype, condef in constraints:
                cursor.execute(f'ALTER TABLE _old_{tblname} DROP CONSTRAINT IF EXISTS {conname}')
|
||||
|
||||
|
||||
class FakeAlterField(migrations.AlterField):
    """AlterField that records the state change but emits no SQL.

    Used so Django's migration state matches the BigAutoField columns that
    migrate_event_data already created via raw SQL.
    """

    def database_forwards(self, *args):
        # this is intentionally left blank, because we're
        # going to accomplish the migration with some custom raw SQL
        pass
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
    """Switch every event table's primary key to bigint (see awx#6010)."""

    dependencies = [
        ('main', '0112_v370_workflow_node_identifier'),
    ]

    operations = [
        # Perform the table swap in raw SQL — fast even on huge tables.
        migrations.RunPython(migrate_event_data),
        # FakeAlterField updates Django's in-memory migration state to
        # BigAutoField without issuing its own (hours-long) ALTER TABLE;
        # the real DDL already ran in migrate_event_data above.
        FakeAlterField(
            model_name='adhoccommandevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='inventoryupdateevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='jobevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='projectupdateevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        FakeAlterField(
            model_name='systemjobevent',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
||||
@@ -0,0 +1,39 @@
|
||||
# Generated by Django 2.2.11 on 2020-04-03 00:11
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def remove_manual_inventory_sources(apps, schema_editor):
    '''Previously we would automatically create inventory sources after
    Group creation and we would use the parent Group as our interface for the user.
    During that process we would create InventorySource that had a source of "manual".
    '''
    # "Manual" sources are stored with an empty source string. Purge the
    # updates first, then the sources themselves — same order as before.
    for legacy_model in ('InventoryUpdate', 'InventorySource'):
        apps.get_model('main', legacy_model).objects.filter(source='').delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
    """Drop legacy "manual" inventory sources and the deprecated group link."""

    dependencies = [
        ('main', '0113_v370_event_bigint'),
    ]

    operations = [
        # The Group<->InventorySource link was already deprecated; remove it.
        migrations.RemoveField(
            model_name='inventorysource',
            name='deprecated_group',
        ),
        # Purge rows with the old empty ("manual") source value before the
        # narrowed choices below stop permitting it.
        # NOTE(review): no reverse callable is given, so this migration is
        # irreversible past this point.
        migrations.RunPython(remove_manual_inventory_sources),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
        ),
    ]
|
||||
@@ -1,6 +1,9 @@
|
||||
import logging
|
||||
from time import time
|
||||
|
||||
from django.db.models import Subquery, OuterRef, F
|
||||
|
||||
from awx.main.fields import update_role_parentage_for_instance
|
||||
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding
|
||||
|
||||
logger = logging.getLogger('rbac_migrations')
|
||||
@@ -10,11 +13,11 @@ def create_roles(apps, schema_editor):
|
||||
'''
|
||||
Implicit role creation happens in our post_save hook for all of our
|
||||
resources. Here we iterate through all of our resource types and call
|
||||
.save() to ensure all that happens for every object in the system before we
|
||||
get busy with the actual migration work.
|
||||
.save() to ensure all that happens for every object in the system.
|
||||
|
||||
This gets run after migrate_users, which does role creation for users a
|
||||
little differently.
|
||||
This can be used whenever new roles are introduced in a migration to
|
||||
create those roles for pre-existing objects that did not previously
|
||||
have them created via signals.
|
||||
'''
|
||||
|
||||
models = [
|
||||
@@ -35,7 +38,189 @@ def create_roles(apps, schema_editor):
|
||||
obj.save()
|
||||
|
||||
|
||||
def delete_all_user_roles(apps, schema_editor):
    """Delete every RBAC Role row attached to the auth User model."""
    content_type_model = apps.get_model('contenttypes', 'ContentType')
    auth_user_model = apps.get_model('auth', 'User')
    role_model = apps.get_model('main', 'Role')

    user_ct = content_type_model.objects.get_for_model(auth_user_model)
    # iterator() keeps memory flat; per-row delete() runs each row's
    # deletion handling individually, exactly as the bulk loop did before.
    for user_role in role_model.objects.filter(content_type=user_ct).iterator():
        user_role.delete()
|
||||
|
||||
|
||||
# Maps each unified-model subclass name to the relation through which its
# organization was implicitly derived before the explicit unified org field.
# A value of None means the subclass table carried its own organization
# column; a model absent from this dict has no organization migration.
UNIFIED_ORG_LOOKUPS = {
    # Job Templates had an implicit organization via their project
    'jobtemplate': 'project',
    # Inventory Sources had an implicit organization via their inventory
    'inventorysource': 'inventory',
    # Projects had an explicit organization in their subclass table
    'project': None,
    # Workflow JTs also had an explicit organization in their subclass table
    'workflowjobtemplate': None,
    # Jobs inherited project from job templates as a convenience field
    'job': 'project',
    # Inventory Sources had an convenience field of inventory
    'inventoryupdate': 'inventory',
    # Project Updates did not have a direct organization field, obtained it from project
    'projectupdate': 'project',
    # Workflow Jobs are handled same as project updates
    # Sliced jobs are a special case, but old data is not given special treatment for simplicity
    'workflowjob': 'workflow_job_template',
    # AdHocCommands do not have a template, but still migrate them
    'adhoccommand': 'inventory'
}
|
||||
|
||||
|
||||
def implicit_org_subquery(UnifiedClass, cls, backward=False):
    """Returns a subquery that returns the so-called organization for objects
    in the class in question, before migration to the explicit unified org field.
    In some cases, this can still be applied post-migration.

    Returns None when ``cls`` has no entry in UNIFIED_ORG_LOOKUPS.
    """
    if cls._meta.model_name not in UNIFIED_ORG_LOOKUPS:
        return None
    cls_name = cls._meta.model_name
    source_field = UNIFIED_ORG_LOOKUPS[cls_name]

    # reverse accessor from the unified base model back to the subclass row
    unified_field = UnifiedClass._meta.get_field(cls_name)
    unified_ptr = unified_field.remote_field.name
    if backward:
        # reverse direction: read the org back off tmp_organization
        qs = UnifiedClass.objects.filter(**{cls_name: OuterRef('id')}).order_by().values_list('tmp_organization')[:1]
    elif source_field is None:
        # the subclass table carries its own organization column
        qs = cls.objects.filter(**{unified_ptr: OuterRef('id')}).order_by().values_list('organization')[:1]
    else:
        # organization is reached through an intermediary model
        # (e.g. job template -> project -> organization)
        intermediary_field = cls._meta.get_field(source_field)
        intermediary_model = intermediary_field.related_model
        intermediary_reverse_rel = intermediary_field.remote_field.name
        qs = intermediary_model.objects.filter(**{
            # this filter leverages the fact that the Unified models have same pk as subclasses.
            # For instance... filters projects used in job template, where that job template
            # has same id same as UJT from the outer reference (which it does)
            intermediary_reverse_rel: OuterRef('id')}
        ).order_by().values_list('organization')[:1]
    return Subquery(qs)
|
||||
|
||||
|
||||
def _migrate_unified_organization(apps, unified_cls_name, backward=False):
    """Given a unified base model (either UJT or UJ),
    saves organization for those objects to the temporary migration
    variable tmp_organization on the unified model
    (optimized method — one bulk UPDATE with a subquery per subclass).

    Backward, it instead writes tmp_organization back onto subclasses
    that have no explicit organization column of their own.
    """
    start = time()
    UnifiedClass = apps.get_model('main', unified_cls_name)
    ContentType = apps.get_model('contenttypes', 'ContentType')

    for cls in UnifiedClass.__subclasses__():
        cls_name = cls._meta.model_name
        # subclasses with an explicit org column keep their data on reverse
        if backward and UNIFIED_ORG_LOOKUPS.get(cls_name, 'not-found') is not None:
            logger.debug('Not reverse migrating {}, existing data should remain valid'.format(cls_name))
            continue
        logger.debug('{}Migrating {} to new organization field'.format('Reverse ' if backward else '', cls_name))

        sub_qs = implicit_org_subquery(UnifiedClass, cls, backward=backward)
        if sub_qs is None:
            logger.debug('Class {} has no organization migration'.format(cls_name))
            continue

        this_ct = ContentType.objects.get_for_model(cls)
        if backward:
            r = cls.objects.order_by().update(organization=sub_qs)
        else:
            # filter by polymorphic ctype so each subclass only touches its own rows
            r = UnifiedClass.objects.order_by().filter(polymorphic_ctype=this_ct).update(tmp_organization=sub_qs)
        if r:
            logger.info('Organization migration on {} affected {} rows.'.format(cls_name, r))
    logger.info('Unified organization migration completed in {:.4f} seconds'.format(time() - start))
|
||||
|
||||
|
||||
def migrate_ujt_organization(apps, schema_editor):
    '''Move organization field to UJT and UJ models'''
    for unified_name in ('UnifiedJobTemplate', 'UnifiedJob'):
        _migrate_unified_organization(apps, unified_name)
|
||||
|
||||
|
||||
def migrate_ujt_organization_backward(apps, schema_editor):
    '''Move organization field from UJT and UJ models back to their original places'''
    for unified_name in ('UnifiedJobTemplate', 'UnifiedJob'):
        _migrate_unified_organization(apps, unified_name, backward=True)
|
||||
|
||||
|
||||
def _restore_inventory_admins(apps, schema_editor, backward=False):
    """With the JT.organization changes, admins of organizations connected to
    job templates via inventory will have their permissions demoted.
    This maintains current permissions over the migration by granting the
    permissions they used to have explicitly on the JT itself.

    Backward, it removes exactly the memberships the forward pass granted.
    """
    start = time()
    JobTemplate = apps.get_model('main', 'JobTemplate')
    User = apps.get_model('auth', 'User')
    changed_ct = 0
    # only JTs whose inventory org differs from their project org are affected
    jt_qs = JobTemplate.objects.filter(inventory__isnull=False)
    jt_qs = jt_qs.exclude(inventory__organization=F('project__organization'))
    jt_qs = jt_qs.only('id', 'admin_role_id', 'execute_role_id', 'inventory_id')
    for jt in jt_qs.iterator():
        org = jt.inventory.organization
        for jt_role, org_roles in (
            ('admin_role', ('admin_role', 'job_template_admin_role',)),
            ('execute_role', ('execute_role',))
        ):
            role_id = getattr(jt, '{}_id'.format(jt_role))

            user_qs = User.objects
            if not backward:
                # In this specific case, the name for the org role and JT roles were the same
                org_role_ids = [getattr(org, '{}_id'.format(role_name)) for role_name in org_roles]
                user_qs = user_qs.filter(roles__in=org_role_ids)
                # bizarre migration behavior - ancestors / descendents of
                # migration version of Role model is reversed, using current model briefly
                ancestor_ids = list(
                    Role.objects.filter(descendents=role_id).values_list('id', flat=True)
                )
                # same as Role.__contains__, filter for "user in jt.admin_role"
                user_qs = user_qs.exclude(roles__in=ancestor_ids)
            else:
                # use the database to filter intersection of users without access
                # to the JT role and either organization role
                user_qs = user_qs.filter(roles__in=[org.admin_role_id, org.execute_role_id])
                # in reverse, intersection of users who have both
                user_qs = user_qs.filter(roles=role_id)

            user_ids = list(user_qs.values_list('id', flat=True))
            if not user_ids:
                continue

            role = getattr(jt, jt_role)
            logger.debug('{} {} on jt {} for users {} via inventory.organization {}'.format(
                'Removing' if backward else 'Setting',
                jt_role, jt.pk, user_ids, org.pk
            ))
            if not backward:
                # forward: grant the JT role explicitly (in reverse, the
                # explicit role is redundant and is removed below)
                role.members.add(*user_ids)
            else:
                role.members.remove(*user_ids)
            changed_ct += len(user_ids)

    if changed_ct:
        logger.info('{} explicit JT permission for {} users in {:.4f} seconds'.format(
            'Removed' if backward else 'Added',
            changed_ct, time() - start
        ))
|
||||
|
||||
|
||||
def restore_inventory_admins(apps, schema_editor):
    # Forward data-migration entry point; see _restore_inventory_admins.
    _restore_inventory_admins(apps, schema_editor)
|
||||
|
||||
|
||||
def restore_inventory_admins_backward(apps, schema_editor):
    # Reverse data-migration entry point; undoes restore_inventory_admins.
    _restore_inventory_admins(apps, schema_editor, backward=True)
|
||||
|
||||
|
||||
def rebuild_role_hierarchy(apps, schema_editor):
|
||||
'''
|
||||
This should be called in any migration when ownerships are changed.
|
||||
Ex. I remove a user from the admin_role of a credential.
|
||||
Ancestors are cached from parents for performance, this re-computes ancestors.
|
||||
'''
|
||||
logger.info('Computing role roots..')
|
||||
start = time()
|
||||
roots = Role.objects \
|
||||
@@ -46,14 +231,74 @@ def rebuild_role_hierarchy(apps, schema_editor):
|
||||
start = time()
|
||||
Role.rebuild_role_ancestor_list(roots, [])
|
||||
stop = time()
|
||||
logger.info('Rebuild completed in %f seconds' % (stop - start))
|
||||
logger.info('Rebuild ancestors completed in %f seconds' % (stop - start))
|
||||
logger.info('Done.')
|
||||
|
||||
|
||||
def delete_all_user_roles(apps, schema_editor):
|
||||
def rebuild_role_parentage(apps, schema_editor, models=None):
    '''
    This should be called in any migration when any parent_role entry
    is modified so that the cached parent fields will be updated. Ex:
      foo_role = ImplicitRoleField(
        parent_role=['bar_role']  # change to parent_role=['admin_role']
      )

    This is like rebuild_role_hierarchy, but that method updates ancestors,
    whereas this method updates parents.
    '''
    start = time()
    seen_models = set()
    model_ct = 0   # count of resources whose role parents actually changed
    noop_ct = 0    # count of resources already consistent
    ContentType = apps.get_model('contenttypes', "ContentType")
    Role = apps.get_model('main', "Role")
    additions = set()
    removals = set()

    role_qs = Role.objects
    if models:
        # update_role_parentage_for_instance is expensive
        # if the models have been downselected, ignore those which are not in the list
        ct_ids = list(ContentType.objects.filter(
            model__in=[name.lower() for name in models]
        ).values_list('id', flat=True))
        role_qs = role_qs.filter(content_type__in=ct_ids)

    for role in role_qs.iterator():
        if not role.object_id:
            continue
        # process each (content_type, object) resource only once, even
        # though it owns several roles
        model_tuple = (role.content_type_id, role.object_id)
        if model_tuple in seen_models:
            continue
        seen_models.add(model_tuple)

        # The GenericForeignKey does not work right in migrations
        # with the usage as role.content_object
        # so we do the lookup ourselves with current migration models
        ct = role.content_type
        app = ct.app_label
        ct_model = apps.get_model(app, ct.model)
        content_object = ct_model.objects.get(pk=role.object_id)

        parents_added, parents_removed = update_role_parentage_for_instance(content_object)
        additions.update(parents_added)
        removals.update(parents_removed)
        # NOTE(review): a resource with both additions and removals bumps
        # model_ct twice, and noop_ct's else pairs only with the removal
        # branch — confirm this counting is intentional.
        if parents_added:
            model_ct += 1
            logger.debug('Added to parents of roles {} of {}'.format(parents_added, content_object))
        if parents_removed:
            model_ct += 1
            logger.debug('Removed from parents of roles {} of {}'.format(parents_removed, content_object))
        else:
            noop_ct += 1

    logger.debug('No changes to role parents for {} resources'.format(noop_ct))
    logger.debug('Added parents to {} roles'.format(len(additions)))
    logger.debug('Removed parents from {} roles'.format(len(removals)))
    if model_ct:
        logger.info('Updated implicit parents of {} resources'.format(model_ct))

    logger.info('Rebuild parentage completed in %f seconds' % (time() - start))

    # this is ran because the ordinary signals for
    # Role.parents.add and Role.parents.remove not called in migration
    Role.rebuild_role_ancestor_list(list(additions), list(removals))
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
# Django
|
||||
from django.conf import settings # noqa
|
||||
from django.db import connection
|
||||
from django.db.models.signals import pre_delete # noqa
|
||||
|
||||
# AWX
|
||||
@@ -58,7 +59,6 @@ from awx.main.models.workflow import ( # noqa
|
||||
WorkflowJob, WorkflowJobNode, WorkflowJobOptions, WorkflowJobTemplate,
|
||||
WorkflowJobTemplateNode, WorkflowApproval, WorkflowApprovalTemplate,
|
||||
)
|
||||
from awx.main.models.channels import ChannelGroup # noqa
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.oauth import ( # noqa
|
||||
OAuth2AccessToken, OAuth2Application
|
||||
@@ -80,6 +80,26 @@ User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
||||
User.add_to_class('accessible_objects', user_accessible_objects)
|
||||
|
||||
|
||||
def enforce_bigint_pk_migration():
    """Kick off background migration for event tables still holding old data.

    see: https://github.com/ansible/awx/issues/6010
    look at all the event tables and verify that they have been fully migrated
    from the *old* int primary key table to the replacement bigint table;
    if not, attempt to migrate them in the background.
    """
    event_tables = (
        'main_jobevent', 'main_inventoryupdateevent',
        'main_projectupdateevent', 'main_adhoccommandevent',
        'main_systemjobevent',
    )
    for table in event_tables:
        with connection.cursor() as cursor:
            # a leftover "_old_" table means the raw-SQL swap ran but the
            # row-copy back has not finished yet
            cursor.execute(
                'SELECT 1 FROM information_schema.tables WHERE table_name=%s',
                (f'_old_{table}',)
            )
            if cursor.rowcount:
                # import deferred to avoid a circular import at module load
                from awx.main.tasks import migrate_legacy_event_data
                migrate_legacy_event_data.apply_async([table])
||||
|
||||
|
||||
def cleanup_created_modified_by(sender, **kwargs):
|
||||
# work around a bug in django-polymorphic that doesn't properly
|
||||
# handle cascades for reverse foreign keys on the polymorphic base model
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
from django.db import models
|
||||
|
||||
|
||||
class ChannelGroup(models.Model):
|
||||
group = models.CharField(max_length=200, unique=True)
|
||||
channels = models.TextField()
|
||||
@@ -4,7 +4,7 @@ import datetime
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
|
||||
from django.db import models, DatabaseError
|
||||
from django.db import models, DatabaseError, connection
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc
|
||||
@@ -356,6 +356,14 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
job_id=self.job_id, uuid__in=failed
|
||||
).update(failed=True)
|
||||
|
||||
# send success/failure notifications when we've finished handling the playbook_on_stats event
|
||||
from awx.main.tasks import handle_success_and_failure_notifications # circular import
|
||||
|
||||
def _send_notifications():
|
||||
handle_success_and_failure_notifications.apply_async([self.job.id])
|
||||
connection.on_commit(_send_notifications)
|
||||
|
||||
|
||||
for field in ('playbook', 'play', 'task', 'role'):
|
||||
value = force_text(event_data.get(field, '')).strip()
|
||||
if value != getattr(self, field):
|
||||
@@ -430,6 +438,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
('job', 'parent_uuid'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
job = models.ForeignKey(
|
||||
'Job',
|
||||
related_name='job_events',
|
||||
@@ -518,6 +527,7 @@ class ProjectUpdateEvent(BasePlaybookEvent):
|
||||
('project_update', 'end_line'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
project_update = models.ForeignKey(
|
||||
'ProjectUpdate',
|
||||
related_name='project_update_events',
|
||||
@@ -669,6 +679,7 @@ class AdHocCommandEvent(BaseCommandEvent):
|
||||
FAILED_EVENTS = [x[0] for x in EVENT_TYPES if x[2]]
|
||||
EVENT_CHOICES = [(x[0], x[1]) for x in EVENT_TYPES]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
event = models.CharField(
|
||||
max_length=100,
|
||||
choices=EVENT_CHOICES,
|
||||
@@ -731,6 +742,7 @@ class InventoryUpdateEvent(BaseCommandEvent):
|
||||
('inventory_update', 'end_line'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
inventory_update = models.ForeignKey(
|
||||
'InventoryUpdate',
|
||||
related_name='inventory_update_events',
|
||||
@@ -764,6 +776,7 @@ class SystemJobEvent(BaseCommandEvent):
|
||||
('system_job', 'end_line'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
system_job = models.ForeignKey(
|
||||
'SystemJob',
|
||||
related_name='system_job_events',
|
||||
|
||||
@@ -53,6 +53,13 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
|
||||
uuid = models.CharField(max_length=40)
|
||||
hostname = models.CharField(max_length=250, unique=True)
|
||||
ip_address = models.CharField(
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
max_length=50,
|
||||
unique=True,
|
||||
)
|
||||
created = models.DateTimeField(auto_now_add=True)
|
||||
modified = models.DateTimeField(auto_now=True)
|
||||
last_isolated_check = models.DateTimeField(
|
||||
|
||||
@@ -426,9 +426,9 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
'''
|
||||
def _get_related_jobs(self):
    # All unified jobs touching this inventory: playbook jobs, inventory
    # updates, and ad hoc commands. non_polymorphic() keeps this a single
    # base-table query instead of per-subclass joins.
    return UnifiedJob.objects.non_polymorphic().filter(
        Q(job__inventory=self) |
        Q(inventoryupdate__inventory=self) |
        Q(adhoccommand__inventory=self)
    )
|
||||
|
||||
|
||||
@@ -808,8 +808,8 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
'''
|
||||
def _get_related_jobs(self):
    # Jobs related to a group: any job against its inventory, plus
    # inventory updates whose source targets this group directly.
    return UnifiedJob.objects.non_polymorphic().filter(
        Q(job__inventory=self.inventory) |
        Q(inventoryupdate__inventory_source__groups=self)
    )
|
||||
|
||||
|
||||
@@ -821,7 +821,6 @@ class InventorySourceOptions(BaseModel):
|
||||
injectors = dict()
|
||||
|
||||
SOURCE_CHOICES = [
|
||||
('', _('Manual')),
|
||||
('file', _('File, Directory or Script')),
|
||||
('scm', _('Sourced from a Project')),
|
||||
('ec2', _('Amazon EC2')),
|
||||
@@ -932,8 +931,8 @@ class InventorySourceOptions(BaseModel):
|
||||
source = models.CharField(
|
||||
max_length=32,
|
||||
choices=SOURCE_CHOICES,
|
||||
blank=True,
|
||||
default='',
|
||||
blank=False,
|
||||
default=None,
|
||||
)
|
||||
source_path = models.CharField(
|
||||
max_length=1024,
|
||||
@@ -1237,14 +1236,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
|
||||
deprecated_group = models.OneToOneField(
|
||||
'Group',
|
||||
related_name='deprecated_inventory_source',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
|
||||
source_project = models.ForeignKey(
|
||||
'Project',
|
||||
related_name='scm_inventory_sources',
|
||||
@@ -1277,10 +1268,14 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
@classmethod
def _get_unified_job_field_names(cls):
    # Fields copied from an InventorySource onto the InventoryUpdate it
    # spawns: all shared option fields plus these named extras
    # (organization included since the unified-org migration).
    return set(f.name for f in InventorySourceOptions._meta.fields) | set(
        ['name', 'description', 'organization', 'credentials', 'inventory']
    )
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# if this is a new object, inherit organization from its inventory
|
||||
if not self.pk and self.inventory and self.inventory.organization_id and not self.organization_id:
|
||||
self.organization_id = self.inventory.organization_id
|
||||
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
@@ -1410,16 +1405,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
started=list(started_notification_templates),
|
||||
success=list(success_notification_templates))
|
||||
|
||||
def clean_source(self): # TODO: remove in 3.3
|
||||
source = self.source
|
||||
if source and self.deprecated_group:
|
||||
qs = self.deprecated_group.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
existing_sources = qs.exclude(pk=self.pk)
|
||||
if existing_sources.count():
|
||||
s = u', '.join([x.deprecated_group.name for x in existing_sources])
|
||||
raise ValidationError(_('Unable to configure this item for cloud sync. It is already managed by %s.') % s)
|
||||
return source
|
||||
|
||||
def clean_update_on_project_update(self):
|
||||
if self.update_on_project_update is True and \
|
||||
self.source == 'scm' and \
|
||||
@@ -1508,8 +1493,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
if self.inventory_source.inventory is not None:
|
||||
websocket_data.update(dict(inventory_id=self.inventory_source.inventory.pk))
|
||||
|
||||
if self.inventory_source.deprecated_group is not None: # TODO: remove in 3.3
|
||||
websocket_data.update(dict(group_id=self.inventory_source.deprecated_group.id))
|
||||
return websocket_data
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
|
||||
@@ -199,7 +199,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
'labels', 'instance_groups', 'credentials', 'survey_spec'
|
||||
]
|
||||
FIELDS_TO_DISCARD_AT_COPY = ['vault_credential', 'credential']
|
||||
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
|
||||
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
@@ -262,13 +262,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
)
|
||||
|
||||
admin_role = ImplicitRoleField(
|
||||
parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role']
|
||||
parent_role=['organization.job_template_admin_role']
|
||||
)
|
||||
execute_role = ImplicitRoleField(
|
||||
parent_role=['admin_role', 'project.organization.execute_role', 'inventory.organization.execute_role'],
|
||||
parent_role=['admin_role', 'organization.execute_role'],
|
||||
)
|
||||
read_role = ImplicitRoleField(
|
||||
parent_role=['project.organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'],
|
||||
parent_role=[
|
||||
'organization.auditor_role',
|
||||
'inventory.organization.auditor_role', # partial support for old inheritance via inventory
|
||||
'execute_role', 'admin_role'
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -279,7 +283,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in JobOptions._meta.fields) | set(
|
||||
['name', 'description', 'survey_passwords', 'labels', 'credentials',
|
||||
['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials',
|
||||
'job_slice_number', 'job_slice_count']
|
||||
)
|
||||
|
||||
@@ -319,6 +323,41 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
else:
|
||||
return self.job_slice_count
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
# if project is deleted for some reason, then keep the old organization
|
||||
# to retain ownership for organization admins
|
||||
if self.project and self.project.organization_id != self.organization_id:
|
||||
self.organization_id = self.project.organization_id
|
||||
if 'organization' not in update_fields and 'organization_id' not in update_fields:
|
||||
update_fields.append('organization_id')
|
||||
return super(JobTemplate, self).save(*args, **kwargs)
|
||||
|
||||
def validate_unique(self, exclude=None):
|
||||
"""Custom over-ride for JT specifically
|
||||
because organization is inferred from project after full_clean is finished
|
||||
thus the organization field is not yet set when validation happens
|
||||
"""
|
||||
errors = []
|
||||
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
|
||||
kwargs = {'name': self.name}
|
||||
if self.project:
|
||||
kwargs['organization'] = self.project.organization_id
|
||||
else:
|
||||
kwargs['organization'] = None
|
||||
qs = JobTemplate.objects.filter(**kwargs)
|
||||
if self.pk:
|
||||
qs = qs.exclude(pk=self.pk)
|
||||
if qs.exists():
|
||||
errors.append(
|
||||
'%s with this (%s) combination already exists.' % (
|
||||
JobTemplate.__name__,
|
||||
', '.join(set(ut) - {'polymorphic_ctype'})
|
||||
)
|
||||
)
|
||||
if errors:
|
||||
raise ValidationError(errors)
|
||||
|
||||
def create_unified_job(self, **kwargs):
|
||||
prevent_slicing = kwargs.pop('_prevent_slicing', False)
|
||||
slice_ct = self.get_effective_slice_ct(kwargs)
|
||||
@@ -479,13 +518,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
success_notification_templates = list(base_notification_templates.filter(
|
||||
unifiedjobtemplate_notification_templates_for_success__in=[self, self.project]))
|
||||
# Get Organization NotificationTemplates
|
||||
if self.project is not None and self.project.organization is not None:
|
||||
if self.organization is not None:
|
||||
error_notification_templates = set(error_notification_templates + list(base_notification_templates.filter(
|
||||
organization_notification_templates_for_errors=self.project.organization)))
|
||||
organization_notification_templates_for_errors=self.organization)))
|
||||
started_notification_templates = set(started_notification_templates + list(base_notification_templates.filter(
|
||||
organization_notification_templates_for_started=self.project.organization)))
|
||||
organization_notification_templates_for_started=self.organization)))
|
||||
success_notification_templates = set(success_notification_templates + list(base_notification_templates.filter(
|
||||
organization_notification_templates_for_success=self.project.organization)))
|
||||
organization_notification_templates_for_success=self.organization)))
|
||||
return dict(error=list(error_notification_templates),
|
||||
started=list(started_notification_templates),
|
||||
success=list(success_notification_templates))
|
||||
@@ -588,7 +627,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
for virtualenv in (
|
||||
self.job_template.custom_virtualenv if self.job_template else None,
|
||||
self.project.custom_virtualenv,
|
||||
self.project.organization.custom_virtualenv if self.project.organization else None
|
||||
self.organization.custom_virtualenv if self.organization else None
|
||||
):
|
||||
if virtualenv:
|
||||
return virtualenv
|
||||
@@ -741,8 +780,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
|
||||
@property
|
||||
def preferred_instance_groups(self):
|
||||
if self.project is not None and self.project.organization is not None:
|
||||
organization_groups = [x for x in self.project.organization.instance_groups.all()]
|
||||
if self.organization is not None:
|
||||
organization_groups = [x for x in self.organization.instance_groups.all()]
|
||||
else:
|
||||
organization_groups = []
|
||||
if self.inventory is not None:
|
||||
@@ -829,8 +868,10 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
continue
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
ansible_local_system_id = ansible_facts.get('ansible_local', {}).get('insights', {}).get('system_id', None)
|
||||
ansible_facts_system_id = ansible_facts.get('insights', {}).get('system_id', None)
|
||||
ansible_local = ansible_facts.get('ansible_local', {}).get('insights', {})
|
||||
ansible_facts = ansible_facts.get('insights', {})
|
||||
ansible_local_system_id = ansible_local.get('system_id', None) if isinstance(ansible_local, dict) else None
|
||||
ansible_facts_system_id = ansible_facts.get('system_id', None) if isinstance(ansible_facts, dict) else None
|
||||
if ansible_local_system_id:
|
||||
print("Setting local {}".format(ansible_local_system_id))
|
||||
logger.debug("Insights system_id {} found for host <{}, {}> in"
|
||||
@@ -1142,7 +1183,7 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return ['name', 'description', 'job_type', 'extra_vars']
|
||||
return ['name', 'description', 'organization', 'job_type', 'extra_vars']
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:system_job_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -269,22 +269,20 @@ class JobNotificationMixin(object):
|
||||
'timeout', 'use_fact_cache', 'launch_type', 'status', 'failed', 'started', 'finished',
|
||||
'elapsed', 'job_explanation', 'execution_node', 'controller_node', 'allow_simultaneous',
|
||||
'scm_revision', 'diff_mode', 'job_slice_number', 'job_slice_count', 'custom_virtualenv',
|
||||
'approval_status', 'approval_node_name', 'workflow_url',
|
||||
{'host_status_counts': ['skipped', 'ok', 'changed', 'failures', 'dark']},
|
||||
{'playbook_counts': ['play_count', 'task_count']},
|
||||
'approval_status', 'approval_node_name', 'workflow_url', 'scm_branch',
|
||||
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark'
|
||||
'processed', 'rescued', 'ignored']},
|
||||
{'summary_fields': [{'inventory': ['id', 'name', 'description', 'has_active_failures',
|
||||
'total_hosts', 'hosts_with_active_failures', 'total_groups',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources', 'inventory_sources_with_failures',
|
||||
'organization_id', 'kind']},
|
||||
{'project': ['id', 'name', 'description', 'status', 'scm_type']},
|
||||
{'project_update': ['id', 'name', 'description', 'status', 'failed']},
|
||||
{'job_template': ['id', 'name', 'description']},
|
||||
{'unified_job_template': ['id', 'name', 'description', 'unified_job_type']},
|
||||
{'instance_group': ['name', 'id']},
|
||||
{'created_by': ['id', 'username', 'first_name', 'last_name']},
|
||||
{'labels': ['count', 'results']},
|
||||
{'source_workflow_job': ['description', 'elapsed', 'failed', 'id', 'name', 'status']}]}]
|
||||
{'labels': ['count', 'results']}]}]
|
||||
|
||||
@classmethod
|
||||
def context_stub(cls):
|
||||
@@ -303,7 +301,7 @@ class JobNotificationMixin(object):
|
||||
'finished': False,
|
||||
'force_handlers': False,
|
||||
'forks': 0,
|
||||
'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0},
|
||||
'host_status_counts': {'skipped': 1, 'ok': 5, 'changed': 3, 'failures': 0, 'dark': 0, 'failed': False, 'processed': 0, 'rescued': 0},
|
||||
'id': 42,
|
||||
'job_explanation': 'Sample job explanation',
|
||||
'job_slice_count': 1,
|
||||
@@ -314,8 +312,8 @@ class JobNotificationMixin(object):
|
||||
'limit': 'bar_limit',
|
||||
'modified': datetime.datetime(2018, 12, 13, 6, 4, 0, 0, tzinfo=datetime.timezone.utc),
|
||||
'name': 'Stub JobTemplate',
|
||||
'playbook_counts': {'play_count': 5, 'task_count': 10},
|
||||
'playbook': 'ping.yml',
|
||||
'scm_branch': '',
|
||||
'scm_revision': '',
|
||||
'skip_tags': '',
|
||||
'start_at_task': '',
|
||||
@@ -347,18 +345,10 @@ class JobNotificationMixin(object):
|
||||
'name': 'Stub project',
|
||||
'scm_type': 'git',
|
||||
'status': 'successful'},
|
||||
'project_update': {'id': 5, 'name': 'Stub Project Update', 'description': 'Project Update',
|
||||
'status': 'running', 'failed': False},
|
||||
'unified_job_template': {'description': 'Sample unified job template description',
|
||||
'id': 39,
|
||||
'name': 'Stub Job Template',
|
||||
'unified_job_type': 'job'},
|
||||
'source_workflow_job': {'description': 'Sample workflow job description',
|
||||
'elapsed': 0.000,
|
||||
'failed': False,
|
||||
'id': 88,
|
||||
'name': 'Stub WorkflowJobTemplate',
|
||||
'status': 'running'}},
|
||||
'unified_job_type': 'job'}},
|
||||
'timeout': 0,
|
||||
'type': 'job',
|
||||
'url': '/api/v2/jobs/13/',
|
||||
@@ -392,10 +382,20 @@ class JobNotificationMixin(object):
|
||||
The context will contain whitelisted content retrieved from a serialized job object
|
||||
(see JobNotificationMixin.JOB_FIELDS_WHITELIST), the job's friendly name,
|
||||
and a url to the job run."""
|
||||
context = {'job': {},
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_metadata': json.dumps(self.notification_data(), indent=4)}
|
||||
job_context = {'host_status_counts': {}}
|
||||
summary = None
|
||||
if hasattr(self, 'job_host_summaries'):
|
||||
summary = self.job_host_summaries.first()
|
||||
if summary:
|
||||
from awx.api.serializers import JobHostSummarySerializer
|
||||
summary_data = JobHostSummarySerializer(summary).to_representation(summary)
|
||||
job_context['host_status_counts'] = summary_data
|
||||
context = {
|
||||
'job': job_context,
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
'url': self.get_ui_url(),
|
||||
'job_metadata': json.dumps(self.notification_data(), indent=4)
|
||||
}
|
||||
|
||||
def build_context(node, fields, whitelisted_fields):
|
||||
for safe_field in whitelisted_fields:
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import models
|
||||
from django.db.models import Q
|
||||
from django.contrib.auth.models import User
|
||||
from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now as tz_now
|
||||
@@ -106,12 +105,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
|
||||
RelatedJobsMixin
|
||||
'''
|
||||
def _get_related_jobs(self):
|
||||
project_ids = self.projects.all().values_list('id')
|
||||
return UnifiedJob.objects.non_polymorphic().filter(
|
||||
Q(Job___project__in=project_ids) |
|
||||
Q(ProjectUpdate___project__in=project_ids) |
|
||||
Q(InventoryUpdate___inventory_source__inventory__organization=self)
|
||||
)
|
||||
return UnifiedJob.objects.non_polymorphic().filter(organization=self)
|
||||
|
||||
|
||||
class Team(CommonModelNameNotUnique, ResourceMixin):
|
||||
|
||||
@@ -254,13 +254,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
app_label = 'main'
|
||||
ordering = ('id',)
|
||||
|
||||
organization = models.ForeignKey(
|
||||
'Organization',
|
||||
blank=True,
|
||||
null=True,
|
||||
on_delete=models.CASCADE,
|
||||
related_name='projects',
|
||||
)
|
||||
scm_update_on_launch = models.BooleanField(
|
||||
default=False,
|
||||
help_text=_('Update the project when a job is launched that uses the project.'),
|
||||
@@ -329,9 +322,16 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in ProjectOptions._meta.fields) | set(
|
||||
['name', 'description']
|
||||
['name', 'description', 'organization']
|
||||
)
|
||||
|
||||
def clean_organization(self):
|
||||
if self.pk:
|
||||
old_org_id = getattr(self, '_prior_values_store', {}).get('organization_id', None)
|
||||
if self.organization_id != old_org_id and self.jobtemplates.exists():
|
||||
raise ValidationError({'organization': _('Organization cannot be changed when in use by job templates.')})
|
||||
return self.organization
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
new_instance = not bool(self.pk)
|
||||
pre_save_vals = getattr(self, '_prior_values_store', {})
|
||||
@@ -450,8 +450,8 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
'''
|
||||
def _get_related_jobs(self):
|
||||
return UnifiedJob.objects.non_polymorphic().filter(
|
||||
models.Q(Job___project=self) |
|
||||
models.Q(ProjectUpdate___project=self)
|
||||
models.Q(job__project=self) |
|
||||
models.Q(projectupdate__project=self)
|
||||
)
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
@@ -584,8 +584,8 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
|
||||
@property
|
||||
def preferred_instance_groups(self):
|
||||
if self.project is not None and self.project.organization is not None:
|
||||
organization_groups = [x for x in self.project.organization.instance_groups.all()]
|
||||
if self.organization is not None:
|
||||
organization_groups = [x for x in self.organization.instance_groups.all()]
|
||||
else:
|
||||
organization_groups = []
|
||||
template_groups = [x for x in super(ProjectUpdate, self).preferred_instance_groups]
|
||||
|
||||
@@ -191,7 +191,7 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
return rrule
|
||||
|
||||
@classmethod
|
||||
def rrulestr(cls, rrule, **kwargs):
|
||||
def rrulestr(cls, rrule, fast_forward=True, **kwargs):
|
||||
"""
|
||||
Apply our own custom rrule parsing requirements
|
||||
"""
|
||||
@@ -205,11 +205,17 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
|
||||
'A valid TZID must be provided (e.g., America/New_York)'
|
||||
)
|
||||
|
||||
if 'MINUTELY' in rrule or 'HOURLY' in rrule:
|
||||
if fast_forward and ('MINUTELY' in rrule or 'HOURLY' in rrule):
|
||||
try:
|
||||
first_event = x[0]
|
||||
if first_event < now() - datetime.timedelta(days=365 * 5):
|
||||
raise ValueError('RRULE values with more than 1000 events are not allowed.')
|
||||
if first_event < now():
|
||||
# hourly/minutely rrules with far-past DTSTART values
|
||||
# are *really* slow to precompute
|
||||
# start *from* one week ago to speed things up drastically
|
||||
dtstart = x._rrule[0]._dtstart.strftime(':%Y%m%dT')
|
||||
new_start = (now() - datetime.timedelta(days=7)).strftime(':%Y%m%dT')
|
||||
new_rrule = rrule.replace(dtstart, new_start)
|
||||
return Schedule.rrulestr(new_rrule, fast_forward=False)
|
||||
except IndexError:
|
||||
pass
|
||||
return x
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
# Python
|
||||
from io import StringIO
|
||||
import datetime
|
||||
import codecs
|
||||
import json
|
||||
import logging
|
||||
@@ -35,6 +36,7 @@ from awx.main.models.base import (
|
||||
NotificationFieldsModel,
|
||||
prevent_search
|
||||
)
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch.control import Control as ControlDispatcher
|
||||
from awx.main.registrar import activity_stream_registrar
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
|
||||
@@ -101,7 +103,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
ordering = ('name',)
|
||||
# unique_together here is intentionally commented out. Please make sure sub-classes of this model
|
||||
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
|
||||
#unique_together = [('polymorphic_ctype', 'name')]
|
||||
#unique_together = [('polymorphic_ctype', 'name', 'organization')]
|
||||
|
||||
old_pk = models.PositiveIntegerField(
|
||||
null=True,
|
||||
@@ -156,6 +158,14 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
default='ok',
|
||||
editable=False,
|
||||
)
|
||||
organization = models.ForeignKey(
|
||||
'Organization',
|
||||
blank=True,
|
||||
null=True,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
related_name='%(class)ss',
|
||||
help_text=_('The organization used to determine access to this template.'),
|
||||
)
|
||||
credentials = models.ManyToManyField(
|
||||
'Credential',
|
||||
related_name='%(class)ss',
|
||||
@@ -697,7 +707,15 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
help_text=_('The Rampart/Instance group the job was run under'),
|
||||
help_text=_('The Instance group the job was run under'),
|
||||
)
|
||||
organization = models.ForeignKey(
|
||||
'Organization',
|
||||
blank=True,
|
||||
null=True,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
related_name='%(class)ss',
|
||||
help_text=_('The organization used to determine access to this unified job.'),
|
||||
)
|
||||
credentials = models.ManyToManyField(
|
||||
'Credential',
|
||||
@@ -1218,12 +1236,17 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
status_data['instance_group_name'] = self.instance_group.name
|
||||
else:
|
||||
status_data['instance_group_name'] = None
|
||||
elif status in ['successful', 'failed', 'canceled'] and self.finished:
|
||||
status_data['finished'] = datetime.datetime.strftime(self.finished, "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
status_data.update(self.websocket_emit_data())
|
||||
status_data['group_name'] = 'jobs'
|
||||
if getattr(self, 'unified_job_template_id', None):
|
||||
status_data['unified_job_template_id'] = self.unified_job_template_id
|
||||
emit_channel_notification('jobs-status_changed', status_data)
|
||||
|
||||
if self.spawned_by_workflow:
|
||||
status_data['group_name'] = "workflow_events"
|
||||
status_data['workflow_job_template_id'] = self.unified_job_template.id
|
||||
emit_channel_notification('workflow_events-' + str(self.workflow_job_id), status_data)
|
||||
except IOError: # includes socket errors
|
||||
logger.exception('%s failed to emit channel msg about status change', self.log_format)
|
||||
@@ -1338,7 +1361,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
timeout = 5
|
||||
try:
|
||||
running = self.celery_task_id in ControlDispatcher(
|
||||
'dispatcher', self.execution_node
|
||||
'dispatcher', self.controller_node or self.execution_node
|
||||
).running(timeout=timeout)
|
||||
except socket.timeout:
|
||||
logger.error('could not reach dispatcher on {} within {}s'.format(
|
||||
@@ -1444,7 +1467,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
return r
|
||||
|
||||
def get_queue_name(self):
|
||||
return self.controller_node or self.execution_node or settings.CELERY_DEFAULT_QUEUE
|
||||
return self.controller_node or self.execution_node or get_local_queuename()
|
||||
|
||||
def is_isolated(self):
|
||||
return bool(self.controller_node)
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# Python
|
||||
import json
|
||||
import logging
|
||||
from uuid import uuid4
|
||||
from copy import copy
|
||||
from urllib.parse import urljoin
|
||||
|
||||
@@ -121,6 +122,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
|
||||
create_kwargs[field_name] = kwargs[field_name]
|
||||
elif hasattr(self, field_name):
|
||||
create_kwargs[field_name] = getattr(self, field_name)
|
||||
create_kwargs['identifier'] = self.identifier
|
||||
new_node = WorkflowJobNode.objects.create(**create_kwargs)
|
||||
if self.pk:
|
||||
allowed_creds = self.credentials.all()
|
||||
@@ -135,7 +137,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
|
||||
FIELDS_TO_PRESERVE_AT_COPY = [
|
||||
'unified_job_template', 'workflow_job_template', 'success_nodes', 'failure_nodes',
|
||||
'always_nodes', 'credentials', 'inventory', 'extra_data', 'survey_passwords',
|
||||
'char_prompts', 'all_parents_must_converge'
|
||||
'char_prompts', 'all_parents_must_converge', 'identifier'
|
||||
]
|
||||
REENCRYPTION_BLACKLIST_AT_COPY = ['extra_data', 'survey_passwords']
|
||||
|
||||
@@ -144,6 +146,21 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
|
||||
related_name='workflow_job_template_nodes',
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
identifier = models.CharField(
|
||||
max_length=512,
|
||||
default=uuid4,
|
||||
blank=False,
|
||||
help_text=_(
|
||||
'An identifier for this node that is unique within its workflow. '
|
||||
'It is copied to workflow job nodes corresponding to this node.'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
unique_together = (("identifier", "workflow_job_template"),)
|
||||
indexes = [
|
||||
models.Index(fields=['identifier']),
|
||||
]
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:workflow_job_template_node_detail', kwargs={'pk': self.pk}, request=request)
|
||||
@@ -213,6 +230,18 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
"semantics will mark this True if the node is in a path that will "
|
||||
"decidedly not be ran. A value of False means the node may not run."),
|
||||
)
|
||||
identifier = models.CharField(
|
||||
max_length=512,
|
||||
blank=True, # blank denotes pre-migration job nodes
|
||||
help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
indexes = [
|
||||
models.Index(fields=["identifier", "workflow_job"]),
|
||||
models.Index(fields=['identifier']),
|
||||
]
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
|
||||
@@ -335,7 +364,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
r = set(f.name for f in WorkflowJobOptions._meta.fields) | set(
|
||||
['name', 'description', 'survey_passwords', 'labels', 'limit', 'scm_branch']
|
||||
['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch']
|
||||
)
|
||||
r.remove('char_prompts') # needed due to copying launch config to launch config
|
||||
return r
|
||||
@@ -376,19 +405,12 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
|
||||
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
|
||||
FIELDS_TO_PRESERVE_AT_COPY = [
|
||||
'labels', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'
|
||||
'labels', 'organization', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'
|
||||
]
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
|
||||
organization = models.ForeignKey(
|
||||
'Organization',
|
||||
blank=True,
|
||||
null=True,
|
||||
on_delete=models.SET_NULL,
|
||||
related_name='workflows',
|
||||
)
|
||||
ask_inventory_on_launch = AskForField(
|
||||
blank=True,
|
||||
default=False,
|
||||
@@ -749,6 +771,8 @@ class WorkflowApproval(UnifiedJob, JobNotificationMixin):
|
||||
|
||||
def signal_start(self, **kwargs):
|
||||
can_start = super(WorkflowApproval, self).signal_start(**kwargs)
|
||||
self.started = self.created
|
||||
self.save(update_fields=['started'])
|
||||
self.send_approval_notification('running')
|
||||
return can_start
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
import dateutil.parser as dp
|
||||
@@ -23,6 +24,33 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
recipient_parameter = "grafana_url"
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_BODY = "{{ job_metadata }}"
|
||||
default_messages = {
|
||||
"started": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"success": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"error": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"workflow_approval": {
|
||||
"running": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG, "body": None
|
||||
},
|
||||
"approved": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG, "body": None
|
||||
},
|
||||
"timed_out": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG, "body": None
|
||||
},
|
||||
"denied": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG, "body": None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def __init__(self, grafana_key,dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True,
|
||||
fail_silently=False, **kwargs):
|
||||
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
||||
@@ -34,6 +62,13 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
self.isRegion = isRegion
|
||||
|
||||
def format_body(self, body):
|
||||
# expect body to be a string representing a dict
|
||||
try:
|
||||
potential_body = json.loads(body)
|
||||
if isinstance(potential_body, dict):
|
||||
body = potential_body
|
||||
except json.JSONDecodeError:
|
||||
body = {}
|
||||
return body
|
||||
|
||||
def send_messages(self, messages):
|
||||
@@ -41,18 +76,21 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
for m in messages:
|
||||
grafana_data = {}
|
||||
grafana_headers = {}
|
||||
try:
|
||||
epoch=datetime.datetime.utcfromtimestamp(0)
|
||||
grafana_data['time'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
|
||||
grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
|
||||
except ValueError:
|
||||
logger.error(smart_text(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
|
||||
if not self.fail_silently:
|
||||
raise Exception(smart_text(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
|
||||
if 'started' in m.body:
|
||||
try:
|
||||
epoch=datetime.datetime.utcfromtimestamp(0)
|
||||
grafana_data['time'] = grafana_data['timeEnd'] = int((dp.parse(m.body['started']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
|
||||
if m.body.get('finished'):
|
||||
grafana_data['timeEnd'] = int((dp.parse(m.body['finished']).replace(tzinfo=None) - epoch).total_seconds() * 1000)
|
||||
except ValueError:
|
||||
logger.error(smart_text(_("Error converting time {} or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
|
||||
if not self.fail_silently:
|
||||
raise Exception(smart_text(_("Error converting time {} and/or timeEnd {} to int.").format(m.body['started'],m.body['finished'])))
|
||||
grafana_data['isRegion'] = self.isRegion
|
||||
grafana_data['dashboardId'] = self.dashboardId
|
||||
grafana_data['panelId'] = self.panelId
|
||||
grafana_data['tags'] = self.annotation_tags
|
||||
if self.annotation_tags:
|
||||
grafana_data['tags'] = self.annotation_tags
|
||||
grafana_data['text'] = m.subject
|
||||
grafana_headers['Authorization'] = "Bearer {}".format(self.grafana_key)
|
||||
grafana_headers['Content-Type'] = "application/json"
|
||||
|
||||
@@ -4,15 +4,11 @@
|
||||
# Python
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import redis
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
|
||||
# Kombu
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from kombu import Exchange, Producer
|
||||
from kombu.serialization import registry
|
||||
|
||||
__all__ = ['CallbackQueueDispatcher']
|
||||
|
||||
@@ -28,47 +24,12 @@ class AnsibleJSONEncoder(json.JSONEncoder):
|
||||
return super(AnsibleJSONEncoder, self).default(o)
|
||||
|
||||
|
||||
registry.register(
|
||||
'json-ansible',
|
||||
lambda obj: json.dumps(obj, cls=AnsibleJSONEncoder),
|
||||
lambda obj: json.loads(obj),
|
||||
content_type='application/json',
|
||||
content_encoding='utf-8'
|
||||
)
|
||||
|
||||
|
||||
class CallbackQueueDispatcher(object):
|
||||
|
||||
def __init__(self):
|
||||
self.callback_connection = getattr(settings, 'BROKER_URL', None)
|
||||
self.connection_queue = getattr(settings, 'CALLBACK_QUEUE', '')
|
||||
self.connection = None
|
||||
self.exchange = None
|
||||
self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
|
||||
self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
|
||||
self.connection = redis.Redis.from_url(settings.BROKER_URL)
|
||||
|
||||
def dispatch(self, obj):
|
||||
if not self.callback_connection or not self.connection_queue:
|
||||
return
|
||||
active_pid = os.getpid()
|
||||
for retry_count in range(4):
|
||||
try:
|
||||
if not hasattr(self, 'connection_pid'):
|
||||
self.connection_pid = active_pid
|
||||
if self.connection_pid != active_pid:
|
||||
self.connection = None
|
||||
if self.connection is None:
|
||||
self.connection = Connection(self.callback_connection)
|
||||
self.exchange = Exchange(self.connection_queue, type='direct')
|
||||
|
||||
producer = Producer(self.connection)
|
||||
producer.publish(obj,
|
||||
serializer='json-ansible',
|
||||
compression='bzip2',
|
||||
exchange=self.exchange,
|
||||
declare=[self.exchange],
|
||||
delivery_mode="persistent" if settings.PERSISTENT_CALLBACK_MESSAGES else "transient",
|
||||
routing_key=self.connection_queue)
|
||||
return
|
||||
except Exception as e:
|
||||
self.logger.info('Publish Job Event Exception: %r, retry=%d', e,
|
||||
retry_count, exc_info=True)
|
||||
self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))
|
||||
|
||||
@@ -8,7 +8,7 @@ REPLACE_STR = '$encrypted$'
|
||||
|
||||
class UriCleaner(object):
|
||||
REPLACE_STR = REPLACE_STR
|
||||
SENSITIVE_URI_PATTERN = re.compile(r'(\w+:(\/?\/?)[^\s]+)', re.MULTILINE) # NOQA
|
||||
SENSITIVE_URI_PATTERN = re.compile(r'(\w{1,20}:(\/?\/?)[^\s]+)', re.MULTILINE) # NOQA
|
||||
|
||||
@staticmethod
|
||||
def remove_sensitive(cleartext):
|
||||
|
||||
@@ -1,8 +1,15 @@
|
||||
from channels.routing import route
|
||||
from django.conf.urls import url
|
||||
from channels.auth import AuthMiddlewareStack
|
||||
from channels.routing import ProtocolTypeRouter, URLRouter
|
||||
from . import consumers
|
||||
|
||||
|
||||
channel_routing = [
|
||||
route("websocket.connect", "awx.main.consumers.ws_connect", path=r'^/websocket/$'),
|
||||
route("websocket.disconnect", "awx.main.consumers.ws_disconnect", path=r'^/websocket/$'),
|
||||
route("websocket.receive", "awx.main.consumers.ws_receive", path=r'^/websocket/$'),
|
||||
websocket_urlpatterns = [
|
||||
url(r'websocket/$', consumers.EventConsumer),
|
||||
url(r'websocket/broadcast/$', consumers.BroadcastConsumer),
|
||||
]
|
||||
|
||||
application = ProtocolTypeRouter({
|
||||
'websocket': AuthMiddlewareStack(
|
||||
URLRouter(websocket_urlpatterns)
|
||||
),
|
||||
})
|
||||
|
||||
@@ -83,8 +83,8 @@ class WorkflowDAG(SimpleDAG):
|
||||
elif p.job and p.job.status == "failed":
|
||||
status = "failure_nodes"
|
||||
#check that the nodes status matches either a pathway of the same status or is an always path.
|
||||
if (p not in [node['node_object'] for node in self.get_parents(obj, status)]
|
||||
and p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
|
||||
if (p not in [node['node_object'] for node in self.get_parents(obj, status)] and
|
||||
p not in [node['node_object'] for node in self.get_parents(obj, "always_nodes")]):
|
||||
return False
|
||||
return True
|
||||
|
||||
@@ -102,13 +102,13 @@ class WorkflowDAG(SimpleDAG):
|
||||
elif obj.job:
|
||||
if obj.job.status in ['failed', 'error', 'canceled']:
|
||||
nodes.extend(self.get_children(obj, 'failure_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
elif obj.job.status == 'successful':
|
||||
nodes.extend(self.get_children(obj, 'success_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
elif obj.unified_job_template is None:
|
||||
nodes.extend(self.get_children(obj, 'failure_nodes') +
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
self.get_children(obj, 'always_nodes'))
|
||||
else:
|
||||
# This catches root nodes or ANY convergence nodes
|
||||
if not obj.all_parents_must_converge and self._are_relevant_parents_finished(n):
|
||||
@@ -167,8 +167,8 @@ class WorkflowDAG(SimpleDAG):
|
||||
failed_path_nodes_id_status.append((str(obj.id), obj.job.status))
|
||||
|
||||
if res is True:
|
||||
s = _("No error handle path for workflow job node(s) [{node_status}] workflow job "
|
||||
"node(s) missing unified job template and error handle path [{no_ufjt}].")
|
||||
s = _("No error handling path for workflow job node(s) [{node_status}]. Workflow job "
|
||||
"node(s) missing unified job template and error handling path [{no_ufjt}].")
|
||||
parms = {
|
||||
'node_status': '',
|
||||
'no_ufjt': '',
|
||||
@@ -231,9 +231,9 @@ class WorkflowDAG(SimpleDAG):
|
||||
|
||||
|
||||
r'''
|
||||
determine if the current node is a convergence node by checking if all the
|
||||
parents are finished then checking to see if all parents meet the needed
|
||||
path criteria to run the convergence child.
|
||||
determine if the current node is a convergence node by checking if all the
|
||||
parents are finished then checking to see if all parents meet the needed
|
||||
path criteria to run the convergence child.
|
||||
(i.e. parent must fail, parent must succeed, etc. to proceed)
|
||||
|
||||
Return a list object
|
||||
|
||||
@@ -5,11 +5,12 @@ import logging
|
||||
# AWX
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def run_task_manager():
|
||||
logger.debug("Running Tower task manager.")
|
||||
TaskManager().schedule()
|
||||
|
||||
@@ -6,7 +6,6 @@ import contextlib
|
||||
import logging
|
||||
import threading
|
||||
import json
|
||||
import pkg_resources
|
||||
import sys
|
||||
|
||||
# Django
|
||||
@@ -53,6 +52,7 @@ from awx.conf.utils import conf_to_dict
|
||||
__all__ = []
|
||||
|
||||
logger = logging.getLogger('awx.main.signals')
|
||||
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
|
||||
|
||||
# Update has_active_failures for inventory/groups when a Host/Group is deleted,
|
||||
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted
|
||||
@@ -157,17 +157,26 @@ def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
|
||||
|
||||
def save_related_job_templates(sender, instance, **kwargs):
|
||||
'''save_related_job_templates loops through all of the
|
||||
job templates that use an Inventory or Project that have had their
|
||||
job templates that use an Inventory that have had their
|
||||
Organization updated. This triggers the rebuilding of the RBAC hierarchy
|
||||
and ensures the proper access restrictions.
|
||||
'''
|
||||
if sender not in (Project, Inventory):
|
||||
if sender is not Inventory:
|
||||
raise ValueError('This signal callback is only intended for use with Project or Inventory')
|
||||
|
||||
update_fields = kwargs.get('update_fields', None)
|
||||
if ((update_fields and not ('organization' in update_fields or 'organization_id' in update_fields)) or
|
||||
kwargs.get('created', False)):
|
||||
return
|
||||
|
||||
if instance._prior_values_store.get('organization_id') != instance.organization_id:
|
||||
jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
|
||||
for jt in jtq:
|
||||
update_role_parentage_for_instance(jt)
|
||||
parents_added, parents_removed = update_role_parentage_for_instance(jt)
|
||||
if parents_added or parents_removed:
|
||||
logger.info('Permissions on JT {} changed due to inventory {} organization change from {} to {}.'.format(
|
||||
jt.pk, instance.pk, instance._prior_values_store.get('organization_id'), instance.organization_id
|
||||
))
|
||||
|
||||
|
||||
def connect_computed_field_signals():
|
||||
@@ -183,7 +192,6 @@ def connect_computed_field_signals():
|
||||
|
||||
connect_computed_field_signals()
|
||||
|
||||
post_save.connect(save_related_job_templates, sender=Project)
|
||||
post_save.connect(save_related_job_templates, sender=Inventory)
|
||||
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
|
||||
m2m_changed.connect(rbac_activity_stream, Role.members.through)
|
||||
@@ -356,12 +364,24 @@ def model_serializer_mapping():
|
||||
}
|
||||
|
||||
|
||||
def emit_activity_stream_change(instance):
|
||||
if 'migrate' in sys.argv:
|
||||
# don't emit activity stream external logs during migrations, it
|
||||
# could be really noisy
|
||||
return
|
||||
from awx.api.serializers import ActivityStreamSerializer
|
||||
actor = None
|
||||
if instance.actor:
|
||||
actor = instance.actor.username
|
||||
summary_fields = ActivityStreamSerializer(instance).get_summary_fields(instance)
|
||||
analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
|
||||
extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
|
||||
actor=actor, operation=instance.operation,
|
||||
object1=instance.object1, object2=instance.object2, summary_fields=summary_fields))
|
||||
|
||||
|
||||
def activity_stream_create(sender, instance, created, **kwargs):
|
||||
if created and activity_stream_enabled:
|
||||
# TODO: remove deprecated_group conditional in 3.3
|
||||
# Skip recording any inventory source directly associated with a group.
|
||||
if isinstance(instance, InventorySource) and instance.deprecated_group:
|
||||
return
|
||||
_type = type(instance)
|
||||
if getattr(_type, '_deferred', False):
|
||||
return
|
||||
@@ -392,6 +412,9 @@ def activity_stream_create(sender, instance, created, **kwargs):
|
||||
else:
|
||||
activity_entry.setting = conf_to_dict(instance)
|
||||
activity_entry.save()
|
||||
connection.on_commit(
|
||||
lambda: emit_activity_stream_change(activity_entry)
|
||||
)
|
||||
|
||||
|
||||
def activity_stream_update(sender, instance, **kwargs):
|
||||
@@ -423,15 +446,14 @@ def activity_stream_update(sender, instance, **kwargs):
|
||||
else:
|
||||
activity_entry.setting = conf_to_dict(instance)
|
||||
activity_entry.save()
|
||||
connection.on_commit(
|
||||
lambda: emit_activity_stream_change(activity_entry)
|
||||
)
|
||||
|
||||
|
||||
def activity_stream_delete(sender, instance, **kwargs):
|
||||
if not activity_stream_enabled:
|
||||
return
|
||||
# TODO: remove deprecated_group conditional in 3.3
|
||||
# Skip recording any inventory source directly associated with a group.
|
||||
if isinstance(instance, InventorySource) and instance.deprecated_group:
|
||||
return
|
||||
# Inventory delete happens in the task system rather than request-response-cycle.
|
||||
# If we trigger this handler there we may fall into db-integrity-related race conditions.
|
||||
# So we add flag verification to prevent normal signal handling. This funciton will be
|
||||
@@ -460,6 +482,9 @@ def activity_stream_delete(sender, instance, **kwargs):
|
||||
object1=object1,
|
||||
actor=get_current_user_or_none())
|
||||
activity_entry.save()
|
||||
connection.on_commit(
|
||||
lambda: emit_activity_stream_change(activity_entry)
|
||||
)
|
||||
|
||||
|
||||
def activity_stream_associate(sender, instance, **kwargs):
|
||||
@@ -533,6 +558,9 @@ def activity_stream_associate(sender, instance, **kwargs):
|
||||
activity_entry.role.add(role)
|
||||
activity_entry.object_relationship_type = obj_rel
|
||||
activity_entry.save()
|
||||
connection.on_commit(
|
||||
lambda: emit_activity_stream_change(activity_entry)
|
||||
)
|
||||
|
||||
|
||||
@receiver(current_user_getter)
|
||||
@@ -585,16 +613,6 @@ def deny_orphaned_approvals(sender, instance, **kwargs):
|
||||
@receiver(post_save, sender=Session)
|
||||
def save_user_session_membership(sender, **kwargs):
|
||||
session = kwargs.get('instance', None)
|
||||
if pkg_resources.get_distribution('channels').version >= '2':
|
||||
# If you get into this code block, it means we upgraded channels, but
|
||||
# didn't make the settings.SESSIONS_PER_USER feature work
|
||||
raise RuntimeError(
|
||||
'save_user_session_membership must be updated for channels>=2: '
|
||||
'http://channels.readthedocs.io/en/latest/one-to-two.html#requirements'
|
||||
)
|
||||
if 'runworker' in sys.argv:
|
||||
# don't track user session membership for websocket per-channel sessions
|
||||
return
|
||||
if not session:
|
||||
return
|
||||
user_id = session.get_decoded().get(SESSION_KEY, None)
|
||||
|
||||
@@ -26,7 +26,7 @@ import urllib.parse as urlparse
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction, DatabaseError, IntegrityError
|
||||
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
|
||||
from django.db.models.fields.related import ForeignKey
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.encoding import smart_str
|
||||
@@ -59,7 +59,7 @@ from awx.main.models import (
|
||||
Inventory, InventorySource, SmartInventoryMembership,
|
||||
Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob,
|
||||
JobEvent, ProjectUpdateEvent, InventoryUpdateEvent, AdHocCommandEvent, SystemJobEvent,
|
||||
build_safe_env
|
||||
build_safe_env, enforce_bigint_pk_migration
|
||||
)
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.exceptions import AwxTaskError
|
||||
@@ -73,6 +73,7 @@ from awx.main.utils import (get_ssh_version, update_scm_url,
|
||||
get_awx_version)
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
from awx.main.utils.common import _get_ansible_version, get_custom_venv_choices
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
@@ -135,6 +136,15 @@ def dispatch_startup():
|
||||
if Instance.objects.me().is_controller():
|
||||
awx_isolated_heartbeat()
|
||||
|
||||
# at process startup, detect the need to migrate old event records from int
|
||||
# to bigint; at *some point* in the future, once certain versions of AWX
|
||||
# and Tower fall out of use/support, we can probably just _assume_ that
|
||||
# everybody has moved to bigint, and remove this code entirely
|
||||
enforce_bigint_pk_migration()
|
||||
|
||||
# Update Tower's rsyslog.conf file based on loggins settings in the db
|
||||
reconfigure_rsyslog()
|
||||
|
||||
|
||||
def inform_cluster_of_shutdown():
|
||||
try:
|
||||
@@ -151,7 +161,7 @@ def inform_cluster_of_shutdown():
|
||||
logger.exception('Encountered problem with normal shutdown signal.')
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def apply_cluster_membership_policies():
|
||||
started_waiting = time.time()
|
||||
with advisory_lock('cluster_policy_lock', wait=True):
|
||||
@@ -264,7 +274,7 @@ def apply_cluster_membership_policies():
|
||||
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all', exchange_type='fanout')
|
||||
@task(queue='tower_broadcast_all')
|
||||
def handle_setting_changes(setting_keys):
|
||||
orig_len = len(setting_keys)
|
||||
for i in range(orig_len):
|
||||
@@ -274,8 +284,14 @@ def handle_setting_changes(setting_keys):
|
||||
logger.debug('cache delete_many(%r)', cache_keys)
|
||||
cache.delete_many(cache_keys)
|
||||
|
||||
if any([
|
||||
setting.startswith('LOG_AGGREGATOR')
|
||||
for setting in setting_keys
|
||||
]):
|
||||
connection.on_commit(reconfigure_rsyslog)
|
||||
|
||||
@task(queue='tower_broadcast_all', exchange_type='fanout')
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
def delete_project_files(project_path):
|
||||
# TODO: possibly implement some retry logic
|
||||
lock_file = project_path + '.lock'
|
||||
@@ -293,7 +309,7 @@ def delete_project_files(project_path):
|
||||
logger.exception('Could not remove lock file {}'.format(lock_file))
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all', exchange_type='fanout')
|
||||
@task(queue='tower_broadcast_all')
|
||||
def profile_sql(threshold=1, minutes=1):
|
||||
if threshold == 0:
|
||||
cache.delete('awx-profile-sql-threshold')
|
||||
@@ -307,7 +323,7 @@ def profile_sql(threshold=1, minutes=1):
|
||||
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def send_notifications(notification_list, job_id=None):
|
||||
if not isinstance(notification_list, list):
|
||||
raise TypeError("notification_list should be of type list")
|
||||
@@ -336,7 +352,7 @@ def send_notifications(notification_list, job_id=None):
|
||||
logger.exception('Error saving notification {} result.'.format(notification.id))
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def gather_analytics():
|
||||
from awx.conf.models import Setting
|
||||
from rest_framework.fields import DateTimeField
|
||||
@@ -489,10 +505,10 @@ def awx_isolated_heartbeat():
|
||||
# Slow pass looping over isolated IGs and their isolated instances
|
||||
if len(isolated_instance_qs) > 0:
|
||||
logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs])))
|
||||
isolated_manager.IsolatedManager().health_check(isolated_instance_qs)
|
||||
isolated_manager.IsolatedManager(CallbackQueueDispatcher.dispatch).health_check(isolated_instance_qs)
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def awx_periodic_scheduler():
|
||||
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
@@ -549,7 +565,7 @@ def awx_periodic_scheduler():
|
||||
state.save()
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def handle_work_success(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
@@ -562,7 +578,7 @@ def handle_work_success(task_actual):
|
||||
schedule_task_manager()
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def handle_work_error(task_id, *args, **kwargs):
|
||||
subtasks = kwargs.get('subtasks', None)
|
||||
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
|
||||
@@ -584,7 +600,7 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
first_instance = instance
|
||||
first_instance_type = each_task['type']
|
||||
|
||||
if instance.celery_task_id != task_id and not instance.cancel_flag:
|
||||
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
|
||||
instance.status = 'failed'
|
||||
instance.failed = True
|
||||
if not instance.job_explanation:
|
||||
@@ -602,7 +618,26 @@ def handle_work_error(task_id, *args, **kwargs):
|
||||
pass
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def handle_success_and_failure_notifications(job_id):
|
||||
uj = UnifiedJob.objects.get(pk=job_id)
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
return
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_id)
|
||||
|
||||
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
'''
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
@@ -644,7 +679,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
|
||||
return False
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def update_host_smart_inventory_memberships():
|
||||
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
||||
changed_inventories = set([])
|
||||
@@ -660,7 +695,49 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def migrate_legacy_event_data(tblname):
|
||||
if 'event' not in tblname:
|
||||
return
|
||||
with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
return
|
||||
chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE
|
||||
|
||||
def _remaining():
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
|
||||
return cursor.fetchone()[0]
|
||||
except ProgrammingError:
|
||||
# the table is gone (migration is unnecessary)
|
||||
return None
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
total_rows = _remaining()
|
||||
while total_rows:
|
||||
with transaction.atomic():
|
||||
cursor.execute(
|
||||
f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;'
|
||||
)
|
||||
last_insert_pk = cursor.fetchone()
|
||||
if last_insert_pk is None:
|
||||
# this means that the SELECT from the old table was
|
||||
# empty, and there was nothing to insert (so we're done)
|
||||
break
|
||||
last_insert_pk = last_insert_pk[0]
|
||||
cursor.execute(
|
||||
f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});'
|
||||
)
|
||||
logger.warn(
|
||||
f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)'
|
||||
)
|
||||
|
||||
if _remaining() is None:
|
||||
cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
|
||||
logger.warn(f'{tblname} primary key migration to bigint has finished')
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
if user_id is None:
|
||||
@@ -1162,7 +1239,6 @@ class BaseTask(object):
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
should_write_event = False
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
self.dispatcher.dispatch(event_data)
|
||||
self.event_ct += 1
|
||||
@@ -1174,13 +1250,17 @@ class BaseTask(object):
|
||||
self.instance.artifacts = event_data['event_data']['artifact_data']
|
||||
self.instance.save(update_fields=['artifacts'])
|
||||
|
||||
return should_write_event
|
||||
return False
|
||||
|
||||
def cancel_callback(self):
|
||||
'''
|
||||
Ansible runner callback to tell the job when/if it is canceled
|
||||
'''
|
||||
self.instance = self.update_model(self.instance.pk)
|
||||
unified_job_id = self.instance.pk
|
||||
self.instance = self.update_model(unified_job_id)
|
||||
if not self.instance:
|
||||
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
|
||||
return True
|
||||
if self.instance.cancel_flag or self.instance.status == 'canceled':
|
||||
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
|
||||
if cancel_wait > 5:
|
||||
@@ -1370,6 +1450,7 @@ class BaseTask(object):
|
||||
if not params[v]:
|
||||
del params[v]
|
||||
|
||||
self.dispatcher = CallbackQueueDispatcher()
|
||||
if self.instance.is_isolated() or containerized:
|
||||
module_args = None
|
||||
if 'module_args' in params:
|
||||
@@ -1384,6 +1465,7 @@ class BaseTask(object):
|
||||
|
||||
ansible_runner.utils.dump_artifacts(params)
|
||||
isolated_manager_instance = isolated_manager.IsolatedManager(
|
||||
self.event_handler,
|
||||
canceled_callback=lambda: self.update_model(self.instance.pk).cancel_flag,
|
||||
check_callback=self.check_handler,
|
||||
pod_manager=pod_manager
|
||||
@@ -1393,11 +1475,9 @@ class BaseTask(object):
|
||||
params.get('playbook'),
|
||||
params.get('module'),
|
||||
module_args,
|
||||
event_data_key=self.event_data_key,
|
||||
ident=str(self.instance.pk))
|
||||
self.event_ct = len(isolated_manager_instance.handled_events)
|
||||
self.finished_callback(None)
|
||||
else:
|
||||
self.dispatcher = CallbackQueueDispatcher()
|
||||
res = ansible_runner.interface.run(**params)
|
||||
status = res.status
|
||||
rc = res.rc
|
||||
@@ -1475,7 +1555,7 @@ class BaseTask(object):
|
||||
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunJob(BaseTask):
|
||||
'''
|
||||
Run a job using ansible-playbook.
|
||||
@@ -1692,7 +1772,7 @@ class RunJob(BaseTask):
|
||||
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
|
||||
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
|
||||
args.append('--forks=%d' % settings.MAX_FORKS)
|
||||
else:
|
||||
else:
|
||||
args.append('--forks=%d' % job.forks)
|
||||
if job.force_handlers:
|
||||
args.append('--force-handlers')
|
||||
@@ -1805,7 +1885,7 @@ class RunJob(BaseTask):
|
||||
current_revision = git_repo.head.commit.hexsha
|
||||
if desired_revision == current_revision:
|
||||
job_revision = desired_revision
|
||||
logger.info('Skipping project sync for {} because commit is locally available'.format(job.log_format))
|
||||
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
|
||||
else:
|
||||
sync_needs = all_sync_needs
|
||||
except (ValueError, BadGitName):
|
||||
@@ -1904,10 +1984,11 @@ class RunJob(BaseTask):
|
||||
except Inventory.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
if inventory is not None:
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunProjectUpdate(BaseTask):
|
||||
|
||||
model = ProjectUpdate
|
||||
@@ -2268,7 +2349,7 @@ class RunProjectUpdate(BaseTask):
|
||||
# force option is necessary because remote refs are not counted, although no information is lost
|
||||
git_repo.delete_head(tmp_branch_name, force=True)
|
||||
else:
|
||||
copy_tree(project_path, destination_folder)
|
||||
copy_tree(project_path, destination_folder, preserve_symlinks=1)
|
||||
|
||||
def post_run_hook(self, instance, status):
|
||||
# To avoid hangs, very important to release lock even if errors happen here
|
||||
@@ -2317,7 +2398,7 @@ class RunProjectUpdate(BaseTask):
|
||||
return getattr(settings, 'AWX_PROOT_ENABLED', False)
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunInventoryUpdate(BaseTask):
|
||||
|
||||
model = InventoryUpdate
|
||||
@@ -2585,7 +2666,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
)
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunAdHocCommand(BaseTask):
|
||||
'''
|
||||
Run an ad hoc command using ansible.
|
||||
@@ -2775,7 +2856,7 @@ class RunAdHocCommand(BaseTask):
|
||||
isolated_manager_instance.cleanup()
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
class RunSystemJob(BaseTask):
|
||||
|
||||
model = SystemJob
|
||||
@@ -2849,11 +2930,16 @@ def _reconstruct_relationships(copy_mapping):
|
||||
new_obj.save()
|
||||
|
||||
|
||||
@task()
|
||||
@task(queue=get_local_queuename)
|
||||
def deep_copy_model_obj(
|
||||
model_module, model_name, obj_pk, new_obj_pk,
|
||||
user_pk, sub_obj_list, permission_check_func=None
|
||||
user_pk, uuid, permission_check_func=None
|
||||
):
|
||||
sub_obj_list = cache.get(uuid)
|
||||
if sub_obj_list is None:
|
||||
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
|
||||
return
|
||||
|
||||
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
|
||||
from awx.api.generics import CopyAPIView
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
78
awx/main/tests/functional/analytics/test_collectors.py
Normal file
78
awx/main/tests/functional/analytics/test_collectors.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import pytest
|
||||
import tempfile
|
||||
import os
|
||||
import shutil
|
||||
import csv
|
||||
|
||||
from django.utils.timezone import now
|
||||
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
|
||||
|
||||
from awx.main.analytics import collectors
|
||||
|
||||
from awx.main.models import (
|
||||
ProjectUpdate,
|
||||
InventorySource,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sqlite_copy_expert(request):
|
||||
# copy_expert is postgres-specific, and SQLite doesn't support it; mock its
|
||||
# behavior to test that it writes a file that contains stdout from events
|
||||
path = tempfile.mkdtemp(prefix='copied_tables')
|
||||
|
||||
def write_stdout(self, sql, fd):
|
||||
# Would be cool if we instead properly disected the SQL query and verified
|
||||
# it that way. But instead, we just take the nieve approach here.
|
||||
assert sql.startswith('COPY (')
|
||||
assert sql.endswith(') TO STDOUT WITH CSV HEADER')
|
||||
|
||||
sql = sql.replace('COPY (', '')
|
||||
sql = sql.replace(') TO STDOUT WITH CSV HEADER', '')
|
||||
|
||||
# Remove JSON style queries
|
||||
# TODO: could replace JSON style queries with sqlite kind of equivalents
|
||||
sql_new = []
|
||||
for line in sql.split('\n'):
|
||||
if line.find('main_jobevent.event_data::') == -1:
|
||||
sql_new.append(line)
|
||||
elif not line.endswith(','):
|
||||
sql_new[-1] = sql_new[-1].rstrip(',')
|
||||
sql = '\n'.join(sql_new)
|
||||
|
||||
self.execute(sql)
|
||||
results = self.fetchall()
|
||||
headers = [i[0] for i in self.description]
|
||||
|
||||
csv_handle = csv.writer(fd, delimiter=',', quoting=csv.QUOTE_ALL, escapechar='\\', lineterminator='\n')
|
||||
csv_handle.writerow(headers)
|
||||
csv_handle.writerows(results)
|
||||
|
||||
|
||||
setattr(SQLiteCursorWrapper, 'copy_expert', write_stdout)
|
||||
request.addfinalizer(lambda: shutil.rmtree(path))
|
||||
request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, 'copy_expert'))
|
||||
return path
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_copy_tables_unified_job_query(sqlite_copy_expert, project, inventory, job_template):
|
||||
'''
|
||||
Ensure that various unified job types are in the output of the query.
|
||||
'''
|
||||
|
||||
time_start = now()
|
||||
inv_src = InventorySource.objects.create(name="inventory_update1", inventory=inventory, source='gce')
|
||||
|
||||
project_update_name = ProjectUpdate.objects.create(project=project, name="project_update1").name
|
||||
inventory_update_name = inv_src.create_unified_job().name
|
||||
job_name = job_template.create_unified_job().name
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
collectors.copy_tables(time_start, tmpdir)
|
||||
with open(os.path.join(tmpdir, 'unified_jobs_table.csv')) as f:
|
||||
lines = ''.join([l for l in f])
|
||||
|
||||
assert project_update_name in lines
|
||||
assert inventory_update_name in lines
|
||||
assert job_name in lines
|
||||
@@ -13,10 +13,10 @@ def test_empty():
|
||||
"active_host_count": 0,
|
||||
"credential": 0,
|
||||
"custom_inventory_script": 0,
|
||||
"custom_virtualenvs": 0, # dev env ansible3
|
||||
"custom_virtualenvs": 0, # dev env ansible3
|
||||
"host": 0,
|
||||
"inventory": 0,
|
||||
"inventories": {'normal': 0, 'smart': 0},
|
||||
"inventories": {"normal": 0, "smart": 0},
|
||||
"job_template": 0,
|
||||
"notification_template": 0,
|
||||
"organization": 0,
|
||||
@@ -27,28 +27,97 @@ def test_empty():
|
||||
"user": 0,
|
||||
"workflow_job_template": 0,
|
||||
"unified_job": 0,
|
||||
"pending_jobs": 0
|
||||
"pending_jobs": 0,
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_database_counts(organization_factory, job_template_factory,
|
||||
workflow_job_template_factory):
|
||||
objs = organization_factory('org', superusers=['admin'])
|
||||
jt = job_template_factory('test', organization=objs.organization,
|
||||
inventory='test_inv', project='test_project',
|
||||
credential='test_cred')
|
||||
workflow_job_template_factory('test')
|
||||
def test_database_counts(
|
||||
organization_factory, job_template_factory, workflow_job_template_factory
|
||||
):
|
||||
objs = organization_factory("org", superusers=["admin"])
|
||||
jt = job_template_factory(
|
||||
"test",
|
||||
organization=objs.organization,
|
||||
inventory="test_inv",
|
||||
project="test_project",
|
||||
credential="test_cred",
|
||||
)
|
||||
workflow_job_template_factory("test")
|
||||
models.Team(organization=objs.organization).save()
|
||||
models.Host(inventory=jt.inventory).save()
|
||||
models.Schedule(
|
||||
rrule='DTSTART;TZID=America/New_York:20300504T150000',
|
||||
unified_job_template=jt.job_template
|
||||
rrule="DTSTART;TZID=America/New_York:20300504T150000",
|
||||
unified_job_template=jt.job_template,
|
||||
).save()
|
||||
models.CustomInventoryScript(organization=objs.organization).save()
|
||||
|
||||
counts = collectors.counts(None)
|
||||
for key in ('organization', 'team', 'user', 'inventory', 'credential',
|
||||
'project', 'job_template', 'workflow_job_template', 'host',
|
||||
'schedule', 'custom_inventory_script'):
|
||||
for key in (
|
||||
"organization",
|
||||
"team",
|
||||
"user",
|
||||
"inventory",
|
||||
"credential",
|
||||
"project",
|
||||
"job_template",
|
||||
"workflow_job_template",
|
||||
"host",
|
||||
"schedule",
|
||||
"custom_inventory_script",
|
||||
):
|
||||
assert counts[key] == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_counts(organization_factory, inventory_factory):
|
||||
(inv1, inv2, inv3) = [inventory_factory(f"inv-{i}") for i in range(3)]
|
||||
|
||||
s1 = inv1.inventory_sources.create(name="src1", source="ec2")
|
||||
s2 = inv1.inventory_sources.create(name="src2", source="file")
|
||||
s3 = inv1.inventory_sources.create(name="src3", source="gce")
|
||||
|
||||
s1.hosts.create(name="host1", inventory=inv1)
|
||||
s1.hosts.create(name="host2", inventory=inv1)
|
||||
s1.hosts.create(name="host3", inventory=inv1)
|
||||
|
||||
s2.hosts.create(name="host4", inventory=inv1)
|
||||
s2.hosts.create(name="host5", inventory=inv1)
|
||||
|
||||
s3.hosts.create(name="host6", inventory=inv1)
|
||||
|
||||
s1 = inv2.inventory_sources.create(name="src1", source="ec2")
|
||||
|
||||
s1.hosts.create(name="host1", inventory=inv2)
|
||||
s1.hosts.create(name="host2", inventory=inv2)
|
||||
s1.hosts.create(name="host3", inventory=inv2)
|
||||
|
||||
inv_counts = collectors.inventory_counts(None)
|
||||
|
||||
assert {
|
||||
inv1.id: {
|
||||
"name": "inv-0",
|
||||
"kind": "",
|
||||
"hosts": 6,
|
||||
"sources": 3,
|
||||
"source_list": [
|
||||
{"name": "src1", "source": "ec2", "num_hosts": 3},
|
||||
{"name": "src2", "source": "file", "num_hosts": 2},
|
||||
{"name": "src3", "source": "gce", "num_hosts": 1},
|
||||
],
|
||||
},
|
||||
inv2.id: {
|
||||
"name": "inv-1",
|
||||
"kind": "",
|
||||
"hosts": 3,
|
||||
"sources": 1,
|
||||
"source_list": [{"name": "src1", "source": "ec2", "num_hosts": 3}],
|
||||
},
|
||||
inv3.id: {
|
||||
"name": "inv-2",
|
||||
"kind": "",
|
||||
"hosts": 0,
|
||||
"sources": 0,
|
||||
"source_list": [],
|
||||
},
|
||||
} == inv_counts
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.middleware import ActivityStreamMiddleware
|
||||
from awx.main.models.activity_stream import ActivityStream
|
||||
from awx.main.access import ActivityStreamAccess
|
||||
from awx.conf.models import Setting
|
||||
@@ -61,28 +60,6 @@ def test_ctint_activity_stream(monkeypatch, get, user, settings):
|
||||
assert response.data['summary_fields']['setting'][0]['name'] == 'FOO'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_middleware_actor_added(monkeypatch, post, get, user, settings):
|
||||
settings.ACTIVITY_STREAM_ENABLED = True
|
||||
u = user('admin-poster', True)
|
||||
|
||||
url = reverse('api:organization_list')
|
||||
response = post(url,
|
||||
dict(name='test-org', description='test-desc'),
|
||||
u,
|
||||
middleware=ActivityStreamMiddleware())
|
||||
assert response.status_code == 201
|
||||
|
||||
org_id = response.data['id']
|
||||
activity_stream = ActivityStream.objects.filter(organization__pk=org_id).first()
|
||||
|
||||
url = reverse('api:activity_stream_detail', kwargs={'pk': activity_stream.pk})
|
||||
response = get(url, u)
|
||||
|
||||
assert response.status_code == 200
|
||||
assert response.data['summary_fields']['actor']['username'] == 'admin-poster'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_rbac_stream_resource_roles(activity_stream_entry, organization, org_admin, settings):
|
||||
settings.ACTIVITY_STREAM_ENABLED = True
|
||||
|
||||
@@ -972,7 +972,7 @@ def test_field_removal(put, organization, admin, credentialtype_ssh):
|
||||
['insights_inventories', Inventory()],
|
||||
['unifiedjobs', Job()],
|
||||
['unifiedjobtemplates', JobTemplate()],
|
||||
['unifiedjobtemplates', InventorySource()],
|
||||
['unifiedjobtemplates', InventorySource(source='ec2')],
|
||||
['projects', Project()],
|
||||
['workflowjobnodes', WorkflowJobNode()],
|
||||
])
|
||||
|
||||
@@ -599,9 +599,9 @@ class TestControlledBySCM:
|
||||
delete(inv_src.get_absolute_url(), admin_user, expect=204)
|
||||
assert scm_inventory.inventory_sources.count() == 0
|
||||
|
||||
def test_adding_inv_src_ok(self, post, scm_inventory, admin_user):
|
||||
def test_adding_inv_src_ok(self, post, scm_inventory, project, admin_user):
|
||||
post(reverse('api:inventory_inventory_sources_list', kwargs={'pk': scm_inventory.id}),
|
||||
{'name': 'new inv src', 'update_on_project_update': False, 'source': 'scm', 'overwrite_vars': True},
|
||||
{'name': 'new inv src', 'source_project': project.pk, 'update_on_project_update': False, 'source': 'scm', 'overwrite_vars': True},
|
||||
admin_user, expect=201)
|
||||
|
||||
def test_adding_inv_src_prohibited(self, post, scm_inventory, project, admin_user):
|
||||
|
||||
@@ -39,6 +39,26 @@ def test_extra_credentials(get, organization_factory, job_template_factory, cred
|
||||
@pytest.mark.django_db
|
||||
def test_job_relaunch_permission_denied_response(
|
||||
post, get, inventory, project, credential, net_credential, machine_credential):
|
||||
jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True)
|
||||
jt.credentials.add(machine_credential)
|
||||
jt_user = User.objects.create(username='jobtemplateuser')
|
||||
jt.execute_role.members.add(jt_user)
|
||||
with impersonate(jt_user):
|
||||
job = jt.create_unified_job()
|
||||
|
||||
# User capability is shown for this
|
||||
r = get(job.get_absolute_url(), jt_user, expect=200)
|
||||
assert r.data['summary_fields']['user_capabilities']['start']
|
||||
|
||||
# Job has prompted extra_credential, launch denied w/ message
|
||||
job.launch_config.credentials.add(net_credential)
|
||||
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
|
||||
assert 'launched with prompted fields you do not have access to' in r.data['detail']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_relaunch_prompts_not_accepted_response(
|
||||
post, get, inventory, project, credential, net_credential, machine_credential):
|
||||
jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
|
||||
jt.credentials.add(machine_credential)
|
||||
jt_user = User.objects.create(username='jobtemplateuser')
|
||||
@@ -53,8 +73,6 @@ def test_job_relaunch_permission_denied_response(
|
||||
# Job has prompted extra_credential, launch denied w/ message
|
||||
job.launch_config.credentials.add(net_credential)
|
||||
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
|
||||
assert 'launched with prompted fields' in r.data['detail']
|
||||
assert 'do not have permission' in r.data['detail']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -209,7 +227,8 @@ def test_block_related_unprocessed_events(mocker, organization, project, delete,
|
||||
status='finished',
|
||||
finished=time_of_finish,
|
||||
job_template=job_template,
|
||||
project=project
|
||||
project=project,
|
||||
organization=project.organization
|
||||
)
|
||||
view = RelatedJobsPreventDeleteMixin()
|
||||
time_of_request = time_of_finish + relativedelta(seconds=2)
|
||||
|
||||
@@ -6,7 +6,7 @@ import pytest
|
||||
# AWX
|
||||
from awx.api.serializers import JobTemplateSerializer
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate
|
||||
from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate, Organization, Project
|
||||
from awx.main.migrations import _save_password_keys as save_password_keys
|
||||
|
||||
# Django
|
||||
@@ -30,14 +30,19 @@ def test_create(post, project, machine_credential, inventory, alice, grant_proje
|
||||
project.use_role.members.add(alice)
|
||||
if grant_inventory:
|
||||
inventory.use_role.members.add(alice)
|
||||
project.organization.job_template_admin_role.members.add(alice)
|
||||
|
||||
r = post(reverse('api:job_template_list'), {
|
||||
'name': 'Some name',
|
||||
'project': project.id,
|
||||
'inventory': inventory.id,
|
||||
'playbook': 'helloworld.yml',
|
||||
}, alice)
|
||||
assert r.status_code == expect
|
||||
post(
|
||||
url=reverse('api:job_template_list'),
|
||||
data={
|
||||
'name': 'Some name',
|
||||
'project': project.id,
|
||||
'inventory': inventory.id,
|
||||
'playbook': 'helloworld.yml'
|
||||
},
|
||||
user=alice,
|
||||
expect=expect
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -123,14 +128,18 @@ def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, invento
|
||||
project.use_role.members.add(alice)
|
||||
inventory.use_role.members.add(alice)
|
||||
settings.MAX_FORKS = 10
|
||||
response = post(reverse('api:job_template_list'), {
|
||||
'name': 'Some name',
|
||||
'project': project.id,
|
||||
'inventory': inventory.id,
|
||||
'playbook': 'helloworld.yml',
|
||||
'forks': 11,
|
||||
}, alice)
|
||||
assert response.status_code == 400
|
||||
response = post(
|
||||
url=reverse('api:job_template_list'),
|
||||
data={
|
||||
'name': 'Some name',
|
||||
'project': project.id,
|
||||
'inventory': inventory.id,
|
||||
'playbook': 'helloworld.yml',
|
||||
'forks': 11,
|
||||
},
|
||||
user=alice,
|
||||
expect=400
|
||||
)
|
||||
assert 'Maximum number of forks (10) exceeded' in str(response.data)
|
||||
|
||||
|
||||
@@ -510,6 +519,72 @@ def test_job_template_unset_custom_virtualenv(get, patch, organization_factory,
|
||||
assert resp.data['custom_virtualenv'] is None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_jt_organization_follows_project(post, patch, admin_user):
|
||||
org1 = Organization.objects.create(name='foo1')
|
||||
org2 = Organization.objects.create(name='foo2')
|
||||
project_common = dict(scm_type='git', playbook_files=['helloworld.yml'])
|
||||
project1 = Project.objects.create(name='proj1', organization=org1, **project_common)
|
||||
project2 = Project.objects.create(name='proj2', organization=org2, **project_common)
|
||||
r = post(
|
||||
url=reverse('api:job_template_list'),
|
||||
data={
|
||||
"name": "fooo",
|
||||
"ask_inventory_on_launch": True,
|
||||
"project": project1.pk,
|
||||
"playbook": "helloworld.yml"
|
||||
},
|
||||
user=admin_user,
|
||||
expect=201
|
||||
)
|
||||
data = r.data
|
||||
assert data['organization'] == project1.organization_id
|
||||
data['project'] = project2.id
|
||||
jt = JobTemplate.objects.get(pk=data['id'])
|
||||
r = patch(
|
||||
url=jt.get_absolute_url(),
|
||||
data=data,
|
||||
user=admin_user,
|
||||
expect=200
|
||||
)
|
||||
assert r.data['organization'] == project2.organization_id
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_jt_organization_field_is_read_only(patch, post, project, admin_user):
|
||||
org = project.organization
|
||||
jt = JobTemplate.objects.create(
|
||||
name='foo_jt',
|
||||
ask_inventory_on_launch=True,
|
||||
project=project, playbook='helloworld.yml'
|
||||
)
|
||||
org2 = Organization.objects.create(name='foo2')
|
||||
r = patch(
|
||||
url=jt.get_absolute_url(),
|
||||
data={'organization': org2.id},
|
||||
user=admin_user,
|
||||
expect=200
|
||||
)
|
||||
assert r.data['organization'] == org.id
|
||||
assert JobTemplate.objects.get(pk=jt.pk).organization == org
|
||||
|
||||
# similar test, but on creation
|
||||
r = post(
|
||||
url=reverse('api:job_template_list'),
|
||||
data={
|
||||
'name': 'foobar',
|
||||
'project': project.id,
|
||||
'organization': org2.id,
|
||||
'ask_inventory_on_launch': True,
|
||||
'playbook': 'helloworld.yml'
|
||||
},
|
||||
user=admin_user,
|
||||
expect=201
|
||||
)
|
||||
assert r.data['organization'] == org.id
|
||||
assert JobTemplate.objects.get(pk=r.data['id']).organization == org
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_callback_disallowed_null_inventory(project):
|
||||
jt = JobTemplate.objects.create(
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import pytest
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
from django.db import connection
|
||||
from django.test.utils import override_settings
|
||||
@@ -326,6 +328,38 @@ def test_refresh_accesstoken(oauth_application, post, get, delete, admin):
|
||||
assert original_refresh_token.revoked # is not None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_refresh_token_expiration_is_respected(oauth_application, post, get, delete, admin):
|
||||
response = post(
|
||||
reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
|
||||
{'scope': 'read'}, admin, expect=201
|
||||
)
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
|
||||
refresh_url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
|
||||
short_lived = {
|
||||
'ACCESS_TOKEN_EXPIRE_SECONDS': 1,
|
||||
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 1,
|
||||
'REFRESH_TOKEN_EXPIRE_SECONDS': 1
|
||||
}
|
||||
time.sleep(1)
|
||||
with override_settings(OAUTH2_PROVIDER=short_lived):
|
||||
response = post(
|
||||
refresh_url,
|
||||
data='grant_type=refresh_token&refresh_token=' + refresh_token.token,
|
||||
content_type='application/x-www-form-urlencoded',
|
||||
HTTP_AUTHORIZATION='Basic ' + smart_str(base64.b64encode(smart_bytes(':'.join([
|
||||
oauth_application.client_id, oauth_application.client_secret
|
||||
]))))
|
||||
)
|
||||
assert response.status_code == 403
|
||||
assert b'The refresh token has expired.' in response.content
|
||||
assert RefreshToken.objects.filter(token=refresh_token).exists()
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_revoke_access_then_refreshtoken(oauth_application, post, get, delete, admin):
|
||||
|
||||
@@ -2,6 +2,8 @@ import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
from awx.main.models import Project
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def organization_resource_creator(organization, user):
|
||||
@@ -19,21 +21,26 @@ def organization_resource_creator(organization, user):
|
||||
for i in range(inventories):
|
||||
inventory = organization.inventories.create(name="associated-inv %s" % i)
|
||||
for i in range(projects):
|
||||
organization.projects.create(name="test-proj %s" % i,
|
||||
description="test-proj-desc")
|
||||
Project.objects.create(
|
||||
name="test-proj %s" % i,
|
||||
description="test-proj-desc",
|
||||
organization=organization
|
||||
)
|
||||
# Mix up the inventories and projects used by the job templates
|
||||
i_proj = 0
|
||||
i_inv = 0
|
||||
for i in range(job_templates):
|
||||
project = organization.projects.all()[i_proj]
|
||||
project = Project.objects.filter(organization=organization)[i_proj]
|
||||
# project = organization.projects.all()[i_proj]
|
||||
inventory = organization.inventories.all()[i_inv]
|
||||
project.jobtemplates.create(name="test-jt %s" % i,
|
||||
description="test-job-template-desc",
|
||||
inventory=inventory,
|
||||
playbook="test_playbook.yml")
|
||||
playbook="test_playbook.yml",
|
||||
organization=organization)
|
||||
i_proj += 1
|
||||
i_inv += 1
|
||||
if i_proj >= organization.projects.count():
|
||||
if i_proj >= Project.objects.filter(organization=organization).count():
|
||||
i_proj = 0
|
||||
if i_inv >= organization.inventories.count():
|
||||
i_inv = 0
|
||||
@@ -179,12 +186,14 @@ def test_scan_JT_counted(resourced_organization, user, get):
|
||||
@pytest.mark.django_db
|
||||
def test_JT_not_double_counted(resourced_organization, user, get):
|
||||
admin_user = user('admin', True)
|
||||
proj = Project.objects.filter(organization=resourced_organization).all()[0]
|
||||
# Add a run job template to the org
|
||||
resourced_organization.projects.all()[0].jobtemplates.create(
|
||||
proj.jobtemplates.create(
|
||||
job_type='run',
|
||||
inventory=resourced_organization.inventories.all()[0],
|
||||
project=resourced_organization.projects.all()[0],
|
||||
name='double-linked-job-template')
|
||||
project=proj,
|
||||
name='double-linked-job-template',
|
||||
organization=resourced_organization)
|
||||
counts_dict = COUNTS_PRIMES
|
||||
counts_dict['job_templates'] += 1
|
||||
|
||||
@@ -197,38 +206,3 @@ def test_JT_not_double_counted(resourced_organization, user, get):
|
||||
detail_response = get(reverse('api:organization_detail', kwargs={'pk': resourced_organization.pk}), admin_user)
|
||||
assert detail_response.status_code == 200
|
||||
assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_JT_associated_with_project(organizations, project, user, get):
|
||||
# Check that adding a project to an organization gets the project's JT
|
||||
# included in the organization's JT count
|
||||
external_admin = user('admin', True)
|
||||
two_orgs = organizations(2)
|
||||
organization = two_orgs[0]
|
||||
other_org = two_orgs[1]
|
||||
|
||||
unrelated_inv = other_org.inventories.create(name='not-in-organization')
|
||||
organization.projects.add(project)
|
||||
project.jobtemplates.create(name="test-jt",
|
||||
description="test-job-template-desc",
|
||||
inventory=unrelated_inv,
|
||||
playbook="test_playbook.yml")
|
||||
|
||||
response = get(reverse('api:organization_list'), external_admin)
|
||||
assert response.status_code == 200
|
||||
|
||||
org_id = organization.id
|
||||
counts = {}
|
||||
for org_json in response.data['results']:
|
||||
working_id = org_json['id']
|
||||
counts[working_id] = org_json['summary_fields']['related_field_counts']
|
||||
|
||||
assert counts[org_id] == {
|
||||
'users': 0,
|
||||
'admins': 0,
|
||||
'job_templates': 1,
|
||||
'projects': 1,
|
||||
'inventories': 0,
|
||||
'teams': 0
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import datetime
|
||||
import pytest
|
||||
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import JobTemplate, Schedule
|
||||
@@ -140,7 +142,6 @@ def test_encrypted_survey_answer(post, patch, admin_user, project, inventory, su
|
||||
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
|
||||
("DTSTART;TZID=America/New_York:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1", "rrule parsing failed validation"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
("DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1", "more than 1000 events are not allowed"), # noqa
|
||||
])
|
||||
def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
job_template = JobTemplate.objects.create(
|
||||
@@ -342,6 +343,40 @@ def test_months_with_31_days(post, admin_user):
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.timeout(3)
|
||||
@pytest.mark.parametrize('freq, delta, total_seconds', (
|
||||
('MINUTELY', 1, 60),
|
||||
('MINUTELY', 15, 15 * 60),
|
||||
('HOURLY', 1, 3600),
|
||||
('HOURLY', 4, 3600 * 4),
|
||||
))
|
||||
def test_really_old_dtstart(post, admin_user, freq, delta, total_seconds):
|
||||
url = reverse('api:schedule_rrule')
|
||||
# every <interval>, at the :30 second mark
|
||||
rrule = f'DTSTART;TZID=America/New_York:20051231T000030 RRULE:FREQ={freq};INTERVAL={delta}'
|
||||
start = now()
|
||||
next_ten = post(url, {'rrule': rrule}, admin_user, expect=200).data['utc']
|
||||
|
||||
assert len(next_ten) == 10
|
||||
|
||||
# the first date is *in the future*
|
||||
assert next_ten[0] >= start
|
||||
|
||||
# ...but *no more than* <interval> into the future
|
||||
assert now() + datetime.timedelta(**{
|
||||
'minutes' if freq == 'MINUTELY' else 'hours': delta
|
||||
})
|
||||
|
||||
# every date in the list is <interval> greater than the last
|
||||
for i, x in enumerate(next_ten):
|
||||
if i == 0:
|
||||
continue
|
||||
assert x.second == 30
|
||||
delta = (x - next_ten[i - 1])
|
||||
assert delta.total_seconds() == total_seconds
|
||||
|
||||
|
||||
def test_dst_rollback_duplicates(post, admin_user):
|
||||
# From Nov 2 -> Nov 3, 2030, daylight savings ends and we "roll back" an hour.
|
||||
# Make sure we don't "double count" duplicate times in the "rolled back"
|
||||
@@ -365,3 +400,77 @@ def test_zoneinfo(get, admin_user):
|
||||
url = reverse('api:schedule_zoneinfo')
|
||||
r = get(url, admin_user, expect=200)
|
||||
assert {'name': 'America/New_York'} in r.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_user_can_create_jt_schedule(options, post, project, inventory, alice):
|
||||
jt = JobTemplate.objects.create(
|
||||
name='test-jt',
|
||||
project=project,
|
||||
playbook='helloworld.yml',
|
||||
inventory=inventory
|
||||
)
|
||||
jt.save()
|
||||
url = reverse('api:schedule_list')
|
||||
|
||||
# can't create a schedule on the JT because we don't have execute rights
|
||||
params = {
|
||||
'name': 'My Example Schedule',
|
||||
'rrule': RRULE_EXAMPLE,
|
||||
'unified_job_template': jt.id,
|
||||
}
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# now we can, because we're allowed to execute the JT
|
||||
jt.execute_role.members.add(alice)
|
||||
assert 'POST' in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_user_can_create_project_schedule(options, post, project, alice):
|
||||
url = reverse('api:schedule_list')
|
||||
|
||||
# can't create a schedule on the project because we don't have update rights
|
||||
params = {
|
||||
'name': 'My Example Schedule',
|
||||
'rrule': RRULE_EXAMPLE,
|
||||
'unified_job_template': project.id,
|
||||
}
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# use role does *not* grant the ability to schedule
|
||||
project.use_role.members.add(alice)
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# now we can, because we're allowed to update project
|
||||
project.update_role.members.add(alice)
|
||||
assert 'POST' in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_normal_user_can_create_inventory_update_schedule(options, post, inventory_source, alice):
|
||||
url = reverse('api:schedule_list')
|
||||
|
||||
# can't create a schedule on the project because we don't have update rights
|
||||
params = {
|
||||
'name': 'My Example Schedule',
|
||||
'rrule': RRULE_EXAMPLE,
|
||||
'unified_job_template': inventory_source.id,
|
||||
}
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# use role does *not* grant the ability to schedule
|
||||
inventory_source.inventory.use_role.members.add(alice)
|
||||
assert 'POST' not in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=403)
|
||||
|
||||
# now we can, because we're allowed to update project
|
||||
inventory_source.inventory.update_role.members.add(alice)
|
||||
assert 'POST' in options(url, user=alice).data['actions'].keys()
|
||||
post(url, params, alice, expect=201)
|
||||
|
||||
@@ -5,18 +5,13 @@
|
||||
# Python
|
||||
import pytest
|
||||
import os
|
||||
import time
|
||||
|
||||
from django.conf import settings
|
||||
from kombu.utils.url import parse_url
|
||||
|
||||
# Mock
|
||||
from unittest import mock
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.conf.models import Setting
|
||||
from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException
|
||||
from awx.conf.registry import settings_registry
|
||||
|
||||
|
||||
TEST_GIF_LOGO = 'data:image/gif;base64,R0lGODlhIQAjAPIAAP//////AP8AAMzMAJmZADNmAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh+QQJCgAHACwAAAAAIQAjAAADo3i63P4wykmrvTjrzZsxXfR94WMQBFh6RECuixHMLyzPQ13ewZCvow9OpzEAjIBj79cJJmU+FceIVEZ3QRozxBttmyOBwPBtisdX4Bha3oxmS+llFIPHQXQKkiSEXz9PeklHBzx3hYNyEHt4fmmAhHp8Nz45KgV5FgWFOFEGmwWbGqEfniChohmoQZ+oqRiZDZhEgk81I4mwg4EKVbxzrDHBEAkAIfkECQoABwAsAAAAACEAIwAAA6V4utz+MMpJq724GpP15p1kEAQYQmOwnWjgrmxjuMEAx8rsDjZ+fJvdLWQAFAHGWo8FRM54JqIRmYTigDrDMqZTbbbMj0CgjTLHZKvPQH6CTx+a2vKR0XbbOsoZ7SphG057gjl+c0dGgzeGNiaBiSgbBQUHBV08NpOVlkMSk0FKjZuURHiiOJxQnSGfQJuoEKREejK0dFRGjoiQt7iOuLx0rgxYEQkAIfkECQoABwAsAAAAACEAIwAAA7h4utxnxslJDSGR6nrz/owxYB64QUEwlGaVqlB7vrAJscsd3Lhy+wBArGEICo3DUFH4QDqK0GMy51xOgcGlEAfJ+iAFie62chR+jYKaSAuQGOqwJp7jGQRDuol+F/jxZWsyCmoQfwYwgoM5Oyg1i2w0A2WQIW2TPYOIkleQmy+UlYygoaIPnJmapKmqKiusMmSdpjxypnALtrcHioq3ury7hGm3dnVosVpMWFmwREZbddDOSsjVswcJACH5BAkKAAcALAAAAAAhACMAAAOxeLrc/jDKSZUxNS9DCNYV54HURQwfGRlDEFwqdLVuGjOsW9/Odb0wnsUAKBKNwsMFQGwyNUHckVl8bqI4o43lA26PNkv1S9DtNuOeVirw+aTI3qWAQwnud1vhLSnQLS0GeFF+GoVKNF0fh4Z+LDQ6Bn5/MTNmL0mAl2E3j2aclTmRmYCQoKEDiaRDKFhJez6UmbKyQowHtzy1uEl8DLCnEktrQ2PBD1NxSlXKIW5hz6cJACH5BAkKAAcALAAAAAAhACMAAAOkeLrc/jDKSau9OOvNlTFd9H3hYxAEWDJfkK5LGwTq+g0zDR/GgM+10A04Cm56OANgqTRmkDTmSOiLMgFOTM9AnFJHuexzYBAIijZf2SweJ8ttbbXLmd5+wBiJosSCoGF/fXEeS1g8gHl9hxODKkh4gkwVIwUekESIhA4FlgV3PyCWG52WI2oGnR2lnUWpqhqVEF4Xi7QjhpsshpOFvLosrnpoEAkAIfkECQoABwAsAAAAACEAIwAAA6l4utz+MMpJq71YGpPr3t1kEAQXQltQnk8aBCa7bMMLy4wx1G8s072PL6SrGQDI4zBThCU/v50zCVhidIYgNPqxWZkDg0AgxB2K4vEXbBSvr1JtZ3uOext0x7FqovF6OXtfe1UzdjAxhINPM013ChtJER8FBQeVRX8GlpggFZWWfjwblTiigGZnfqRmpUKbljKxDrNMeY2eF4R8jUiSur6/Z8GFV2WBtwwJACH5BAkKAAcALAAAAAAhACMAAAO6eLrcZi3KyQwhkGpq8f6ONWQgaAxB8JTfg6YkO50pzD5xhaurhCsGAKCnEw6NucNDCAkyI8ugdAhFKpnJJdMaeiofBejowUseCr9GYa0j1GyMdVgjBxoEuPSZXWKf7gKBeHtzMms0gHgGfDIVLztmjScvNZEyk28qjT40b5aXlHCbDgOhnzedoqOOlKeopaqrCy56sgtotbYKhYW6e7e9tsHBssO6eSTIm1peV0iuFUZDyU7NJnmcuQsJACH5BAkKAAcALAAAAAAhACMAAAOteLrc/jDKSZsxNS9DCNYV54Hh4H0kdAXBgKaOwbYX/Miza1vrVe8KA
2AoJL5gwiQgeZz4GMXlcHl8xozQ3kW3KTajL9zsBJ1+sV2fQfALem+XAlRApxu4ioI1UpC76zJ4fRqDBzI+LFyFhH1iiS59fkgziW07jjRAG5QDeECOLk2Tj6KjnZafW6hAej6Smgevr6yysza2tiCuMasUF2Yov2gZUUQbU8YaaqjLpQkAOw==' # NOQA
|
||||
@@ -238,73 +233,95 @@ def test_ui_settings(get, put, patch, delete, admin):
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_requires_superuser(get, post, alice):
|
||||
def test_logging_aggregator_connection_test_requires_superuser(post, alice):
|
||||
url = reverse('api:setting_logging_test')
|
||||
post(url, {}, user=alice, expect=403)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('key', [
|
||||
'LOG_AGGREGATOR_TYPE',
|
||||
'LOG_AGGREGATOR_HOST',
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregator_connection_test_not_enabled(post, admin):
|
||||
url = reverse('api:setting_logging_test')
|
||||
resp = post(url, {}, user=admin, expect=409)
|
||||
assert 'Logging not enabled' in resp.data.get('error')
|
||||
|
||||
|
||||
def _mock_logging_defaults():
|
||||
# Pre-populate settings obj with defaults
|
||||
class MockSettings:
|
||||
pass
|
||||
mock_settings_obj = MockSettings()
|
||||
mock_settings_json = dict()
|
||||
for key in settings_registry.get_registered_settings(category_slug='logging'):
|
||||
value = settings_registry.get_setting_field(key).get_default()
|
||||
setattr(mock_settings_obj, key, value)
|
||||
mock_settings_json[key] = value
|
||||
setattr(mock_settings_obj, 'MAX_EVENT_RES_DATA', 700000)
|
||||
return mock_settings_obj, mock_settings_json
|
||||
|
||||
|
||||
|
||||
@pytest.mark.parametrize('key, value, error', [
|
||||
['LOG_AGGREGATOR_TYPE', 'logstash', 'Cannot enable log aggregator without providing host.'],
|
||||
['LOG_AGGREGATOR_HOST', 'https://logstash', 'Cannot enable log aggregator without providing type.']
|
||||
])
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_bad_request(get, post, admin, key):
|
||||
url = reverse('api:setting_logging_test')
|
||||
resp = post(url, {}, user=admin, expect=400)
|
||||
assert 'This field is required.' in resp.data.get(key, [])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_valid(mocker, get, post, admin):
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
url = reverse('api:setting_logging_test')
|
||||
user_data = {
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_HOST': 'localhost',
|
||||
'LOG_AGGREGATOR_PORT': 8080,
|
||||
'LOG_AGGREGATOR_USERNAME': 'logger',
|
||||
'LOG_AGGREGATOR_PASSWORD': 'mcstash'
|
||||
}
|
||||
post(url, user_data, user=admin, expect=200)
|
||||
args, kwargs = perform_test.call_args_list[0]
|
||||
create_settings = kwargs['custom_settings']
|
||||
for k, v in user_data.items():
|
||||
assert hasattr(create_settings, k)
|
||||
assert getattr(create_settings, k) == v
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_with_masked_password(mocker, patch, post, admin):
|
||||
def test_logging_aggregator_missing_settings(put, post, admin, key, value, error):
|
||||
_, mock_settings = _mock_logging_defaults()
|
||||
mock_settings['LOG_AGGREGATOR_ENABLED'] = True
|
||||
mock_settings[key] = value
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
patch(url, user=admin, data={'LOG_AGGREGATOR_PASSWORD': 'password123'}, expect=200)
|
||||
time.sleep(1) # log settings are cached slightly
|
||||
response = put(url, data=mock_settings, user=admin, expect=400)
|
||||
assert error in str(response.data)
|
||||
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
url = reverse('api:setting_logging_test')
|
||||
user_data = {
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_HOST': 'localhost',
|
||||
'LOG_AGGREGATOR_PORT': 8080,
|
||||
'LOG_AGGREGATOR_USERNAME': 'logger',
|
||||
'LOG_AGGREGATOR_PASSWORD': '$encrypted$'
|
||||
}
|
||||
post(url, user_data, user=admin, expect=200)
|
||||
args, kwargs = perform_test.call_args_list[0]
|
||||
create_settings = kwargs['custom_settings']
|
||||
assert getattr(create_settings, 'LOG_AGGREGATOR_PASSWORD') == 'password123'
|
||||
|
||||
@pytest.mark.parametrize('type, host, port, username, password', [
|
||||
['logstash', 'localhost', 8080, 'logger', 'mcstash'],
|
||||
['loggly', 'http://logs-01.loggly.com/inputs/1fd38090-hash-h4a$h-8d80-t0k3n71/tag/http/', None, None, None],
|
||||
['splunk', 'https://yoursplunk:8088/services/collector/event', None, None, None],
|
||||
['other', '97.221.40.41', 9000, 'logger', 'mcstash'],
|
||||
['sumologic', 'https://endpoint5.collection.us2.sumologic.com/receiver/v1/http/Zagnw_f9XGr_zZgd-_EPM0hb8_rUU7_RU8Q==',
|
||||
None, None, None]
|
||||
])
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregator_valid_settings(put, post, admin, type, host, port, username, password):
|
||||
_, mock_settings = _mock_logging_defaults()
|
||||
# type = 'splunk'
|
||||
# host = 'https://yoursplunk:8088/services/collector/event'
|
||||
mock_settings['LOG_AGGREGATOR_ENABLED'] = True
|
||||
mock_settings['LOG_AGGREGATOR_TYPE'] = type
|
||||
mock_settings['LOG_AGGREGATOR_HOST'] = host
|
||||
if port:
|
||||
mock_settings['LOG_AGGREGATOR_PORT'] = port
|
||||
if username:
|
||||
mock_settings['LOG_AGGREGATOR_USERNAME'] = username
|
||||
if password:
|
||||
mock_settings['LOG_AGGREGATOR_PASSWORD'] = password
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
response = put(url, data=mock_settings, user=admin, expect=200)
|
||||
assert type in response.data.get('LOG_AGGREGATOR_TYPE')
|
||||
assert host in response.data.get('LOG_AGGREGATOR_HOST')
|
||||
if port:
|
||||
assert port == response.data.get('LOG_AGGREGATOR_PORT')
|
||||
if username:
|
||||
assert username in response.data.get('LOG_AGGREGATOR_USERNAME')
|
||||
if password: # Note: password should be encrypted
|
||||
assert '$encrypted$' in response.data.get('LOG_AGGREGATOR_PASSWORD')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_invalid(mocker, get, post, admin):
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
perform_test.side_effect = LoggingConnectivityException('404: Not Found')
|
||||
url = reverse('api:setting_logging_test')
|
||||
resp = post(url, {
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_HOST': 'localhost',
|
||||
'LOG_AGGREGATOR_PORT': 8080
|
||||
}, user=admin, expect=500)
|
||||
assert resp.data == {'error': '404: Not Found'}
|
||||
def test_logging_aggregator_connection_test_valid(put, post, admin):
|
||||
_, mock_settings = _mock_logging_defaults()
|
||||
type = 'other'
|
||||
host = 'https://localhost'
|
||||
mock_settings['LOG_AGGREGATOR_ENABLED'] = True
|
||||
mock_settings['LOG_AGGREGATOR_TYPE'] = type
|
||||
mock_settings['LOG_AGGREGATOR_HOST'] = host
|
||||
# POST to save these mock settings
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
put(url, data=mock_settings, user=admin, expect=200)
|
||||
# "Test" the logger
|
||||
url = reverse('api:setting_logging_test')
|
||||
post(url, {}, user=admin, expect=202)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -386,15 +403,3 @@ def test_saml_x509cert_validation(patch, get, admin, headers):
|
||||
}
|
||||
})
|
||||
assert resp.status_code == 200
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_broker_url_with_special_characters():
|
||||
settings.BROKER_URL = 'amqp://guest:a@ns:ibl3#@rabbitmq:5672//'
|
||||
url = parse_url(settings.BROKER_URL)
|
||||
assert url['transport'] == 'amqp'
|
||||
assert url['hostname'] == 'rabbitmq'
|
||||
assert url['port'] == 5672
|
||||
assert url['userid'] == 'guest'
|
||||
assert url['password'] == 'a@ns:ibl3#'
|
||||
assert url['virtual_host'] == '/'
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main import models
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -9,3 +10,76 @@ def test_aliased_forward_reverse_field_searches(instance, options, get, admin):
|
||||
response = options(url, None, admin)
|
||||
assert 'job_template__search' in response.data['related_search_fields']
|
||||
get(reverse("api:unified_job_template_list") + "?job_template__search=anything", user=admin, expect=200)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('model', (
|
||||
'Project',
|
||||
'JobTemplate',
|
||||
'WorkflowJobTemplate'
|
||||
))
|
||||
class TestUnifiedOrganization:
|
||||
|
||||
def data_for_model(self, model, orm_style=False):
|
||||
data = {
|
||||
'name': 'foo',
|
||||
'organization': None
|
||||
}
|
||||
if model == 'JobTemplate':
|
||||
proj = models.Project.objects.create(
|
||||
name="test-proj",
|
||||
playbook_files=['helloworld.yml']
|
||||
)
|
||||
if orm_style:
|
||||
data['project_id'] = proj.id
|
||||
else:
|
||||
data['project'] = proj.id
|
||||
data['playbook'] = 'helloworld.yml'
|
||||
data['ask_inventory_on_launch'] = True
|
||||
return data
|
||||
|
||||
def test_organization_blank_on_edit_of_orphan(self, model, admin_user, patch):
|
||||
cls = getattr(models, model)
|
||||
data = self.data_for_model(model, orm_style=True)
|
||||
obj = cls.objects.create(**data)
|
||||
patch(
|
||||
url=obj.get_absolute_url(),
|
||||
data={'name': 'foooooo'},
|
||||
user=admin_user,
|
||||
expect=200
|
||||
)
|
||||
obj.refresh_from_db()
|
||||
assert obj.name == 'foooooo'
|
||||
|
||||
def test_organization_blank_on_edit_of_orphan_as_nonsuperuser(self, model, rando, patch):
|
||||
"""Test case reflects historical bug where ordinary users got weird error
|
||||
message when editing an orphaned project
|
||||
"""
|
||||
cls = getattr(models, model)
|
||||
data = self.data_for_model(model, orm_style=True)
|
||||
obj = cls.objects.create(**data)
|
||||
if model == 'JobTemplate':
|
||||
obj.project.admin_role.members.add(rando)
|
||||
obj.admin_role.members.add(rando)
|
||||
patch(
|
||||
url=obj.get_absolute_url(),
|
||||
data={'name': 'foooooo'},
|
||||
user=rando,
|
||||
expect=200
|
||||
)
|
||||
obj.refresh_from_db()
|
||||
assert obj.name == 'foooooo'
|
||||
|
||||
def test_organization_blank_on_edit_of_normal(self, model, admin_user, patch, organization):
|
||||
cls = getattr(models, model)
|
||||
data = self.data_for_model(model, orm_style=True)
|
||||
data['organization'] = organization
|
||||
obj = cls.objects.create(**data)
|
||||
patch(
|
||||
url=obj.get_absolute_url(),
|
||||
data={'name': 'foooooo'},
|
||||
user=admin_user,
|
||||
expect=200
|
||||
)
|
||||
obj.refresh_from_db()
|
||||
assert obj.name == 'foooooo'
|
||||
|
||||
@@ -23,9 +23,9 @@ def _mk_project_update():
|
||||
|
||||
|
||||
def _mk_inventory_update():
|
||||
source = InventorySource()
|
||||
source = InventorySource(source='ec2')
|
||||
source.save()
|
||||
iu = InventoryUpdate(inventory_source=source)
|
||||
iu = InventoryUpdate(inventory_source=source, source='e2')
|
||||
return iu
|
||||
|
||||
|
||||
|
||||
@@ -123,7 +123,11 @@ def test_delete_project_update_in_active_state(project, delete, admin, status):
|
||||
@pytest.mark.parametrize("status", list(TEST_STATES))
|
||||
@pytest.mark.django_db
|
||||
def test_delete_inventory_update_in_active_state(inventory_source, delete, admin, status):
|
||||
i = InventoryUpdate.objects.create(inventory_source=inventory_source, status=status)
|
||||
i = InventoryUpdate.objects.create(
|
||||
inventory_source=inventory_source,
|
||||
status=status,
|
||||
source=inventory_source.source
|
||||
)
|
||||
url = reverse('api:inventory_update_detail', kwargs={'pk': i.pk})
|
||||
delete(url, None, admin, expect=403)
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user