Mirror of https://github.com/ansible/awx.git, synced 2026-02-05 11:34:43 -03:30

Compare commits: 697 commits

@@ -1 +1,3 @@
|
||||
awx/ui/node_modules
|
||||
awx/ui_next/node_modules
|
||||
Dockerfile
|
||||
|
||||
.env (2 changes)
@@ -1,3 +1,3 @@
|
||||
PYTHONUNBUFFERED=true
|
||||
SELENIUM_DOCKER_TAG=latest
|
||||
|
||||
COMPOSE_PROJECT_NAME=tools
|
||||
|
||||
.github/dependabot.yml (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
---
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/requirements"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
.gitignore (vendored, 13 changes)
@@ -29,14 +29,18 @@ awx/ui/client/languages
|
||||
awx/ui/templates/ui/index.html
|
||||
awx/ui/templates/ui/installing.html
|
||||
awx/ui_next/node_modules/
|
||||
awx/ui_next/src/locales/
|
||||
awx/ui_next/src/locales/*/messages.js
|
||||
awx/ui_next/coverage/
|
||||
awx/ui_next/build
|
||||
awx/ui_next/.env.local
|
||||
awx/ui_next/instrumented
|
||||
rsyslog.pid
|
||||
tools/prometheus/data
|
||||
tools/docker-compose/ansible/awx_dump.sql
|
||||
tools/docker-compose/Dockerfile
|
||||
tools/docker-compose/_build
|
||||
tools/docker-compose/_sources
|
||||
tools/docker-compose/overrides/
|
||||
|
||||
# Tower setup playbook testing
|
||||
setup/test/roles/postgresql
|
||||
@@ -87,6 +91,9 @@ awx/awx_test.sqlite3-journal
|
||||
# Mac OS X
|
||||
*.DS_Store
|
||||
|
||||
# VSCode
|
||||
.vscode/
|
||||
|
||||
# Editors
|
||||
*.sw[poj]
|
||||
*~
|
||||
@@ -146,6 +153,8 @@ use_dev_supervisor.txt
|
||||
.idea/*
|
||||
*.unison.tmp
|
||||
*.#
|
||||
/tools/docker-compose/overrides/
|
||||
/awx/ui_next/.ui-built
|
||||
/Dockerfile
|
||||
/_build/
|
||||
/_build_kube_dev/
|
||||
/Dockerfile.kube-dev
|
||||
|
||||
CHANGELOG.md (52 changes)
@@ -2,6 +2,58 @@
|
||||
|
||||
This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.
|
||||
|
||||
# 18.0.0 (March 23, 2021)
|
||||
|
||||
**IMPORTANT INSTALL AND UPGRADE NOTES**
|
||||
|
||||
Starting in version 18.0, the [AWX Operator](https://github.com/ansible/awx-operator) is the preferred way to install AWX: https://github.com/ansible/awx/blob/devel/INSTALL.md#installing-awx
|
||||
|
||||
If you have a pre-existing installation of AWX that utilizes the Docker-based installation method, this install method has **notably changed** from 17.x to 18.x. For details, please see:
|
||||
|
||||
- https://groups.google.com/g/awx-project/c/47MjWSUQaOc/m/bCjSDn0eBQAJ
|
||||
- https://github.com/ansible/awx/blob/devel/tools/docker-compose
|
||||
- https://github.com/ansible/awx/blob/devel/tools/docker-compose/docs/data_migration.md
|
||||
|
||||
### Introducing Execution Environments
|
||||
|
||||
After a herculean effort from a number of contributors, we're excited to announce that AWX 18.0.0 introduces a new concept called Execution Environments.
|
||||
|
||||
Execution Environments are container images which consist of everything necessary to run a playbook within AWX, and which drive the entire management and lifecycle of playbook execution runtime in AWX: https://github.com/ansible/awx/issues/5157. This means that going forward, AWX no longer utilizes the [bubblewrap](https://github.com/containers/bubblewrap) project for playbook isolation, but instead utilizes a container per playbook run.
|
||||
|
||||
Much like custom virtualenvs, custom Execution Environments can be crafted to specify additional Python or system-level dependencies. Ansible Builder outputs images you can upload to your registry which can *then* be defined in AWX and utilized for playbook runs.
|
||||
|
||||
To learn more about Ansible Builder and Execution Environments, see: https://www.ansible.com/blog/introduction-to-ansible-builder
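A rough sketch of that workflow (the registry and image tag below are placeholders; `execution-environment.yml` is Ansible Builder's default definition filename):

```bash
# Install Ansible Builder, build an image from an execution-environment.yml
# definition that lists extra collection/Python/system dependencies, then
# push it to a registry that AWX can reach.
$ pip3 install ansible-builder
$ ansible-builder build --tag registry.example.com/custom-ee:latest
$ docker push registry.example.com/custom-ee:latest
```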
|
||||
|
||||
### Other Notable Changes
|
||||
|
||||
- Removed `installer` directory.
|
||||
- The Kubernetes installer has been removed in favor of [AWX Operator](https://github.com/ansible/awx-operator).
|
||||
- The "Local Docker" install method has been removed in favor of the development environment. Details can be found at: https://github.com/ansible/awx/blob/devel/tools/docker-compose/README.md
|
||||
- Removal of custom virtual environments https://github.com/ansible/awx/pull/9498
|
||||
- Custom virtual environments have been replaced by Execution Environments https://github.com/ansible/awx/pull/9570
|
||||
- The default Container Group Pod definition has changed. All custom Pod specs have been reset. https://github.com/ansible/awx/commit/05ef51f710dad8f8036bc5acee4097db4adc0d71
|
||||
- Added user interface for the activity stream: https://github.com/ansible/awx/pull/9083
|
||||
- Converted many of the top-level list views (Jobs, Teams, Hosts, Inventories, Projects, and more) to a new, permanent table component for substantially increased responsiveness, usability, maintainability, and other 'ility's: https://github.com/ansible/awx/pull/8970, https://github.com/ansible/awx/pull/9182 and many others!
|
||||
- Added click-to-expand details for job tables
|
||||
- Added search filtering to job output https://github.com/ansible/awx/pull/9208
|
||||
- Added the new migration, update, and "installation in progress" page https://github.com/ansible/awx/pull/9123
|
||||
- Added the user interface for job settings https://github.com/ansible/awx/pull/8661
|
||||
- Runtime errors from jobs are now displayed, along with an explanation for what went wrong, on the output page https://github.com/ansible/awx/pull/8726
|
||||
- You can now cancel a running job from its output and details panel https://github.com/ansible/awx/pull/9199
|
||||
- Fixed a bug where launch prompt inputs were unexpectedly deposited in the url: https://github.com/ansible/awx/pull/9231
|
||||
- Playbook, credential type, and inventory file inputs now support type-ahead and manual type-in! https://github.com/ansible/awx/pull/9120
|
||||
- Added ability to relaunch against failed hosts: https://github.com/ansible/awx/pull/9225
|
||||
- Added pending workflow approval count to the application header https://github.com/ansible/awx/pull/9334
|
||||
- Added user interface for management jobs: https://github.com/ansible/awx/pull/9224
|
||||
- Added toast message to show notification template test result to notification templates list https://github.com/ansible/awx/pull/9318
|
||||
- Replaced CodeMirror with AceEditor for editing template variables and notification templates https://github.com/ansible/awx/pull/9281
|
||||
- Added support for filtering and pagination on job output https://github.com/ansible/awx/pull/9208
|
||||
- Added support for html in custom login text https://github.com/ansible/awx/pull/9519
|
||||
|
||||
# 17.1.0 (March 9, 2021)
|
||||
- Addressed a security issue in AWX (CVE-2021-20253)
|
||||
- Fixed a permissions error related to redis in K8s-based deployments: https://github.com/ansible/awx/issues/9401
|
||||
|
||||
# 17.0.1 (January 26, 2021)
|
||||
- Fixed pgdocker directory permissions issue with Local Docker installer: https://github.com/ansible/awx/pull/9152
|
||||
- Fixed a bug in the UI which caused toggle settings to not be changed when clicked: https://github.com/ansible/awx/pull/9093
|
||||
|
||||
CONTRIBUTING.md (224 changes)
@@ -11,24 +11,15 @@ Have questions about this document or anything not covered here? Come chat with
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Docker](#docker)
|
||||
* [Docker compose](#docker-compose)
|
||||
* [Node and npm](#node-and-npm)
|
||||
* [Build the environment](#build-the-environment)
|
||||
* [Frontend Development](#frontend-development)
|
||||
* [Build and Run the Development Environment](#build-and-run-the-development-environment)
|
||||
* [Fork and clone the AWX repo](#fork-and-clone-the-awx-repo)
|
||||
* [Create local settings](#create-local-settings)
|
||||
* [Build the base image](#build-the-base-image)
|
||||
* [Build the user interface](#build-the-user-interface)
|
||||
* [Running the environment](#running-the-environment)
|
||||
* [Start the containers](#start-the-containers)
|
||||
* [Start from the container shell](#start-from-the-container-shell)
|
||||
* [Post Build Steps](#post-build-steps)
|
||||
* [Start a shell](#start-a-shell)
|
||||
* [Create a superuser](#create-a-superuser)
|
||||
* [Load the data](#load-the-data)
|
||||
* [Building API Documentation](#build-api-documentation)
|
||||
* [Building API Documentation](#building-api-documentation)
|
||||
* [Accessing the AWX web interface](#accessing-the-awx-web-interface)
|
||||
* [Purging containers and images](#purging-containers-and-images)
|
||||
* [What should I work on?](#what-should-i-work-on)
|
||||
* [Submitting Pull Requests](#submitting-pull-requests)
|
||||
* [PR Checks run by Zuul](#pr-checks-run-by-zuul)
|
||||
* [Reporting Issues](#reporting-issues)
|
||||
|
||||
## Things to know prior to submitting code
|
||||
@@ -42,7 +33,7 @@ Have questions about this document or anything not covered here? Come chat with
|
||||
|
||||
## Setting up your development environment
|
||||
|
||||
The AWX development environment workflow and toolchain is based on Docker, and the docker-compose tool, to provide dependencies, services, and databases necessary to run all of the components. It also binds the local source tree into the development container, making it possible to observe and test changes in real time.
|
||||
The AWX development environment workflow and toolchain uses Docker and the docker-compose tool, to provide dependencies, services, and databases necessary to run all of the components. It also bind-mounts the local source tree into the development container, making it possible to observe and test changes in real time.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
@@ -55,29 +46,19 @@ respectively.
|
||||
|
||||
For Linux platforms, refer to the following from Docker:
|
||||
|
||||
**Fedora**
|
||||
* **Fedora** - https://docs.docker.com/engine/installation/linux/docker-ce/fedora/
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/fedora/
|
||||
* **CentOS** - https://docs.docker.com/engine/installation/linux/docker-ce/centos/
|
||||
|
||||
**CentOS**
|
||||
* **Ubuntu** - https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/centos/
|
||||
* **Debian** - https://docs.docker.com/engine/installation/linux/docker-ce/debian/
|
||||
|
||||
**Ubuntu**
|
||||
* **Arch** - https://wiki.archlinux.org/index.php/Docker
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
|
||||
#### Docker Compose
|
||||
|
||||
**Debian**
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/debian/
|
||||
|
||||
**Arch**
|
||||
|
||||
> https://wiki.archlinux.org/index.php/Docker
|
||||
|
||||
#### Docker compose
|
||||
|
||||
If you're not using Docker for Mac, or Docker for Windows, you may need, or choose to, install the Docker compose Python module separately, in which case you'll need to run the following:
|
||||
If you're not using Docker for Mac, or Docker for Windows, you may need, or choose to, install the `docker-compose` Python module separately.
|
||||
|
||||
```bash
|
||||
(host)$ pip3 install docker-compose
|
||||
@@ -87,186 +68,15 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
|
||||
|
||||
See [the ui development documentation](awx/ui_next/CONTRIBUTING.md).
|
||||
|
||||
|
||||
### Build the environment
|
||||
|
||||
#### Fork and clone the AWX repo
|
||||
|
||||
If you have not done so already, you'll need to fork the AWX repo on GitHub. For more on how to do this, see [Fork a Repo](https://help.github.com/articles/fork-a-repo/).
|
||||
|
||||
#### Create local settings
|
||||
### Build and Run the Development Environment
|
||||
|
||||
AWX will import the file `awx/settings/local_settings.py` and combine it with defaults in `awx/settings/defaults.py`. This file is required for starting the development environment and startup will fail if it's not provided.
|
||||
See the [README.md](./tools/docker-compose/README.md) for docs on how to build the awx_devel image and run the development environment.
|
||||
|
||||
An example is provided. Make a copy of it, and edit as needed (the defaults are usually fine):
|
||||
|
||||
```bash
|
||||
(host)$ cp awx/settings/local_settings.py.docker_compose awx/settings/local_settings.py
|
||||
```
|
||||
|
||||
#### Build the base image
|
||||
|
||||
The AWX base container image (defined in `tools/docker-compose/Dockerfile`) contains basic OS dependencies and symbolic links into the development environment that make running the services easy.
|
||||
|
||||
Run the following to build the image:
|
||||
|
||||
```bash
|
||||
(host)$ make docker-compose-build
|
||||
```
|
||||
|
||||
**NOTE**
|
||||
|
||||
> The image will need to be rebuilt, if the Python requirements or OS dependencies change.
|
||||
|
||||
Once the build completes, you will have an `ansible/awx_devel` image in your local image cache. Use the `docker images` command to view it, as follows:
|
||||
|
||||
```bash
|
||||
(host)$ docker images
|
||||
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
ansible/awx_devel latest ba9ec3e8df74 26 minutes ago 1.42GB
|
||||
```
|
||||
|
||||
#### Build the user interface
|
||||
|
||||
Run the following to build the AWX UI:
|
||||
|
||||
```bash
|
||||
(host) $ make ui-devel
|
||||
```
|
||||
See [the ui development documentation](awx/ui/README.md) for more information on using the frontend development, build, and test tooling.
|
||||
|
||||
### Running the environment
|
||||
|
||||
#### Start the containers
|
||||
|
||||
Start the development containers by running the following:
|
||||
|
||||
```bash
|
||||
(host)$ make docker-compose
|
||||
```
|
||||
|
||||
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django and the front end build process.
|
||||
|
||||
If you start a second terminal session, you can take a look at the running containers using the `docker ps` command. For example:
|
||||
|
||||
```bash
|
||||
# List running containers
|
||||
(host)$ docker ps
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
44251b476f98 gcr.io/ansible-tower-engineering/awx_devel:devel "/entrypoint.sh /bin…" 27 seconds ago Up 23 seconds 0.0.0.0:6899->6899/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 0.0.0.0:8080->8080/tcp, 22/tcp, 0.0.0.0:8888->8888/tcp tools_awx_run_9e820694d57e
|
||||
40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds
|
||||
b66a506d3007 postgres:12 "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:5432->5432/tcp tools_postgres_1
|
||||
```
|
||||
**NOTE**
|
||||
|
||||
> The Makefile assumes that the image you built is tagged with your current branch. This allows you to build images for different contexts or branches. When starting the containers, you can choose a specific branch by setting `COMPOSE_TAG=<branch name>` in your environment.
|
||||
|
||||
> For example, you might be working in a feature branch, but you want to run the containers using the `devel` image you built previously. To do that, start the containers using the following command: `$ COMPOSE_TAG=devel make docker-compose`
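For instance, to run the containers from the `devel` image you built earlier while your working tree is on a feature branch:

```bash
(host)$ COMPOSE_TAG=devel make docker-compose
```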
|
||||
|
||||
##### Wait for migrations to complete
|
||||
|
||||
The first time you start the environment, database migrations need to run in order to build the PostgreSQL database. It will take a few moments, but eventually you will see output in your terminal session that looks like the following:
|
||||
|
||||
```bash
|
||||
awx_1 | Operations to perform:
|
||||
awx_1 | Synchronize unmigrated apps: solo, api, staticfiles, debug_toolbar, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
awx_1 | Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
awx_1 | Synchronizing apps without migrations:
|
||||
awx_1 | Creating tables...
|
||||
awx_1 | Running deferred SQL...
|
||||
awx_1 | Installing custom SQL...
|
||||
awx_1 | Running migrations:
|
||||
awx_1 | Rendering model states... DONE
|
||||
awx_1 | Applying contenttypes.0001_initial... OK
|
||||
awx_1 | Applying contenttypes.0002_remove_content_type_name... OK
|
||||
awx_1 | Applying auth.0001_initial... OK
|
||||
awx_1 | Applying auth.0002_alter_permission_name_max_length... OK
|
||||
awx_1 | Applying auth.0003_alter_user_email_max_length... OK
|
||||
awx_1 | Applying auth.0004_alter_user_username_opts... OK
|
||||
awx_1 | Applying auth.0005_alter_user_last_login_null... OK
|
||||
awx_1 | Applying auth.0006_require_contenttypes_0002... OK
|
||||
awx_1 | Applying taggit.0001_initial... OK
|
||||
awx_1 | Applying taggit.0002_auto_20150616_2121... OK
|
||||
awx_1 | Applying main.0001_initial... OK
|
||||
awx_1 | Applying main.0002_squashed_v300_release... OK
|
||||
awx_1 | Applying main.0003_squashed_v300_v303_updates... OK
|
||||
awx_1 | Applying main.0004_squashed_v310_release... OK
|
||||
awx_1 | Applying conf.0001_initial... OK
|
||||
awx_1 | Applying conf.0002_v310_copy_tower_settings... OK
|
||||
...
|
||||
```
|
||||
|
||||
Once migrations are completed, you can begin using AWX.
|
||||
|
||||
#### Start from the container shell
|
||||
|
||||
Often, you'll want to start the development environment without immediately starting all of the services in the *awx* container, and instead be taken directly to a shell. You can do this with the following:
|
||||
|
||||
```bash
|
||||
(host)$ make docker-compose-test
|
||||
```
|
||||
|
||||
Using `docker exec`, this will create a session in the running *awx* container, and place you at a command prompt, where you can run shell commands inside the container.
|
||||
|
||||
If you want to start and use the development environment, you'll first need to bootstrap it by running the following command:
|
||||
|
||||
```bash
|
||||
(container)# /usr/bin/bootstrap_development.sh
|
||||
```
|
||||
|
||||
The above will do all the setup tasks, including running database migrations, so it may take a couple of minutes. Once it's done, it
|
||||
will drop you back to the shell.
|
||||
|
||||
In order to launch all developer services:
|
||||
|
||||
```bash
|
||||
(container)# /usr/bin/launch_awx.sh
|
||||
```
|
||||
|
||||
`launch_awx.sh` also calls `bootstrap_development.sh` so if all you are doing is launching the supervisor to start all services, you don't
|
||||
need to call `bootstrap_development.sh` first.
|
||||
|
||||
|
||||
|
||||
### Post Build Steps
|
||||
|
||||
Before you can log in and use the system, you will need to create an admin user. Optionally, you may also want to load some demo data.
|
||||
|
||||
##### Start a shell
|
||||
|
||||
To create the admin user, and load demo data, you first need to start a shell session on the *awx* container. In a new terminal session, use the `docker exec` command as follows to start the shell session:
|
||||
|
||||
```bash
|
||||
(host)$ docker exec -it tools_awx_1 bash
|
||||
```
|
||||
This creates a session in the *awx* container, just as if you were using `ssh`, and allows you to execute commands within the running container.
|
||||
|
||||
##### Create an admin user
|
||||
|
||||
Before you can log into AWX, you need to create an admin user. With this user you will be able to create more users, and begin configuring the server. From within the container shell, run the following command:
|
||||
|
||||
```bash
|
||||
(container)# awx-manage createsuperuser
|
||||
```
|
||||
You will be prompted for a username, an email address, and a password, and you will be asked to confirm the password. The email address is not important, so just enter something that looks like an email address. Remember the username and password, as you will use them to log into the web interface for the first time.
|
||||
|
||||
##### Load demo data
|
||||
|
||||
You can optionally load some demo data. This will create a demo project, inventory, and job template. From within the container shell, run the following to load the data:
|
||||
|
||||
```bash
|
||||
(container)# awx-manage create_preload_data
|
||||
```
|
||||
|
||||
**NOTE**
|
||||
|
||||
> This information will persist in the database running in the `tools_postgres_1` container, until the container is removed. You may periodically need to recreate
|
||||
this container, and thus the database, if the database schema changes in an upstream commit.
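One way to do that, sketched here using the container name shown above: stop and remove the database container, then restart the environment so the database is recreated and migrations run again.

```bash
# Discard the current development database by removing its container,
# then bring the environment back up to recreate and migrate the database.
(host)$ docker stop tools_postgres_1
(host)$ docker rm tools_postgres_1
(host)$ make docker-compose
```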
|
||||
|
||||
##### Building API Documentation
|
||||
### Building API Documentation
|
||||
|
||||
AWX includes support for building [Swagger/OpenAPI
|
||||
documentation](https://swagger.io). To build the documentation locally, run:
|
||||
@@ -284,7 +94,7 @@ is an example of one such service.
|
||||
|
||||
You can now log into the AWX web interface at [https://localhost:8043](https://localhost:8043), and access the API directly at [https://localhost:8043/api/](https://localhost:8043/api/).
|
||||
|
||||
To log in use the admin user and password you created above in [Create an admin user](#create-an-admin-user).
|
||||
[Create an admin user](./tools/docker-compose/README.md#create-an-admin-user) if needed.
|
||||
|
||||
### Purging containers and images
|
||||
|
||||
@@ -335,7 +145,7 @@ Sometimes it might take us a while to fully review your PR. We try to keep the `
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
|
||||
|
||||
## PR Checks ran by Zuul
|
||||
## PR Checks run by Zuul
|
||||
Zuul jobs for awx are defined in the [zuul-jobs](https://github.com/ansible/zuul-jobs) repo.
|
||||
|
||||
Zuul runs the following checks that must pass:
|
||||
|
||||
INSTALL.md (667 changes)
@@ -1,643 +1,122 @@
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
* [Installing AWX](#installing-awx)
|
||||
* [The AWX Operator](#the-awx-operator)
|
||||
* [Quickstart with minikube](#quickstart-with-minikube)
|
||||
* [Starting minikube](#starting-minikube)
|
||||
* [Deploying the AWX Operator](#deploying-the-awx-operator)
|
||||
* [Verifying the Operator Deployment](#verifying-the-operator-deployment)
|
||||
* [Deploy AWX](#deploy-awx)
|
||||
* [Accessing AWX](#accessing-awx)
|
||||
* [Installing the AWX CLI](#installing-the-awx-cli)
|
||||
* [Building the CLI Documentation](#building-the-cli-documentation)
|
||||
|
||||
|
||||
# Installing AWX
|
||||
|
||||
This document provides a guide for installing AWX.
|
||||
:warning: NOTE |
|
||||
--- |
|
||||
If you're installing an older release of AWX (prior to 18.0), these instructions have changed. Take a look at your version specific instructions, e.g., for AWX 17.0.1, see: [https://github.com/ansible/awx/blob/17.0.1/INSTALL.md](https://github.com/ansible/awx/blob/17.0.1/INSTALL.md)
|
||||
If you're attempting to migrate an older Docker-based AWX installation, see: [Migrating Data from Local Docker](https://github.com/ansible/awx/blob/devel/tools/docker-compose/docs/data_migration.md) |
|
||||
|
||||
## Table of contents
|
||||
## The AWX Operator
|
||||
|
||||
- [Installing AWX](#installing-awx)
|
||||
* [Getting started](#getting-started)
|
||||
+ [Clone the repo](#clone-the-repo)
|
||||
+ [AWX branding](#awx-branding)
|
||||
+ [Prerequisites](#prerequisites)
|
||||
+ [System Requirements](#system-requirements)
|
||||
+ [Choose a deployment platform](#choose-a-deployment-platform)
|
||||
+ [Official vs Building Images](#official-vs-building-images)
|
||||
* [Upgrading from previous versions](#upgrading-from-previous-versions)
|
||||
* [OpenShift](#openshift)
|
||||
+ [Prerequisites](#prerequisites-1)
|
||||
+ [Pre-install steps](#pre-install-steps)
|
||||
- [Deploying to Minishift](#deploying-to-minishift)
|
||||
- [PostgreSQL](#postgresql)
|
||||
+ [Run the installer](#run-the-installer)
|
||||
+ [Post-install](#post-install)
|
||||
+ [Accessing AWX](#accessing-awx)
|
||||
* [Kubernetes](#kubernetes)
|
||||
+ [Prerequisites](#prerequisites-2)
|
||||
+ [Pre-install steps](#pre-install-steps-1)
|
||||
+ [Configuring Helm](#configuring-helm)
|
||||
+ [Run the installer](#run-the-installer-1)
|
||||
+ [Post-install](#post-install-1)
|
||||
+ [Accessing AWX](#accessing-awx-1)
|
||||
+ [SSL Termination](#ssl-termination)
|
||||
* [Docker-Compose](#docker-compose)
|
||||
+ [Prerequisites](#prerequisites-3)
|
||||
+ [Pre-install steps](#pre-install-steps-2)
|
||||
- [Deploying to a remote host](#deploying-to-a-remote-host)
|
||||
- [Inventory variables](#inventory-variables)
|
||||
- [Docker registry](#docker-registry)
|
||||
- [Proxy settings](#proxy-settings)
|
||||
- [PostgreSQL](#postgresql-1)
|
||||
+ [Run the installer](#run-the-installer-2)
|
||||
+ [Post-install](#post-install-2)
|
||||
+ [Accessing AWX](#accessing-awx-2)
|
||||
- [Installing the AWX CLI](#installing-the-awx-cli)
|
||||
* [Building the CLI Documentation](#building-the-cli-documentation)
|
||||
Starting in version 18.0, the [AWX Operator](https://github.com/ansible/awx-operator) is the preferred way to install AWX.
|
||||
|
||||
AWX can alternatively be installed and [run in Docker](./tools/docker-compose/README.md), but this install path is only recommended for development/test-oriented deployments, and has no official published release.
|
||||
|
||||
## Getting started
|
||||
### Quickstart with minikube
|
||||
|
||||
### Clone the repo
|
||||
If you don't have an existing OpenShift or Kubernetes cluster, minikube is a fast and easy way to get up and running.
|
||||
|
||||
If you have not already done so, you will need to clone, or create a local copy, of the [AWX repo](https://github.com/ansible/awx). We generally recommend that you view the releases page:
|
||||
To install minikube, follow the steps in their [documentation](https://minikube.sigs.k8s.io/docs/start/).
|
||||
|
||||
https://github.com/ansible/awx/releases
|
||||
#### Starting minikube
|
||||
|
||||
...and clone the latest stable release, e.g.,
|
||||
|
||||
`git clone -b x.y.z https://github.com/ansible/awx.git`
|
||||
|
||||
Please note that deploying from `HEAD` (or the latest commit) is **not** stable, and that if you want to do this, you should proceed at your own risk (also, see the section #official-vs-building-images for building your own image).
|
||||
|
||||
For more on how to clone the repo, view [git clone help](https://git-scm.com/docs/git-clone).
|
||||
|
||||
Once you have a local copy, run the commands in the following sections from the root of the project tree.
|
||||
|
||||
### AWX branding
|
||||
|
||||
You can optionally install the AWX branding assets from the [awx-logos repo](https://github.com/ansible/awx-logos). Prior to installing, please review and agree to the [trademark guidelines](https://github.com/ansible/awx-logos/blob/master/TRADEMARKS.md).
|
||||
|
||||
To install the assets, clone the `awx-logos` repo so that it is next to your `awx` clone. As you progress through the installation steps, you'll be setting variables in the [inventory](./installer/inventory) file. To include the assets in the build, set `awx_official=true`.
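For example (a sketch; run this from the directory that contains your `awx` clone):

```bash
# Clone awx-logos alongside the awx repository, then set awx_official=true
# in the installer inventory so the build picks up the branding assets.
$ git clone https://github.com/ansible/awx-logos.git
```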
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before you can run a deployment, you'll need the following installed in your local environment:
|
||||
|
||||
- [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) Requires Version 2.8+
|
||||
- [Docker](https://docs.docker.com/engine/installation/)
|
||||
+ A recent version
|
||||
- [docker](https://pypi.org/project/docker/) Python module
|
||||
+ This is incompatible with `docker-py`. If you have previously installed `docker-py`, please uninstall it.
|
||||
+ We use this module instead of `docker-py` because it is what the `docker-compose` Python module requires.
|
||||
- [community.general.docker_image collection](https://docs.ansible.com/ansible/latest/collections/community/general/docker_image_module.html)
|
||||
+ This is only required if you are using Ansible >= 2.10
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Git](https://git-scm.com/) Requires Version 1.8.4+
|
||||
- Python 3.6+
|
||||
- [Node 14.x LTS version](https://nodejs.org/en/download/)
|
||||
+ This is only required if you're [building your own container images](#official-vs-building-images) with `use_container_for_build=false`
|
||||
- [NPM 6.x LTS](https://docs.npmjs.com/)
|
||||
+ This is only required if you're [building your own container images](#official-vs-building-images) with `use_container_for_build=false`
|
||||
|
||||
### System Requirements
|
||||
|
||||
The system that runs the AWX service will need to satisfy the following requirements
|
||||
|
||||
- At least 4GB of memory
|
||||
- At least 2 cpu cores
|
||||
- At least 20GB of space
|
||||
- Running Docker, Openshift, or Kubernetes
|
||||
- If you choose to use an external PostgreSQL database, please note that the minimum version is 10+.
|
||||
|
||||
### Choose a deployment platform
|
||||
|
||||
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, a Kubernetes cluster, or docker-compose. The remainder of this document will walk you through the process of building the images and deploying them to your chosen platform.
|
||||
|
||||
The [installer](./installer) directory contains an [inventory](./installer/inventory) file, and a playbook, [install.yml](./installer/install.yml). You'll begin by setting variables in the inventory file according to the platform you wish to use, and then you'll start the image build and deployment process by running the playbook.
|
||||
|
||||
In the sections below, you'll find deployment details and instructions for each platform:
|
||||
- [OpenShift](#openshift)
|
||||
- [Kubernetes](#kubernetes)
|
||||
- [Docker Compose](#docker-compose).
|
||||
|
||||
### Official vs Building Images
|
||||
|
||||
When installing AWX you have the option of building your own image or using the image provided on DockerHub (see [awx](https://hub.docker.com/r/ansible/awx/))
|
||||
|
||||
This is controlled by the following variables in the `inventory` file
|
||||
Once you have installed minikube, run the following command to start it. You may wish to customize these options.
|
||||
|
||||
```
|
||||
dockerhub_base=ansible
|
||||
dockerhub_version=latest
|
||||
$ minikube start --cpus=4 --memory=8g --addons=ingress
|
||||
```
|
||||
|
||||
If these variables are present then all deployments will use these hosted images. If the variables are not present then the images will be built during the install.
|
||||
#### Deploying the AWX Operator
|
||||
|
||||
*dockerhub_base*
|
||||
|
||||
> The base location on DockerHub where the images are hosted (by default this pulls a container image named `ansible/awx:tag`)
|
||||
|
||||
*dockerhub_version*
|
||||
|
||||
> Multiple versions are provided. `latest` always pulls the most recent. You may also select version numbers at different granularities: 1, 1.0, 1.0.1, 1.0.0.123
|
||||
|
||||
*use_container_for_build*
|
||||
|
||||
> Use a local distribution build container image for building the AWX package. This is helpful if you don't want to bother installing the build-time dependencies as it is taken care of already.
|
||||
|
||||
|
||||
## Upgrading from previous versions
|
||||
|
||||
Upgrading AWX involves rerunning the install playbook. Download a newer release from [https://github.com/ansible/awx/releases](https://github.com/ansible/awx/releases) and re-populate the inventory file with your customized variables.
|
||||
|
||||
For convenience, you can create a file called `vars.yml`:
|
||||
For a comprehensive overview of features, see [README.md](https://github.com/ansible/awx-operator/blob/devel/README.md) in the awx-operator repo. The following steps are the bare minimum to get AWX up and running.
|
||||
|
||||
```
|
||||
admin_password: 'adminpass'
|
||||
pg_password: 'pgpass'
|
||||
secret_key: 'mysupersecret'
|
||||
$ minikube kubectl -- apply -f https://raw.githubusercontent.com/ansible/awx-operator/devel/deploy/awx-operator.yaml
|
||||
```
|
||||
|
||||
And pass it to the installer:
|
||||
##### Verifying the Operator Deployment
|
||||
|
||||
After a few seconds, the operator should be up and running. Verify it by running the following command:
|
||||
|
||||
```
|
||||
$ ansible-playbook -i inventory install.yml -e @vars.yml
|
||||
$ minikube kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
awx-operator-7c78bfbfd-xb6th 1/1 Running 0 11s
|
||||
```
|
||||
|
||||
## OpenShift
|
||||
#### Deploy AWX
|
||||
|
||||
### Prerequisites
|
||||
|
||||
To complete a deployment to OpenShift, you will need access to an OpenShift cluster. For demo and testing purposes, you can use [Minishift](https://github.com/minishift/minishift) to create a single node cluster running inside a virtual machine.
|
||||
|
||||
When using OpenShift to deploy AWX, make sure you have the correct privileges to add the 'privileged' security context; otherwise, the installation will fail. The privileged context is needed because [the bubblewrap tool](https://github.com/containers/bubblewrap) is used to add an additional layer of security when running containers.
|
||||
|
||||
You will also need to have the `oc` command in your PATH. The `install.yml` playbook will call out to `oc` when logging into, and creating objects on the cluster.
|
||||
|
||||
The default resource requests per deployment require:
|
||||
|
||||
> Memory: 6GB
|
||||
> CPU: 3 cores
|
||||
|
||||
This can be tuned by overriding the variables found in [/installer/roles/kubernetes/defaults/main.yml](/installer/roles/kubernetes/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
|
||||
|
||||
For more detail on how resource requests are formed see: [https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources](https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources)
|
||||
|
||||
### Pre-install steps
|
||||
|
||||
Before starting the install, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:
|
||||
|
||||
*openshift_host*
|
||||
|
||||
> IP address or hostname of the OpenShift cluster. If you're using Minishift, this will be the value returned by `minishift ip`.
|
||||
|
||||
|
||||
*openshift_skip_tls_verify*
|
||||
|
||||
> Boolean. Set to True if using self-signed certs.
|
||||
|
||||
*openshift_project*
|
||||
|
||||
> Name of the OpenShift project that will be created, and used as the namespace for the AWX app. Defaults to *awx*.
|
||||
|
||||
*openshift_user*
|
||||
|
||||
> Username of the OpenShift user that will create the project, and deploy the application. Defaults to *developer*.
|
||||
|
||||
*openshift_pg_emptydir*
|
||||
|
||||
> Boolean. Set to True to use an emptyDir volume when deploying the PostgreSQL pod. Note: This should only be used for demo and testing purposes.
|
||||
|
||||
*docker_registry*
|
||||
|
||||
> IP address and port, or URL, for accessing a registry that the OpenShift cluster can access. Defaults to *172.30.1.1:5000*, the internal registry delivered with Minishift. This is not needed if you are using official hosted images.
|
||||
|
||||
*docker_registry_repository*
|
||||
|
||||
> Namespace to use when pushing and pulling images to and from the registry. Generally this will match the project name. It defaults to *awx*. This is not needed if you are using official hosted images.
|
||||
|
||||
*docker_registry_username*
|
||||
|
||||
> Username of the user that will push images to the registry. Will generally match the *openshift_user* value. Defaults to *developer*. This is not needed if you are using official hosted images.
|
||||
|
||||
#### Deploying to Minishift
|
||||
|
||||
Install Minishift by following the [installation guide](https://docs.openshift.org/latest/minishift/getting-started/installing.html).
|
||||
|
||||
The recommended minimum resources for your Minishift VM:
|
||||
|
||||
```bash
|
||||
$ minishift start --cpus=4 --memory=8GB
|
||||
```
|
||||
|
||||
The Minishift VM contains a Docker daemon, which you can use to build the AWX images. This is generally the approach you should take, and we recommend doing so. To use this instance, run the following command to setup your environment:
|
||||
|
||||
```bash
|
||||
# Set DOCKER environment variable to point to the Minishift VM
|
||||
$ eval $(minishift docker-env)
|
||||
```
|
||||
|
||||
**Note**
|
||||
|
||||
> If you choose to not use the Docker instance running inside the VM, and build the images externally, you will have to enable the OpenShift cluster to access the images. This involves pushing the images to an external Docker registry, and granting the cluster access to it, or exposing the internal registry, and pushing the images into it.
|
||||
|
||||
#### PostgreSQL
|
||||
|
||||
By default, AWX will deploy a PostgreSQL pod inside of your cluster. You will need to create a [Persistent Volume Claim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html) which is named `postgresql` by default, and can be overridden by setting the `openshift_pg_pvc_name` variable. For testing and demo purposes, you may set `openshift_pg_emptydir=yes`.
|
||||
|
||||
If you wish to use an external database, in the inventory file, set the value of `pg_hostname`, and update `pg_username`, `pg_password`, `pg_admin_password`, `pg_database`, and `pg_port` with the connection information. When setting `pg_hostname` the installer will assume you have configured the database in that location and will not launch the postgresql pod.
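For illustration, the relevant inventory entries might look like the following (hostname and passwords are placeholders for your own database):

```
pg_hostname=postgres.example.com
pg_port=5432
pg_database=awx
pg_username=awx
pg_password=awxpass
pg_admin_password=postgrespass
```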
|
||||
|
||||
### Run the installer
|
||||
|
||||
To start the install, you will pass two *extra* variables on the command line. The first is *openshift_password*, which is the password for the *openshift_user*, and the second is *docker_registry_password*, which is the password associated with *docker_registry_username*.
|
||||
|
||||
If you're using the OpenShift internal registry, then you'll pass an access token for the *docker_registry_password* value, rather than a password. The `oc whoami -t` command will generate the required token, as long as you're logged into the cluster via `oc cluster login`.
|
||||
|
||||
Run the following command (docker_registry_password is optional if using official images):
|
||||
|
||||
```bash
|
||||
# Start the install
|
||||
$ ansible-playbook -i inventory install.yml -e openshift_password=developer -e docker_registry_password=$(oc whoami -t)
|
||||
```
|
||||
|
||||
### Post-install
|
||||
|
||||
After the playbook run completes, check the status of the deployment by running `oc get pods`:
|
||||
|
||||
```bash
|
||||
# View the running pods
$ oc get pods

NAME                   READY     STATUS    RESTARTS   AGE
awx-3886581826-5mv0l   4/4       Running   0          8s
postgresql-1-l85fh     1/1       Running   0          20m
```

In the above example, the name of the AWX pod is `awx-3886581826-5mv0l`. Before accessing the AWX web interface, setup tasks and database migrations need to complete. These tasks run in the `awx_task` container inside the AWX pod. To monitor their status, tail the container's STDOUT by running the following command, replacing the AWX pod name with the pod name from your environment:

```bash
# Follow the awx_task log output
$ oc logs -f awx-3886581826-5mv0l -c awx-celery
```

You will see output like the following, indicating that database migrations are running:

```bash
Using /etc/ansible/ansible.cfg as config file
127.0.0.1 | SUCCESS => {
    "changed": false,
    "db": "awx"
}
Operations to perform:
  Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
  Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
Synchronizing apps without migrations:
  Creating tables...
    Running deferred SQL...
  Installing custom SQL...
Running migrations:
  Rendering model states... DONE
  Applying contenttypes.0001_initial... OK
  Applying contenttypes.0002_remove_content_type_name... OK
  Applying auth.0001_initial... OK
  Applying auth.0002_alter_permission_name_max_length... OK
  Applying auth.0003_alter_user_email_max_length... OK
  Applying auth.0004_alter_user_username_opts... OK
  Applying auth.0005_alter_user_last_login_null... OK
  Applying auth.0006_require_contenttypes_0002... OK
  Applying taggit.0001_initial... OK
  Applying taggit.0002_auto_20150616_2121... OK
  ...
```

When you see output similar to the following, database migrations have completed, and you can access the web interface:

```bash
Python 2.7.5 (default, Nov  6 2016, 00:28:07)
[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)

>>> <User: admin>
>>> Default organization added.
Demo Credential, Inventory, and Job Template added.
Successfully registered instance awx-3886581826-5mv0l
(changed: True)
Creating instance group tower
Added instance awx-3886581826-5mv0l to tower
```

Once database migrations complete, the web interface will be accessible.

### Accessing AWX

The AWX web interface is running in the AWX pod, behind the `awx-web-svc` service. To view the service and its port value, run the following command:

```bash
# View available services
$ oc get services

NAME          CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
awx-web-svc   172.30.111.74   <nodes>       8052:30083/TCP   37m
postgresql    172.30.102.9    <none>        5432/TCP         38m
```

The deployment process also creates a route, `awx-web-svc`, to expose the service. How the ingress is actually created will vary depending on your environment and how the cluster is configured. You can view the route, and the external IP address and hostname assigned to it, by running the following command:

```bash
# View available routes
$ oc get routes

NAME          HOST/PORT                             PATH   SERVICES      PORT   TERMINATION   WILDCARD
awx-web-svc   awx-web-svc-awx.192.168.64.2.nip.io          awx-web-svc   http   edge/Allow    None
```

The above example is taken from a Minishift instance. From a web browser, use `https` to access the `HOST/PORT` value from your environment. Using the above example, the URL to access the server would be [https://awx-web-svc-awx.192.168.64.2.nip.io](https://awx-web-svc-awx.192.168.64.2.nip.io).

Once you access the AWX server, you will be prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.

Once the Operator is running, you can now deploy AWX by creating a simple YAML file:

```
$ cat myawx.yml
---
apiVersion: awx.ansible.com/v1beta1
kind: AWX
metadata:
  name: awx
spec:
  tower_ingress_type: Ingress
```

And then creating the AWX object in the Kubernetes API:

```bash
$ minikube kubectl apply -- -f myawx.yml
awx.awx.ansible.com/awx created
```

After creating the AWX object in the Kubernetes API, the operator will begin running its reconciliation loop. To see what's going on, you can tail the logs of the operator pod (note that your pod name will be different):

```bash
$ minikube kubectl logs -- -f awx-operator-7c78bfbfd-xb6th
```

After a few seconds, you will see the database and application pods show up. On a fresh system, it may take a few minutes for the container images to download:

```bash
$ minikube kubectl get pods

NAME                           READY   STATUS    RESTARTS   AGE
awx-5ffbfd489c-bvtvf           3/3     Running   0          2m54s
awx-operator-7c78bfbfd-xb6th   1/1     Running   0          6m42s
awx-postgres-0                 1/1     Running   0          2m58s
```

##### Accessing AWX

To access the AWX UI, you'll need to grab the service URL from minikube:

```bash
$ minikube service awx-service --url
http://192.168.59.2:31868
```

On fresh installs, you will see the "AWX is currently upgrading." page until database migrations finish.

Once you are redirected to the login screen, you can now log in by obtaining the generated admin password (note: do not copy the trailing `%`):

```bash
$ minikube kubectl -- get secret awx-admin-password -o jsonpath='{.data.password}' | base64 --decode
b6ChwVmqEiAsil2KSpH4xGaZPeZvWnWj%
```

Now you can log in at the URL above with the username `admin` and the password above. Happy automating!

## Kubernetes

### Prerequisites

A Kubernetes deployment requires access to a Kubernetes cluster as well as the following tools:

- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
- [helm](https://helm.sh/docs/intro/quickstart/)

The installation program references `kubectl` directly. `helm` is only necessary if you are letting the installer configure PostgreSQL for you.

The default resource requests per pod are:

> Memory: 6GB
> CPU: 3 cores

This can be tuned by overriding the variables found in [/installer/roles/kubernetes/defaults/main.yml](/installer/roles/kubernetes/defaults/main.yml). Special care should be taken when doing this, as undersized instances will experience crashes and resource exhaustion.

For more detail on how resource requests are formed, see [https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/).

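If you do tune them, one option is to pass overrides as extra variables when running the installer. The variable names below are hypothetical placeholders only; take the real names from [/installer/roles/kubernetes/defaults/main.yml](/installer/roles/kubernetes/defaults/main.yml) before using this:

```bash
# Sketch only: override the default resource requests at install time.
# "task_mem_request" and "task_cpu_request" are hypothetical placeholder names;
# use the actual variable names defined in installer/roles/kubernetes/defaults/main.yml.
$ ansible-playbook -i inventory install.yml \
    -e task_mem_request=2 \
    -e task_cpu_request=1500m
```
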
### Pre-install steps

Before starting the install process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section. Make sure the OpenShift and standalone Docker sections are commented out:

*kubernetes_context*

> Prior to running the installer, make sure you've configured the context for the cluster you'll be installing to. This is how the installer knows which cluster to connect to and what authentication to use.

*kubernetes_namespace*

> Name of the Kubernetes namespace where the AWX resources will be installed. This will be created if it doesn't exist.

*docker_registry_*

> These settings should be used if you are building your own base images. You'll need access to an external registry, and you are responsible for making sure your Kubernetes cluster can talk to it and use it. If these are undefined and the `dockerhub_` configuration settings are uncommented, then the images will be pulled from Docker Hub instead.

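As a minimal sketch, the relevant lines in `[all:vars]` might look like this once uncommented; the context name is whatever `kubectl config get-contexts` reports for your target cluster, and the namespace is your choice:

```ini
[all:vars]
# Context from your kubeconfig that points at the target cluster
kubernetes_context=my-cluster-context
# Namespace where the AWX resources will be created (created if it doesn't exist)
kubernetes_namespace=awx
```
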
### Configuring Helm

If you want the AWX installer to manage creating the database pod (rather than installing and configuring PostgreSQL on your own), then you will need a working `helm` installation; you can find details here: [https://helm.sh/docs/intro/quickstart/](https://helm.sh/docs/intro/quickstart/).

You do not need to create a [Persistent Volume Claim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html), as Helm does that for you. However, an existing one may be used by setting the `pg_persistence_existingclaim` variable.

Newer Kubernetes clusters with RBAC enabled will need a service account for Helm; make sure to follow the instructions here: [https://helm.sh/docs/topics/rbac/](https://helm.sh/docs/topics/rbac/).

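If you already have a claim provisioned for the database, reusing it is a one-line change in the inventory's `[all:vars]` section; the claim name below is a placeholder:

```ini
[all:vars]
# Reuse an existing Persistent Volume Claim for the PostgreSQL data
# instead of letting Helm create one
pg_persistence_existingclaim=awx-postgres-data
```
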
### Run the installer

After making changes to the `inventory` file, use `ansible-playbook` to begin the install:

```bash
$ ansible-playbook -i inventory install.yml
```

### Post-install

After the playbook run completes, check the status of the deployment by running `kubectl get pods --namespace awx` (replace `awx` with the namespace you used):

```bash
# View the running pods; it may take a few minutes for everything to reach the Running state
$ kubectl get pods --namespace awx

NAME                             READY     STATUS    RESTARTS   AGE
awx-2558692395-2r8ss             4/4       Running   0          29s
awx-postgresql-355348841-kltkn   1/1       Running   0          1m
```

### Accessing AWX

The AWX web interface is running in the AWX pod behind the `awx-web-svc` service:

```bash
# View available services
$ kubectl get svc --namespace awx

NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
awx-postgresql   ClusterIP   10.7.250.208   <none>        5432/TCP       2m
awx-web-svc      NodePort    10.7.241.35    <none>        80:30177/TCP   1m
```

The deployment process also creates an `Ingress` named `awx-web-svc`. Some Kubernetes cloud providers will automatically handle routing configuration when an Ingress is created; others may require that you configure it more explicitly. You can see what Kubernetes knows about it with:

```bash
$ kubectl get ing --namespace awx

NAME          HOSTS     ADDRESS      PORTS     AGE
awx-web-svc   *         35.227.x.y   80        3m
```

If your provider is able to allocate an IP address for the Ingress controller, then you can navigate to that address and access the AWX interface. For some providers it can take a few minutes to allocate and make this accessible; for others it may require manual intervention.

### SSL Termination

Unlike OpenShift's `Route`, the Kubernetes `Ingress` doesn't yet handle SSL termination in this deployment. As such, the default configuration only exposes AWX over HTTP on port 80. You are responsible for configuring SSL support until support is added (either to Kubernetes or AWX itself).

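Until then, one common approach is to terminate TLS yourself at the Ingress layer. The following is only a minimal sketch, assuming an ingress controller that honors the standard `tls` stanza, a TLS secret named `awx-web-tls` that you have already created in the `awx` namespace, and a placeholder hostname:

```yaml
# Sketch: TLS termination in front of the awx-web-svc service.
# Assumes the secret "awx-web-tls" (tls.crt/tls.key) already exists in the awx namespace
# and that your ingress controller handles the standard tls section.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: awx-web-tls
  namespace: awx
spec:
  tls:
    - hosts:
        - awx.example.com        # placeholder hostname
      secretName: awx-web-tls
  rules:
    - host: awx.example.com      # placeholder hostname
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: awx-web-svc
                port:
                  number: 80
```

Alternatively, a separate reverse proxy in front of the NodePort can terminate TLS and forward plain HTTP to port 80.
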
## Docker-Compose

### Prerequisites

- [Docker](https://docs.docker.com/engine/installation/) on the host where AWX will be deployed. After installing Docker, the Docker service must be started (depending on your OS, you may have to add the local user that uses Docker to the ``docker`` group; refer to the documentation for details).
- [docker-compose](https://pypi.org/project/docker-compose/) Python module.
    + This also installs the `docker` Python module, which is incompatible with `docker-py`. If you have previously installed `docker-py`, please uninstall it.
- [Docker Compose](https://docs.docker.com/compose/install/).

### Pre-install steps

#### Deploying to a remote host

By default, the delivered [installer/inventory](./installer/inventory) file will deploy AWX to the local host. It is possible, however, to deploy to a remote host. The [installer/install.yml](./installer/install.yml) playbook can be used to build images on the local host, and to ship the built images to, and run deployment tasks on, a remote host. To do this, modify the [installer/inventory](./installer/inventory) file by commenting out `localhost` and adding the remote host.

For example, suppose you wish to build images locally on your CI/CD host, and deploy them to a remote host named *awx-server*. To do this, add *awx-server* to the [installer/inventory](./installer/inventory) file, and comment out or remove `localhost`, as demonstrated by the following:

```yaml
# localhost ansible_connection=local
awx-server

[all:vars]
...
```

In the above example, image build tasks will be delegated to `localhost`, which is typically where the clone of the AWX project exists. Built images will be archived, copied to the remote host, and imported into the remote Docker image cache. Tasks to start the AWX containers will then execute on the remote host.

If you choose to use the official images, then the remote host will be the one to pull those images.

**Note**

> You may also want to set additional variables to control how Ansible connects to the host (see the example below). For more information about this, view [Behavioral Inventory Parameters](http://docs.ansible.com/ansible/latest/intro_inventory.html#id12).

> As mentioned above, in [Prerequisites](#prerequisites-1), the prerequisites are required on the remote host.

> When deploying to a remote host, the playbook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.

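As a minimal sketch, the remote host entry might carry the usual Ansible connection parameters; the address, user, and key path below are placeholders for your own environment:

```yaml
# Hypothetical connection details for the remote deployment host
awx-server ansible_host=203.0.113.10 ansible_user=deploy ansible_ssh_private_key_file=~/.ssh/awx_deploy

[all:vars]
...
```
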
#### Inventory variables

Before starting the install process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section; a combined example follows the variable descriptions below:

*postgres_data_dir*

> If you're using the default PostgreSQL container (see [PostgreSQL](#postgresql-1) below), provide a path that can be mounted to the container, and where the database can be persisted.

*host_port*

> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. If undefined, no port will be exposed. Defaults to *80*.

*host_port_ssl*

> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container for SSL support. If undefined, no port will be exposed. Defaults to *443*; it only works if you also set `ssl_certificate` (see below).

*ssl_certificate*

> Optionally, provide the path to a file that contains a certificate and its private key. This needs to be a .pem file.

*docker_compose_dir*

> When using docker-compose, the `docker-compose.yml` file will be created there (default `~/.awx/awxcompose`).

*custom_venv_dir*

> Adds the custom venv environments from the local host to be passed into the containers at install.

*ca_trust_dir*

> If you're using a non-trusted CA, provide a path where the untrusted certs are stored on your host.

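Pulling these together, a minimal sketch of the `[all:vars]` section for a local Docker Compose install might look like the following; every value shown is a placeholder to adapt to your host:

```ini
[all:vars]
# Where the PostgreSQL data is persisted on the host
postgres_data_dir=/var/lib/awx/pgdocker
# HTTP and HTTPS ports exposed by the AWX web container
host_port=80
host_port_ssl=443
# Combined certificate + private key (.pem); required for host_port_ssl
ssl_certificate=/etc/awx/certs/awx.pem
# Where the generated docker-compose.yml is written
docker_compose_dir=~/.awx/awxcompose
```
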
#### Docker registry

If you wish to tag and push built images to a Docker registry, set the following variables in the inventory file:

*docker_registry*

> IP address and port, or URL, for accessing a registry.

*docker_registry_repository*

> Namespace to use when pushing and pulling images to and from the registry. Defaults to *awx*.

*docker_registry_username*

> Username of the user that will push images to the registry. Defaults to *developer*.

**Note**

> These settings are ignored if using official images.

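For instance, a hedged example pointing the installer at a private registry; the address is a placeholder for your own registry:

```ini
[all:vars]
# Private registry the build host can push to and the Docker host can pull from
docker_registry=registry.example.com:5000
docker_registry_repository=awx
docker_registry_username=developer
```
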
#### Proxy settings

*http_proxy*

> IP address and port, or URL, for using an http_proxy.

*https_proxy*

> IP address and port, or URL, for using an https_proxy.

*no_proxy*

> Exclude IP addresses or URLs from the proxy.

#### PostgreSQL

AWX requires access to a PostgreSQL database, and by default, one will be created and deployed in a container, and data will be persisted to a host volume. In this scenario, you must set the value of `postgres_data_dir` to a path that can be mounted to the container. When the container is stopped, the database files will still exist in the specified path.

If you wish to use an external database, in the inventory file, set the value of `pg_hostname`, and update `pg_username`, `pg_password`, `pg_admin_password`, `pg_database`, and `pg_port` with the connection information.

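For example, a minimal sketch of the external-database settings; the host, credentials, and database name below are placeholders for your own server:

```ini
[all:vars]
# Point AWX at an existing PostgreSQL server instead of the bundled container
pg_hostname=database.example.com
pg_port=5432
pg_database=awx
pg_username=awx
pg_password=awxpass
pg_admin_password=postgrespass
```
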
### Run the installer

If you are not pushing images to a Docker registry, start the install by running the following:

```bash
# Set the working directory to installer
$ cd installer

# Run the Ansible playbook
$ ansible-playbook -i inventory install.yml
```

If you're pushing built images to a repository, then use the `-e` option to pass the registry password as follows, replacing *password* with the password of the username assigned to `docker_registry_username` (note that you will also need to remove `dockerhub_base` and `dockerhub_version` from the inventory file):

```bash
# Set the working directory to installer
$ cd installer

# Run the Ansible playbook
$ ansible-playbook -i inventory -e docker_registry_password=password install.yml
```

### Post-install

After the playbook run completes, Docker starts a series of containers that provide the services that make up AWX. You can view the running containers using the `docker ps` command.

If you're deploying using Docker Compose, container names will be prefixed by the name of the folder where the `docker-compose.yml` file is created (by default, `awx`).

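For example, to get a quick view of just the container names and their state (the names mentioned in the comment are only what a default install typically produces; yours will vary with the compose project name):

```bash
# List running containers with their names and status
$ docker ps --format 'table {{.Names}}\t{{.Status}}'
# A default install typically shows containers such as awx_task, awx_web,
# and awx_postgres in the "Up" state.
```
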
Immediately after the containers start, the *awx_task* container will perform required setup tasks, including database migrations. These tasks need to complete before the web interface can be accessed. To monitor the progress, you can follow the container's STDOUT by running the following:

```bash
# Tail the awx_task log
$ docker logs -f awx_task
```

You will see output similar to the following:

```bash
Using /etc/ansible/ansible.cfg as config file
127.0.0.1 | SUCCESS => {
    "changed": false,
    "db": "awx"
}
Operations to perform:
  Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
  Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
Synchronizing apps without migrations:
  Creating tables...
    Running deferred SQL...
  Installing custom SQL...
Running migrations:
  Rendering model states... DONE
  Applying contenttypes.0001_initial... OK
  Applying contenttypes.0002_remove_content_type_name... OK
  Applying auth.0001_initial... OK
  Applying auth.0002_alter_permission_name_max_length... OK
  Applying auth.0003_alter_user_email_max_length... OK
  Applying auth.0004_alter_user_username_opts... OK
  Applying auth.0005_alter_user_last_login_null... OK
  Applying auth.0006_require_contenttypes_0002... OK
  Applying taggit.0001_initial... OK
  Applying taggit.0002_auto_20150616_2121... OK
  Applying main.0001_initial... OK
  ...
```

Once migrations complete, you will see the following log output, indicating that the setup tasks have finished:

```bash
Python 2.7.5 (default, Nov  6 2016, 00:28:07)
[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)

>>> <User: admin>
>>> Default organization added.
Demo Credential, Inventory, and Job Template added.
Successfully registered instance awx
(changed: True)
Creating instance group tower
Added instance awx to tower
(changed: True)
...
```

### Accessing AWX

The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).

You will be prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.

# Installing the AWX CLI
|
||||
|
||||
Makefile (230 changed lines)
@@ -20,11 +20,12 @@ COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
COMPOSE_HOST ?= $(shell hostname)
|
||||
|
||||
VENV_BASE ?= /var/lib/awx/venv/
|
||||
COLLECTION_BASE ?= /var/lib/awx/vendor/awx_ansible_collections
|
||||
SCL_PREFIX ?=
|
||||
CELERY_SCHEDULE_FILE ?= /var/lib/awx/beat.db
|
||||
|
||||
DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
|
||||
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
|
||||
# Python packages to install only from source (not from binary wheels)
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio,pycurl
|
||||
@@ -60,11 +61,11 @@ WHEEL_FILE ?= $(WHEEL_NAME)-py2-none-any.whl
|
||||
I18N_FLAG_FILE = .i18n_built
|
||||
|
||||
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
|
||||
develop refresh adduser migrate dbchange runserver \
|
||||
develop refresh adduser migrate dbchange \
|
||||
receiver test test_unit test_coverage coverage_html \
|
||||
dev_build release_build release_clean sdist \
|
||||
ui-docker-machine ui-docker ui-release ui-devel \
|
||||
ui-test ui-deps ui-test-ci VERSION
|
||||
dev_build release_build sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION docker-compose-sources
|
||||
|
||||
clean-tmp:
|
||||
rm -rf tmp/
|
||||
@@ -113,31 +114,7 @@ guard-%:
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
virtualenv: virtualenv_ansible virtualenv_awx
|
||||
|
||||
# virtualenv_* targets do not use --system-site-packages to prevent bugs installing packages
|
||||
# but Ansible venvs are expected to have this, so that must be done after venv creation
|
||||
virtualenv_ansible:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
if [ ! -d "$(VENV_BASE)" ]; then \
|
||||
mkdir $(VENV_BASE); \
|
||||
fi; \
|
||||
if [ ! -d "$(VENV_BASE)/ansible" ]; then \
|
||||
virtualenv -p python $(VENV_BASE)/ansible && \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
|
||||
fi; \
|
||||
fi
|
||||
|
||||
virtualenv_ansible_py3:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
if [ ! -d "$(VENV_BASE)" ]; then \
|
||||
mkdir $(VENV_BASE); \
|
||||
fi; \
|
||||
if [ ! -d "$(VENV_BASE)/ansible" ]; then \
|
||||
virtualenv -p $(PYTHON) $(VENV_BASE)/ansible; \
|
||||
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) $(VENV_BOOTSTRAP); \
|
||||
fi; \
|
||||
fi
|
||||
virtualenv: virtualenv_awx
|
||||
|
||||
# flit is needed for offline install of certain packages, specifically ptyprocess
|
||||
# it is needed for setup, but not always recognized as a setup dependency
|
||||
@@ -153,32 +130,6 @@ virtualenv_awx:
|
||||
fi; \
|
||||
fi
|
||||
|
||||
# --ignore-install flag is not used because *.txt files should specify exact versions
|
||||
requirements_ansible: virtualenv_ansible
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt
|
||||
# Same effect as using --system-site-packages flag on venv creation
|
||||
rm $(shell ls -d $(VENV_BASE)/ansible/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
|
||||
requirements_ansible_py3: virtualenv_ansible_py3
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
|
||||
# Same effect as using --system-site-packages flag on venv creation
|
||||
rm $(shell ls -d $(VENV_BASE)/ansible/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
|
||||
requirements_ansible_dev:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
$(VENV_BASE)/ansible/bin/pip install pytest mock; \
|
||||
fi
|
||||
|
||||
# Install third-party requirements needed for AWX's environment.
|
||||
# this does not use system site packages intentionally
|
||||
requirements_awx: virtualenv_awx
|
||||
@@ -192,17 +143,9 @@ requirements_awx: virtualenv_awx
|
||||
requirements_awx_dev:
|
||||
$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt
|
||||
|
||||
requirements_collections:
|
||||
mkdir -p $(COLLECTION_BASE)
|
||||
n=0; \
|
||||
until [ "$$n" -ge 5 ]; do \
|
||||
ansible-galaxy collection install -r requirements/collections_requirements.yml -p $(COLLECTION_BASE) && break; \
|
||||
n=$$((n+1)); \
|
||||
done
|
||||
requirements: requirements_awx
|
||||
|
||||
requirements: requirements_ansible requirements_awx requirements_collections
|
||||
|
||||
requirements_dev: requirements_awx requirements_ansible_py3 requirements_awx_dev requirements_ansible_dev
|
||||
requirements_dev: requirements_awx requirements_awx_dev
|
||||
|
||||
requirements_test: requirements
|
||||
|
||||
@@ -267,11 +210,27 @@ collectstatic:
|
||||
fi; \
|
||||
mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
|
||||
|
||||
UWSGI_DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:awx-dispatcher tower-processes:awx-receiver
|
||||
|
||||
uwsgi: collectstatic
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/var/lib/awx/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1="exec:supervisorctl restart tower-processes:awx-dispatcher tower-processes:awx-receiver"
|
||||
uwsgi -b 32768 \
|
||||
--socket 127.0.0.1:8050 \
|
||||
--module=awx.wsgi:application \
|
||||
--home=/var/lib/awx/venv/awx \
|
||||
--chdir=/awx_devel/ \
|
||||
--vacuum \
|
||||
--processes=5 \
|
||||
--harakiri=120 --master \
|
||||
--no-orphans \
|
||||
--py-autoreload 1 \
|
||||
--max-requests=1000 \
|
||||
--stats /tmp/stats.socket \
|
||||
--lazy-apps \
|
||||
--logformat "%(addr) %(method) %(uri) - %(proto) %(status)" \
|
||||
--hook-accepting1="exec: $(UWSGI_DEV_RELOAD_COMMAND)"
|
||||
|
||||
daphne:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -365,7 +324,8 @@ test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
fi && \
|
||||
pip install ansible && \
|
||||
py.test $(COLLECTION_TEST_DIRS) -v
|
||||
# The python path needs to be modified so that the tests can find Ansible within the container
|
||||
# First we will use anything expility set as PYTHONPATH
|
||||
@@ -427,39 +387,6 @@ bulk_data:
|
||||
fi; \
|
||||
$(PYTHON) tools/data_generators/rbac_dummy_data_generator.py --preset=$(DATA_GEN_PRESET)
|
||||
|
||||
# l10n TASKS
|
||||
# --------------------------------------
|
||||
|
||||
# check for UI po files
|
||||
HAVE_PO := $(shell ls awx/ui/po/*.po 2>/dev/null)
|
||||
check-po:
|
||||
ifdef HAVE_PO
|
||||
# Should be 'Language: zh-CN' but not 'Language: zh_CN' in zh_CN.po
|
||||
for po in awx/ui/po/*.po ; do \
|
||||
echo $$po; \
|
||||
mo="awx/ui/po/`basename $$po .po`.mo"; \
|
||||
msgfmt --check --verbose $$po -o $$mo; \
|
||||
if test "$$?" -ne 0 ; then \
|
||||
exit -1; \
|
||||
fi; \
|
||||
rm $$mo; \
|
||||
name=`echo "$$po" | grep '-'`; \
|
||||
if test "x$$name" != x ; then \
|
||||
right_name=`echo $$language | sed -e 's/-/_/'`; \
|
||||
echo "ERROR: WRONG $$name CORRECTION: $$right_name"; \
|
||||
exit -1; \
|
||||
fi; \
|
||||
language=`grep '^"Language:' "$$po" | grep '_'`; \
|
||||
if test "x$$language" != x ; then \
|
||||
right_language=`echo $$language | sed -e 's/_/-/'`; \
|
||||
echo "ERROR: WRONG $$language CORRECTION: $$right_language in $$po"; \
|
||||
exit -1; \
|
||||
fi; \
|
||||
done;
|
||||
else
|
||||
@echo No PO files
|
||||
endif
|
||||
|
||||
|
||||
# UI TASKS
|
||||
# --------------------------------------
|
||||
@@ -472,16 +399,13 @@ clean-ui:
|
||||
rm -rf awx/ui_next/build
|
||||
rm -rf awx/ui_next/src/locales/_build
|
||||
rm -rf $(UI_BUILD_FLAG_FILE)
|
||||
git checkout awx/ui_next/src/locales
|
||||
|
||||
awx/ui_next/node_modules:
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn --ignore-scripts install
|
||||
|
||||
$(UI_BUILD_FLAG_FILE):
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run compile-strings
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run build
|
||||
git checkout awx/ui_next/src/locales
|
||||
mkdir -p awx/public/static/css
|
||||
mkdir -p awx/public/static/js
|
||||
mkdir -p awx/public/static/media
|
||||
@@ -495,11 +419,17 @@ ui-release: awx/ui_next/node_modules $(UI_BUILD_FLAG_FILE)
|
||||
ui-devel: awx/ui_next/node_modules
|
||||
@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
|
||||
|
||||
ui-devel-instrumented: awx/ui_next/node_modules
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run start-instrumented
|
||||
|
||||
ui-devel-test: awx/ui_next/node_modules
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run start
|
||||
|
||||
ui-zuul-lint-and-test:
|
||||
$(NPM_BIN) --prefix awx/ui_next install
|
||||
$(NPM_BIN) run --prefix awx/ui_next lint
|
||||
$(NPM_BIN) run --prefix awx/ui_next prettier-check
|
||||
$(NPM_BIN) run --prefix awx/ui_next test
|
||||
$(NPM_BIN) run --prefix awx/ui_next test -- --coverage --watchAll=false
|
||||
|
||||
|
||||
# Build a pip-installable package into dist/ with a timestamped version number.
|
||||
@@ -543,31 +473,30 @@ docker-auth:
|
||||
awx/projects:
|
||||
@mkdir -p $@
|
||||
|
||||
# Docker isolated rampart
|
||||
docker-compose-isolated: awx/projects
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
|
||||
|
||||
COMPOSE_UP_OPTS ?=
|
||||
CLUSTER_NODE_COUNT ?= 1
|
||||
|
||||
# Docker Compose Development environment
|
||||
docker-compose: docker-auth awx/projects
|
||||
CURRENT_UID=$(shell id -u) OS="$(shell docker info | grep 'Operating System')" TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml $(COMPOSE_UP_OPTS) up --no-recreate awx
|
||||
docker-compose-sources:
|
||||
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
|
||||
-e awx_image=$(DEV_DOCKER_TAG_BASE)/awx_devel \
|
||||
-e awx_image_tag=$(COMPOSE_TAG) \
|
||||
-e cluster_node_count=$(CLUSTER_NODE_COUNT)
|
||||
|
||||
docker-compose-cluster: docker-auth awx/projects
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
|
||||
docker-compose: docker-auth awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_UP_OPTS) up
|
||||
|
||||
docker-compose-credential-plugins: docker-auth awx/projects
|
||||
docker-compose-credential-plugins: docker-auth awx/projects docker-compose-sources
|
||||
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx
|
||||
|
||||
docker-compose-test: docker-auth awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) OS="$(shell docker info | grep 'Operating System')" TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
|
||||
docker-compose-test: docker-auth awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /bin/bash
|
||||
|
||||
docker-compose-runtest: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
|
||||
docker-compose-runtest: awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /start_tests.sh
|
||||
|
||||
docker-compose-build-swagger: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports --no-deps awx /start_tests.sh swagger
|
||||
docker-compose-build-swagger: awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger
|
||||
|
||||
detect-schema-change: genschema
|
||||
curl https://s3.amazonaws.com/awx-public-ci-files/schema.json -o reference-schema.json
|
||||
@@ -575,21 +504,14 @@ detect-schema-change: genschema
|
||||
diff -u -b reference-schema.json schema.json
|
||||
|
||||
docker-compose-clean: awx/projects
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml rm -sf
|
||||
|
||||
# Base development image build
|
||||
docker-compose-build:
|
||||
ansible localhost -m template -a "src=installer/roles/image_build/templates/Dockerfile.j2 dest=tools/docker-compose/Dockerfile" -e build_dev=True
|
||||
docker build -t ansible/awx_devel -f tools/docker-compose/Dockerfile \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
docker tag ansible/awx_devel $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
#docker push $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
|
||||
# For use when developing on "isolated" AWX deployments
|
||||
docker-compose-isolated-build: docker-compose-build
|
||||
docker build -t ansible/awx_isolated -f tools/docker-isolated/Dockerfile .
|
||||
docker tag ansible/awx_isolated $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
|
||||
#docker push $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True
|
||||
DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
|
||||
docker-clean:
|
||||
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq),docker stop $(container_id); docker rm -f $(container_id);)
|
||||
@@ -601,11 +523,11 @@ docker-clean-volumes: docker-compose-clean
|
||||
docker-refresh: docker-clean docker-compose
|
||||
|
||||
# Docker Development Environment with Elastic Stack Connected
|
||||
docker-compose-elk: docker-auth awx/projects
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
docker-compose-elk: docker-auth awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-cluster-elk: docker-auth awx/projects
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
docker-compose-cluster-elk: docker-auth awx/projects docker-compose-sources
|
||||
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
prometheus:
|
||||
docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml
|
||||
@@ -624,5 +546,33 @@ psql-container:
|
||||
VERSION:
|
||||
@echo "awx: $(VERSION)"
|
||||
|
||||
Dockerfile: installer/roles/image_build/templates/Dockerfile.j2
|
||||
ansible localhost -m template -a "src=installer/roles/image_build/templates/Dockerfile.j2 dest=Dockerfile"
|
||||
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml
|
||||
|
||||
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.kube-dev \
|
||||
-e kube_dev=True \
|
||||
-e template_dest=_build_kube_dev
|
||||
|
||||
awx-kube-dev-build: Dockerfile.kube-dev
|
||||
docker build -f Dockerfile.kube-dev \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
|
||||
# generate UI .pot
|
||||
pot: $(UI_BUILD_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template
|
||||
|
||||
# generate API django .pot .po
|
||||
LANG = "en-us"
|
||||
messages:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py makemessages -l $(LANG) --keep-pot
|
||||
|
||||
README.md (10 changed lines)
@@ -1,4 +1,7 @@
|
||||
[](https://ansible.softwarefactory-project.io/zuul/status)
|
||||
[](https://ansible.softwarefactory-project.io/zuul/status) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
|
||||
[](https://webchat.freenode.net/#ansible-awx)
|
||||
|
||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||
|
||||
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
|
||||
|
||||
@@ -36,8 +39,3 @@ We welcome your feedback and ideas. Here's how to reach us with feedback and que
|
||||
|
||||
- Join the `#ansible-awx` channel on irc.freenode.net
|
||||
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
[Apache v2](./LICENSE.md)
|
||||
|
||||
@@ -24,7 +24,7 @@ from rest_framework.request import clone_request
|
||||
from awx.api.fields import ChoiceNullField
|
||||
from awx.main.fields import JSONField, ImplicitRoleField
|
||||
from awx.main.models import NotificationTemplate
|
||||
from awx.main.scheduler.kubernetes import PodManager
|
||||
from awx.main.tasks import AWXReceptorJob
|
||||
|
||||
|
||||
class Metadata(metadata.SimpleMetadata):
|
||||
@@ -209,7 +209,7 @@ class Metadata(metadata.SimpleMetadata):
|
||||
continue
|
||||
|
||||
if field == "pod_spec_override":
|
||||
meta['default'] = PodManager().pod_definition
|
||||
meta['default'] = AWXReceptorJob().pod_definition
|
||||
|
||||
# Add type choices if available from the serializer.
|
||||
if field == 'type' and hasattr(serializer, 'get_type_choices'):
|
||||
|
||||
@@ -50,7 +50,7 @@ from awx.main.constants import (
|
||||
)
|
||||
from awx.main.models import (
|
||||
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,
|
||||
CredentialType, CustomInventoryScript, Group, Host, Instance,
|
||||
CredentialType, CustomInventoryScript, ExecutionEnvironment, Group, Host, Instance,
|
||||
InstanceGroup, Inventory, InventorySource, InventoryUpdate,
|
||||
InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
|
||||
JobNotificationMixin, JobTemplate, Label, Notification, NotificationTemplate,
|
||||
@@ -107,6 +107,8 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'insights_credential_id',),
|
||||
'host': DEFAULT_SUMMARY_FIELDS,
|
||||
'group': DEFAULT_SUMMARY_FIELDS,
|
||||
'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
|
||||
'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
|
||||
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
|
||||
@@ -124,12 +126,12 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
|
||||
'inventory_source': ('source', 'last_updated', 'status'),
|
||||
'inventory_source': ('id', 'name', 'source', 'last_updated', 'status'),
|
||||
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
|
||||
'source_script': DEFAULT_SUMMARY_FIELDS,
|
||||
'role': ('id', 'role_field'),
|
||||
'notification_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'instance_group': ('id', 'name', 'controller_id', 'is_containerized'),
|
||||
'instance_group': ('id', 'name', 'controller_id', 'is_container_group'),
|
||||
'insights_credential': DEFAULT_SUMMARY_FIELDS,
|
||||
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
@@ -647,7 +649,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = UnifiedJobTemplate
|
||||
fields = ('*', 'last_job_run', 'last_job_failed',
|
||||
'next_job_run', 'status')
|
||||
'next_job_run', 'status', 'execution_environment')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
|
||||
@@ -657,6 +659,9 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
|
||||
if obj.next_schedule:
|
||||
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
|
||||
if obj.execution_environment_id:
|
||||
res['execution_environment'] = self.reverse('api:execution_environment_detail',
|
||||
kwargs={'pk': obj.execution_environment_id})
|
||||
return res
|
||||
|
||||
def get_types(self):
|
||||
@@ -711,6 +716,7 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = UnifiedJob
|
||||
fields = ('*', 'unified_job_template', 'launch_type', 'status',
|
||||
'execution_environment',
|
||||
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
|
||||
'job_cwd', 'job_env', 'job_explanation',
|
||||
'execution_node', 'controller_node',
|
||||
@@ -748,6 +754,9 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
|
||||
if obj.workflow_job_id:
|
||||
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
|
||||
if obj.execution_environment_id:
|
||||
res['execution_environment'] = self.reverse('api:execution_environment_detail',
|
||||
kwargs={'pk': obj.execution_environment_id})
|
||||
return res
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
@@ -1243,11 +1252,13 @@ class OrganizationSerializer(BaseSerializer):
|
||||
|
||||
class Meta:
|
||||
model = Organization
|
||||
fields = ('*', 'max_hosts', 'custom_virtualenv',)
|
||||
fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment',)
|
||||
read_only_fields = ('*', 'custom_virtualenv',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(OrganizationSerializer, self).get_related(obj)
|
||||
res.update(dict(
|
||||
res.update(
|
||||
execution_environments = self.reverse('api:organization_execution_environments_list', kwargs={'pk': obj.pk}),
|
||||
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
|
||||
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
|
||||
job_templates = self.reverse('api:organization_job_templates_list', kwargs={'pk': obj.pk}),
|
||||
@@ -1267,7 +1278,10 @@ class OrganizationSerializer(BaseSerializer):
|
||||
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
|
||||
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
|
||||
galaxy_credentials = self.reverse('api:organization_galaxy_credentials_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
)
|
||||
if obj.default_environment:
|
||||
res['default_environment'] = self.reverse('api:execution_environment_detail',
|
||||
kwargs={'pk': obj.default_environment_id})
|
||||
return res
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
@@ -1347,6 +1361,29 @@ class ProjectOptionsSerializer(BaseSerializer):
|
||||
return super(ProjectOptionsSerializer, self).validate(attrs)
|
||||
|
||||
|
||||
class ExecutionEnvironmentSerializer(BaseSerializer):
|
||||
show_capabilities = ['edit', 'delete', 'copy']
|
||||
managed_by_tower = serializers.ReadOnlyField()
|
||||
|
||||
class Meta:
|
||||
model = ExecutionEnvironment
|
||||
fields = ('*', 'organization', 'image', 'managed_by_tower', 'credential', 'pull')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ExecutionEnvironmentSerializer, self).get_related(obj)
|
||||
res.update(
|
||||
activity_stream=self.reverse('api:execution_environment_activity_stream_list', kwargs={'pk': obj.pk}),
|
||||
unified_job_templates=self.reverse('api:execution_environment_job_template_list', kwargs={'pk': obj.pk}),
|
||||
copy=self.reverse('api:execution_environment_copy', kwargs={'pk': obj.pk}),
|
||||
)
|
||||
if obj.organization:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
|
||||
if obj.credential:
|
||||
res['credential'] = self.reverse('api:credential_detail',
|
||||
kwargs={'pk': obj.credential.pk})
|
||||
return res
|
||||
|
||||
|
||||
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
|
||||
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
|
||||
@@ -1360,9 +1397,10 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
|
||||
class Meta:
|
||||
model = Project
|
||||
fields = ('*', 'organization', 'scm_update_on_launch',
|
||||
'scm_update_cache_timeout', 'allow_override', 'custom_virtualenv',) + \
|
||||
fields = ('*', '-execution_environment', 'organization', 'scm_update_on_launch',
|
||||
'scm_update_cache_timeout', 'allow_override', 'custom_virtualenv', 'default_environment') + \
|
||||
('last_update_failed', 'last_updated') # Backwards compatibility
|
||||
read_only_fields = ('*', 'custom_virtualenv',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ProjectSerializer, self).get_related(obj)
|
||||
@@ -1386,6 +1424,9 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
if obj.organization:
|
||||
res['organization'] = self.reverse('api:organization_detail',
|
||||
kwargs={'pk': obj.organization.pk})
|
||||
if obj.default_environment:
|
||||
res['default_environment'] = self.reverse('api:execution_environment_detail',
|
||||
kwargs={'pk': obj.default_environment_id})
|
||||
# Backwards compatibility.
|
||||
if obj.current_update:
|
||||
res['current_update'] = self.reverse('api:project_update_detail',
|
||||
@@ -1939,6 +1980,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
|
||||
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
|
||||
'enabled_var', 'enabled_value', 'host_filter', 'overwrite', 'overwrite_vars',
|
||||
'custom_virtualenv', 'timeout', 'verbosity')
|
||||
read_only_fields = ('*', 'custom_virtualenv',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
|
||||
@@ -2924,6 +2966,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
'become_enabled', 'diff_mode', 'allow_simultaneous', 'custom_virtualenv',
|
||||
'job_slice_count', 'webhook_service', 'webhook_credential',
|
||||
)
|
||||
read_only_fields = ('*', 'custom_virtualenv',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobTemplateSerializer, self).get_related(obj)
|
||||
@@ -4731,10 +4774,10 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
'Isolated groups have a designated controller group.'),
|
||||
read_only=True
|
||||
)
|
||||
is_containerized = serializers.BooleanField(
|
||||
is_container_group = serializers.BooleanField(
|
||||
required=False,
|
||||
help_text=_('Indicates whether instances in this group are containerized.'
|
||||
'Containerized groups have a designated Openshift or Kubernetes cluster.'),
|
||||
read_only=True
|
||||
'Containerized groups have a designated Openshift or Kubernetes cluster.')
|
||||
)
|
||||
# NOTE: help_text is duplicated from field definitions, no obvious way of
|
||||
# both defining field details here and also getting the field's help_text
|
||||
@@ -4761,7 +4804,7 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
fields = ("id", "type", "url", "related", "name", "created", "modified",
|
||||
"capacity", "committed_capacity", "consumed_capacity",
|
||||
"percent_capacity_remaining", "jobs_running", "jobs_total",
|
||||
"instances", "controller", "is_controller", "is_isolated", "is_containerized", "credential",
|
||||
"instances", "controller", "is_controller", "is_isolated", "is_container_group", "credential",
|
||||
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list",
|
||||
"pod_spec_override", "summary_fields")
|
||||
|
||||
@@ -4786,17 +4829,17 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
raise serializers.ValidationError(_('Isolated instances may not be added or removed from instances groups via the API.'))
|
||||
if self.instance and self.instance.controller_id is not None:
|
||||
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
|
||||
if value and self.instance and self.instance.is_containerized:
|
||||
if value and self.instance and self.instance.is_container_group:
|
||||
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
|
||||
return value
|
||||
|
||||
def validate_policy_instance_percentage(self, value):
|
||||
if value and self.instance and self.instance.is_containerized:
|
||||
if value and self.instance and self.instance.is_container_group:
|
||||
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
|
||||
return value
|
||||
|
||||
def validate_policy_instance_minimum(self, value):
|
||||
if value and self.instance and self.instance.is_containerized:
|
||||
if value and self.instance and self.instance.is_container_group:
|
||||
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
|
||||
return value
|
||||
|
||||
@@ -4810,6 +4853,15 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group'))
|
||||
return value
|
||||
|
||||
def validate(self, attrs):
|
||||
attrs = super(InstanceGroupSerializer, self).validate(attrs)
|
||||
|
||||
if attrs.get('credential') and not attrs.get('is_container_group'):
|
||||
raise serializers.ValidationError({'is_container_group': _(
|
||||
'is_container_group must be True when associating a credential to an Instance Group')})
|
||||
|
||||
return attrs
|
||||
|
||||
def get_capacity_dict(self):
|
||||
# Store capacity values (globally computed) in the context
|
||||
if 'capacity_map' not in self.context:
|
||||
|
||||
awx/api/urls/execution_environments.py (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
from django.conf.urls import url
|
||||
|
||||
from awx.api.views import (
|
||||
ExecutionEnvironmentList,
|
||||
ExecutionEnvironmentDetail,
|
||||
ExecutionEnvironmentJobTemplateList,
|
||||
ExecutionEnvironmentCopy,
|
||||
ExecutionEnvironmentActivityStreamList,
|
||||
)
|
||||
|
||||
|
||||
urls = [
|
||||
url(r'^$', ExecutionEnvironmentList.as_view(), name='execution_environment_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/$', ExecutionEnvironmentDetail.as_view(), name='execution_environment_detail'),
|
||||
url(r'^(?P<pk>[0-9]+)/unified_job_templates/$', ExecutionEnvironmentJobTemplateList.as_view(), name='execution_environment_job_template_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/copy/$', ExecutionEnvironmentCopy.as_view(), name='execution_environment_copy'),
|
||||
url(r'^(?P<pk>[0-9]+)/activity_stream/$', ExecutionEnvironmentActivityStreamList.as_view(), name='execution_environment_activity_stream_list'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
@@ -9,6 +9,7 @@ from awx.api.views import (
|
||||
OrganizationUsersList,
|
||||
OrganizationAdminsList,
|
||||
OrganizationInventoriesList,
|
||||
OrganizationExecutionEnvironmentsList,
|
||||
OrganizationProjectsList,
|
||||
OrganizationJobTemplatesList,
|
||||
OrganizationWorkflowJobTemplatesList,
|
||||
@@ -34,6 +35,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/users/$', OrganizationUsersList.as_view(), name='organization_users_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/admins/$', OrganizationAdminsList.as_view(), name='organization_admins_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/inventories/$', OrganizationInventoriesList.as_view(), name='organization_inventories_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/execution_environments/$', OrganizationExecutionEnvironmentsList.as_view(), name='organization_execution_environments_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/projects/$', OrganizationProjectsList.as_view(), name='organization_projects_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/job_templates/$', OrganizationJobTemplatesList.as_view(), name='organization_job_templates_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/workflow_job_templates/$', OrganizationWorkflowJobTemplatesList.as_view(), name='organization_workflow_job_templates_list'),
|
||||
|
||||
@@ -42,6 +42,7 @@ from .user import urls as user_urls
|
||||
from .project import urls as project_urls
|
||||
from .project_update import urls as project_update_urls
|
||||
from .inventory import urls as inventory_urls
|
||||
from .execution_environments import urls as execution_environment_urls
|
||||
from .team import urls as team_urls
|
||||
from .host import urls as host_urls
|
||||
from .group import urls as group_urls
|
||||
@@ -106,6 +107,7 @@ v2_urls = [
|
||||
url(r'^schedules/', include(schedule_urls)),
|
||||
url(r'^organizations/', include(organization_urls)),
|
||||
url(r'^users/', include(user_urls)),
|
||||
url(r'^execution_environments/', include(execution_environment_urls)),
|
||||
url(r'^projects/', include(project_urls)),
|
||||
url(r'^project_updates/', include(project_update_urls)),
|
||||
url(r'^teams/', include(team_urls)),
|
||||
|
||||
@@ -112,6 +112,7 @@ from awx.api.views.organization import ( # noqa
|
||||
OrganizationInventoriesList,
|
||||
OrganizationUsersList,
|
||||
OrganizationAdminsList,
|
||||
OrganizationExecutionEnvironmentsList,
|
||||
OrganizationProjectsList,
|
||||
OrganizationJobTemplatesList,
|
||||
OrganizationWorkflowJobTemplatesList,
|
||||
@@ -396,7 +397,7 @@ class InstanceGroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAP
|
||||
permission_classes = (InstanceGroupTowerPermission,)
|
||||
|
||||
def update_raw_data(self, data):
|
||||
if self.get_object().is_containerized:
|
||||
if self.get_object().is_container_group:
|
||||
data.pop('policy_instance_percentage', None)
|
||||
data.pop('policy_instance_minimum', None)
|
||||
data.pop('policy_instance_list', None)
|
||||
@@ -685,6 +686,52 @@ class TeamAccessList(ResourceAccessList):
|
||||
parent_model = models.Team
|
||||
|
||||
|
||||
class ExecutionEnvironmentList(ListCreateAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = models.ExecutionEnvironment
|
||||
serializer_class = serializers.ExecutionEnvironmentSerializer
|
||||
swagger_topic = "Execution Environments"
|
||||
|
||||
|
||||
class ExecutionEnvironmentDetail(RetrieveUpdateDestroyAPIView):
|
||||
|
||||
always_allow_superuser = False
|
||||
model = models.ExecutionEnvironment
|
||||
serializer_class = serializers.ExecutionEnvironmentSerializer
|
||||
swagger_topic = "Execution Environments"
|
||||
|
||||
|
||||
class ExecutionEnvironmentJobTemplateList(SubListAPIView):
|
||||
|
||||
model = models.UnifiedJobTemplate
|
||||
serializer_class = serializers.UnifiedJobTemplateSerializer
|
||||
parent_model = models.ExecutionEnvironment
|
||||
relationship = 'unifiedjobtemplates'
|
||||
|
||||
|
||||
class ExecutionEnvironmentCopy(CopyAPIView):
|
||||
|
||||
model = models.ExecutionEnvironment
|
||||
copy_return_serializer_class = serializers.ExecutionEnvironmentSerializer
|
||||
|
||||
|
||||
class ExecutionEnvironmentActivityStreamList(SubListAPIView):
|
||||
|
||||
model = models.ActivityStream
|
||||
serializer_class = serializers.ActivityStreamSerializer
|
||||
parent_model = models.ExecutionEnvironment
|
||||
relationship = 'activitystream_set'
|
||||
search_fields = ('changes',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(execution_environment=parent)
class ProjectList(ListCreateAPIView):
|
||||
|
||||
model = models.Project
|
||||
|
||||
@@ -15,6 +15,7 @@ from awx.main.models import (
|
||||
Inventory,
|
||||
Host,
|
||||
Project,
|
||||
ExecutionEnvironment,
|
||||
JobTemplate,
|
||||
WorkflowJobTemplate,
|
||||
Organization,
|
||||
@@ -45,6 +46,7 @@ from awx.api.serializers import (
|
||||
RoleSerializer,
|
||||
NotificationTemplateSerializer,
|
||||
InstanceGroupSerializer,
|
||||
ExecutionEnvironmentSerializer,
|
||||
ProjectSerializer, JobTemplateSerializer, WorkflowJobTemplateSerializer,
|
||||
CredentialSerializer
|
||||
)
|
||||
@@ -141,6 +143,16 @@ class OrganizationProjectsList(SubListCreateAPIView):
|
||||
parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationExecutionEnvironmentsList(SubListCreateAttachDetachAPIView):
|
||||
|
||||
model = ExecutionEnvironment
|
||||
serializer_class = ExecutionEnvironmentSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'executionenvironments'
|
||||
parent_key = 'organization'
|
||||
swagger_topic = "Execution Environments"
|
||||
|
||||
|
||||
class OrganizationJobTemplatesList(SubListCreateAPIView):
|
||||
|
||||
model = JobTemplate
|
||||
|
||||
@@ -100,6 +100,7 @@ class ApiVersionRootView(APIView):
|
||||
data['dashboard'] = reverse('api:dashboard_view', request=request)
|
||||
data['organizations'] = reverse('api:organization_list', request=request)
|
||||
data['users'] = reverse('api:user_list', request=request)
|
||||
data['execution_environments'] = reverse('api:execution_environment_list', request=request)
|
||||
data['projects'] = reverse('api:project_list', request=request)
|
||||
data['project_updates'] = reverse('api:project_update_list', request=request)
|
||||
data['teams'] = reverse('api:team_list', request=request)
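For orientation only (this is not part of the commits being compared): once the API root advertises the new execution_environments key and the URL config above includes execution_environment_urls, the top-level endpoint can be exercised directly. A minimal sketch follows; the host, token, and organization id are hypothetical, and the field names (name, image, organization) come from the ExecutionEnvironment migration later in this diff.

import requests

base = 'https://awx.example.com/api/v2'          # hypothetical AWX host
headers = {'Authorization': 'Bearer REDACTED'}   # hypothetical OAuth2 token

# List execution environments (api:execution_environment_list).
requests.get(f'{base}/execution_environments/', headers=headers)

# Create one scoped to an organization.
requests.post(
    f'{base}/execution_environments/',
    headers=headers,
    json={'name': 'Custom EE', 'image': 'quay.io/example/awx-ee:latest', 'organization': 1},
)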
|
||||
|
||||
@@ -14,6 +14,7 @@ from rest_framework.fields import ( # noqa
|
||||
BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField,
|
||||
IntegerField, ListField, NullBooleanField
|
||||
)
|
||||
from rest_framework.serializers import PrimaryKeyRelatedField # noqa
|
||||
|
||||
logger = logging.getLogger('awx.conf.fields')
|
||||
|
||||
|
||||
@@ -23,4 +23,4 @@ class Migration(migrations.Migration):
|
||||
operations = [
|
||||
migrations.RunPython(copy_session_settings, reverse_copy_session_settings),
|
||||
]
@@ -352,7 +352,6 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
self.cache.set_many(settings_to_cache, timeout=SETTING_CACHE_TIMEOUT)
|
||||
|
||||
def _get_local(self, name, validate=True):
|
||||
self.__clean_on_fork__()
|
||||
self._preload_cache()
|
||||
cache_key = Setting.get_cache_key(name)
|
||||
try:
|
||||
|
||||
@@ -29,9 +29,9 @@ from awx.main.utils import (
|
||||
)
|
||||
from awx.main.models import (
|
||||
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialType,
|
||||
CredentialInputSource, CustomInventoryScript, Group, Host, Instance, InstanceGroup,
|
||||
Inventory, InventorySource, InventoryUpdate, InventoryUpdateEvent, Job, JobEvent,
|
||||
JobHostSummary, JobLaunchConfig, JobTemplate, Label, Notification,
|
||||
CredentialInputSource, CustomInventoryScript, ExecutionEnvironment, Group, Host, Instance,
|
||||
InstanceGroup, Inventory, InventorySource, InventoryUpdate, InventoryUpdateEvent, Job,
|
||||
JobEvent, JobHostSummary, JobLaunchConfig, JobTemplate, Label, Notification,
|
||||
NotificationTemplate, Organization, Project, ProjectUpdate,
|
||||
ProjectUpdateEvent, Role, Schedule, SystemJob, SystemJobEvent,
|
||||
SystemJobTemplate, Team, UnifiedJob, UnifiedJobTemplate, WorkflowJob,
|
||||
@@ -777,6 +777,11 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
if data and data.get('default_environment'):
|
||||
ee = get_object_from_data('default_environment', ExecutionEnvironment, data)
|
||||
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
return self.user in obj.admin_role
|
||||
|
||||
def can_delete(self, obj):
|
||||
@@ -1308,6 +1313,54 @@ class TeamAccess(BaseAccess):
|
||||
*args, **kwargs)
class ExecutionEnvironmentAccess(BaseAccess):
|
||||
"""
|
||||
I can see an execution environment when:
|
||||
- I'm a superuser
|
||||
- I'm a member of the same organization
|
||||
- it is a global ExecutionEnvironment
|
||||
I can create/change an execution environment when:
|
||||
- I'm a superuser
|
||||
- I'm an admin for the organization(s)
|
||||
"""
|
||||
|
||||
model = ExecutionEnvironment
|
||||
select_related = ('organization',)
|
||||
prefetch_related = ('organization__admin_role', 'organization__execution_environment_admin_role')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return ExecutionEnvironment.objects.filter(
|
||||
Q(organization__in=Organization.accessible_pk_qs(self.user, 'read_role')) |
|
||||
Q(organization__isnull=True)
|
||||
).distinct()
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
if not data: # So the browseable API will work
|
||||
return Organization.accessible_objects(self.user, 'execution_environment_admin_role').exists()
|
||||
return self.check_related('organization', Organization, data, mandatory=True,
|
||||
role_field='execution_environment_admin_role')
|
||||
|
||||
def can_change(self, obj, data):
|
||||
if obj.managed_by_tower:
|
||||
raise PermissionDenied
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
if obj and obj.organization_id is None:
|
||||
raise PermissionDenied
|
||||
if self.user not in obj.organization.execution_environment_admin_role:
|
||||
raise PermissionDenied
|
||||
if data and 'organization' in data:
|
||||
new_org = get_object_from_data('organization', Organization, data, obj=obj)
|
||||
if not new_org or self.user not in new_org.execution_environment_admin_role:
|
||||
return False
|
||||
return self.check_related('organization', Organization, data, obj=obj, mandatory=True,
|
||||
role_field='execution_environment_admin_role')
|
||||
|
||||
def can_delete(self, obj):
|
||||
return self.can_change(obj, None)
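A rough sketch (not part of the diff) of how the rules in the ExecutionEnvironmentAccess docstring surface through the generic access API; user.can_access is the same helper the OrganizationAccess change above uses, and every object and name below is hypothetical.

from awx.main.models import ExecutionEnvironment, Organization, User

org = Organization.objects.get(name='Default')      # assumed to exist
user = User.objects.get(username='ee-admin')        # hypothetical user
ee = ExecutionEnvironment.objects.create(
    name='Custom EE', image='quay.io/example/awx-ee:latest', organization=org)

# Readable by superusers, by members of the owning organization, and by
# everyone when organization is NULL (a global EE) -- see filtered_queryset().
user.can_access(ExecutionEnvironment, 'read', ee)

# Changing requires execution_environment_admin_role on the organization;
# managed_by_tower and global EEs raise PermissionDenied for non-superusers.
user.can_access(ExecutionEnvironment, 'change', ee, {'description': 'updated'})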
class ProjectAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can see projects when:
|
||||
@@ -1337,14 +1390,29 @@ class ProjectAccess(NotificationAttachMixin, BaseAccess):
|
||||
def can_add(self, data):
|
||||
if not data: # So the browseable API will work
|
||||
return Organization.accessible_objects(self.user, 'project_admin_role').exists()
|
||||
return (self.check_related('organization', Organization, data, role_field='project_admin_role', mandatory=True) and
|
||||
self.check_related('credential', Credential, data, role_field='use_role'))
|
||||
|
||||
if data.get('default_environment'):
|
||||
ee = get_object_from_data('default_environment', ExecutionEnvironment, data)
|
||||
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
return (
|
||||
self.check_related('organization', Organization, data, role_field='project_admin_role', mandatory=True) and
|
||||
self.check_related('credential', Credential, data, role_field='use_role')
|
||||
)
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
return (self.check_related('organization', Organization, data, obj=obj, role_field='project_admin_role') and
|
||||
self.user in obj.admin_role and
|
||||
self.check_related('credential', Credential, data, obj=obj, role_field='use_role'))
|
||||
if data and data.get('default_environment'):
|
||||
ee = get_object_from_data('default_environment', ExecutionEnvironment, data, obj=obj)
|
||||
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
return (
|
||||
self.check_related('organization', Organization, data, obj=obj, role_field='project_admin_role') and
|
||||
self.user in obj.admin_role and
|
||||
self.check_related('credential', Credential, data, obj=obj, role_field='use_role')
|
||||
)
|
||||
|
||||
@check_superuser
|
||||
def can_start(self, obj, validate_license=True):
|
||||
@@ -1449,6 +1517,10 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
if self.user not in inventory.use_role:
|
||||
return False
|
||||
|
||||
ee = get_value(ExecutionEnvironment, 'execution_environment')
|
||||
if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
project = get_value(Project, 'project')
|
||||
# If the user has admin access to the project (as an org admin), should
|
||||
# be able to proceed without additional checks.
|
||||
@@ -1496,6 +1568,11 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
if self.changes_are_non_sensitive(obj, data):
|
||||
return True
|
||||
|
||||
if data.get('execution_environment'):
|
||||
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
|
||||
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
for required_field, cls in (('inventory', Inventory), ('project', Project)):
|
||||
is_mandatory = True
|
||||
if not getattr(obj, '{}_id'.format(required_field)):
|
||||
@@ -1926,6 +2003,11 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
if not data: # So the browseable API will work
|
||||
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
|
||||
|
||||
if data.get('execution_environment'):
|
||||
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
|
||||
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
return (
|
||||
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and
|
||||
self.check_related('inventory', Inventory, data, role_field='use_role')
|
||||
@@ -1975,6 +2057,11 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
if data and data.get('execution_environment'):
|
||||
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
|
||||
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
|
||||
return False
|
||||
|
||||
return (
|
||||
self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) and
|
||||
self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj) and
|
||||
|
||||
@@ -311,7 +311,7 @@ def events_table(since, full_path, until, **kwargs):
|
||||
return _copy_table(table='events', query=events_query, path=full_path)
|
||||
|
||||
|
||||
@register('unified_jobs_table', '1.1', format='csv', description=_('Data on jobs run'), expensive=True)
|
||||
@register('unified_jobs_table', '1.2', format='csv', description=_('Data on jobs run'), expensive=True)
|
||||
def unified_jobs_table(since, full_path, until, **kwargs):
|
||||
unified_job_query = '''COPY (SELECT main_unifiedjob.id,
|
||||
main_unifiedjob.polymorphic_ctype_id,
|
||||
@@ -334,7 +334,9 @@ def unified_jobs_table(since, full_path, until, **kwargs):
|
||||
main_unifiedjob.finished,
|
||||
main_unifiedjob.elapsed,
|
||||
main_unifiedjob.job_explanation,
|
||||
main_unifiedjob.instance_group_id
|
||||
main_unifiedjob.instance_group_id,
|
||||
main_unifiedjob.installed_collections,
|
||||
main_unifiedjob.ansible_version
|
||||
FROM main_unifiedjob
|
||||
JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
|
||||
LEFT JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
|
||||
|
||||
@@ -10,6 +10,7 @@ from rest_framework.fields import FloatField
|
||||
|
||||
# Tower
|
||||
from awx.conf import fields, register, register_validate
|
||||
from awx.main.models import ExecutionEnvironment
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.conf')
|
||||
@@ -176,6 +177,18 @@ register(
|
||||
read_only=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'DEFAULT_EXECUTION_ENVIRONMENT',
|
||||
field_class=fields.PrimaryKeyRelatedField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
queryset=ExecutionEnvironment.objects.all(),
|
||||
label=_('Global default execution environment'),
|
||||
help_text=_('.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'CUSTOM_VENV_PATHS',
|
||||
field_class=fields.StringListPathField,
awx/main/credential_plugins/centrify_vault.py (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
from .plugin import CredentialPlugin, raise_for_status
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from urllib.parse import urljoin
|
||||
import requests
|
||||
pas_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
'label': _('Centrify Tenant URL'),
|
||||
'type': 'string',
|
||||
'help_text': _('Centrify Tenant URL'),
|
||||
'format': 'url',
|
||||
}, {
|
||||
'id':'client_id',
|
||||
'label':_('Centrify API User'),
|
||||
'type':'string',
|
||||
'help_text': _('Centrify API User, having necessary permissions as mentioned in support doc'),
|
||||
|
||||
}, {
|
||||
'id':'client_password',
|
||||
'label':_('Centrify API Password'),
|
||||
'type':'string',
|
||||
'help_text': _('Password of Centrify API User with necessary permissions'),
|
||||
'secret':True,
|
||||
},{
|
||||
'id':'oauth_application_id',
|
||||
'label':_('OAuth2 Application ID'),
|
||||
'type':'string',
|
||||
'help_text': _('Application ID of the configured OAuth2 Client (defaults to \'awx\')'),
|
||||
'default': 'awx',
|
||||
},{
|
||||
'id':'oauth_scope',
|
||||
'label':_('OAuth2 Scope'),
|
||||
'type':'string',
|
||||
'help_text': _('Scope of the configured OAuth2 Client (defaults to \'awx\')'),
|
||||
'default': 'awx',
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'account-name',
|
||||
'label': _('Account Name'),
|
||||
'type': 'string',
|
||||
'help_text': _('Local system account or Domain account name enrolled in Centrify Vault. eg. (root or DOMAIN/Administrator)'),
|
||||
},{
|
||||
'id': 'system-name',
|
||||
'label': _('System Name'),
|
||||
'type': 'string',
|
||||
'help_text': _('Machine Name enrolled with in Centrify Portal'),
|
||||
}],
|
||||
'required': ['url', 'account-name', 'system-name','client_id','client_password'],
|
||||
}
# generate bearer token to authenticate with PAS portal, Input : Client ID, Client Secret
|
||||
def handle_auth(**kwargs):
|
||||
post_data = {
|
||||
"grant_type": "client_credentials",
|
||||
"scope": kwargs['oauth_scope']
|
||||
}
|
||||
response = requests.post(
|
||||
kwargs['endpoint'],
|
||||
data = post_data,
|
||||
auth = (kwargs['client_id'],kwargs['client_password']),
|
||||
verify = True,
|
||||
timeout = (5, 30)
|
||||
)
|
||||
raise_for_status(response)
|
||||
try:
|
||||
return response.json()['access_token']
|
||||
except KeyError:
|
||||
raise RuntimeError('OAuth request to tenant was unsuccessful')
# fetch the ID of system with RedRock query, Input : System Name, Account Name
|
||||
def get_ID(**kwargs):
|
||||
endpoint = urljoin(kwargs['url'],'/Redrock/query')
|
||||
name=" Name='{0}' and User='{1}'".format(kwargs['system_name'],kwargs['acc_name'])
|
||||
query = 'Select ID from VaultAccount where {0}'.format(name)
|
||||
post_headers = {
|
||||
"Authorization": "Bearer " + kwargs['access_token'],
|
||||
"X-CENTRIFY-NATIVE-CLIENT":"true"
|
||||
}
|
||||
response = requests.post(
|
||||
endpoint,
|
||||
json = {'Script': query},
|
||||
headers = post_headers,
|
||||
verify = True,
|
||||
timeout = (5, 30)
|
||||
)
|
||||
raise_for_status(response)
|
||||
try:
|
||||
result_str = response.json()["Result"]["Results"]
|
||||
return result_str[0]["Row"]["ID"]
|
||||
except (IndexError, KeyError):
|
||||
raise RuntimeError("Error Detected!! Check the Inputs")
# CheckOut Password from Centrify Vault, Input : ID
|
||||
def get_passwd(**kwargs):
|
||||
endpoint = urljoin(kwargs['url'],'/ServerManage/CheckoutPassword')
|
||||
post_headers = {
|
||||
"Authorization": "Bearer " + kwargs['access_token'],
|
||||
"X-CENTRIFY-NATIVE-CLIENT":"true"
|
||||
}
|
||||
response = requests.post(
|
||||
endpoint,
|
||||
json = {'ID': kwargs['acc_id']},
|
||||
headers = post_headers,
|
||||
verify = True,
|
||||
timeout = (5, 30)
|
||||
)
|
||||
raise_for_status(response)
|
||||
try:
|
||||
return response.json()["Result"]["Password"]
|
||||
except KeyError:
|
||||
raise RuntimeError("Password Not Found")
def centrify_backend(**kwargs):
|
||||
url = kwargs.get('url')
|
||||
acc_name = kwargs.get('account-name')
|
||||
system_name = kwargs.get('system-name')
|
||||
client_id = kwargs.get('client_id')
|
||||
client_password = kwargs.get('client_password')
|
||||
app_id = kwargs.get('oauth_application_id', 'awx')
|
||||
endpoint = urljoin(url, f'/oauth2/token/{app_id}')
|
||||
endpoint = {
|
||||
'endpoint': endpoint,
|
||||
'client_id': client_id,
|
||||
'client_password': client_password,
|
||||
'oauth_scope': kwargs.get('oauth_scope', 'awx')
|
||||
}
|
||||
token = handle_auth(**endpoint)
|
||||
get_id_args = {'system_name':system_name,'acc_name':acc_name,'url':url,'access_token':token}
|
||||
acc_id = get_ID(**get_id_args)
|
||||
get_pwd_args = {'url':url,'acc_id':acc_id,'access_token':token}
|
||||
return get_passwd(**get_pwd_args)
centrify_plugin = CredentialPlugin(
|
||||
'Centrify Vault Credential Provider Lookup',
|
||||
inputs=pas_inputs,
|
||||
backend=centrify_backend
|
||||
)
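Purely illustrative (not part of the new file): the backend chains handle_auth, get_ID, and get_passwd against a live Centrify tenant, so the call below would perform real HTTP requests. Inside AWX the keyword arguments are supplied from the credential inputs and the lookup metadata; every value here is made up, and the hyphenated metadata keys have to be passed by unpacking a dict.

secret = centrify_backend(
    url='https://example.my.centrify.net',       # made-up tenant URL
    client_id='awx-lookup@example.com',          # made-up API user
    client_password='not-a-real-password',
    oauth_application_id='awx',                  # defaults to 'awx'
    oauth_scope='awx',                           # defaults to 'awx'
    **{'account-name': 'root', 'system-name': 'db01.example.com'},
)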
|
||||
@@ -40,6 +40,12 @@ base_inputs = {
|
||||
'multiline': False,
|
||||
'secret': True,
|
||||
'help_text': _('The Secret ID for AppRole Authentication')
|
||||
}, {
|
||||
'id': 'namespace',
|
||||
'label': _('Namespace name (Vault Enterprise only)'),
|
||||
'type': 'string',
|
||||
'multiline': False,
|
||||
'help_text': _('Name of the namespace to use when authenticate and retrieve secrets')
|
||||
}, {
|
||||
'id': 'default_auth_path',
|
||||
'label': _('Path to Approle Auth'),
|
||||
@@ -137,6 +143,9 @@ def approle_auth(**kwargs):
|
||||
# AppRole Login
|
||||
request_kwargs['json'] = {'role_id': role_id, 'secret_id': secret_id}
|
||||
sess = requests.Session()
|
||||
# Namespace support
|
||||
if kwargs.get('namespace'):
|
||||
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
|
||||
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
@@ -164,6 +173,8 @@ def kv_backend(**kwargs):
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
# Compatibility header for older installs of Hashicorp Vault
|
||||
sess.headers['X-Vault-Token'] = token
|
||||
if kwargs.get('namespace'):
|
||||
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
|
||||
|
||||
if api_version == 'v2':
|
||||
if kwargs.get('secret_version'):
|
||||
@@ -222,6 +233,8 @@ def ssh_backend(**kwargs):
|
||||
|
||||
sess = requests.Session()
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
if kwargs.get('namespace'):
|
||||
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
|
||||
# Compatability header for older installs of Hashicorp Vault
|
||||
sess.headers['X-Vault-Token'] = token
|
||||
# https://www.vaultproject.io/api/secret/ssh/index.html#sign-ssh-key
|
||||
|
||||
@@ -6,6 +6,7 @@ from multiprocessing import Process
|
||||
from django.conf import settings
|
||||
from django.db import connections
|
||||
from schedule import Scheduler
|
||||
from django_guid.middleware import GuidMiddleware
|
||||
|
||||
from awx.main.dispatch.worker import TaskWorker
|
||||
|
||||
@@ -35,6 +36,7 @@ class Scheduler(Scheduler):
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
GuidMiddleware.set_guid(GuidMiddleware._generate_guid())
|
||||
self.run_pending()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
|
||||
@@ -16,6 +16,7 @@ from queue import Full as QueueFull, Empty as QueueEmpty
|
||||
from django.conf import settings
|
||||
from django.db import connection as django_connection, connections
|
||||
from django.core.cache import cache as django_cache
|
||||
from django_guid.middleware import GuidMiddleware
|
||||
from jinja2 import Template
|
||||
import psutil
|
||||
|
||||
@@ -445,6 +446,8 @@ class AutoscalePool(WorkerPool):
|
||||
return super(AutoscalePool, self).up()
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
if 'guid' in body:
|
||||
GuidMiddleware.set_guid(body['guid'])
|
||||
try:
|
||||
# when the cluster heartbeat occurs, clean up internally
|
||||
if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
|
||||
|
||||
@@ -5,6 +5,7 @@ import json
|
||||
from uuid import uuid4
|
||||
|
||||
from django.conf import settings
|
||||
from django_guid.middleware import GuidMiddleware
|
||||
|
||||
from . import pg_bus_conn
|
||||
|
||||
@@ -83,6 +84,9 @@ class task:
|
||||
'kwargs': kwargs,
|
||||
'task': cls.name
|
||||
}
|
||||
guid = GuidMiddleware.get_guid()
|
||||
if guid:
|
||||
obj['guid'] = guid
|
||||
obj.update(**kw)
|
||||
if callable(queue):
|
||||
queue = queue()
|
||||
|
||||
@@ -9,6 +9,7 @@ from django.conf import settings
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.db import DatabaseError, OperationalError, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django_guid.middleware import GuidMiddleware
|
||||
|
||||
import psutil
|
||||
|
||||
@@ -152,6 +153,8 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
try:
|
||||
if 'guid' in body:
|
||||
GuidMiddleware.set_guid(body['guid'])
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
@@ -176,6 +179,8 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
handle_success_and_failure_notifications.apply_async([uj.id])
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
finally:
|
||||
GuidMiddleware.set_guid('')
|
||||
return
|
||||
|
||||
event = cls.create_from_data(**body)
|
||||
|
||||
@@ -6,6 +6,9 @@ import traceback
|
||||
|
||||
from kubernetes.config import kube_config
|
||||
|
||||
from django.conf import settings
|
||||
from django_guid.middleware import GuidMiddleware
|
||||
|
||||
from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown
|
||||
|
||||
from .base import BaseWorker
|
||||
@@ -52,6 +55,8 @@ class TaskWorker(BaseWorker):
|
||||
uuid = body.get('uuid', '<unknown>')
|
||||
args = body.get('args', [])
|
||||
kwargs = body.get('kwargs', {})
|
||||
if 'guid' in body:
|
||||
GuidMiddleware.set_guid(body.pop('guid'))
|
||||
_call = TaskWorker.resolve_callable(task)
|
||||
if inspect.isclass(_call):
|
||||
# the callable is a class, e.g., RunJob; instantiate and
|
||||
@@ -81,6 +86,7 @@ class TaskWorker(BaseWorker):
|
||||
'task': u'awx.main.tasks.RunProjectUpdate'
|
||||
}
|
||||
'''
|
||||
settings.__clean_on_fork__()
|
||||
result = None
|
||||
try:
|
||||
result = self.run_callable(body)
|
||||
|
||||
@@ -6,7 +6,6 @@ import stat
|
||||
import tempfile
|
||||
import time
|
||||
import logging
|
||||
import yaml
|
||||
import datetime
|
||||
|
||||
from django.conf import settings
|
||||
@@ -32,7 +31,7 @@ def set_pythonpath(venv_libdir, env):
|
||||
|
||||
class IsolatedManager(object):
|
||||
|
||||
def __init__(self, event_handler, canceled_callback=None, check_callback=None, pod_manager=None):
|
||||
def __init__(self, event_handler, canceled_callback=None, check_callback=None):
|
||||
"""
|
||||
:param event_handler: a callable used to persist event data from isolated nodes
|
||||
:param canceled_callback: a callable - which returns `True` or `False`
|
||||
@@ -45,28 +44,12 @@ class IsolatedManager(object):
|
||||
self.started_at = None
|
||||
self.captured_command_artifact = False
|
||||
self.instance = None
|
||||
self.pod_manager = pod_manager
|
||||
|
||||
def build_inventory(self, hosts):
|
||||
if self.instance and self.instance.is_containerized:
|
||||
inventory = {'all': {'hosts': {}}}
|
||||
fd, path = tempfile.mkstemp(
|
||||
prefix='.kubeconfig', dir=self.private_data_dir
|
||||
)
|
||||
with open(path, 'wb') as temp:
|
||||
temp.write(yaml.dump(self.pod_manager.kube_config).encode())
|
||||
temp.flush()
|
||||
os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
for host in hosts:
|
||||
inventory['all']['hosts'][host] = {
|
||||
"ansible_connection": "kubectl",
|
||||
"ansible_kubectl_config": path,
|
||||
}
|
||||
else:
|
||||
inventory = '\n'.join([
|
||||
'{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME)
|
||||
for host in hosts
|
||||
])
|
||||
inventory = '\n'.join([
|
||||
'{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME)
|
||||
for host in hosts
|
||||
])
|
||||
|
||||
return inventory
|
||||
|
||||
|
||||
@@ -2,22 +2,22 @@
|
||||
# All Rights Reserved
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from crum import impersonate
|
||||
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
|
||||
from awx.main.models import (
|
||||
User, Organization, Project, Inventory, CredentialType,
|
||||
Credential, Host, JobTemplate, ExecutionEnvironment
|
||||
)
|
||||
from awx.main.signals import disable_computed_fields
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Create preloaded data, intended for new installs
|
||||
"""
|
||||
help = 'Creates a preload tower data iff there is none.'
|
||||
help = 'Creates a preload tower data if there is none.'
|
||||
|
||||
def handle(self, *args, **kwargs):
|
||||
# Sanity check: Is there already an organization in the system?
|
||||
if Organization.objects.count():
|
||||
print('An organization is already in the system, exiting.')
|
||||
print('(changed: False)')
|
||||
return
|
||||
changed = False
|
||||
|
||||
# Create a default organization as the first superuser found.
|
||||
try:
|
||||
@@ -26,44 +26,62 @@ class Command(BaseCommand):
|
||||
superuser = None
|
||||
with impersonate(superuser):
|
||||
with disable_computed_fields():
|
||||
o = Organization.objects.create(name='Default')
|
||||
p = Project(name='Demo Project',
|
||||
scm_type='git',
|
||||
scm_url='https://github.com/ansible/ansible-tower-samples',
|
||||
scm_update_on_launch=True,
|
||||
scm_update_cache_timeout=0,
|
||||
organization=o)
|
||||
p.save(skip_update=True)
|
||||
ssh_type = CredentialType.objects.filter(namespace='ssh').first()
|
||||
c = Credential.objects.create(credential_type=ssh_type,
|
||||
name='Demo Credential',
|
||||
inputs={
|
||||
'username': superuser.username
|
||||
},
|
||||
created_by=superuser)
|
||||
c.admin_role.members.add(superuser)
|
||||
public_galaxy_credential = Credential(
|
||||
name='Ansible Galaxy',
|
||||
managed_by_tower=True,
|
||||
credential_type=CredentialType.objects.get(kind='galaxy'),
|
||||
inputs = {
|
||||
'url': 'https://galaxy.ansible.com/'
|
||||
}
|
||||
)
|
||||
public_galaxy_credential.save()
|
||||
o.galaxy_credentials.add(public_galaxy_credential)
|
||||
i = Inventory.objects.create(name='Demo Inventory',
|
||||
organization=o,
|
||||
created_by=superuser)
|
||||
Host.objects.create(name='localhost',
|
||||
inventory=i,
|
||||
variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
|
||||
created_by=superuser)
|
||||
jt = JobTemplate.objects.create(name='Demo Job Template',
|
||||
playbook='hello_world.yml',
|
||||
project=p,
|
||||
inventory=i)
|
||||
jt.credentials.add(c)
|
||||
print('Default organization added.')
|
||||
print('Demo Credential, Inventory, and Job Template added.')
|
||||
print('(changed: True)')
|
||||
if not Organization.objects.exists():
|
||||
o = Organization.objects.create(name='Default')
|
||||
|
||||
p = Project(name='Demo Project',
|
||||
scm_type='git',
|
||||
scm_url='https://github.com/ansible/ansible-tower-samples',
|
||||
scm_update_on_launch=True,
|
||||
scm_update_cache_timeout=0,
|
||||
organization=o)
|
||||
p.save(skip_update=True)
|
||||
|
||||
ssh_type = CredentialType.objects.filter(namespace='ssh').first()
|
||||
c = Credential.objects.create(credential_type=ssh_type,
|
||||
name='Demo Credential',
|
||||
inputs={
|
||||
'username': superuser.username
|
||||
},
|
||||
created_by=superuser)
|
||||
|
||||
c.admin_role.members.add(superuser)
|
||||
|
||||
public_galaxy_credential = Credential(name='Ansible Galaxy',
|
||||
managed_by_tower=True,
|
||||
credential_type=CredentialType.objects.get(kind='galaxy'),
|
||||
inputs={'url': 'https://galaxy.ansible.com/'})
|
||||
public_galaxy_credential.save()
|
||||
o.galaxy_credentials.add(public_galaxy_credential)
|
||||
|
||||
i = Inventory.objects.create(name='Demo Inventory',
|
||||
organization=o,
|
||||
created_by=superuser)
|
||||
|
||||
Host.objects.create(name='localhost',
|
||||
inventory=i,
|
||||
variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
|
||||
created_by=superuser)
|
||||
|
||||
jt = JobTemplate.objects.create(name='Demo Job Template',
|
||||
playbook='hello_world.yml',
|
||||
project=p,
|
||||
inventory=i)
|
||||
jt.credentials.add(c)
|
||||
|
||||
print('Default organization added.')
|
||||
print('Demo Credential, Inventory, and Job Template added.')
|
||||
changed = True
|
||||
|
||||
default_ee = settings.AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE
|
||||
ee, created = ExecutionEnvironment.objects.get_or_create(name='Default EE', defaults={'image': default_ee,
|
||||
'managed_by_tower': True})
|
||||
|
||||
if created:
|
||||
changed = True
|
||||
print('Default Execution Environment registered.')
|
||||
|
||||
if changed:
|
||||
print('(changed: True)')
|
||||
else:
|
||||
print('(changed: False)')
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
# Python
|
||||
import json
|
||||
import logging
|
||||
import fnmatch
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
@@ -34,11 +33,7 @@ from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
|
||||
# other AWX imports
|
||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding
|
||||
# TODO: remove proot utils once we move to running inv. updates in containers
|
||||
from awx.main.utils import (
|
||||
check_proot_installed,
|
||||
wrap_args_with_proot,
|
||||
build_proot_temp_dir,
|
||||
ignore_inventory_computed_fields,
|
||||
get_licenser
|
||||
)
|
||||
@@ -89,27 +84,6 @@ class AnsibleInventoryLoader(object):
|
||||
else:
|
||||
self.venv_path = settings.ANSIBLE_VENV_PATH
|
||||
|
||||
def build_env(self):
|
||||
env = dict(os.environ.items())
|
||||
env['VIRTUAL_ENV'] = self.venv_path
|
||||
env['PATH'] = os.path.join(self.venv_path, "bin") + ":" + env['PATH']
|
||||
# Set configuration items that should always be used for updates
|
||||
for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
|
||||
if key not in env:
|
||||
env[key] = value
|
||||
venv_libdir = os.path.join(self.venv_path, "lib")
|
||||
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
|
||||
for version in os.listdir(venv_libdir):
|
||||
if fnmatch.fnmatch(version, 'python[23].*'):
|
||||
if os.path.isdir(os.path.join(venv_libdir, version)):
|
||||
env['PYTHONPATH'] = os.path.join(venv_libdir, version, "site-packages") + ":"
|
||||
break
|
||||
# For internal inventory updates, these are not reported in the job_env API
|
||||
logger.info('Using VIRTUAL_ENV: {}'.format(env['VIRTUAL_ENV']))
|
||||
logger.info('Using PATH: {}'.format(env['PATH']))
|
||||
logger.info('Using PYTHONPATH: {}'.format(env.get('PYTHONPATH', None)))
|
||||
return env
|
||||
|
||||
def get_path_to_ansible_inventory(self):
|
||||
venv_exe = os.path.join(self.venv_path, 'bin', 'ansible-inventory')
|
||||
if os.path.exists(venv_exe):
|
||||
@@ -128,66 +102,29 @@ class AnsibleInventoryLoader(object):
|
||||
return shutil.which('ansible-inventory')
|
||||
|
||||
def get_base_args(self):
|
||||
# get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
|
||||
ansible_inventory_path = self.get_path_to_ansible_inventory()
|
||||
# NOTE: why do we add "python" to the start of these args?
|
||||
# the script that runs ansible-inventory specifies a python interpreter
|
||||
# that makes no sense in light of the fact that we put all the dependencies
|
||||
# inside of /var/lib/awx/venv/ansible, so we override the specified interpreter
|
||||
# https://github.com/ansible/ansible/issues/50714
|
||||
bargs = ['python', ansible_inventory_path, '-i', self.source]
|
||||
bargs = ['podman', 'run', '--user=root', '--quiet']
|
||||
bargs.extend(['-v', '{0}:{0}:Z'.format(self.source)])
|
||||
for key, value in STANDARD_INVENTORY_UPDATE_ENV.items():
|
||||
bargs.extend(['-e', '{0}={1}'.format(key, value)])
|
||||
bargs.extend([settings.AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE])
|
||||
bargs.extend(['ansible-inventory', '-i', self.source])
|
||||
bargs.extend(['--playbook-dir', functioning_dir(self.source)])
|
||||
if self.verbosity:
|
||||
# INFO: -vvv, DEBUG: -vvvvv, for inventory, any more than 3 makes little difference
|
||||
bargs.append('-{}'.format('v' * min(5, self.verbosity * 2 + 1)))
|
||||
bargs.append('--list')
|
||||
logger.debug('Using base command: {}'.format(' '.join(bargs)))
|
||||
return bargs
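For illustration only (not part of the diff): the rewritten get_base_args() assembles a container invocation instead of the old virtualenv command. Assuming verbosity=1, the returned list looks roughly like the following; the source path, image, and environment key are placeholders.

[
    'podman', 'run', '--user=root', '--quiet',
    '-v', '/tmp/awx_inv_source:/tmp/awx_inv_source:Z',
    '-e', 'SOME_INVENTORY_UPDATE_KEY=value',   # one pair per STANDARD_INVENTORY_UPDATE_ENV entry
    'quay.io/example/awx-ee:latest',           # settings.AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE
    'ansible-inventory', '-i', '/tmp/awx_inv_source',
    '--playbook-dir', '/tmp',
    '-vvv',
    '--list',
]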
|
||||
|
||||
# TODO: Remove this once we move to running ansible-inventory in containers
|
||||
# and don't need proot for process isolation anymore
|
||||
def get_proot_args(self, cmd, env):
|
||||
cwd = os.getcwd()
|
||||
if not check_proot_installed():
|
||||
raise RuntimeError("proot is not installed but is configured for use")
|
||||
|
||||
kwargs = {}
|
||||
# we cannot safely store tmp data in source dir or trust script contents
|
||||
if env['AWX_PRIVATE_DATA_DIR']:
|
||||
# If this is non-blank, file credentials are being used and we need access
|
||||
private_data_dir = functioning_dir(env['AWX_PRIVATE_DATA_DIR'])
|
||||
logger.debug("Using private credential data in '{}'.".format(private_data_dir))
|
||||
kwargs['private_data_dir'] = private_data_dir
|
||||
self.tmp_private_dir = build_proot_temp_dir()
|
||||
logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
|
||||
kwargs['proot_temp_dir'] = self.tmp_private_dir
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.AWX_ANSIBLE_COLLECTIONS_PATHS]
|
||||
logger.debug("Running from `{}` working directory.".format(cwd))
|
||||
|
||||
if self.venv_path != settings.ANSIBLE_VENV_PATH:
|
||||
kwargs['proot_custom_virtualenv'] = self.venv_path
|
||||
|
||||
return wrap_args_with_proot(cmd, cwd, **kwargs)
|
||||
|
||||
|
||||
def command_to_json(self, cmd):
|
||||
data = {}
|
||||
stdout, stderr = '', ''
|
||||
env = self.build_env()
|
||||
|
||||
# TODO: remove proot args once inv. updates run in containers
|
||||
if (('AWX_PRIVATE_DATA_DIR' in env) and
|
||||
getattr(settings, 'AWX_PROOT_ENABLED', False)):
|
||||
cmd = self.get_proot_args(cmd, env)
|
||||
|
||||
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
|
||||
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
stdout, stderr = proc.communicate()
|
||||
stdout = smart_text(stdout)
|
||||
stderr = smart_text(stderr)
|
||||
|
||||
# TODO: can be removed when proot is removed
|
||||
if self.tmp_private_dir:
|
||||
shutil.rmtree(self.tmp_private_dir, True)
|
||||
|
||||
if proc.returncode != 0:
|
||||
raise RuntimeError('%s failed (rc=%d) with stdout:\n%s\nstderr:\n%s' % (
|
||||
'ansible-inventory', proc.returncode, stdout, stderr))
|
||||
@@ -205,9 +142,10 @@ class AnsibleInventoryLoader(object):
|
||||
|
||||
def load(self):
|
||||
base_args = self.get_base_args()
|
||||
|
||||
logger.info('Reading Ansible inventory source: %s', self.source)
|
||||
|
||||
return self.command_to_json(base_args + ['--list'])
|
||||
return self.command_to_json(base_args)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
@@ -17,13 +17,14 @@ class InstanceNotFound(Exception):
|
||||
|
||||
|
||||
class RegisterQueue:
|
||||
def __init__(self, queuename, controller, instance_percent, inst_min, hostname_list):
|
||||
def __init__(self, queuename, controller, instance_percent, inst_min, hostname_list, is_container_group=None):
|
||||
self.instance_not_found_err = None
|
||||
self.queuename = queuename
|
||||
self.controller = controller
|
||||
self.instance_percent = instance_percent
|
||||
self.instance_min = inst_min
|
||||
self.hostname_list = hostname_list
|
||||
self.is_container_group = is_container_group
|
||||
|
||||
def get_create_update_instance_group(self):
|
||||
created = False
|
||||
@@ -36,6 +37,10 @@ class RegisterQueue:
|
||||
ig.policy_instance_minimum = self.instance_min
|
||||
changed = True
|
||||
|
||||
if self.is_container_group:
|
||||
ig.is_container_group = self.is_container_group
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
ig.save()
|
||||
|
||||
|
||||
@@ -144,7 +144,8 @@ class InstanceManager(models.Manager):
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
pod_ip = os.environ.get('MY_POD_IP')
|
||||
registered = self.register(ip_address=pod_ip)
|
||||
RegisterQueue('tower', None, 100, 0, []).register()
|
||||
is_container_group = settings.IS_K8S
|
||||
RegisterQueue('tower', None, 100, 0, [], is_container_group).register()
|
||||
return registered
|
||||
else:
|
||||
return (False, self.me())
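A keyword-argument rendering of the new registration call above, sketched only to spell out which parameter was added; it reuses the RegisterQueue signature shown earlier in this diff.

# Equivalent to the positional call above: settings.IS_K8S decides whether the
# default 'tower' queue is flagged as a container group.
RegisterQueue('tower', controller=None, instance_percent=100, inst_min=0,
              hostname_list=[], is_container_group=settings.IS_K8S).register()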
|
||||
@@ -237,7 +238,7 @@ class InstanceGroupManager(models.Manager):
|
||||
elif t.status == 'running':
|
||||
# Subtract capacity from all groups that contain the instance
|
||||
if t.execution_node not in instance_ig_mapping:
|
||||
if not t.is_containerized:
|
||||
if not t.is_container_group_task:
|
||||
logger.warning('Detected %s running inside lost instance, '
|
||||
'may still be waiting for reaper.', t.log_format)
|
||||
if t.instance_group:
|
||||
|
||||
@@ -45,7 +45,10 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
|
||||
response['X-API-Total-Time'] = '%0.3fs' % total_time
|
||||
if settings.AWX_REQUEST_PROFILE:
|
||||
response['X-API-Profile-File'] = self.prof.stop()
|
||||
perf_logger.info('api response times', extra=dict(python_objects=dict(request=request, response=response)))
|
||||
perf_logger.info(
|
||||
f'request: {request}, response_time: {response["X-API-Total-Time"]}',
|
||||
extra=dict(python_objects=dict(request=request, response=response, X_API_TOTAL_TIME=response["X-API-Total-Time"]))
|
||||
)
|
||||
return response
|
||||
|
||||
|
||||
|
||||
awx/main/migrations/0124_execution_environments.py (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
# Generated by Django 2.2.11 on 2020-07-08 18:42
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
import django.db.models.expressions
|
||||
import taggit.managers
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('taggit', '0003_taggeditem_add_unique_index'),
|
||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||
('main', '0123_drop_hg_support'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='ExecutionEnvironment',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('created', models.DateTimeField(default=None, editable=False)),
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('description', models.TextField(blank=True, default='')),
|
||||
('image', models.CharField(help_text='The registry location where the container is stored.', max_length=1024, verbose_name='image location')),
|
||||
('managed_by_tower', models.BooleanField(default=False, editable=False)),
|
||||
('created_by', models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{'class': 'executionenvironment', 'model_name': 'executionenvironment', 'app_label': 'main'}(class)s_created+", to=settings.AUTH_USER_MODEL)),
|
||||
('credential', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='executionenvironments', to='main.Credential')),
|
||||
('modified_by', models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{'class': 'executionenvironment', 'model_name': 'executionenvironment', 'app_label': 'main'}(class)s_modified+", to=settings.AUTH_USER_MODEL)),
|
||||
('organization', models.ForeignKey(blank=True, default=None, help_text='The organization used to determine access to this execution environment.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='executionenvironments', to='main.Organization')),
|
||||
('tags', taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
|
||||
],
|
||||
options={
|
||||
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('organization_id'), nulls_first=True), 'image'),
|
||||
'unique_together': {('organization', 'image')},
|
||||
},
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='execution_environment',
|
||||
field=models.ManyToManyField(blank=True, to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='organization',
|
||||
name='default_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The default execution environment for jobs run by this organization.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='execution_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The container image to be used for execution.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='unifiedjobs', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='execution_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The container image to be used for execution.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='unifiedjobtemplates', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0125_more_ee_modeling_changes.py (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
# Generated by Django 2.2.16 on 2020-11-19 16:20
|
||||
import uuid
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0124_execution_environments'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterModelOptions(
|
||||
name='executionenvironment',
|
||||
options={'ordering': ('-created',)},
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='executionenvironment',
|
||||
name='name',
|
||||
field=models.CharField(default=uuid.uuid4, max_length=512, unique=True),
|
||||
preserve_default=False,
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='organization',
|
||||
name='execution_environment_admin_role',
|
||||
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role='admin_role', related_name='+', to='main.Role'),
|
||||
preserve_default='True',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='project',
|
||||
name='default_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The default execution environment for jobs run using this project.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credentialtype',
|
||||
name='kind',
|
||||
field=models.CharField(choices=[('ssh', 'Machine'), ('vault', 'Vault'), ('net', 'Network'), ('scm', 'Source Control'), ('cloud', 'Cloud'), ('registry', 'Container Registry'), ('token', 'Personal Access Token'), ('insights', 'Insights'), ('external', 'External'), ('kubernetes', 'Kubernetes'), ('galaxy', 'Galaxy/Automation Hub')], max_length=32),
|
||||
),
|
||||
migrations.AlterUniqueTogether(
|
||||
name='executionenvironment',
|
||||
unique_together=set(),
|
||||
),
|
||||
]
awx/main/migrations/0126_executionenvironment_container_options.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.16 on 2021-01-27 22:31
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0125_more_ee_modeling_changes'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='executionenvironment',
|
||||
name='pull',
|
||||
field=models.CharField(choices=[('always', 'Always pull container before running.'), ('missing', 'No pull option has been selected.'), ('never', 'Never pull container before running.')], blank=True, default='', help_text='Pull image before running?', max_length=16),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0127_reset_pod_spec_override.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.16 on 2021-02-15 22:02
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
def reset_pod_specs(apps, schema_editor):
|
||||
InstanceGroup = apps.get_model('main', 'InstanceGroup')
|
||||
InstanceGroup.objects.update(pod_spec_override="")
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0126_executionenvironment_container_options'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(reset_pod_specs)
|
||||
]
|
||||
awx/main/migrations/0128_organiaztion_read_roles_ee_admin.py (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
# Generated by Django 2.2.16 on 2021-02-18 22:57
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0127_reset_pod_spec_override'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='read_role',
|
||||
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['member_role', 'auditor_role', 'execute_role', 'project_admin_role', 'inventory_admin_role', 'workflow_admin_role', 'notification_admin_role', 'credential_admin_role', 'job_template_admin_role', 'approval_role', 'execution_environment_admin_role'], related_name='+', to='main.Role'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0129_unifiedjob_installed_collections.py (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
# Generated by Django 2.2.16 on 2021-02-16 20:27
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0128_organiaztion_read_roles_ee_admin'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='installed_collections',
|
||||
field=awx.main.fields.JSONBField(blank=True, default=dict, editable=False, help_text='The Collections names and versions installed in the execution environment.'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0130_ee_polymorphic_set_null.py (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
# Generated by Django 2.2.16 on 2021-03-11 16:25
|
||||
|
||||
import awx.main.utils.polymorphic
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0129_unifiedjob_installed_collections'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='default_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The default execution environment for jobs run by this organization.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='+', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='project',
|
||||
name='default_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The default execution environment for jobs run using this project.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='+', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='execution_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The container image to be used for execution.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='execution_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The container image to be used for execution.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0131_undo_org_polymorphic_ee.py (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
# Generated by Django 2.2.16 on 2021-03-11 20:50
|
||||
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0130_ee_polymorphic_set_null'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='default_environment',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The default execution environment for jobs run by this organization.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='main.ExecutionEnvironment'),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0132_instancegroup_is_container_group.py (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
# Generated by Django 2.2.16 on 2021-03-13 14:53
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def migrate_existing_container_groups(apps, schema_editor):
|
||||
InstanceGroup = apps.get_model('main', 'InstanceGroup')
|
||||
|
||||
for group in InstanceGroup.objects.filter(credential__isnull=False).iterator():
|
||||
group.is_container_group = True
|
||||
group.save(update_fields=['is_container_group'])
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0131_undo_org_polymorphic_ee'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='instancegroup',
|
||||
name='is_container_group',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
migrations.RunPython(migrate_existing_container_groups, migrations.RunPython.noop),
|
||||
]
|
||||
awx/main/migrations/0133_centrify_vault_credtype.py (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
from django.db import migrations
|
||||
|
||||
from awx.main.models import CredentialType
|
||||
from awx.main.utils.common import set_current_apps
|
||||
|
||||
|
||||
def setup_tower_managed_defaults(apps, schema_editor):
|
||||
set_current_apps(apps)
|
||||
CredentialType.setup_tower_managed_defaults()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0132_instancegroup_is_container_group'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(setup_tower_managed_defaults),
|
||||
]
|
||||
awx/main/migrations/0134_unifiedjob_ansible_version.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
# Generated by Django 2.2.16 on 2021-03-16 20:49
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0133_centrify_vault_credtype'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='ansible_version',
|
||||
field=models.CharField(blank=True, default='', editable=False, help_text='The version of Ansible Core installed in the execution environment.', max_length=255),
|
||||
),
|
||||
]
|
||||
@@ -35,6 +35,7 @@ from awx.main.models.events import ( # noqa
|
||||
)
|
||||
from awx.main.models.ad_hoc_commands import AdHocCommand # noqa
|
||||
from awx.main.models.schedules import Schedule # noqa
|
||||
from awx.main.models.execution_environments import ExecutionEnvironment # noqa
|
||||
from awx.main.models.activity_stream import ActivityStream # noqa
|
||||
from awx.main.models.ha import ( # noqa
|
||||
Instance, InstanceGroup, TowerScheduleState,
|
||||
@@ -45,7 +46,7 @@ from awx.main.models.rbac import ( # noqa
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
)
|
||||
from awx.main.models.mixins import ( # noqa
|
||||
CustomVirtualEnvMixin, ResourceMixin, SurveyJobMixin,
|
||||
CustomVirtualEnvMixin, ExecutionEnvironmentMixin, ResourceMixin, SurveyJobMixin,
|
||||
SurveyJobTemplateMixin, TaskManagerInventoryUpdateMixin,
|
||||
TaskManagerJobMixin, TaskManagerProjectUpdateMixin,
|
||||
TaskManagerUnifiedJobMixin,
|
||||
@@ -221,6 +222,7 @@ activity_stream_registrar.connect(CredentialType)
|
||||
activity_stream_registrar.connect(Team)
|
||||
activity_stream_registrar.connect(Project)
|
||||
#activity_stream_registrar.connect(ProjectUpdate)
|
||||
activity_stream_registrar.connect(ExecutionEnvironment)
|
||||
activity_stream_registrar.connect(JobTemplate)
|
||||
activity_stream_registrar.connect(Job)
|
||||
activity_stream_registrar.connect(AdHocCommand)
|
||||
|
||||
@@ -61,6 +61,7 @@ class ActivityStream(models.Model):
|
||||
team = models.ManyToManyField("Team", blank=True)
|
||||
project = models.ManyToManyField("Project", blank=True)
|
||||
project_update = models.ManyToManyField("ProjectUpdate", blank=True)
|
||||
execution_environment = models.ManyToManyField("ExecutionEnvironment", blank=True)
|
||||
job_template = models.ManyToManyField("JobTemplate", blank=True)
|
||||
job = models.ManyToManyField("Job", blank=True)
|
||||
workflow_job_template_node = models.ManyToManyField("WorkflowJobTemplateNode", blank=True)
|
||||
@@ -74,6 +75,7 @@ class ActivityStream(models.Model):
|
||||
ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)
|
||||
schedule = models.ManyToManyField("Schedule", blank=True)
|
||||
custom_inventory_script = models.ManyToManyField("CustomInventoryScript", blank=True)
|
||||
execution_environment = models.ManyToManyField("ExecutionEnvironment", blank=True)
|
||||
notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
|
||||
notification = models.ManyToManyField("Notification", blank=True)
|
||||
label = models.ManyToManyField("Label", blank=True)
|
||||
|
||||
@@ -151,8 +151,8 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
        return True

    @property
    def is_containerized(self):
        return bool(self.instance_group and self.instance_group.is_containerized)
    def is_container_group_task(self):
        return bool(self.instance_group and self.instance_group.is_container_group)

    @property
    def can_run_containerized(self):
@@ -198,8 +198,8 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
    def copy(self):
        data = {}
        for field in ('job_type', 'inventory_id', 'limit', 'credential_id',
                      'module_name', 'module_args', 'forks', 'verbosity',
                      'extra_vars', 'become_enabled', 'diff_mode'):
                      'execution_environment_id', 'module_name', 'module_args',
                      'forks', 'verbosity', 'extra_vars', 'become_enabled', 'diff_mode'):
            data[field] = getattr(self, field)
        return AdHocCommand.objects.create(**data)

@@ -209,6 +209,9 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
            self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
            if 'name' not in update_fields:
                update_fields.append('name')
        if not self.execution_environment_id:
            self.execution_environment = self.resolve_execution_environment()
            update_fields.append('execution_environment')
        super(AdHocCommand, self).save(*args, **kwargs)

    @property

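The save() override above backfills the execution environment only when the command does not already carry one, and copy() now includes execution_environment_id in the copied fields. A framework-free sketch of that save-time rule:

# Hedged sketch of the resolution-on-save behavior (no Django involved).
def save_ad_hoc_command(cmd, resolve, update_fields):
    # resolve stands in for resolve_execution_environment() from the mixin added later in this diff
    if not cmd.get('execution_environment_id'):
        cmd['execution_environment_id'] = resolve()
        update_fields.append('execution_environment')
    return cmd

cmd = {'module_name': 'ping', 'execution_environment_id': None}
fields = []
save_ad_hoc_command(cmd, resolve=lambda: 42, update_fields=fields)
print(cmd['execution_environment_id'], fields)   # 42 ['execution_environment']
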
@@ -331,6 +331,7 @@ class CredentialType(CommonModelNameNotUnique):
        ('net', _('Network')),
        ('scm', _('Source Control')),
        ('cloud', _('Cloud')),
        ('registry', _('Container Registry')),
        ('token', _('Personal Access Token')),
        ('insights', _('Insights')),
        ('external', _('External')),
@@ -528,15 +529,20 @@ class CredentialType(CommonModelNameNotUnique):
            with open(path, 'w') as f:
                f.write(data)
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
            # FIXME: develop some better means of referencing paths inside containers
            container_path = os.path.join(
                '/runner',
                os.path.basename(path)
            )

            # determine if filename indicates single file or many
            if file_label.find('.') == -1:
                tower_namespace.filename = path
                tower_namespace.filename = container_path
            else:
                if not hasattr(tower_namespace, 'filename'):
                    tower_namespace.filename = TowerNamespace()
                file_label = file_label.split('.')[1]
                setattr(tower_namespace.filename, file_label, path)
                setattr(tower_namespace.filename, file_label, container_path)

        injector_field = self._meta.get_field('injectors')
        for env_var, tmpl in self.injectors.get('env', {}).items():
@@ -564,7 +570,12 @@ class CredentialType(CommonModelNameNotUnique):

        if extra_vars:
            path = build_extra_vars_file(extra_vars, private_data_dir)
            args.extend(['-e', '@%s' % path])
            # FIXME: develop some better means of referencing paths inside containers
            container_path = os.path.join(
                '/runner',
                os.path.basename(path)
            )
            args.extend(['-e', '@%s' % container_path])


class ManagedCredentialType(SimpleNamespace):
@@ -1123,7 +1134,6 @@ ManagedCredentialType(
    },
)


ManagedCredentialType(
    namespace='kubernetes_bearer_token',
    kind='kubernetes',
@@ -1155,6 +1165,37 @@ ManagedCredentialType(
    }
)

ManagedCredentialType(
    namespace='registry',
    kind='registry',
    name=ugettext_noop('Container Registry'),
    inputs={
        'fields': [{
            'id': 'host',
            'label': ugettext_noop('Authentication URL'),
            'type': 'string',
            'help_text': ugettext_noop('Authentication endpoint for the container registry.'),
        }, {
            'id': 'username',
            'label': ugettext_noop('Username'),
            'type': 'string',
        }, {
            'id': 'password',
            'label': ugettext_noop('Password'),
            'type': 'string',
            'secret': True,
        }, {
            'id': 'token',
            'label': ugettext_noop('Access Token'),
            'type': 'string',
            'secret': True,
            'help_text': ugettext_noop('A token to use to authenticate with. '
                                       'This should not be set if username/password are being used.'),
        }],
        'required': ['host'],
    }
)


ManagedCredentialType(
    namespace='galaxy_api_token',

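The new registry credential type above takes the registry host plus either a username/password pair or an access token. A hedged sketch of creating such a credential through the ORM (values are placeholders; the API serializer validates the same inputs):

# Illustrative only; assumes the managed credential types have been registered.
from awx.main.models import Credential, CredentialType

registry_type = CredentialType.objects.get(namespace='registry', kind='registry')
cred = Credential.objects.create(
    name='Quay pull credential',
    credential_type=registry_type,
    inputs={
        'host': 'quay.io',               # the only required input per the definition above
        'username': 'builder',
        'password': 'not-a-real-secret',
    },
)
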
@@ -35,8 +35,8 @@ def gce(cred, env, private_data_dir):
        json.dump(json_cred, f, indent=2)
        f.close()
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
    env['GCE_CREDENTIALS_FILE_PATH'] = path
    env['GCP_SERVICE_ACCOUNT_FILE'] = path
    env['GCE_CREDENTIALS_FILE_PATH'] = os.path.join('/runner', os.path.basename(path))
    env['GCP_SERVICE_ACCOUNT_FILE'] = os.path.join('/runner', os.path.basename(path))

    # Handle env variables for new module types.
    # This includes gcp_compute inventory plugin and
@@ -92,8 +92,8 @@ def _openstack_data(cred):
        },
    }

    if cred.has_input('project_region_name'):
        openstack_data['clouds']['devstack']['region_name'] = cred.get_input('project_region_name', default='')
    if cred.has_input('region'):
        openstack_data['clouds']['devstack']['region_name'] = cred.get_input('region', default='')

    return openstack_data

@@ -105,7 +105,8 @@ def openstack(cred, env, private_data_dir):
        yaml.safe_dump(openstack_data, f, default_flow_style=False, allow_unicode=True)
        f.close()
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
    env['OS_CLIENT_CONFIG_FILE'] = path
    # TODO: constant for container base path
    env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(path))


def kubernetes_bearer_token(cred, env, private_data_dir):

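The repeated os.path.join('/runner', os.path.basename(path)) pattern (and the FIXME/TODO notes) reflects that files written into the job's private data directory on the host are expected to show up under /runner inside the execution environment container. A hypothetical helper capturing that convention:

import os

# Hypothetical helper, not part of the diff: map a host-side private data file
# to the path it should have inside the container, assuming the private data
# dir is mounted at /runner.
def to_container_path(path, container_root='/runner'):
    return os.path.join(container_root, os.path.basename(path))

print(to_container_path('/tmp/awx_123_xyz/credentials/gce.json'))   # /runner/gce.json
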
awx/main/models/execution_environments.py (new file, 53 lines)
@@ -0,0 +1,53 @@
from django.db import models
from django.utils.translation import ugettext_lazy as _

from awx.api.versioning import reverse
from awx.main.models.base import CommonModel


__all__ = ['ExecutionEnvironment']


class ExecutionEnvironment(CommonModel):
    class Meta:
        ordering = ('-created',)

    PULL_CHOICES = [
        ('always', _("Always pull container before running.")),
        ('missing', _("No pull option has been selected.")),
        ('never', _("Never pull container before running."))
    ]

    organization = models.ForeignKey(
        'Organization',
        null=True,
        default=None,
        blank=True,
        on_delete=models.CASCADE,
        related_name='%(class)ss',
        help_text=_('The organization used to determine access to this execution environment.'),
    )
    image = models.CharField(
        max_length=1024,
        verbose_name=_('image location'),
        help_text=_("The registry location where the container is stored."),
    )
    managed_by_tower = models.BooleanField(default=False, editable=False)
    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    pull = models.CharField(
        max_length=16,
        choices=PULL_CHOICES,
        blank=True,
        default='',
        help_text=_('Pull image before running?'),
    )

    def get_absolute_url(self, request=None):
        return reverse('api:execution_environment_detail', kwargs={'pk': self.pk}, request=request)

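An execution environment is therefore little more than an image reference with an optional registry credential, pull policy, and owning organization. A hedged sketch of creating one (values are placeholders; name and description come from CommonModel):

# Illustrative only.
from awx.main.models import ExecutionEnvironment, Organization

org = Organization.objects.first()
ee = ExecutionEnvironment.objects.create(
    name='Custom EE',
    organization=org,                         # None would make it globally available
    image='quay.io/example/custom-ee:latest',
    pull='missing',                           # one of the PULL_CHOICES above
)
print(ee.get_absolute_url())
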
@@ -147,6 +147,13 @@ class Instance(HasPolicyEditsMixin, BaseModel):
        return self.rampart_groups.filter(controller__isnull=False).exists()

    def refresh_capacity(self):
        if settings.IS_K8S:
            self.capacity = self.cpu = self.memory = self.cpu_capacity = self.mem_capacity = 0 # noqa
            self.version = awx_application_version
            self.save(update_fields=['capacity', 'version', 'modified', 'cpu',
                                     'memory', 'cpu_capacity', 'mem_capacity'])
            return

        cpu = get_cpu_capacity()
        mem = get_mem_capacity()
        if self.enabled:
@@ -192,6 +199,9 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
        null=True,
        on_delete=models.CASCADE
    )
    is_container_group = models.BooleanField(
        default=False
    )
    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',
@@ -246,10 +256,6 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
    def is_isolated(self):
        return bool(self.controller)

    @property
    def is_containerized(self):
        return bool(self.credential and self.credential.kubernetes)

    '''
    RelatedJobsMixin
    '''
@@ -306,9 +312,9 @@ def schedule_policy_task():
@receiver(post_save, sender=InstanceGroup)
def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs):
    if created or instance.has_policy_changes():
        if not instance.is_containerized:
        if not instance.is_container_group:
            schedule_policy_task()
    elif created or instance.is_containerized:
    elif created or instance.is_container_group:
        instance.set_default_policy_fields()


@@ -320,7 +326,7 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):

@receiver(post_delete, sender=InstanceGroup)
def on_instance_group_deleted(sender, instance, using, **kwargs):
    if not instance.is_containerized:
    if not instance.is_container_group:
        schedule_policy_task()

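Container groups are now flagged explicitly with is_container_group rather than inferred from an attached Kubernetes credential, and the post_save handler applies container-group policy defaults for them. A hedged sketch mirroring the test fixture later in this diff (kube_credential is assumed to be an existing kubernetes_bearer_token credential):

# Illustrative only.
from awx.main.models import InstanceGroup

ig = InstanceGroup(name='container')
ig.credential = kube_credential
ig.is_container_group = True      # explicit flag, no longer derived from the credential
ig.save()                         # post_save handler resets the policy_instance_* fields
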
@@ -1373,6 +1373,7 @@ class PluginFileInjector(object):
    collection = None
    collection_migration = '2.9' # Starting with this version, we use collections

    # TODO: delete this method and update unit tests
    @classmethod
    def get_proper_name(cls):
        if cls.plugin_name is None:
@@ -1397,13 +1398,12 @@ class PluginFileInjector(object):

    def inventory_as_dict(self, inventory_update, private_data_dir):
        source_vars = dict(inventory_update.source_vars_dict) # make a copy
        proper_name = self.get_proper_name()
        '''
        None conveys that we should use the user-provided plugin.
        Note that a plugin value of '' should still be overridden.
        '''
        if proper_name is not None:
            source_vars['plugin'] = proper_name
        if self.plugin_name is not None:
            source_vars['plugin'] = self.plugin_name
        return source_vars

    def build_env(self, inventory_update, env, private_data_dir, private_data_files):
@@ -1441,7 +1441,6 @@ class PluginFileInjector(object):

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
        env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
        return env

    def build_private_data(self, inventory_update, private_data_dir):
@@ -1544,7 +1543,7 @@ class openstack(PluginFileInjector):
        env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
        credential = inventory_update.get_cloud_credential()
        cred_data = private_data_files['credentials']
        env['OS_CLIENT_CONFIG_FILE'] = cred_data[credential]
        env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(cred_data[credential]))
        return env


@@ -1574,6 +1573,12 @@ class satellite6(PluginFileInjector):
        ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
        return ret

    def inventory_as_dict(self, inventory_update, private_data_dir):
        ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
        # this inventory plugin requires the fully qualified inventory plugin name
        ret['plugin'] = f'{self.namespace}.{self.collection}.{self.plugin_name}'
        return ret


class tower(PluginFileInjector):
    plugin_name = 'tower'

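inventory_as_dict now writes plugin_name directly into source_vars, and satellite6 overrides it with the fully qualified '<namespace>.<collection>.<plugin_name>' form. A framework-free sketch of that selection logic:

# Hedged sketch; mirrors the branch shown above without the Django models.
def plugin_source_vars(plugin_name, user_source_vars):
    source_vars = dict(user_source_vars)      # make a copy
    if plugin_name is not None:               # a value of '' would still be overridden
        source_vars['plugin'] = plugin_name
    return source_vars

print(plugin_source_vars('gcp_compute', {'auth_kind': 'serviceaccount'}))
# satellite6 then replaces the value with the fully qualified name before writing the file.
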
@@ -284,7 +284,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
    def _get_unified_job_field_names(cls):
        return set(f.name for f in JobOptions._meta.fields) | set(
            ['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials',
             'job_slice_number', 'job_slice_count']
             'job_slice_number', 'job_slice_count', 'execution_environment']
        )

    @property
@@ -768,11 +768,11 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

    @property
    def can_run_containerized(self):
        return any([ig for ig in self.preferred_instance_groups if ig.is_containerized])
        return any([ig for ig in self.preferred_instance_groups if ig.is_container_group])

    @property
    def is_containerized(self):
        return bool(self.instance_group and self.instance_group.is_containerized)
    def is_container_group_task(self):
        return bool(self.instance_group and self.instance_group.is_container_group)

    @property
    def preferred_instance_groups(self):
@@ -828,6 +828,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
        return self.inventory.hosts.only(*only)

    def start_job_fact_cache(self, destination, modification_times, timeout=None):
        self.log_lifecycle("start_job_fact_cache")
        os.makedirs(destination, mode=0o700)
        hosts = self._get_inventory_hosts()
        if timeout is None:
@@ -852,6 +853,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
            modification_times[filepath] = os.path.getmtime(filepath)

    def finish_job_fact_cache(self, destination, modification_times):
        self.log_lifecycle("finish_job_fact_cache")
        for host in self._get_inventory_hosts():
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
@@ -1284,6 +1286,8 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin):

    @property
    def task_impact(self):
        if settings.IS_K8S:
            return 0
        return 5

    @property

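When AWX itself runs on Kubernetes (settings.IS_K8S), system jobs stop counting against instance capacity, since container groups launch a pod per job. A hedged, stand-alone restatement of that rule:

# Sketch of the capacity rule above, outside of Django settings.
def system_job_task_impact(is_k8s):
    # Pods bring their own resources, so don't consume local instance capacity on K8s.
    return 0 if is_k8s else 5

print(system_job_task_impact(True), system_job_task_impact(False))   # 0 5
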
@@ -22,7 +22,7 @@ from awx.main.models.base import prevent_search
from awx.main.models.rbac import (
    Role, RoleAncestorEntry, get_roles_on_resource
)
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser, polymorphic
from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
from awx.main.utils.polymorphic import build_polymorphic_ctypes_map
from awx.main.fields import JSONField, AskForField
@@ -34,7 +34,7 @@ logger = logging.getLogger('awx.main.models.mixins')

__all__ = ['ResourceMixin', 'SurveyJobTemplateMixin', 'SurveyJobMixin',
           'TaskManagerUnifiedJobMixin', 'TaskManagerJobMixin', 'TaskManagerProjectUpdateMixin',
           'TaskManagerInventoryUpdateMixin', 'CustomVirtualEnvMixin']
           'TaskManagerInventoryUpdateMixin', 'ExecutionEnvironmentMixin', 'CustomVirtualEnvMixin']


class ResourceMixin(models.Model):
@@ -441,6 +441,44 @@ class TaskManagerInventoryUpdateMixin(TaskManagerUpdateOnLaunchMixin):
        abstract = True


class ExecutionEnvironmentMixin(models.Model):
    class Meta:
        abstract = True

    execution_environment = models.ForeignKey(
        'ExecutionEnvironment',
        null=True,
        blank=True,
        default=None,
        on_delete=polymorphic.SET_NULL,
        related_name='%(class)ss',
        help_text=_('The container image to be used for execution.'),
    )

    def get_execution_environment_default(self):
        from awx.main.models.execution_environments import ExecutionEnvironment

        if settings.DEFAULT_EXECUTION_ENVIRONMENT is not None:
            return settings.DEFAULT_EXECUTION_ENVIRONMENT
        return ExecutionEnvironment.objects.filter(organization=None, managed_by_tower=True).first()

    def resolve_execution_environment(self):
        """
        Return the execution environment that should be used when creating a new job.
        """
        if self.execution_environment is not None:
            return self.execution_environment
        if getattr(self, 'project_id', None) and self.project.default_environment is not None:
            return self.project.default_environment
        if getattr(self, 'organization', None) and self.organization.default_environment is not None:
            return self.organization.default_environment
        if getattr(self, 'inventory', None) and self.inventory.organization is not None:
            if self.inventory.organization.default_environment is not None:
                return self.inventory.organization.default_environment

        return self.get_execution_environment_default()


class CustomVirtualEnvMixin(models.Model):
    class Meta:
        abstract = True

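resolve_execution_environment fixes the lookup order for the image a job will run in: an execution environment set directly on the resource wins, then the project's default_environment, then the organization's, then the inventory's organization, and finally the global default (settings.DEFAULT_EXECUTION_ENVIRONMENT or the managed execution environment with no organization). A framework-free sketch of the same fall-through:

# Hedged sketch of the precedence implemented above; objects are simple stand-ins.
def resolve_execution_environment(resource, global_default):
    if resource.execution_environment is not None:
        return resource.execution_environment
    project = getattr(resource, 'project', None)
    if project is not None and project.default_environment is not None:
        return project.default_environment
    org = getattr(resource, 'organization', None)
    if org is not None and org.default_environment is not None:
        return org.default_environment
    inventory = getattr(resource, 'inventory', None)
    if inventory is not None and inventory.organization is not None:
        if inventory.organization.default_environment is not None:
            return inventory.organization.default_environment
    return global_default   # settings.DEFAULT_EXECUTION_ENVIRONMENT or the managed global EE
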
@@ -280,6 +280,7 @@ class JobNotificationMixin(object):
|
||||
{'unified_job_template': ['id', 'name', 'description', 'unified_job_type']},
|
||||
{'instance_group': ['name', 'id']},
|
||||
{'created_by': ['id', 'username', 'first_name', 'last_name']},
|
||||
{'schedule': ['id', 'name', 'description', 'next_run']},
|
||||
{'labels': ['count', 'results']}]}]
|
||||
|
||||
@classmethod
|
||||
@@ -344,6 +345,10 @@ class JobNotificationMixin(object):
|
||||
'name': 'Stub project',
|
||||
'scm_type': 'git',
|
||||
'status': 'successful'},
|
||||
'schedule': {'description': 'Sample schedule',
|
||||
'id': 42,
|
||||
'name': 'Stub schedule',
|
||||
'next_run': datetime.datetime(2038, 1, 1, 0, 0, 0, 0, tzinfo=datetime.timezone.utc)},
|
||||
'unified_job_template': {'description': 'Sample unified job template description',
|
||||
'id': 39,
|
||||
'name': 'Stub Job Template',
|
||||
@@ -383,12 +388,17 @@ class JobNotificationMixin(object):
|
||||
and a url to the job run."""
|
||||
job_context = {'host_status_counts': {}}
|
||||
summary = None
|
||||
if hasattr(self, 'job_host_summaries'):
|
||||
summary = self.job_host_summaries.first()
|
||||
if summary:
|
||||
from awx.api.serializers import JobHostSummarySerializer
|
||||
summary_data = JobHostSummarySerializer(summary).to_representation(summary)
|
||||
job_context['host_status_counts'] = summary_data
|
||||
try:
|
||||
has_event_property = any([f for f in self.event_class._meta.fields if f.name == 'event'])
|
||||
except NotImplementedError:
|
||||
has_event_property = False
|
||||
if has_event_property:
|
||||
qs = self.get_event_queryset()
|
||||
if qs:
|
||||
event = qs.only('event_data').filter(event='playbook_on_stats').first()
|
||||
if event:
|
||||
summary = event.get_host_status_counts()
|
||||
job_context['host_status_counts'] = summary
|
||||
context = {
|
||||
'job': job_context,
|
||||
'job_friendly_name': self.get_notification_friendly_name(),
|
||||
|
||||
@@ -61,6 +61,15 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
        blank=True,
        related_name='%(class)s_notification_templates_for_approvals'
    )
    default_environment = models.ForeignKey(
        'ExecutionEnvironment',
        null=True,
        blank=True,
        default=None,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text=_('The default execution environment for jobs run by this organization.'),
    )

    admin_role = ImplicitRoleField(
        parent_role='singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
@@ -86,6 +95,9 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
    job_template_admin_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    execution_environment_admin_role = ImplicitRoleField(
        parent_role='admin_role',
    )
    auditor_role = ImplicitRoleField(
        parent_role='singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
    )
@@ -97,7 +109,8 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
                     'execute_role', 'project_admin_role',
                     'inventory_admin_role', 'workflow_admin_role',
                     'notification_admin_role', 'credential_admin_role',
                     'job_template_admin_role', 'approval_role',],
                     'job_template_admin_role', 'approval_role',
                     'execution_environment_admin_role',],
    )
    approval_role = ImplicitRoleField(
        parent_role='admin_role',

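Organizations gain a default_environment pointer and a dedicated execution_environment_admin_role that feeds into member_role's parents. A hedged sketch of wiring both up (objects are assumed to exist):

# Illustrative only.
from django.contrib.auth.models import User
from awx.main.models import ExecutionEnvironment, Organization

org = Organization.objects.get(name='Default')
ee = ExecutionEnvironment.objects.get(name='Custom EE')

org.default_environment = ee        # org-wide fallback image for jobs
org.save(update_fields=['default_environment'])

ee_admin = User.objects.get(username='ee-admin')
org.execution_environment_admin_role.members.add(ee_admin)   # grant the new implicit role
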
@@ -35,7 +35,7 @@ from awx.main.models.mixins import (
    CustomVirtualEnvMixin,
    RelatedJobsMixin
)
from awx.main.utils import update_scm_url
from awx.main.utils import update_scm_url, polymorphic
from awx.main.utils.ansible import skip_directory, could_be_inventory, could_be_playbook
from awx.main.fields import ImplicitRoleField
from awx.main.models.rbac import (
@@ -187,6 +187,14 @@ class ProjectOptions(models.Model):
                pass
        return cred

    def resolve_execution_environment(self):
        """
        Project updates, themselves, will use the default execution environment.
        Jobs using the project can use the default_environment, but the project updates
        are not flexible enough to allow customizing the image they use.
        """
        return self.get_execution_environment_default()

    def get_project_path(self, check_if_exists=True):
        local_path = os.path.basename(self.local_path)
        if local_path and not local_path.startswith('.'):
@@ -259,6 +267,15 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
        app_label = 'main'
        ordering = ('id',)

    default_environment = models.ForeignKey(
        'ExecutionEnvironment',
        null=True,
        blank=True,
        default=None,
        on_delete=polymorphic.SET_NULL,
        related_name='+',
        help_text=_('The default execution environment for jobs run using this project.'),
    )
    scm_update_on_launch = models.BooleanField(
        default=False,
        help_text=_('Update the project when a job is launched that uses the project.'),
@@ -554,6 +571,8 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage

    @property
    def task_impact(self):
        if settings.IS_K8S:
            return 0
        return 0 if self.job_type == 'run' else 1

    @property

@@ -40,6 +40,7 @@ role_names = {
    'inventory_admin_role': _('Inventory Admin'),
    'credential_admin_role': _('Credential Admin'),
    'job_template_admin_role': _('Job Template Admin'),
    'execution_environment_admin_role': _('Execution Environment Admin'),
    'workflow_admin_role': _('Workflow Admin'),
    'notification_admin_role': _('Notification Admin'),
    'auditor_role': _('Auditor'),
@@ -60,6 +61,7 @@ role_descriptions = {
    'inventory_admin_role': _('Can manage all inventories of the %s'),
    'credential_admin_role': _('Can manage all credentials of the %s'),
    'job_template_admin_role': _('Can manage all job templates of the %s'),
    'execution_environment_admin_role': _('Can manage all execution environments of the %s'),
    'workflow_admin_role': _('Can manage all workflows of the %s'),
    'notification_admin_role': _('Can manage all notifications of the %s'),
    'auditor_role': _('Can view all aspects of the %s'),

@@ -39,7 +39,7 @@ from awx.main.models.base import (
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.registrar import activity_stream_registrar
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
from awx.main.utils import (
    camelcase_to_underscore, get_model_for_type,
    encrypt_dict, decrypt_field, _inventory_updates,
@@ -50,16 +50,16 @@ from awx.main.utils import (
from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.consumers import emit_channel_notification
from awx.main.fields import JSONField, AskForField, OrderedManyToManyField
from awx.main.fields import JSONField, JSONBField, AskForField, OrderedManyToManyField

__all__ = ['UnifiedJobTemplate', 'UnifiedJob', 'StdoutMaxBytesExceeded']

logger = logging.getLogger('awx.main.models.unified_jobs')

logger_job_lifecycle = logging.getLogger('awx.analytics.job_lifecycle')
# NOTE: ACTIVE_STATES moved to constants because it is used by parent modules


class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, NotificationFieldsModel):
class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEnvironmentMixin, NotificationFieldsModel):
    '''
    Concrete base class for unified job templates.
    '''
@@ -376,6 +376,8 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
        for fd, val in eager_fields.items():
            setattr(unified_job, fd, val)

        unified_job.execution_environment = self.resolve_execution_environment()

        # NOTE: slice workflow jobs _get_parent_field_name method
        # is not correct until this is set
        if not parent_field_name:
@@ -420,7 +422,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
        # have been associated to the UJ
        if unified_job.__class__ in activity_stream_registrar.models:
            activity_stream_create(None, unified_job, True)

        unified_job.log_lifecycle("created")
        return unified_job

    @classmethod
@@ -527,7 +529,7 @@ class StdoutMaxBytesExceeded(Exception):


class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique,
                 UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin):
                 UnifiedJobTypeStringMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin):
    '''
    Concrete base class for unified job run by the task engine.
    '''
@@ -720,6 +722,19 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        'Credential',
        related_name='%(class)ss',
    )
    installed_collections = JSONBField(
        blank=True,
        default=dict,
        editable=False,
        help_text=_("The Collections names and versions installed in the execution environment."),
    )
    ansible_version = models.CharField(
        max_length=255,
        blank=True,
        default='',
        editable=False,
        help_text=_("The version of Ansible Core installed in the execution environment."),
    )

    def get_absolute_url(self, request=None):
        RealClass = self.get_real_instance_class()
@@ -862,7 +877,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
            self.unified_job_template = self._get_parent_instance()
            if 'unified_job_template' not in update_fields:
                update_fields.append('unified_job_template')


        if self.cancel_flag and not self.canceled_on:
            # Record the 'canceled' time.
            self.canceled_on = now()
@@ -1010,6 +1025,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
            event_qs = self.get_event_queryset()
        except NotImplementedError:
            return True # Model without events, such as WFJT
        self.log_lifecycle("event_processing_finished")
        return self.emitted_events == event_qs.count()

    def result_stdout_raw_handle(self, enforce_max_bytes=True):
@@ -1318,6 +1334,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        if 'extra_vars' in kwargs:
            self.handle_extra_data(kwargs['extra_vars'])

        # remove any job_explanations that may have been set while job was in pending
        if self.job_explanation != "":
            self.job_explanation = ""

        return (True, opts)

    def signal_start(self, **kwargs):
@@ -1448,6 +1468,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        for name in ('awx', 'tower'):
            r['{}_workflow_job_id'.format(name)] = wj.pk
            r['{}_workflow_job_name'.format(name)] = wj.name
            r['{}_workflow_job_launch_type'.format(name)] = wj.launch_type
        if schedule:
            r['{}_parent_job_schedule_id'.format(name)] = schedule.pk
            r['{}_parent_job_schedule_name'.format(name)] = schedule.name
@@ -1482,5 +1503,19 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
        return bool(self.controller_node)

    @property
    def is_containerized(self):
    def is_container_group_task(self):
        return False

    def log_lifecycle(self, state, blocked_by=None):
        extra={'type': self._meta.model_name,
               'task_id': self.id,
               'state': state}
        if self.unified_job_template:
            extra["template_name"] = self.unified_job_template.name
        if state == "blocked" and blocked_by:
            blocked_by_msg = f"{blocked_by._meta.model_name}-{blocked_by.id}"
            msg = f"{self._meta.model_name}-{self.id} blocked by {blocked_by_msg}"
            extra["blocked_by"] = blocked_by_msg
        else:
            msg = f"{self._meta.model_name}-{self.id} {state.replace('_', ' ')}"
        logger_job_lifecycle.debug(msg, extra=extra)

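Since UnifiedJobTemplate and UnifiedJob both mix in ExecutionEnvironmentMixin, create_unified_job stamps the resolved execution environment onto the spawned job, and the runtime later reports what ran inside it. A hedged sketch of inspecting that (the shape of installed_collections is assumed; these read-only fields are populated by the job runtime, which is not part of the hunks above):

# Illustrative only; template and job objects are assumed to exist.
from awx.main.models import JobTemplate

jt = JobTemplate.objects.get(name='Demo Job Template')
job = jt.create_unified_job()            # execution_environment resolved at creation time

print(job.execution_environment)         # explicit EE, project/org default, or global default
# After the run completes these fields are filled in:
print(job.ansible_version)               # e.g. '2.9.23'
print(job.installed_collections)         # mapping of collection names to versions (shape assumed)
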
@@ -1,5 +1,3 @@
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
ProjectUpdate,
|
||||
@@ -20,119 +18,110 @@ class DependencyGraph(object):
|
||||
INVENTORY_SOURCE_UPDATES = 'inventory_source_updates'
|
||||
WORKFLOW_JOB_TEMPLATES_JOBS = 'workflow_job_template_jobs'
|
||||
|
||||
LATEST_PROJECT_UPDATES = 'latest_project_updates'
|
||||
LATEST_INVENTORY_UPDATES = 'latest_inventory_updates'
|
||||
|
||||
INVENTORY_SOURCES = 'inventory_source_ids'
|
||||
|
||||
def __init__(self, queue):
|
||||
self.queue = queue
|
||||
def __init__(self):
|
||||
self.data = {}
|
||||
# project_id -> True / False
|
||||
self.data[self.PROJECT_UPDATES] = {}
|
||||
# inventory_id -> True / False
|
||||
# The reason for tracking both inventory and inventory sources:
|
||||
# Consider InvA, which has two sources, InvSource1, InvSource2.
|
||||
# JobB might depend on InvA, which launches two updates, one for each source.
|
||||
# To determine if JobB can run, we can just check InvA, which is marked in
|
||||
# INVENTORY_UPDATES, instead of having to check for both entries in
|
||||
# INVENTORY_SOURCE_UPDATES.
|
||||
self.data[self.INVENTORY_UPDATES] = {}
|
||||
# job_template_id -> True / False
|
||||
self.data[self.JOB_TEMPLATE_JOBS] = {}
|
||||
|
||||
'''
|
||||
Track runnable job related project and inventory to ensure updates
|
||||
don't run while a job needing those resources is running.
|
||||
'''
|
||||
|
||||
# inventory_source_id -> True / False
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES] = {}
|
||||
# True / False
|
||||
self.data[self.SYSTEM_JOB] = True
|
||||
# workflow_job_template_id -> True / False
|
||||
self.data[self.JOB_TEMPLATE_JOBS] = {}
|
||||
self.data[self.SYSTEM_JOB] = {}
|
||||
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {}
|
||||
|
||||
# project_id -> latest ProjectUpdateLatestDict'
|
||||
self.data[self.LATEST_PROJECT_UPDATES] = {}
|
||||
# inventory_source_id -> latest InventoryUpdateLatestDict
|
||||
self.data[self.LATEST_INVENTORY_UPDATES] = {}
|
||||
def mark_if_no_key(self, job_type, id, job):
|
||||
# only mark first occurrence of a task. If 10 of JobA are launched
|
||||
# (concurrent disabled), the dependency graph should return that jobs
|
||||
# 2 through 10 are blocked by job1
|
||||
if id not in self.data[job_type]:
|
||||
self.data[job_type][id] = job
|
||||
|
||||
# inventory_id -> [inventory_source_ids]
|
||||
self.data[self.INVENTORY_SOURCES] = {}
|
||||
def get_item(self, job_type, id):
|
||||
return self.data[job_type].get(id, None)
|
||||
|
||||
def add_latest_project_update(self, job):
|
||||
self.data[self.LATEST_PROJECT_UPDATES][job.project_id] = job
|
||||
|
||||
def get_now(self):
|
||||
return tz_now()
|
||||
|
||||
def mark_system_job(self):
|
||||
self.data[self.SYSTEM_JOB] = False
|
||||
def mark_system_job(self, job):
|
||||
# Don't track different types of system jobs, so that only one can run
|
||||
# at a time. Therefore id in this case is just 'system_job'.
|
||||
self.mark_if_no_key(self.SYSTEM_JOB, 'system_job', job)
|
||||
|
||||
def mark_project_update(self, job):
|
||||
self.data[self.PROJECT_UPDATES][job.project_id] = False
|
||||
self.mark_if_no_key(self.PROJECT_UPDATES, job.project_id, job)
|
||||
|
||||
def mark_inventory_update(self, inventory_id):
|
||||
self.data[self.INVENTORY_UPDATES][inventory_id] = False
|
||||
def mark_inventory_update(self, job):
|
||||
if type(job) is AdHocCommand:
|
||||
self.mark_if_no_key(self.INVENTORY_UPDATES, job.inventory_id, job)
|
||||
else:
|
||||
self.mark_if_no_key(self.INVENTORY_UPDATES, job.inventory_source.inventory_id, job)
|
||||
|
||||
def mark_inventory_source_update(self, inventory_source_id):
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False
|
||||
def mark_inventory_source_update(self, job):
|
||||
self.mark_if_no_key(self.INVENTORY_SOURCE_UPDATES, job.inventory_source_id, job)
|
||||
|
||||
def mark_job_template_job(self, job):
|
||||
self.data[self.JOB_TEMPLATE_JOBS][job.job_template_id] = False
|
||||
self.mark_if_no_key(self.JOB_TEMPLATE_JOBS, job.job_template_id, job)
|
||||
|
||||
def mark_workflow_job(self, job):
|
||||
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS][job.workflow_job_template_id] = False
|
||||
self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)
|
||||
|
||||
def can_project_update_run(self, job):
|
||||
return self.data[self.PROJECT_UPDATES].get(job.project_id, True)
|
||||
def project_update_blocked_by(self, job):
|
||||
return self.get_item(self.PROJECT_UPDATES, job.project_id)
|
||||
|
||||
def can_inventory_update_run(self, job):
|
||||
return self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)
|
||||
def inventory_update_blocked_by(self, job):
|
||||
return self.get_item(self.INVENTORY_SOURCE_UPDATES, job.inventory_source_id)
|
||||
|
||||
def can_job_run(self, job):
|
||||
if self.data[self.PROJECT_UPDATES].get(job.project_id, True) is True and \
|
||||
self.data[self.INVENTORY_UPDATES].get(job.inventory_id, True) is True:
|
||||
if job.allow_simultaneous is False:
|
||||
return self.data[self.JOB_TEMPLATE_JOBS].get(job.job_template_id, True)
|
||||
else:
|
||||
return True
|
||||
return False
|
||||
def job_blocked_by(self, job):
|
||||
project_block = self.get_item(self.PROJECT_UPDATES, job.project_id)
|
||||
inventory_block = self.get_item(self.INVENTORY_UPDATES, job.inventory_id)
|
||||
if job.allow_simultaneous is False:
|
||||
job_block = self.get_item(self.JOB_TEMPLATE_JOBS, job.job_template_id)
|
||||
else:
|
||||
job_block = None
|
||||
return project_block or inventory_block or job_block
|
||||
|
||||
def can_workflow_job_run(self, job):
|
||||
if job.allow_simultaneous:
|
||||
return True
|
||||
return self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS].get(job.workflow_job_template_id, True)
|
||||
def workflow_job_blocked_by(self, job):
|
||||
if job.allow_simultaneous is False:
|
||||
return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
|
||||
return None
|
||||
|
||||
def can_system_job_run(self):
|
||||
return self.data[self.SYSTEM_JOB]
|
||||
def system_job_blocked_by(self, job):
|
||||
return self.get_item(self.SYSTEM_JOB, 'system_job')
|
||||
|
||||
def can_ad_hoc_command_run(self, job):
|
||||
return self.data[self.INVENTORY_UPDATES].get(job.inventory_id, True)
|
||||
def ad_hoc_command_blocked_by(self, job):
|
||||
return self.get_item(self.INVENTORY_UPDATES, job.inventory_id)
|
||||
|
||||
def is_job_blocked(self, job):
|
||||
def task_blocked_by(self, job):
|
||||
if type(job) is ProjectUpdate:
|
||||
return not self.can_project_update_run(job)
|
||||
return self.project_update_blocked_by(job)
|
||||
elif type(job) is InventoryUpdate:
|
||||
return not self.can_inventory_update_run(job)
|
||||
return self.inventory_update_blocked_by(job)
|
||||
elif type(job) is Job:
|
||||
return not self.can_job_run(job)
|
||||
return self.job_blocked_by(job)
|
||||
elif type(job) is SystemJob:
|
||||
return not self.can_system_job_run()
|
||||
return self.system_job_blocked_by(job)
|
||||
elif type(job) is AdHocCommand:
|
||||
return not self.can_ad_hoc_command_run(job)
|
||||
return self.ad_hoc_command_blocked_by(job)
|
||||
elif type(job) is WorkflowJob:
|
||||
return not self.can_workflow_job_run(job)
|
||||
return self.workflow_job_blocked_by(job)
|
||||
|
||||
def add_job(self, job):
|
||||
if type(job) is ProjectUpdate:
|
||||
self.mark_project_update(job)
|
||||
elif type(job) is InventoryUpdate:
|
||||
self.mark_inventory_update(job.inventory_source.inventory_id)
|
||||
self.mark_inventory_source_update(job.inventory_source_id)
|
||||
self.mark_inventory_update(job)
|
||||
self.mark_inventory_source_update(job)
|
||||
elif type(job) is Job:
|
||||
self.mark_job_template_job(job)
|
||||
elif type(job) is WorkflowJob:
|
||||
self.mark_workflow_job(job)
|
||||
elif type(job) is SystemJob:
|
||||
self.mark_system_job()
|
||||
self.mark_system_job(job)
|
||||
elif type(job) is AdHocCommand:
|
||||
self.mark_inventory_update(job.inventory_id)
|
||||
self.mark_inventory_update(job)
|
||||
|
||||
def add_jobs(self, jobs):
|
||||
for j in jobs:
|
||||
|
||||
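The DependencyGraph changes above replace the per-type "can it run?" booleans with a record of which job is doing the blocking: mark_if_no_key() stores only the first task seen for a given key, and task_blocked_by() hands that task back (or None). A framework-free sketch of the pattern:

# Hedged sketch of the blocked_by bookkeeping; no AWX models involved.
class MiniDependencyGraph:
    def __init__(self):
        self.data = {'job_template_jobs': {}}

    def mark_if_no_key(self, job_type, key, job):
        # only the first occurrence is recorded, so later duplicates
        # report the original job as their blocker
        if key not in self.data[job_type]:
            self.data[job_type][key] = job

    def get_item(self, job_type, key):
        return self.data[job_type].get(key, None)

graph = MiniDependencyGraph()
graph.mark_if_no_key('job_template_jobs', 42, 'job-101')
graph.mark_if_no_key('job_template_jobs', 42, 'job-102')    # ignored, job-101 already holds the key
print(graph.get_item('job_template_jobs', 42))               # 'job-101' is what blocks job-102
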
@@ -64,11 +64,13 @@ class TaskManager():
|
||||
# will no longer be started and will be started on the next task manager cycle.
|
||||
self.start_task_limit = settings.START_TASK_LIMIT
|
||||
|
||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
||||
|
||||
def after_lock_init(self):
|
||||
'''
|
||||
Init AFTER we know this instance of the task manager will run because the lock is acquired.
|
||||
'''
|
||||
instances = Instance.objects.filter(~Q(hostname=None), capacity__gt=0, enabled=True)
|
||||
instances = Instance.objects.filter(~Q(hostname=None), enabled=True)
|
||||
self.real_instances = {i.hostname: i for i in instances}
|
||||
|
||||
instances_partial = [SimpleNamespace(obj=instance,
|
||||
@@ -80,26 +82,29 @@ class TaskManager():
|
||||
instances_by_hostname = {i.hostname: i for i in instances_partial}
|
||||
|
||||
for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
|
||||
self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
|
||||
self.graph[rampart_group.name] = dict(graph=DependencyGraph(),
|
||||
capacity_total=rampart_group.capacity,
|
||||
consumed_capacity=0,
|
||||
instances=[])
|
||||
for instance in rampart_group.instances.filter(capacity__gt=0, enabled=True).order_by('hostname'):
|
||||
for instance in rampart_group.instances.filter(enabled=True).order_by('hostname'):
|
||||
if instance.hostname in instances_by_hostname:
|
||||
self.graph[rampart_group.name]['instances'].append(instances_by_hostname[instance.hostname])
|
||||
|
||||
def is_job_blocked(self, task):
|
||||
def job_blocked_by(self, task):
|
||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
||||
# in the old task manager this was handled as a method on each task object outside of the graph and
|
||||
# probably has the side effect of cutting down *a lot* of the logic from this task manager class
|
||||
for g in self.graph:
|
||||
if self.graph[g]['graph'].is_job_blocked(task):
|
||||
return True
|
||||
blocked_by = self.graph[g]['graph'].task_blocked_by(task)
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
|
||||
if not task.dependent_jobs_finished():
|
||||
return True
|
||||
blocked_by = task.dependent_jobs.first()
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
|
||||
return False
|
||||
return None
|
||||
|
||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
@@ -278,12 +283,12 @@ class TaskManager():
|
||||
task.controller_node = controller_node
|
||||
logger.debug('Submitting isolated {} to queue {} controlled by {}.'.format(
|
||||
task.log_format, task.execution_node, controller_node))
|
||||
elif rampart_group.is_containerized:
|
||||
elif rampart_group.is_container_group:
|
||||
# find one real, non-containerized instance with capacity to
|
||||
# act as the controller for k8s API interaction
|
||||
match = None
|
||||
for group in InstanceGroup.objects.all():
|
||||
if group.is_containerized or group.controller_id:
|
||||
if group.is_container_group or group.controller_id:
|
||||
continue
|
||||
match = group.fit_task_to_most_remaining_capacity_instance(task, group.instances.all())
|
||||
if match:
|
||||
@@ -312,6 +317,7 @@ class TaskManager():
|
||||
with disable_activity_stream():
|
||||
task.celery_task_id = str(uuid.uuid4())
|
||||
task.save()
|
||||
task.log_lifecycle("waiting")
|
||||
|
||||
if rampart_group is not None:
|
||||
self.consume_capacity(task, rampart_group.name)
|
||||
@@ -450,6 +456,7 @@ class TaskManager():
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
for task in undeped_tasks:
|
||||
task.log_lifecycle("acknowledged")
|
||||
dependencies = []
|
||||
if not type(task) is Job:
|
||||
continue
|
||||
@@ -489,11 +496,18 @@ class TaskManager():
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
tasks_to_update_job_explanation = []
|
||||
for task in pending_tasks:
|
||||
if self.start_task_limit <= 0:
|
||||
break
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("{} is blocked from running".format(task.log_format))
|
||||
blocked_by = self.job_blocked_by(task)
|
||||
if blocked_by:
|
||||
task.log_lifecycle("blocked", blocked_by=blocked_by)
|
||||
job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
|
||||
if task.job_explanation != job_explanation:
|
||||
if task.created < (tz_now() - self.time_delta_job_explanation):
|
||||
task.job_explanation = job_explanation
|
||||
tasks_to_update_job_explanation.append(task)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
@@ -507,14 +521,17 @@ class TaskManager():
|
||||
self.start_task(task, None, task.get_jobs_fail_chain(), None)
|
||||
continue
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if task.can_run_containerized and rampart_group.is_containerized:
|
||||
if task.can_run_containerized and rampart_group.is_container_group:
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
self.start_task(task, rampart_group, task.get_jobs_fail_chain(), None)
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
|
||||
remaining_capacity = self.get_remaining_capacity(rampart_group.name)
|
||||
if not rampart_group.is_containerized and self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
if (
|
||||
task.task_impact > 0 and # project updates have a cost of zero
|
||||
not rampart_group.is_container_group and
|
||||
self.get_remaining_capacity(rampart_group.name) <= 0):
|
||||
logger.debug("Skipping group {}, remaining_capacity {} <= 0".format(
|
||||
rampart_group.name, remaining_capacity))
|
||||
continue
|
||||
@@ -522,8 +539,8 @@ class TaskManager():
|
||||
execution_instance = InstanceGroup.fit_task_to_most_remaining_capacity_instance(task, self.graph[rampart_group.name]['instances']) or \
|
||||
InstanceGroup.find_largest_idle_instance(self.graph[rampart_group.name]['instances'])
|
||||
|
||||
if execution_instance or rampart_group.is_containerized:
|
||||
if not rampart_group.is_containerized:
|
||||
if execution_instance or rampart_group.is_container_group:
|
||||
if not rampart_group.is_container_group:
|
||||
execution_instance.remaining_capacity = max(0, execution_instance.remaining_capacity - task.task_impact)
|
||||
execution_instance.jobs_running += 1
|
||||
logger.debug("Starting {} in group {} instance {} (remaining_capacity={})".format(
|
||||
@@ -539,7 +556,17 @@ class TaskManager():
|
||||
logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
|
||||
rampart_group.name, task.log_format, task.task_impact))
|
||||
if not found_acceptable_queue:
|
||||
task.log_lifecycle("needs_capacity")
|
||||
job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.")
|
||||
if task.job_explanation != job_explanation:
|
||||
if task.created < (tz_now() - self.time_delta_job_explanation):
|
||||
# Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.
|
||||
# Therefore we should only update the job_explanation after some time has elapsed to
|
||||
# prevent excessive task saves.
|
||||
task.job_explanation = job_explanation
|
||||
tasks_to_update_job_explanation.append(task)
|
||||
logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
|
||||
UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation, ['job_explanation'])
|
||||
|
||||
def timeout_approval_node(self):
|
||||
workflow_approvals = WorkflowApproval.objects.filter(status='pending')
|
||||
@@ -570,7 +597,7 @@ class TaskManager():
|
||||
).exclude(
|
||||
execution_node__in=Instance.objects.values_list('hostname', flat=True)
|
||||
):
|
||||
if j.execution_node and not j.is_containerized:
|
||||
if j.execution_node and not j.is_container_group_task:
|
||||
logger.error(f'{j.execution_node} is not a registered instance; reaping {j.log_format}')
|
||||
reap_job(j, 'failed')
|
||||
|
||||
|
||||
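In the TaskManager hunks above, job_blocked_by() consults each instance group's graph and the task's unfinished dependencies, and the pending-task loop turns the returned blocker into a throttled job_explanation update. A hedged sketch of that explanation logic (attribute names simplified):

# Sketch only; mirrors the grace-period behavior shown above.
from datetime import datetime, timedelta
from types import SimpleNamespace

def explain_block(task, blocked_by, now, grace=timedelta(seconds=30)):
    explanation = f"waiting for {blocked_by.model_name}-{blocked_by.id} to finish"
    # Most blocks clear within seconds, so only persist the explanation
    # once the task has been waiting longer than the grace period.
    if task.job_explanation != explanation and task.created < (now - grace):
        task.job_explanation = explanation
        return True      # caller would add the task to the bulk_update list
    return False

task = SimpleNamespace(job_explanation='', created=datetime(2021, 1, 1))
blocker = SimpleNamespace(model_name='job', id=7)
print(explain_block(task, blocker, now=datetime(2021, 1, 2)))   # True
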
@@ -368,6 +368,7 @@ def model_serializer_mapping():
        models.Credential: serializers.CredentialSerializer,
        models.Team: serializers.TeamSerializer,
        models.Project: serializers.ProjectSerializer,
        models.ExecutionEnvironment: serializers.ExecutionEnvironmentSerializer,
        models.JobTemplate: serializers.JobTemplateWithSpecSerializer,
        models.Job: serializers.JobSerializer,
        models.AdHocCommand: serializers.AdHocCommandSerializer,

@@ -8,4 +8,5 @@ clouds:
      project_name: fooo
      username: fooo
    private: true
    region_name: fooo
    verify: false

@@ -1,283 +0,0 @@
|
||||
{
|
||||
"ansible_all_ipv4_addresses": [
|
||||
"172.17.0.7"
|
||||
],
|
||||
"ansible_all_ipv6_addresses": [
|
||||
"fe80::42:acff:fe11:7"
|
||||
],
|
||||
"ansible_architecture": "x86_64",
|
||||
"ansible_bios_date": "12/01/2006",
|
||||
"ansible_bios_version": "VirtualBox",
|
||||
"ansible_cmdline": {
|
||||
"BOOT_IMAGE": "/boot/vmlinuz64",
|
||||
"base": true,
|
||||
"console": "tty0",
|
||||
"initrd": "/boot/initrd.img",
|
||||
"loglevel": "3",
|
||||
"noembed": true,
|
||||
"nomodeset": true,
|
||||
"norestore": true,
|
||||
"user": "docker",
|
||||
"waitusb": "10:LABEL=boot2docker-data"
|
||||
},
|
||||
"ansible_date_time": {
|
||||
"date": "2016-02-02",
|
||||
"day": "02",
|
||||
"epoch": "1454424257",
|
||||
"hour": "14",
|
||||
"iso8601": "2016-02-02T14:44:17Z",
|
||||
"iso8601_basic": "20160202T144417348424",
|
||||
"iso8601_basic_short": "20160202T144417",
|
||||
"iso8601_micro": "2016-02-02T14:44:17.348496Z",
|
||||
"minute": "44",
|
||||
"month": "02",
|
||||
"second": "17",
|
||||
"time": "14:44:17",
|
||||
"tz": "UTC",
|
||||
"tz_offset": "+0000",
|
||||
"weekday": "Tuesday",
|
||||
"weekday_number": "2",
|
||||
"weeknumber": "05",
|
||||
"year": "2016"
|
||||
},
|
||||
"ansible_default_ipv4": {
|
||||
"address": "172.17.0.7",
|
||||
"alias": "eth0",
|
||||
"broadcast": "global",
|
||||
"gateway": "172.17.0.1",
|
||||
"interface": "eth0",
|
||||
"macaddress": "02:42:ac:11:00:07",
|
||||
"mtu": 1500,
|
||||
"netmask": "255.255.0.0",
|
||||
"network": "172.17.0.0",
|
||||
"type": "ether"
|
||||
},
|
||||
"ansible_default_ipv6": {},
|
||||
"ansible_devices": {
|
||||
"sda": {
|
||||
"holders": [],
|
||||
"host": "",
|
||||
"model": "VBOX HARDDISK",
|
||||
"partitions": {
|
||||
"sda1": {
|
||||
"sectors": "510015555",
|
||||
"sectorsize": 512,
|
||||
"size": "243.19 GB",
|
||||
"start": "1975995"
|
||||
},
|
||||
"sda2": {
|
||||
"sectors": "1975932",
|
||||
"sectorsize": 512,
|
||||
"size": "964.81 MB",
|
||||
"start": "63"
|
||||
}
|
||||
},
|
||||
"removable": "0",
|
||||
"rotational": "0",
|
||||
"scheduler_mode": "deadline",
|
||||
"sectors": "512000000",
|
||||
"sectorsize": "512",
|
||||
"size": "244.14 GB",
|
||||
"support_discard": "0",
|
||||
"vendor": "ATA"
|
||||
},
|
||||
"sr0": {
|
||||
"holders": [],
|
||||
"host": "",
|
||||
"model": "CD-ROM",
|
||||
"partitions": {},
|
||||
"removable": "1",
|
||||
"rotational": "1",
|
||||
"scheduler_mode": "deadline",
|
||||
"sectors": "61440",
|
||||
"sectorsize": "2048",
|
||||
"size": "120.00 MB",
|
||||
"support_discard": "0",
|
||||
"vendor": "VBOX"
|
||||
}
|
||||
},
|
||||
"ansible_distribution": "Ubuntu",
|
||||
"ansible_distribution_major_version": "14",
|
||||
"ansible_distribution_release": "trusty",
|
||||
"ansible_distribution_version": "14.04",
|
||||
"ansible_dns": {
|
||||
"nameservers": [
|
||||
"8.8.8.8"
|
||||
]
|
||||
},
|
||||
"ansible_domain": "",
|
||||
"ansible_env": {
|
||||
"HOME": "/root",
|
||||
"HOSTNAME": "ede894599989",
|
||||
"LANG": "en_US.UTF-8",
|
||||
"LC_ALL": "en_US.UTF-8",
|
||||
"LC_MESSAGES": "en_US.UTF-8",
|
||||
"LESSCLOSE": "/usr/bin/lesspipe %s %s",
|
||||
"LESSOPEN": "| /usr/bin/lesspipe %s",
|
||||
"LS_COLORS": "",
|
||||
"OLDPWD": "/ansible",
|
||||
"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"PWD": "/ansible/examples",
|
||||
"SHLVL": "1",
|
||||
"_": "/usr/local/bin/ansible",
|
||||
"container": "docker"
|
||||
},
|
||||
"ansible_eth0": {
|
||||
"active": true,
|
||||
"device": "eth0",
|
||||
"ipv4": {
|
||||
"address": "172.17.0.7",
|
||||
"broadcast": "global",
|
||||
"netmask": "255.255.0.0",
|
||||
"network": "172.17.0.0"
|
||||
},
|
||||
"ipv6": [
|
||||
{
|
||||
"address": "fe80::42:acff:fe11:7",
|
||||
"prefix": "64",
|
||||
"scope": "link"
|
||||
}
|
||||
],
|
||||
"macaddress": "02:42:ac:11:00:07",
|
||||
"mtu": 1500,
|
||||
"promisc": false,
|
||||
"type": "ether"
|
||||
},
|
||||
"ansible_fips": false,
|
||||
"ansible_form_factor": "Other",
|
||||
"ansible_fqdn": "ede894599989",
|
||||
"ansible_hostname": "ede894599989",
|
||||
"ansible_interfaces": [
|
||||
"lo",
|
||||
"eth0"
|
||||
],
|
||||
"ansible_kernel": "4.1.12-boot2docker",
|
||||
"ansible_lo": {
|
||||
"active": true,
|
||||
"device": "lo",
|
||||
"ipv4": {
|
||||
"address": "127.0.0.1",
|
||||
"broadcast": "host",
|
||||
"netmask": "255.0.0.0",
|
||||
"network": "127.0.0.0"
|
||||
},
|
||||
"ipv6": [
|
||||
{
|
||||
"address": "::1",
|
||||
"prefix": "128",
|
||||
"scope": "host"
|
||||
}
|
||||
],
|
||||
"mtu": 65536,
|
||||
"promisc": false,
|
||||
"type": "loopback"
|
||||
},
|
||||
"ansible_lsb": {
|
||||
"codename": "trusty",
|
||||
"description": "Ubuntu 14.04.3 LTS",
|
||||
"id": "Ubuntu",
|
||||
"major_release": "14",
|
||||
"release": "14.04"
|
||||
},
|
||||
"ansible_machine": "x86_64",
|
||||
"ansible_memfree_mb": 3746,
|
||||
"ansible_memory_mb": {
|
||||
"nocache": {
|
||||
"free": 8896,
|
||||
"used": 3638
|
||||
},
|
||||
"real": {
|
||||
"free": 3746,
|
||||
"total": 12534,
|
||||
"used": 8788
|
||||
},
|
||||
"swap": {
|
||||
"cached": 0,
|
||||
"free": 4048,
|
||||
"total": 4048,
|
||||
"used": 0
|
||||
}
|
||||
},
|
||||
"ansible_memtotal_mb": 12534,
|
||||
"ansible_mounts": [
|
||||
{
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"mount": "/etc/resolv.conf",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 201281392640,
|
||||
"size_total": 256895700992,
|
||||
"uuid": "NA"
|
||||
},
|
||||
{
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"mount": "/etc/hostname",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 201281392640,
|
||||
"size_total": 256895700992,
|
||||
"uuid": "NA"
|
||||
},
|
||||
{
|
||||
"device": "/dev/sda1",
|
||||
"fstype": "ext4",
|
||||
"mount": "/etc/hosts",
|
||||
"options": "rw,relatime,data=ordered",
|
||||
"size_available": 201281392640,
|
||||
"size_total": 256895700992,
|
||||
"uuid": "NA"
|
||||
}
|
||||
],
|
||||
"ansible_nodename": "ede894599989",
|
||||
"ansible_os_family": "Debian",
|
||||
"ansible_pkg_mgr": "apt",
|
||||
"ansible_processor": [
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz",
|
||||
"GenuineIntel",
|
||||
"Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz"
|
||||
],
|
||||
"ansible_processor_cores": 8,
|
||||
"ansible_processor_count": 1,
|
||||
"ansible_processor_threads_per_core": 1,
|
||||
"ansible_processor_vcpus": 8,
|
||||
"ansible_product_name": "VirtualBox",
|
||||
"ansible_product_serial": "0",
|
||||
"ansible_product_uuid": "25C5EA5A-1DF1-48D9-A2C6-81227DA153C0",
|
||||
"ansible_product_version": "1.2",
|
||||
"ansible_python_version": "2.7.6",
|
||||
"ansible_selinux": false,
|
||||
"ansible_service_mgr": "upstart",
|
||||
"ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBALF0xsM8UMXgSKiWNw4t19wxbxLnxQX742t/dIM0O8YLx+/lIP+Q69Dv5uoVt0zKV39eFziRlCh96qj2KYkGEJ6XfVZFnhpculL2Pv2CPpSwKuQ1vTbDO/xxUrvY+bHpfNJf9Rh69bFEE2pTsjomFPCgp8M0qGaFtwg6czSaeBONAAAAFQCGEfVtj97JiexTVRqgQITYlFp/eQAAAIEAg+S9qWn+AIb3amwVoLL/usQYOPCmZY9RVPzpkjJ6OG+HI4B7cXeauPtNTJwT0f9vGEqzf4mPpmS+aCShj6iwdmJ+cOwR5+SJlNalab3CMBoXKVLbT1J2XWFlK0szKKnoReP96IDbkAkGQ3fkm4jz0z6Wy0u6wOQVNcd4G5cwLZ4AAACAFvBm+H1LwNrwWBjWio+ayhglZ4Y25mLMEn2+dqBz0gLK5szEbft1HMPOWIVHvl6vi3v34pAJHKpxXpkLlNliTn8iw9BzCOrgP4V8sp2/85mxEuCdI1w/QERj9cHu5iS2pZ0cUwDE3pfuuGBB3IEliaJyaapowdrM8lN12jQl11E=",
|
||||
"ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHiYp4e9RfXpxDcEWpK4EuXPHW9++xcFI9hiB0TYAZgxEF9RIgwfucpPawFk7HIFoNc7EXQMlryilLSbg155KWM=",
|
||||
"ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAILclD2JaC654azEsAfcHRIOA2Ig9/Qk6MX80i/VCEdSH",
|
||||
"ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDeSUGxZaZsgBsezld0mj3HcbAwx6aykGnejceBjcs6lVwSGMHevofzSXIQDPYBhZoyWNl0PYAHv6AsQ8+3khd2SitUMJAuHSz1ZjgHCCGQP9ijXTKHn+lWCKA8rhLG/dwYwiouoOPZfn1G+erbKO6XiVbELrrf2RadnMGuMinESIOKVj3IunXsaGRMsDOQferOnUf7MvH7xpQnoySyQ1+p4rGruaohWG+Y2cDo7+B2FylPVbrpRDDJkfbt4J96WHx0KOdD0qzOicQP8JqDflqQPJJCWcgrvjQOSe4gXdPB6GZDtBl2qgQRwt1IgizPMm+b7Bwbd2VDe1TeWV2gT/7H",
|
||||
"ansible_swapfree_mb": 4048,
|
||||
"ansible_swaptotal_mb": 4048,
|
||||
"ansible_system": "Linux",
|
||||
"ansible_system_vendor": "innotek GmbH",
|
||||
"ansible_uptime_seconds": 178398,
|
||||
"ansible_user_dir": "/root",
|
||||
"ansible_user_gecos": "root",
|
||||
"ansible_user_gid": 0,
|
||||
"ansible_user_id": "root",
|
||||
"ansible_user_shell": "/bin/bash",
|
||||
"ansible_user_uid": 0,
|
||||
"ansible_userspace_architecture": "x86_64",
|
||||
"ansible_userspace_bits": "64",
|
||||
"ansible_virtualization_role": "guest",
|
||||
"ansible_virtualization_type": "docker",
|
||||
"module_setup": true
|
||||
}
|
||||
@@ -49,6 +49,7 @@ def isolated_instance_group(instance_group, instance):
def containerized_instance_group(instance_group, kube_credential):
ig = InstanceGroup(name="container")
ig.credential = kube_credential
ig.is_container_group = True
ig.save()
return ig

@@ -255,7 +256,7 @@ def test_instance_group_update_fields(patch, instance, instance_group, admin, co
# policy_instance_ variables can only be updated in instance groups that are NOT containerized
# instance group (not containerized)
ig_url = reverse("api:instance_group_detail", kwargs={'pk': instance_group.pk})
assert not instance_group.is_containerized
assert not instance_group.is_container_group
assert not containerized_instance_group.is_isolated
resp = patch(ig_url, {'policy_instance_percentage':15}, admin, expect=200)
assert 15 == resp.data['policy_instance_percentage']
@@ -266,7 +267,7 @@ def test_instance_group_update_fields(patch, instance, instance_group, admin, co

# containerized instance group
cg_url = reverse("api:instance_group_detail", kwargs={'pk': containerized_instance_group.pk})
assert containerized_instance_group.is_containerized
assert containerized_instance_group.is_container_group
assert not containerized_instance_group.is_isolated
resp = patch(cg_url, {'policy_instance_percentage':15}, admin, expect=400)
assert ["Containerized instances may not be managed via the API"] == resp.data['policy_instance_percentage']
@@ -287,8 +288,8 @@ def test_containerized_group_default_fields(instance_group, kube_credential):
assert ig.policy_instance_minimum == 5
assert ig.policy_instance_percentage == 5
ig.credential = kube_credential
ig.is_container_group = True
ig.save()
assert ig.policy_instance_list == []
assert ig.policy_instance_minimum == 0
assert ig.policy_instance_percentage == 0

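A condensed, illustrative version of the behaviour the hunks above exercise (a sketch only, not part of the diff: it assumes AWX's pytest fixtures such as kube_credential and the InstanceGroup model):

import pytest

from awx.main.models import InstanceGroup


@pytest.mark.django_db
def test_marking_group_as_container_group_resets_policy_fields(kube_credential):
    # Start from an ordinary instance group with non-default policy settings.
    ig = InstanceGroup.objects.create(name="container", policy_instance_minimum=5, policy_instance_percentage=5)

    # Attaching a Kubernetes credential and flagging the group as a container group
    # is expected, per the assertions above, to clear the policy_instance_* fields on save.
    ig.credential = kube_credential
    ig.is_container_group = True
    ig.save()

    assert ig.policy_instance_list == []
    assert ig.policy_instance_minimum == 0
    assert ig.policy_instance_percentage == 0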
@@ -1,6 +1,3 @@
import os

from backports.tempfile import TemporaryDirectory
import pytest

# AWX
@@ -10,7 +7,6 @@ from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplat
from awx.main.migrations import _save_password_keys as save_password_keys

# Django
from django.conf import settings
from django.apps import apps

# DRF
@@ -302,61 +298,6 @@ def test_save_survey_passwords_on_migration(job_template_with_survey_passwords):
assert job.survey_passwords == {'SSN': '$encrypted$', 'secret_key': '$encrypted$'}


@pytest.mark.django_db
@pytest.mark.parametrize('access', ["superuser", "admin", "peon"])
def test_job_template_custom_virtualenv(get, patch, organization_factory, job_template_factory, alice, access):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

user = alice
if access == "superuser":
user = objs.superusers.admin
elif access == "admin":
jt.admin_role.members.add(alice)
else:
jt.read_role.members.add(alice)

with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:job_template_detail', kwargs={'pk': jt.id})

if access == "peon":
patch(url, {'custom_virtualenv': temp_dir}, user=user, expect=403)
assert 'custom_virtualenv' not in get(url, user=user)
assert JobTemplate.objects.get(pk=jt.id).custom_virtualenv is None
else:
patch(url, {'custom_virtualenv': temp_dir}, user=user, expect=200)
assert get(url, user=user).data['custom_virtualenv'] == os.path.join(temp_dir, '')


@pytest.mark.django_db
def test_job_template_invalid_custom_virtualenv(get, patch, organization_factory,
job_template_factory):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

url = reverse('api:job_template_detail', kwargs={'pk': jt.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=objs.superusers.admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]


@pytest.mark.django_db
@pytest.mark.parametrize('value', ["", None])
def test_job_template_unset_custom_virtualenv(get, patch, organization_factory,
job_template_factory, value):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

url = reverse('api:job_template_detail', kwargs={'pk': jt.id})
resp = patch(url, {'custom_virtualenv': value}, user=objs.superusers.admin, expect=200)
assert resp.data['custom_virtualenv'] is None


@pytest.mark.django_db
def test_jt_organization_follows_project(post, patch, admin_user):
org1 = Organization.objects.create(name='foo1')

@@ -1,11 +1,6 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

# Python
import os

from backports.tempfile import TemporaryDirectory
from django.conf import settings
import pytest

# AWX
@@ -242,32 +237,6 @@ def test_delete_organization_xfail2(delete, organization):
delete(reverse('api:organization_detail', kwargs={'pk': organization.id}), user=None, expect=401)


@pytest.mark.django_db
def test_organization_custom_virtualenv(get, patch, organization, admin):
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')


@pytest.mark.django_db
def test_organization_invalid_custom_virtualenv(get, patch, organization, admin):
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]


@pytest.mark.django_db
@pytest.mark.parametrize('value', ["", None])
def test_organization_unset_custom_virtualenv(get, patch, organization, admin, value):
url = reverse('api:organization_detail', kwargs={'pk': organization.id})
resp = patch(url, {'custom_virtualenv': value}, user=admin, expect=200)
assert resp.data['custom_virtualenv'] is None


@pytest.mark.django_db
def test_organization_delete(delete, admin, organization, organization_jobs_successful):
url = reverse('api:organization_detail', kwargs={'pk': organization.id})

@@ -1,7 +1,3 @@
import os

from backports.tempfile import TemporaryDirectory
from django.conf import settings
import pytest

from awx.api.versioning import reverse
@@ -21,32 +17,6 @@ class TestInsightsCredential:
expect=400)


@pytest.mark.django_db
def test_project_custom_virtualenv(get, patch, project, admin):
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:project_detail', kwargs={'pk': project.id})
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')


@pytest.mark.django_db
def test_project_invalid_custom_virtualenv(get, patch, project, admin):
url = reverse('api:project_detail', kwargs={'pk': project.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]


@pytest.mark.django_db
@pytest.mark.parametrize('value', ["", None])
def test_project_unset_custom_virtualenv(get, patch, project, admin, value):
url = reverse('api:project_detail', kwargs={'pk': project.id})
resp = patch(url, {'custom_virtualenv': value}, user=admin, expect=200)
assert resp.data['custom_virtualenv'] is None


@pytest.mark.django_db
def test_no_changing_overwrite_behavior_if_used(post, patch, organization, admin_user):
r1 = post(

@@ -393,3 +393,43 @@ def test_saml_x509cert_validation(patch, get, admin, headers):
}
})
assert resp.status_code == 200


@pytest.mark.django_db
def test_github_settings(get, put, patch, delete, admin):
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'github'})
get(url, user=admin, expect=200)
delete(url, user=admin, expect=204)
response = get(url, user=admin, expect=200)
data = dict(response.data.items())
put(url, user=admin, data=data, expect=200)
patch(url, user=admin, data={'SOCIAL_AUTH_GITHUB_KEY': '???'}, expect=200)
response = get(url, user=admin, expect=200)
assert response.data['SOCIAL_AUTH_GITHUB_KEY'] == '???'
data.pop('SOCIAL_AUTH_GITHUB_KEY')
put(url, user=admin, data=data, expect=200)
response = get(url, user=admin, expect=200)
assert response.data['SOCIAL_AUTH_GITHUB_KEY'] == ''


@pytest.mark.django_db
def test_github_enterprise_settings(get, put, patch, delete, admin):
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'github-enterprise'})
get(url, user=admin, expect=200)
delete(url, user=admin, expect=204)
response = get(url, user=admin, expect=200)
data = dict(response.data.items())
put(url, user=admin, data=data, expect=200)
patch(url, user=admin, data={
'SOCIAL_AUTH_GITHUB_ENTERPRISE_URL': 'example.com',
'SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL': 'example.com',
}, expect=200)
response = get(url, user=admin, expect=200)
assert response.data['SOCIAL_AUTH_GITHUB_ENTERPRISE_URL'] == 'example.com'
assert response.data['SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL'] == 'example.com'
data.pop('SOCIAL_AUTH_GITHUB_ENTERPRISE_URL')
data.pop('SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL')
put(url, user=admin, data=data, expect=200)
response = get(url, user=admin, expect=200)
assert response.data['SOCIAL_AUTH_GITHUB_ENTERPRISE_URL'] == ''
assert response.data['SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL'] == ''

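Outside the test suite, the same singleton settings endpoint can be driven over HTTP. A hedged illustration only: the URL shape is inferred from the api:setting_singleton_detail route used above, and the host, credentials and key value are placeholders that have not been verified against a live server.

import requests

AWX_URL = "https://awx.example.com"  # placeholder host

# PATCH a single key in the 'github' settings category, mirroring the test above.
resp = requests.patch(
    f"{AWX_URL}/api/v2/settings/github/",
    auth=("admin", "password"),  # basic auth used purely for illustration
    json={"SOCIAL_AUTH_GITHUB_KEY": "my-oauth2-client-id"},
)
resp.raise_for_status()
print(resp.json().get("SOCIAL_AUTH_GITHUB_KEY"))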
@@ -52,6 +52,7 @@ from awx.main.models.events import (
from awx.main.models.workflow import WorkflowJobTemplate
from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.main.models.oauth import OAuth2Application as Application
from awx.main.models.execution_environments import ExecutionEnvironment

__SWAGGER_REQUESTS__ = {}

@@ -850,3 +851,8 @@ def slice_job_factory(slice_jt_factory):
node.save()
return slice_job
return r


@pytest.fixture
def execution_environment(organization):
return ExecutionEnvironment.objects.create(name="test-ee", description="test-ee", organization=organization)

@@ -6,7 +6,7 @@ import pytest

#from awx.main.models import NotificationTemplates, Notifications, JobNotificationMixin
from awx.main.models import (AdHocCommand, InventoryUpdate, Job, JobNotificationMixin, ProjectUpdate,
SystemJob, WorkflowJob)
Schedule, SystemJob, WorkflowJob)
from awx.api.serializers import UnifiedJobSerializer


@@ -72,6 +72,10 @@ class TestJobNotificationMixin(object):
'name': str,
'scm_type': str,
'status': str},
'schedule': {'description': str,
'id': int,
'name': str,
'next_run': datetime.datetime},
'unified_job_template': {'description': str,
'id': int,
'name': str,
@@ -89,27 +93,27 @@ class TestJobNotificationMixin(object):
'workflow_url': str,
'url': str}

def check_structure(self, expected_structure, obj):
if isinstance(expected_structure, dict):
assert isinstance(obj, dict)
for key in obj:
assert key in expected_structure
if obj[key] is None:
continue
if isinstance(expected_structure[key], dict):
assert isinstance(obj[key], dict)
self.check_structure(expected_structure[key], obj[key])
else:
if key == 'job_explanation':
assert isinstance(str(obj[key]), expected_structure[key])
else:
assert isinstance(obj[key], expected_structure[key])

@pytest.mark.django_db
@pytest.mark.parametrize('JobClass', [AdHocCommand, InventoryUpdate, Job, ProjectUpdate, SystemJob, WorkflowJob])
def test_context(self, JobClass, sqlite_copy_expert, project, inventory_source):
"""The Jinja context defines all of the fields that can be used by a template. Ensure that the context generated
for each job type has the expected structure."""
def check_structure(expected_structure, obj):
if isinstance(expected_structure, dict):
assert isinstance(obj, dict)
for key in obj:
assert key in expected_structure
if obj[key] is None:
continue
if isinstance(expected_structure[key], dict):
assert isinstance(obj[key], dict)
check_structure(expected_structure[key], obj[key])
else:
if key == 'job_explanation':
assert isinstance(str(obj[key]), expected_structure[key])
else:
assert isinstance(obj[key], expected_structure[key])
kwargs = {}
if JobClass is InventoryUpdate:
kwargs['inventory_source'] = inventory_source
@@ -121,8 +125,26 @@ class TestJobNotificationMixin(object):
job_serialization = UnifiedJobSerializer(job).to_representation(job)

context = job.context(job_serialization)
check_structure(TestJobNotificationMixin.CONTEXT_STRUCTURE, context)
self.check_structure(TestJobNotificationMixin.CONTEXT_STRUCTURE, context)

@pytest.mark.django_db
def test_schedule_context(self, job_template, admin_user):
schedule = Schedule.objects.create(
name='job-schedule',
rrule='DTSTART:20171129T155939z\nFREQ=MONTHLY',
unified_job_template=job_template
)
job = Job.objects.create(
name='fake-job',
launch_type='workflow',
schedule=schedule,
job_template=job_template
)

job_serialization = UnifiedJobSerializer(job).to_representation(job)

context = job.context(job_serialization)
self.check_structure(TestJobNotificationMixin.CONTEXT_STRUCTURE, context)

@pytest.mark.django_db
def test_context_job_metadata_with_unicode(self):

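The recursive structure check above is easy to read in isolation. A standalone sketch of the same idea (names shortened, nothing AWX-specific, runnable as-is):

import datetime


def check_structure(expected, obj):
    """Assert obj only uses keys from expected and that each non-None value matches
    the expected type; nested dicts are checked recursively."""
    if isinstance(expected, dict):
        assert isinstance(obj, dict)
        for key, value in obj.items():
            assert key in expected
            if value is None:
                continue
            if isinstance(expected[key], dict):
                check_structure(expected[key], value)
            else:
                assert isinstance(value, expected[key])


check_structure(
    {'schedule': {'id': int, 'name': str, 'next_run': datetime.datetime}},
    {'schedule': {'id': 1, 'name': 'job-schedule', 'next_run': None}},
)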
@@ -154,6 +154,7 @@ class TestMetaVars:
assert data['awx_user_id'] == admin_user.id
assert data['awx_user_name'] == admin_user.username
assert data['awx_workflow_job_id'] == workflow_job.pk
assert data['awx_workflow_job_launch_type'] == workflow_job.launch_type

def test_scheduled_job_metavars(self, job_template, admin_user):
schedule = Schedule.objects.create(
@@ -197,6 +198,8 @@ class TestMetaVars:
'tower_workflow_job_name': 'workflow-job',
'awx_workflow_job_id': workflow_job.id,
'tower_workflow_job_id': workflow_job.id,
'awx_workflow_job_launch_type': workflow_job.launch_type,
'tower_workflow_job_launch_type': workflow_job.launch_type,
'awx_parent_job_schedule_id': schedule.id,
'tower_parent_job_schedule_id': schedule.id,
'awx_parent_job_schedule_name': 'job-schedule',

File diff suppressed because it is too large
@@ -1,697 +0,0 @@
|
||||
[
|
||||
{
|
||||
"source": "sysv",
|
||||
"state": "running",
|
||||
"name": "iprdump"
|
||||
},
|
||||
{
|
||||
"source": "sysv",
|
||||
"state": "running",
|
||||
"name": "iprinit"
|
||||
},
|
||||
{
|
||||
"source": "sysv",
|
||||
"state": "running",
|
||||
"name": "iprupdate"
|
||||
},
|
||||
{
|
||||
"source": "sysv",
|
||||
"state": "stopped",
|
||||
"name": "netconsole"
|
||||
},
|
||||
{
|
||||
"source": "sysv",
|
||||
"state": "running",
|
||||
"name": "network"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "arp-ethers.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "auditd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "autovt@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "avahi-daemon.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "blk-availability.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "brandbot.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "console-getty.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "console-shell.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "cpupower.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "crond.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "dbus-org.fedoraproject.FirewallD1.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "dbus-org.freedesktop.Avahi.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dbus-org.freedesktop.hostname1.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dbus-org.freedesktop.locale1.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dbus-org.freedesktop.login1.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dbus-org.freedesktop.machine1.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "dbus-org.freedesktop.NetworkManager.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "dbus-org.freedesktop.nm-dispatcher.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dbus-org.freedesktop.timedate1.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dbus.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "debug-shell.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "dhcpd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dhcpd6.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dhcrelay.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dm-event.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dnsmasq.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-cmdline.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-initqueue.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-mount.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-pre-mount.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-pre-pivot.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-pre-trigger.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-pre-udev.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "dracut-shutdown.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "ebtables.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "emergency.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "firewalld.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "getty@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "halt-local.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "initrd-cleanup.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "initrd-parse-etc.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "initrd-switch-root.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "initrd-udevadm-cleanup-db.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "irqbalance.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "kdump.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "kmod-static-nodes.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "lvm2-lvmetad.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "lvm2-monitor.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "lvm2-pvscan@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "messagebus.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "microcode.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "named-setup-rndc.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "named.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "NetworkManager-dispatcher.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "NetworkManager-wait-online.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "NetworkManager.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "ntpd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "ntpdate.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "openvpn@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-halt.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-kexec.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-poweroff.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-quit-wait.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-quit.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-read-write.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-reboot.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-start.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "plymouth-switch-root.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "polkit.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "postfix.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "quotaon.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rc-local.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rdisc.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rescue.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-autorelabel-mark.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-autorelabel.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-configure.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-dmesg.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-domainname.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-import-state.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-loadmodules.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "rhel-readonly.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "rsyslog.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "serial-getty@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "sshd-keygen.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "sshd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "sshd@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-ask-password-console.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-ask-password-plymouth.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-ask-password-wall.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-backlight@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-binfmt.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-fsck-root.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-fsck@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-halt.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-hibernate.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-hostnamed.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-hybrid-sleep.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-initctl.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-journal-flush.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-journald.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-kexec.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-localed.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-logind.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-machined.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-modules-load.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-nspawn@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-poweroff.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-quotacheck.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-random-seed.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "systemd-readahead-collect.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-readahead-done.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "systemd-readahead-drop.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "systemd-readahead-replay.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-reboot.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-remount-fs.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-shutdownd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-suspend.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-sysctl.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-timedated.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-tmpfiles-clean.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-tmpfiles-setup-dev.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-tmpfiles-setup.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-udev-settle.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-udev-trigger.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-udevd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-update-utmp-runlevel.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-update-utmp.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-user-sessions.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "systemd-vconsole-setup.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "teamd@.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "tuned.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "running",
|
||||
"name": "vmtoolsd.service"
|
||||
},
|
||||
{
|
||||
"source": "systemd",
|
||||
"state": "stopped",
|
||||
"name": "wpa_supplicant.service"
|
||||
}
|
||||
]
|
||||
@@ -13,6 +13,7 @@ from awx.main.utils import (
@pytest.fixture
def containerized_job(default_instance_group, kube_credential, job_template_factory):
default_instance_group.credential = kube_credential
default_instance_group.is_container_group = True
default_instance_group.save()
objects = job_template_factory('jt', organization='org1', project='proj',
inventory='inv', credential='cred',
@@ -29,8 +30,8 @@ def containerized_job(default_instance_group, kube_credential, job_template_fact

@pytest.mark.django_db
def test_containerized_job(containerized_job):
assert containerized_job.is_containerized
assert containerized_job.instance_group.is_containerized
assert containerized_job.is_container_group_task
assert containerized_job.instance_group.is_container_group
assert containerized_job.instance_group.credential.kubernetes



@@ -348,11 +348,11 @@ def test_job_not_blocking_project_update(default_instance_group, job_template_fa
project_update.instance_group = default_instance_group
project_update.status = "pending"
project_update.save()
assert not task_manager.is_job_blocked(project_update)
assert not task_manager.job_blocked_by(project_update)

dependency_graph = DependencyGraph(None)
dependency_graph = DependencyGraph()
dependency_graph.add_job(job)
assert not dependency_graph.is_job_blocked(project_update)
assert not dependency_graph.task_blocked_by(project_update)


@pytest.mark.django_db
@@ -378,11 +378,11 @@ def test_job_not_blocking_inventory_update(default_instance_group, job_template_
inventory_update.status = "pending"
inventory_update.save()

assert not task_manager.is_job_blocked(inventory_update)
assert not task_manager.job_blocked_by(inventory_update)

dependency_graph = DependencyGraph(None)
dependency_graph = DependencyGraph()
dependency_graph.add_job(job)
assert not dependency_graph.is_job_blocked(inventory_update)
assert not dependency_graph.task_blocked_by(inventory_update)


@pytest.mark.django_db

@@ -79,6 +79,7 @@ def test_default_cred_types():
'aws',
'azure_kv',
'azure_rm',
'centrify_vault_kv',
'conjur',
'galaxy_api_token',
'gce',
@@ -90,6 +91,7 @@ def test_default_cred_types():
'kubernetes_bearer_token',
'net',
'openstack',
'registry',
'rhv',
'satellite6',
'scm',

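The two task-manager hunks above show the scheduler's reshaped API: DependencyGraph is constructed without an argument, and job_blocked_by()/task_blocked_by() replace is_job_blocked(). A minimal sketch of that usage only; the import path and the job/project_update fixtures are assumptions, not taken from the diff:

import pytest

from awx.main.scheduler.dependency_graph import DependencyGraph  # import path assumed


@pytest.mark.django_db
def test_pending_project_update_not_blocked(job, project_update):  # fixtures assumed to exist
    graph = DependencyGraph()  # no longer takes a queue argument
    graph.add_job(job)
    # task_blocked_by() replaces is_job_blocked() and is falsy when nothing blocks the task.
    assert not graph.task_blocked_by(project_update)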
awx/main/tests/functional/test_execution_environments.py (new file, 19 lines)
@@ -0,0 +1,19 @@
import pytest

from awx.main.models import (ExecutionEnvironment)


@pytest.mark.django_db
def test_execution_environment_creation(execution_environment, organization):
execution_env = ExecutionEnvironment.objects.create(
name='Hello Environment',
image='',
organization=organization,
managed_by_tower=False,
credential=None,
pull='missing'
)
assert type(execution_env) is type(execution_environment)
assert execution_env.organization == organization
assert execution_env.name == 'Hello Environment'
assert execution_env.pull == 'missing'
@@ -6,7 +6,7 @@ import re
from collections import namedtuple

from awx.main.tasks import RunInventoryUpdate
from awx.main.models import InventorySource, Credential, CredentialType, UnifiedJob
from awx.main.models import InventorySource, Credential, CredentialType, UnifiedJob, ExecutionEnvironment
from awx.main.constants import CLOUD_PROVIDERS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.tests import data

@@ -110,7 +110,8 @@ def read_content(private_data_dir, raw_env, inventory_update):
continue # Ansible runner
abs_file_path = os.path.join(private_data_dir, filename)
file_aliases[abs_file_path] = filename
if abs_file_path in inverse_env:
runner_path = os.path.join('/runner', os.path.basename(abs_file_path))
if runner_path in inverse_env:
referenced_paths.add(abs_file_path)
alias = 'file_reference'
for i in range(10):
@@ -121,7 +122,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
raise RuntimeError('Test not able to cope with >10 references by env vars. '
'Something probably went very wrong.')
file_aliases[abs_file_path] = alias
for env_key in inverse_env[abs_file_path]:
for env_key in inverse_env[runner_path]:
env[env_key] = '{{{{ {} }}}}'.format(alias)
try:
with open(abs_file_path, 'r') as f:
@@ -182,6 +183,8 @@ def create_reference_data(source_dir, env, content):
@pytest.mark.django_db
@pytest.mark.parametrize('this_kind', CLOUD_PROVIDERS)
def test_inventory_update_injected_content(this_kind, inventory, fake_credential_factory):
ExecutionEnvironment.objects.create(name='test EE', managed_by_tower=True)

injector = InventorySource.injectors[this_kind]
if injector.plugin_name is None:
pytest.skip('Use of inventory plugin is not enabled for this source')
@@ -197,12 +200,14 @@ def test_inventory_update_injected_content(this_kind, inventory, fake_credential
inventory_update = inventory_source.create_unified_job()
task = RunInventoryUpdate()

def substitute_run(envvars=None, **_kw):
def substitute_run(awx_receptor_job):
"""This method will replace run_pexpect
instead of running, it will read the private data directory contents
It will make assertions that the contents are correct
If MAKE_INVENTORY_REFERENCE_FILES is set, it will produce reference files
"""
envvars = awx_receptor_job.runner_params['envvars']

private_data_dir = envvars.pop('AWX_PRIVATE_DATA_DIR')
assert envvars.pop('ANSIBLE_INVENTORY_ENABLED') == 'auto'
set_files = bool(os.getenv("MAKE_INVENTORY_REFERENCE_FILES", 'false').lower()[0] not in ['f', '0'])
@@ -214,9 +219,6 @@ def test_inventory_update_injected_content(this_kind, inventory, fake_credential
f"'{inventory_filename}' file not found in inventory update runtime files {content.keys()}"

env.pop('ANSIBLE_COLLECTIONS_PATHS', None) # collection paths not relevant to this test
env.pop('PYTHONPATH')
env.pop('VIRTUAL_ENV')
env.pop('PROOT_TMP_DIR')
base_dir = os.path.join(DATA, 'plugins')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
@@ -256,6 +258,6 @@ def test_inventory_update_injected_content(this_kind, inventory, fake_credential
# Also do not send websocket status updates
with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
# The point of this test is that we replace run with assertions
with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
with mock.patch('awx.main.tasks.AWXReceptorJob.run', substitute_run):
# so this sets up everything for a run and then yields control over to substitute_run
task.run(inventory_update.pk)

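The hunk above swaps the real AWXReceptorJob.run call for substitute_run, which inspects the prepared private data directory instead of executing anything. The same pattern in a self-contained form (no AWX imports; every name below is illustrative, not taken from the diff):

from unittest import mock


class Launcher:
    """Stand-in for the object whose run() would normally start real work."""

    def run(self, params):
        raise RuntimeError("the real implementation should never run in this test")


def substitute_run(self, params):
    # Instead of launching anything, make assertions about what the caller prepared.
    assert params["envvars"]["ANSIBLE_INVENTORY_ENABLED"] == "auto"
    return "successful"


with mock.patch.object(Launcher, "run", substitute_run):
    result = Launcher().run({"envvars": {"ANSIBLE_INVENTORY_ENABLED": "auto"}})
    assert result == "successful"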
@@ -49,7 +49,7 @@ def test_python_and_js_licenses():

def read_api_requirements(path):
ret = {}
for req_file in ['requirements.txt', 'requirements_ansible.txt', 'requirements_git.txt', 'requirements_ansible_git.txt']:
for req_file in ['requirements.txt', 'requirements_git.txt']:
fname = '%s/%s' % (path, req_file)

for reqt in parse_requirements(fname, session=''):

@@ -40,7 +40,7 @@ def project_update(mocker):
@pytest.fixture
def job(mocker, job_template, project_update):
return mocker.MagicMock(pk=5, job_template=job_template, project_update=project_update,
workflow_job_id=None)
workflow_job_id=None, execution_environment_id=None)


@pytest.fixture

@@ -35,16 +35,17 @@ data_loggly = {
|
||||
# Test reconfigure logging settings function
|
||||
# name this whatever you want
|
||||
@pytest.mark.parametrize(
|
||||
'enabled, log_type, host, port, protocol, expected_config', [
|
||||
'enabled, log_type, host, port, protocol, errorfile, expected_config', [
|
||||
(
|
||||
True,
|
||||
'loggly',
|
||||
'http://logs-01.loggly.com/inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/',
|
||||
None,
|
||||
'https',
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
|
||||
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -53,6 +54,7 @@ data_loggly = {
|
||||
'localhost',
|
||||
9000,
|
||||
'udp',
|
||||
'', # empty errorfile
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
|
||||
@@ -64,6 +66,7 @@ data_loggly = {
|
||||
'localhost',
|
||||
9000,
|
||||
'tcp',
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
|
||||
@@ -75,9 +78,10 @@ data_loggly = {
|
||||
'https://yoursplunk/services/collector/event',
|
||||
None,
|
||||
None,
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -86,9 +90,10 @@ data_loggly = {
|
||||
'http://yoursplunk/services/collector/event',
|
||||
None,
|
||||
None,
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -97,9 +102,10 @@ data_loggly = {
|
||||
'https://yoursplunk:8088/services/collector/event',
|
||||
None,
|
||||
None,
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -108,9 +114,10 @@ data_loggly = {
|
||||
'https://yoursplunk/services/collector/event',
|
||||
8088,
|
||||
None,
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -119,9 +126,10 @@ data_loggly = {
|
||||
'yoursplunk.org/services/collector/event',
|
||||
8088,
|
||||
'https',
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -130,9 +138,10 @@ data_loggly = {
|
||||
'http://yoursplunk.org/services/collector/event',
|
||||
8088,
|
||||
None,
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
@@ -141,14 +150,15 @@ data_loggly = {
|
||||
'https://endpoint5.collection.us2.sumologic.com/receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==', # noqa
|
||||
None,
|
||||
'https',
|
||||
'/var/log/tower/rsyslog.err',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
|
||||
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
|
||||
])
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_rsyslog_conf_template(enabled, log_type, host, port, protocol, expected_config):
|
||||
def test_rsyslog_conf_template(enabled, log_type, host, port, protocol, errorfile, expected_config):
|
||||
|
||||
mock_settings, _ = _mock_logging_defaults()
|
||||
|
||||
@@ -159,6 +169,7 @@ def test_rsyslog_conf_template(enabled, log_type, host, port, protocol, expected
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_ENABLED', enabled)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_TYPE', log_type)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_HOST', host)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_RSYSLOGD_ERROR_LOG_FILE', errorfile)
|
||||
if port:
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_PORT', port)
|
||||
if protocol:
|
||||
|
||||
@@ -11,7 +11,7 @@ class FakeObject(object):
|
||||
|
||||
class Job(FakeObject):
|
||||
task_impact = 43
|
||||
is_containerized = False
|
||||
is_container_group_task = False
|
||||
|
||||
def log_format(self):
|
||||
return 'job 382 (fake)'
|
||||
|
||||
@@ -6,7 +6,6 @@ import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from backports.tempfile import TemporaryDirectory
|
||||
import fcntl
|
||||
from unittest import mock
|
||||
import pytest
|
||||
@@ -19,6 +18,7 @@ from awx.main.models import (
|
||||
AdHocCommand,
|
||||
Credential,
|
||||
CredentialType,
|
||||
ExecutionEnvironment,
|
||||
Inventory,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
@@ -249,7 +249,7 @@ def test_openstack_client_config_generation_with_project_domain_name(mocker, sou
|
||||
@pytest.mark.parametrize("source,expected", [
|
||||
(None, True), (False, False), (True, True)
|
||||
])
|
||||
def test_openstack_client_config_generation_with_project_region_name(mocker, source, expected, private_data_dir):
|
||||
def test_openstack_client_config_generation_with_region(mocker, source, expected, private_data_dir):
|
||||
update = tasks.RunInventoryUpdate()
|
||||
credential_type = CredentialType.defaults['openstack']()
|
||||
inputs = {
|
||||
@@ -259,7 +259,7 @@ def test_openstack_client_config_generation_with_project_region_name(mocker, sou
|
||||
'project': 'demo-project',
|
||||
'domain': 'my-demo-domain',
|
||||
'project_domain_name': 'project-domain',
|
||||
'project_region_name': 'region-name',
|
||||
'region': 'region-name',
|
||||
}
|
||||
if source is not None:
|
||||
inputs['verify_ssl'] = source
|
||||
@@ -347,11 +347,12 @@ def pytest_generate_tests(metafunc):
|
||||
)
|
||||
|
||||
|
||||
def parse_extra_vars(args):
|
||||
def parse_extra_vars(args, private_data_dir):
|
||||
extra_vars = {}
|
||||
for chunk in args:
|
||||
if chunk.startswith('@/tmp/'):
|
||||
with open(chunk.strip('@'), 'r') as f:
|
||||
if chunk.startswith('@/runner/'):
|
||||
local_path = os.path.join(private_data_dir, os.path.basename(chunk.strip('@')))
|
||||
with open(local_path, 'r') as f:
|
||||
extra_vars.update(yaml.load(f, Loader=SafeLoader))
|
||||
return extra_vars
|
||||
|
||||
@@ -527,7 +528,7 @@ class TestGenericRun():
|
||||
task.instance = Job(pk=1, id=1)
|
||||
task.event_ct = 17
|
||||
task.finished_callback(None)
|
||||
task.dispatcher.dispatch.assert_called_with({'event': 'EOF', 'final_counter': 17, 'job_id': 1})
|
||||
task.dispatcher.dispatch.assert_called_with({'event': 'EOF', 'final_counter': 17, 'job_id': 1, 'guid': None})
|
||||
|
||||
def test_save_job_metadata(self, job, update_model_wrapper):
|
||||
class MockMe():
|
||||
@@ -546,44 +547,6 @@ class TestGenericRun():
|
||||
job_cwd='/foobar', job_env={'switch': 'blade', 'foot': 'ball', 'secret_key': 'redacted_value'})
|
||||
|
||||
|
||||
def test_uses_process_isolation(self, settings):
|
||||
job = Job(project=Project(), inventory=Inventory())
|
||||
task = tasks.RunJob()
|
||||
task.should_use_proot = lambda instance: True
|
||||
task.instance = job
|
||||
|
||||
private_data_dir = '/foo'
|
||||
cwd = '/bar'
|
||||
|
||||
settings.AWX_PROOT_HIDE_PATHS = ['/AWX_PROOT_HIDE_PATHS1', '/AWX_PROOT_HIDE_PATHS2']
|
||||
settings.ANSIBLE_VENV_PATH = '/ANSIBLE_VENV_PATH'
|
||||
settings.AWX_VENV_PATH = '/AWX_VENV_PATH'
|
||||
|
||||
process_isolation_params = task.build_params_process_isolation(job, private_data_dir, cwd)
|
||||
assert True is process_isolation_params['process_isolation']
|
||||
assert process_isolation_params['process_isolation_path'].startswith(settings.AWX_PROOT_BASE_PATH), \
|
||||
"Directory where a temp directory will be created for the remapping to take place"
|
||||
assert private_data_dir in process_isolation_params['process_isolation_show_paths'], \
|
||||
"The per-job private data dir should be in the list of directories the user can see."
|
||||
assert cwd in process_isolation_params['process_isolation_show_paths'], \
|
||||
"The current working directory should be in the list of directories the user can see."
|
||||
|
||||
for p in [settings.AWX_PROOT_BASE_PATH,
|
||||
'/etc/tower',
|
||||
'/etc/ssh',
|
||||
'/var/lib/awx',
|
||||
'/var/log',
|
||||
settings.PROJECTS_ROOT,
|
||||
settings.JOBOUTPUT_ROOT,
|
||||
'/AWX_PROOT_HIDE_PATHS1',
|
||||
'/AWX_PROOT_HIDE_PATHS2']:
|
||||
assert p in process_isolation_params['process_isolation_hide_paths']
|
||||
assert 9 == len(process_isolation_params['process_isolation_hide_paths'])
|
||||
assert '/ANSIBLE_VENV_PATH' in process_isolation_params['process_isolation_ro_paths']
|
||||
assert '/AWX_VENV_PATH' in process_isolation_params['process_isolation_ro_paths']
|
||||
assert 2 == len(process_isolation_params['process_isolation_ro_paths'])
|
||||
|
||||
|
||||
@mock.patch('os.makedirs')
|
||||
def test_build_params_resource_profiling(self, os_makedirs):
|
||||
job = Job(project=Project(), inventory=Inventory())
|
||||
@@ -597,7 +560,7 @@ class TestGenericRun():
|
||||
assert resource_profiling_params['resource_profiling_cpu_poll_interval'] == '0.25'
|
||||
assert resource_profiling_params['resource_profiling_memory_poll_interval'] == '0.25'
|
||||
assert resource_profiling_params['resource_profiling_pid_poll_interval'] == '0.25'
|
||||
assert resource_profiling_params['resource_profiling_results_dir'] == '/fake_private_data_dir/artifacts/playbook_profiling'
|
||||
assert resource_profiling_params['resource_profiling_results_dir'] == '/runner/artifacts/playbook_profiling'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("scenario, profiling_enabled", [
|
||||
@@ -656,34 +619,13 @@ class TestGenericRun():
|
||||
env = task.build_env(job, private_data_dir)
|
||||
assert env['FOO'] == 'BAR'
|
||||
|
||||
def test_valid_custom_virtualenv(self, patch_Job, private_data_dir):
|
||||
job = Job(project=Project(), inventory=Inventory())
|
||||
|
||||
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as tempdir:
|
||||
job.project.custom_virtualenv = tempdir
|
||||
os.makedirs(os.path.join(tempdir, 'lib'))
|
||||
os.makedirs(os.path.join(tempdir, 'bin', 'activate'))
|
||||
|
||||
task = tasks.RunJob()
|
||||
env = task.build_env(job, private_data_dir)
|
||||
|
||||
assert env['PATH'].startswith(os.path.join(tempdir, 'bin'))
|
||||
assert env['VIRTUAL_ENV'] == tempdir
|
||||
|
||||
def test_invalid_custom_virtualenv(self, patch_Job, private_data_dir):
|
||||
job = Job(project=Project(), inventory=Inventory())
|
||||
job.project.custom_virtualenv = '/var/lib/awx/venv/missing'
|
||||
task = tasks.RunJob()
|
||||
|
||||
with pytest.raises(tasks.InvalidVirtualenvError) as e:
|
||||
task.build_env(job, private_data_dir)
|
||||
|
||||
assert 'Invalid virtual environment selected: /var/lib/awx/venv/missing' == str(e.value)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestAdhocRun(TestJobExecution):
|
||||
|
||||
def test_options_jinja_usage(self, adhoc_job, adhoc_update_model_wrapper):
|
||||
ExecutionEnvironment.objects.create(name='test EE', managed_by_tower=True)
|
||||
|
||||
adhoc_job.module_args = '{{ ansible_ssh_pass }}'
|
||||
adhoc_job.websocket_emit_status = mock.Mock()
|
||||
adhoc_job.send_notification_templates = mock.Mock()
|
||||
@@ -1203,7 +1145,9 @@ class TestJobCredentials(TestJobExecution):
credential.credential_type.inject_credential(
credential, env, safe_env, [], private_data_dir
)
json_data = json.load(open(env['GCE_CREDENTIALS_FILE_PATH'], 'rb'))
runner_path = env['GCE_CREDENTIALS_FILE_PATH']
local_path = os.path.join(private_data_dir, os.path.basename(runner_path))
json_data = json.load(open(local_path, 'rb'))
assert json_data['type'] == 'service_account'
assert json_data['private_key'] == self.EXAMPLE_PRIVATE_KEY
assert json_data['client_email'] == 'bob'
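A recurring edit in this file: the credential injectors write files for ansible-runner, so the paths stored in env refer to locations inside the execution container (e.g. /runner/...), and the tests now rebuild the host-side location from private_data_dir before opening the file. The pattern, pulled out as a tiny helper purely for illustration (the tests inline the expression; no such helper exists in AWX):

import os

def to_host_path(runner_path, private_data_dir):
    # Hypothetical helper: only the basename of the in-container path is
    # meaningful on the host, where the file lives under private_data_dir.
    return os.path.join(private_data_dir, os.path.basename(runner_path))

# e.g. to_host_path('/runner/env/gce_credentials', '/tmp/awx_123_abc')
#      -> '/tmp/awx_123_abc/gce_credentials'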
@@ -1306,7 +1250,11 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential, env, {}, [], private_data_dir
|
||||
)
|
||||
|
||||
shade_config = open(env['OS_CLIENT_CONFIG_FILE'], 'r').read()
|
||||
# convert container path to host machine path
|
||||
config_loc = os.path.join(
|
||||
private_data_dir, os.path.basename(env['OS_CLIENT_CONFIG_FILE'])
|
||||
)
|
||||
shade_config = open(config_loc, 'r').read()
|
||||
assert shade_config == '\n'.join([
|
||||
'clouds:',
|
||||
' devstack:',
|
||||
@@ -1344,7 +1292,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
)
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['OVIRT_INI_PATH'])
|
||||
config.read(os.path.join(private_data_dir, os.path.basename(env['OVIRT_INI_PATH'])))
|
||||
assert config.get('ovirt', 'ovirt_url') == 'some-ovirt-host.example.org'
|
||||
assert config.get('ovirt', 'ovirt_username') == 'bob'
|
||||
assert config.get('ovirt', 'ovirt_password') == 'some-pass'
|
||||
@@ -1577,7 +1525,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential.credential_type.inject_credential(
|
||||
credential, {}, {}, args, private_data_dir
|
||||
)
|
||||
extra_vars = parse_extra_vars(args)
|
||||
extra_vars = parse_extra_vars(args, private_data_dir)
|
||||
|
||||
assert extra_vars["api_token"] == "ABC123"
|
||||
assert hasattr(extra_vars["api_token"], '__UNSAFE__')
|
||||
@@ -1612,7 +1560,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential.credential_type.inject_credential(
|
||||
credential, {}, {}, args, private_data_dir
|
||||
)
|
||||
extra_vars = parse_extra_vars(args)
|
||||
extra_vars = parse_extra_vars(args, private_data_dir)
|
||||
|
||||
assert extra_vars["turbo_button"] == "True"
|
||||
return ['successful', 0]
|
||||
@@ -1647,7 +1595,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential.credential_type.inject_credential(
|
||||
credential, {}, {}, args, private_data_dir
|
||||
)
|
||||
extra_vars = parse_extra_vars(args)
|
||||
extra_vars = parse_extra_vars(args, private_data_dir)
|
||||
|
||||
assert extra_vars["turbo_button"] == "FAST!"
|
||||
|
||||
@@ -1687,7 +1635,7 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential, {}, {}, args, private_data_dir
|
||||
)
|
||||
|
||||
extra_vars = parse_extra_vars(args)
|
||||
extra_vars = parse_extra_vars(args, private_data_dir)
|
||||
assert extra_vars["password"] == "SUPER-SECRET-123"
|
||||
|
||||
def test_custom_environment_injectors_with_file(self, private_data_dir):
|
||||
@@ -1722,7 +1670,8 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential, env, {}, [], private_data_dir
|
||||
)
|
||||
|
||||
assert open(env['MY_CLOUD_INI_FILE'], 'r').read() == '[mycloud]\nABC123'
|
||||
path = os.path.join(private_data_dir, os.path.basename(env['MY_CLOUD_INI_FILE']))
|
||||
assert open(path, 'r').read() == '[mycloud]\nABC123'
|
||||
|
||||
def test_custom_environment_injectors_with_unicode_content(self, private_data_dir):
|
||||
value = 'Iñtërnâtiônàlizætiøn'
|
||||
@@ -1746,7 +1695,8 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential, env, {}, [], private_data_dir
|
||||
)
|
||||
|
||||
assert open(env['MY_CLOUD_INI_FILE'], 'r').read() == value
|
||||
path = os.path.join(private_data_dir, os.path.basename(env['MY_CLOUD_INI_FILE']))
|
||||
assert open(path, 'r').read() == value
|
||||
|
||||
def test_custom_environment_injectors_with_files(self, private_data_dir):
|
||||
some_cloud = CredentialType(
|
||||
@@ -1786,8 +1736,10 @@ class TestJobCredentials(TestJobExecution):
|
||||
credential, env, {}, [], private_data_dir
|
||||
)
|
||||
|
||||
assert open(env['MY_CERT_INI_FILE'], 'r').read() == '[mycert]\nCERT123'
|
||||
assert open(env['MY_KEY_INI_FILE'], 'r').read() == '[mykey]\nKEY123'
|
||||
cert_path = os.path.join(private_data_dir, os.path.basename(env['MY_CERT_INI_FILE']))
|
||||
key_path = os.path.join(private_data_dir, os.path.basename(env['MY_KEY_INI_FILE']))
|
||||
assert open(cert_path, 'r').read() == '[mycert]\nCERT123'
|
||||
assert open(key_path, 'r').read() == '[mykey]\nKEY123'
|
||||
|
||||
def test_multi_cloud(self, private_data_dir):
|
||||
gce = CredentialType.defaults['gce']()
|
||||
@@ -1826,7 +1778,8 @@ class TestJobCredentials(TestJobExecution):
|
||||
assert env['AZURE_AD_USER'] == 'bob'
|
||||
assert env['AZURE_PASSWORD'] == 'secret'
|
||||
|
||||
json_data = json.load(open(env['GCE_CREDENTIALS_FILE_PATH'], 'rb'))
|
||||
path = os.path.join(private_data_dir, os.path.basename(env['GCE_CREDENTIALS_FILE_PATH']))
|
||||
json_data = json.load(open(path, 'rb'))
|
||||
assert json_data['type'] == 'service_account'
|
||||
assert json_data['private_key'] == self.EXAMPLE_PRIVATE_KEY
|
||||
assert json_data['client_email'] == 'bob'
|
||||
@@ -1971,29 +1924,6 @@ class TestProjectUpdateCredentials(TestJobExecution):
|
||||
]
|
||||
}
|
||||
|
||||
def test_process_isolation_exposes_projects_root(self, private_data_dir, project_update):
|
||||
task = tasks.RunProjectUpdate()
|
||||
task.revision_path = 'foobar'
|
||||
task.instance = project_update
|
||||
ssh = CredentialType.defaults['ssh']()
|
||||
project_update.scm_type = 'git'
|
||||
project_update.credential = Credential(
|
||||
pk=1,
|
||||
credential_type=ssh,
|
||||
)
|
||||
process_isolation = task.build_params_process_isolation(job, private_data_dir, 'cwd')
|
||||
|
||||
assert process_isolation['process_isolation'] is True
|
||||
assert settings.PROJECTS_ROOT in process_isolation['process_isolation_show_paths']
|
||||
|
||||
task._write_extra_vars_file = mock.Mock()
|
||||
|
||||
with mock.patch.object(Licenser, 'validate', lambda *args, **kw: {}):
|
||||
task.build_extra_vars_file(project_update, private_data_dir)
|
||||
|
||||
call_args, _ = task._write_extra_vars_file.call_args_list[0]
|
||||
_, extra_vars = call_args
|
||||
|
||||
def test_username_and_password_auth(self, project_update, scm_type):
|
||||
task = tasks.RunProjectUpdate()
|
||||
ssh = CredentialType.defaults['ssh']()
|
||||
@@ -2107,7 +2037,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
|
||||
assert '-i' in ' '.join(args)
|
||||
script = args[args.index('-i') + 1]
|
||||
with open(script, 'r') as f:
|
||||
host_script = script.replace('/runner', private_data_dir)
|
||||
with open(host_script, 'r') as f:
|
||||
assert f.read() == inventory_update.source_script.script
|
||||
assert env['FOO'] == 'BAR'
|
||||
if with_credential:
|
||||
@@ -2307,7 +2238,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
shade_config = open(env['OS_CLIENT_CONFIG_FILE'], 'r').read()
|
||||
path = os.path.join(private_data_dir, os.path.basename(env['OS_CLIENT_CONFIG_FILE']))
|
||||
shade_config = open(path, 'r').read()
|
||||
assert '\n'.join([
|
||||
'clouds:',
|
||||
' devstack:',
|
||||
|
||||
@@ -9,9 +9,6 @@ import json
|
||||
import yaml
|
||||
from unittest import mock
|
||||
|
||||
from backports.tempfile import TemporaryDirectory
|
||||
from django.conf import settings
|
||||
|
||||
from rest_framework.exceptions import ParseError
|
||||
|
||||
from awx.main.utils import common
|
||||
@@ -194,24 +191,3 @@ def test_extract_ansible_vars():
|
||||
redacted, var_list = common.extract_ansible_vars(json.dumps(my_dict))
|
||||
assert var_list == set(['ansible_connetion_setting'])
|
||||
assert redacted == {"foobar": "baz"}


def test_get_custom_venv_choices():
bundled_venv = os.path.join(settings.BASE_VENV_PATH, 'ansible', '')
assert sorted(common.get_custom_venv_choices()) == [bundled_venv]

with TemporaryDirectory(dir=settings.BASE_VENV_PATH, prefix='tmp') as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))

custom_venv_dir = os.path.join(temp_dir, 'custom')
custom_venv_1 = os.path.join(custom_venv_dir, 'venv-1')
custom_venv_awx = os.path.join(custom_venv_dir, 'custom', 'awx')

os.makedirs(os.path.join(custom_venv_1, 'bin', 'activate'))
os.makedirs(os.path.join(custom_venv_awx, 'bin', 'activate'))

assert sorted(common.get_custom_venv_choices([custom_venv_dir])) == [
bundled_venv,
os.path.join(temp_dir, ''),
os.path.join(custom_venv_1, '')
]

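The fixture above implies the rule this helper applies when offering custom virtualenvs: a directory qualifies when it contains bin/activate. A rough standalone sketch of that rule for readers of the diff (an illustration of what the test exercises, not the actual awx.main.utils.common implementation):

import os

def looks_like_custom_venv(path):
    # A directory is offered as a custom venv choice when it ships a
    # bin/activate entry, matching the directories the test creates.
    return os.path.exists(os.path.join(path, 'bin', 'activate'))

# In the test, <temp_dir>/bin/activate and <custom>/venv-1/bin/activate exist,
# so <temp_dir>/ and <custom>/venv-1/ appear in the returned choices.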
@@ -55,7 +55,8 @@ __all__ = [
|
||||
'model_instance_diff', 'parse_yaml_or_json', 'RequireDebugTrueOrTest',
|
||||
'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError',
|
||||
'get_custom_venv_choices', 'get_external_account', 'task_manager_bulk_reschedule',
|
||||
'schedule_task_manager', 'classproperty', 'create_temporary_fifo', 'truncate_stdout'
|
||||
'schedule_task_manager', 'classproperty', 'create_temporary_fifo', 'truncate_stdout',
|
||||
'deepmerge'
|
||||
]
|
||||
|
||||
|
||||
@@ -1079,3 +1080,21 @@ def truncate_stdout(stdout, size):
set_count += 1

return stdout + u'\u001b[0m' * (set_count - reset_count)


def deepmerge(a, b):
"""
Merge dict structures and return the result.

>>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
>>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
>>> import pprint; pprint.pprint(deepmerge(a, b))
{'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
"""
if isinstance(a, dict) and isinstance(b, dict):
return dict([(k, deepmerge(a.get(k), b.get(k)))
for k in set(a.keys()).union(b.keys())])
elif b is None:
return a
else:
return b

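Worth noting when reading deepmerge: it only recurses while both sides are dicts, so any other conflicting value (including a list) is taken from b wholesale, and keys present only in a survive. A quick hand-written check beyond the doctest:

# lists are replaced rather than concatenated; keys only present in `a` are kept
deepmerge({'x': {'tags': ['a'], 'keep': 1}}, {'x': {'tags': ['b']}})
# -> {'x': {'keep': 1, 'tags': ['b']}}   (key order may vary)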
@@ -18,6 +18,7 @@ def construct_rsyslog_conf_template(settings=settings):
|
||||
timeout = getattr(settings, 'LOG_AGGREGATOR_TCP_TIMEOUT', 5)
|
||||
max_disk_space = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_GB', 1)
|
||||
spool_directory = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_PATH', '/var/lib/awx').rstrip('/')
|
||||
error_log_file = getattr(settings, 'LOG_AGGREGATOR_RSYSLOGD_ERROR_LOG_FILE', '')
|
||||
|
||||
if not os.access(spool_directory, os.W_OK):
|
||||
spool_directory = '/var/lib/awx'
|
||||
@@ -31,7 +32,7 @@ def construct_rsyslog_conf_template(settings=settings):
|
||||
'$IncludeConfig /var/lib/awx/rsyslog/conf.d/*.conf',
|
||||
f'main_queue(queue.spoolDirectory="{spool_directory}" queue.maxdiskspace="{max_disk_space}g" queue.type="Disk" queue.filename="awx-external-logger-backlog")', # noqa
|
||||
'module(load="imuxsock" SysSock.Use="off")',
|
||||
'input(type="imuxsock" Socket="' + settings.LOGGING['handlers']['external_logger']['address'] + '" unlink="on")',
|
||||
'input(type="imuxsock" Socket="' + settings.LOGGING['handlers']['external_logger']['address'] + '" unlink="on" RateLimit.Burst="0")',
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
])
|
||||
|
||||
@@ -74,9 +75,10 @@ def construct_rsyslog_conf_template(settings=settings):
|
||||
f'skipverifyhost="{skip_verify}"',
|
||||
'action.resumeRetryCount="-1"',
|
||||
'template="awx"',
|
||||
'errorfile="/var/log/tower/rsyslog.err"',
|
||||
f'action.resumeInterval="{timeout}"'
|
||||
]
|
||||
if error_log_file:
|
||||
params.append(f'errorfile="{error_log_file}"')
|
||||
if parsed.path:
|
||||
path = urlparse.quote(parsed.path[1:], safe='/=')
|
||||
if parsed.query:
|
||||
@@ -114,7 +116,7 @@ def construct_rsyslog_conf_template(settings=settings):
def reconfigure_rsyslog():
tmpl = construct_rsyslog_conf_template()
# Write config to a temp file then move it to preserve atomicity
with tempfile.TemporaryDirectory(prefix='rsyslog-conf-') as temp_dir:
with tempfile.TemporaryDirectory(dir='/var/lib/awx/rsyslog/', prefix='rsyslog-conf-') as temp_dir:
path = temp_dir + '/rsyslog.conf.temp'
with open(path, 'w') as f:
os.chmod(path, 0o640)

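The hunk above pins the temporary directory to the same filesystem as the final rsyslog config, which is what keeps the write-then-move trick atomic (a rename across filesystems is not). A generic sketch of that pattern, independent of the AWX code and with placeholder paths:

import os
import tempfile

def atomic_write(target_path, contents, mode=0o640):
    # Write to a temp file on the same filesystem as the target, then
    # rename it over the target; within one filesystem the rename is
    # atomic, so readers never observe a half-written config.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(target_path), prefix='.tmp-')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(contents)
        os.chmod(tmp_path, mode)
        os.rename(tmp_path, target_path)
    except Exception:
        os.unlink(tmp_path)
        raise

# hypothetical usage: atomic_write('/var/lib/awx/rsyslog/rsyslog.conf', tmpl)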
@@ -15,6 +15,10 @@ from django.apps import apps
from django.db import models
from django.conf import settings

from django_guid.log_filters import CorrelationId
from django_guid.middleware import GuidMiddleware

from awx import MODE
from awx.main.constants import LOGGER_BLOCKLIST
from awx.main.utils.common import get_search_fields

@@ -364,3 +368,14 @@ class SmartFilter(object):
return res[0].result

raise RuntimeError("Parsing the filter_string %s went terribly wrong" % filter_string)



class DefaultCorrelationId(CorrelationId):

def filter(self, record):
guid = GuidMiddleware.get_guid() or '-'
if MODE == 'development':
guid = guid[:8]
record.guid = guid
return True

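The %(guid)s placeholder added to the 'simple' and 'dispatcher' formatters further down only resolves because a filter like DefaultCorrelationId stamps record.guid on every record first. Outside AWX, the same wiring looks roughly like this (a self-contained stdlib sketch, not the actual AWX LOGGING configuration):

import logging

class RequestIdFilter(logging.Filter):
    # Stand-in for DefaultCorrelationId: make sure every record carries
    # a guid attribute so formatters may reference %(guid)s safely.
    def filter(self, record):
        record.guid = getattr(record, 'guid', None) or '-'
        return True

handler = logging.StreamHandler()
handler.addFilter(RequestIdFilter())
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s [%(guid)s] %(name)s %(message)s'))

log = logging.getLogger('demo')
log.addHandler(handler)
log.warning('hello')   # -> ... WARNING  [-] demo hello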
@@ -3,6 +3,7 @@
|
||||
|
||||
from copy import copy
|
||||
import json
|
||||
import json_log_formatter
|
||||
import logging
|
||||
import traceback
|
||||
import socket
|
||||
@@ -14,6 +15,15 @@ from django.core.serializers.json import DjangoJSONEncoder
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class JobLifeCycleFormatter(json_log_formatter.JSONFormatter):
|
||||
def json_record(self, message: str, extra: dict, record: logging.LogRecord):
|
||||
if 'time' not in extra:
|
||||
extra['time'] = now()
|
||||
if record.exc_info:
|
||||
extra['exc_info'] = self.formatException(record.exc_info)
|
||||
return extra
|
||||
|
||||
|
||||
class TimeFormatter(logging.Formatter):
|
||||
'''
|
||||
Custom log formatter used for inventory imports
|
||||
@@ -144,6 +154,9 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
|
||||
if kind == 'job_events' and raw_data.get('python_objects', {}).get('job_event'):
|
||||
job_event = raw_data['python_objects']['job_event']
|
||||
guid = job_event.event_data.pop('guid', None)
|
||||
if guid:
|
||||
data_for_log['guid'] = guid
|
||||
for field_object in job_event._meta.fields:
|
||||
|
||||
if not field_object.__class__ or not field_object.__class__.__name__:
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
|
||||
# Python
|
||||
import logging
|
||||
import os.path
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -21,27 +22,31 @@ class RSysLogHandler(logging.handlers.SysLogHandler):
|
||||
super(RSysLogHandler, self)._connect_unixsocket(address)
|
||||
self.socket.setblocking(False)
|
||||
|
||||
def handleError(self, record):
|
||||
# for any number of reasons, rsyslogd has gone to lunch;
|
||||
# this usually means that it's just been restarted (due to
|
||||
# a configuration change) unfortunately, we can't log that
|
||||
# because...rsyslogd is down (and would just put us back down this
|
||||
# code path)
|
||||
# as a fallback, it makes the most sense to just write the
|
||||
# messages to sys.stderr (which will end up in supervisord logs,
|
||||
# and in containerized installs, cascaded down to pod logs)
|
||||
# because the alternative is blocking the
|
||||
# socket.send() in the Python process, which we definitely don't
|
||||
# want to do)
|
||||
msg = f'{record.asctime} ERROR rsyslogd was unresponsive: '
|
||||
exc = traceback.format_exc()
|
||||
try:
|
||||
msg += exc.splitlines()[-1]
|
||||
except Exception:
|
||||
msg += exc
|
||||
msg = '\n'.join([msg, record.msg, ''])
|
||||
sys.stderr.write(msg)
|
||||
|
||||
def emit(self, msg):
|
||||
if not settings.LOG_AGGREGATOR_ENABLED:
|
||||
return
|
||||
if not os.path.exists(settings.LOGGING['handlers']['external_logger']['address']):
|
||||
return
|
||||
try:
|
||||
return super(RSysLogHandler, self).emit(msg)
|
||||
except ConnectionRefusedError:
|
||||
# rsyslogd has gone to lunch; this generally means that it's just
|
||||
# been restarted (due to a configuration change)
|
||||
# unfortunately, we can't log that because...rsyslogd is down (and
|
||||
# would just us back ddown this code path)
|
||||
pass
|
||||
except BlockingIOError:
|
||||
# for <some reason>, rsyslogd is no longer reading from the domain socket, and
|
||||
# we're unable to write any more to it without blocking (we've seen this behavior
|
||||
# from time to time when logging is totally misconfigured;
|
||||
# in this scenario, it also makes more sense to just drop the messages,
|
||||
# because the alternative is blocking the socket.send() in the
|
||||
# Python process, which we definitely don't want to do)
|
||||
pass
|
||||
return super(RSysLogHandler, self).emit(msg)
|
||||
|
||||
|
||||
class SpecialInventoryHandler(logging.Handler):
|
||||
@@ -103,6 +108,15 @@ if settings.COLOR_LOGS is True:
|
||||
from logutils.colorize import ColorizingStreamHandler
|
||||
|
||||
class ColorHandler(ColorizingStreamHandler):
|
||||
def colorize(self, line, record):
|
||||
# comment out this method if you don't like the job_lifecycle
|
||||
# logs rendered with cyan text
|
||||
previous_level_map = self.level_map.copy()
|
||||
if record.name == "awx.analytics.job_lifecycle":
|
||||
self.level_map[logging.DEBUG] = (None, 'cyan', True)
|
||||
msg = super(ColorHandler, self).colorize(line, record)
|
||||
self.level_map = previous_level_map
|
||||
return msg
|
||||
|
||||
def format(self, record):
|
||||
message = logging.StreamHandler.format(self, record)
|
||||
|
||||
@@ -24,9 +24,7 @@
|
||||
tasks:
|
||||
|
||||
- name: delete project directory before update
|
||||
file:
|
||||
path: "{{project_path|quote}}"
|
||||
state: absent
|
||||
command: "rm -rf {{project_path}}/*" # volume mounted, cannot delete folder itself
|
||||
tags:
|
||||
- delete
|
||||
|
||||
@@ -57,6 +55,8 @@
|
||||
force: "{{scm_clean}}"
|
||||
username: "{{scm_username|default(omit)}}"
|
||||
password: "{{scm_password|default(omit)}}"
|
||||
# must be in_place because folder pre-existing, because it is mounted
|
||||
in_place: true
|
||||
environment:
|
||||
LC_ALL: 'en_US.UTF-8'
|
||||
register: svn_result
|
||||
@@ -206,6 +206,9 @@
|
||||
ANSIBLE_FORCE_COLOR: false
|
||||
ANSIBLE_COLLECTIONS_PATHS: "{{projects_root}}/.__awx_cache/{{local_path}}/stage/requirements_collections"
|
||||
GIT_SSH_COMMAND: "ssh -o StrictHostKeyChecking=no"
|
||||
# Put the local tmp directory in the same volume as the collection destination
# otherwise, files cannot be moved across volumes and will cause an error
|
||||
ANSIBLE_LOCAL_TEMP: "{{projects_root}}/.__awx_cache/{{local_path}}/stage/tmp"
|
||||
|
||||
when:
|
||||
- "ansible_version.full is version_compare('2.9', '>=')"
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import base64
|
||||
import os
|
||||
import re # noqa
|
||||
import sys
|
||||
@@ -58,11 +59,23 @@ DATABASES = {
|
||||
}
|
||||
}
|
||||
|
||||
# Whether or not the deployment is a K8S-based deployment
|
||||
# In K8S-based deployments, instances have zero capacity - all playbook
|
||||
# automation is intended to flow through defined Container Groups that
|
||||
# interface with some (or some set of) K8S api (which may or may not include
|
||||
# the K8S cluster where awx itself is running)
|
||||
IS_K8S = False
|
||||
|
||||
# TODO: remove this setting in favor of a default execution environment
|
||||
AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE = 'quay.io/ansible/awx-ee'
|
||||
|
||||
AWX_CONTAINER_GROUP_K8S_API_TIMEOUT = 10
|
||||
AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES = 100
|
||||
AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY = 5
|
||||
AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE = 'default'
|
||||
AWX_CONTAINER_GROUP_DEFAULT_IMAGE = 'ansible/ansible-runner'
|
||||
AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE = os.getenv('MY_POD_NAMESPACE', 'default')
|
||||
|
||||
# TODO: remove this setting in favor of a default execution environment
|
||||
AWX_CONTAINER_GROUP_DEFAULT_IMAGE = AWX_EXECUTION_ENVIRONMENT_DEFAULT_IMAGE
|
||||
|
||||
# Internationalization
|
||||
# https://docs.djangoproject.com/en/dev/topics/i18n/
|
||||
@@ -148,7 +161,10 @@ SCHEDULE_MAX_JOBS = 10
|
||||
SITE_ID = 1
|
||||
|
||||
# Make this unique, and don't share it with anybody.
|
||||
SECRET_KEY = 'p7z7g1ql4%6+(6nlebb6hdk7sd^&fnjpal308%n%+p^_e6vo1y'
|
||||
if os.path.exists('/etc/tower/SECRET_KEY'):
|
||||
SECRET_KEY = open('/etc/tower/SECRET_KEY', 'rb').read().strip()
|
||||
else:
|
||||
SECRET_KEY = base64.encodebytes(os.urandom(32)).decode().rstrip()
|
||||
|
||||
# Hosts/domain names that are valid for this site; required if DEBUG is False
|
||||
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
|
||||
@@ -169,6 +185,7 @@ REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
|
||||
PROXY_IP_ALLOWED_LIST = []
|
||||
|
||||
CUSTOM_VENV_PATHS = []
|
||||
DEFAULT_EXECUTION_ENVIRONMENT = None
|
||||
|
||||
# Note: This setting may be overridden by database settings.
|
||||
STDOUT_MAX_BYTES_DISPLAY = 1048576
|
||||
@@ -262,6 +279,7 @@ TEMPLATES = [
|
||||
'DIRS': [
|
||||
os.path.join(BASE_DIR, 'templates'),
|
||||
os.path.join(BASE_DIR, 'ui_next', 'build'),
|
||||
os.path.join(BASE_DIR, 'ui_next', 'public')
|
||||
],
|
||||
},
|
||||
]
|
||||
@@ -284,11 +302,13 @@ INSTALLED_APPS = [
|
||||
'polymorphic',
|
||||
'taggit',
|
||||
'social_django',
|
||||
'django_guid',
|
||||
'corsheaders',
|
||||
'awx.conf',
|
||||
'awx.main',
|
||||
'awx.api',
|
||||
'awx.ui',
|
||||
'awx.ui_next',
|
||||
'awx.sso',
|
||||
'solo'
|
||||
]
|
||||
@@ -344,6 +364,9 @@ AUTHENTICATION_BACKENDS = (
|
||||
'social_core.backends.github.GithubOAuth2',
|
||||
'social_core.backends.github.GithubOrganizationOAuth2',
|
||||
'social_core.backends.github.GithubTeamOAuth2',
|
||||
'social_core.backends.github_enterprise.GithubEnterpriseOAuth2',
|
||||
'social_core.backends.github_enterprise.GithubEnterpriseOrganizationOAuth2',
|
||||
'social_core.backends.github_enterprise.GithubEnterpriseTeamOAuth2',
|
||||
'social_core.backends.azuread.AzureADOAuth2',
|
||||
'awx.sso.backends.SAMLAuth',
|
||||
'django.contrib.auth.backends.ModelBackend',
|
||||
@@ -520,6 +543,20 @@ SOCIAL_AUTH_GITHUB_TEAM_SECRET = ''
|
||||
SOCIAL_AUTH_GITHUB_TEAM_ID = ''
|
||||
SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['user:email', 'read:org']
|
||||
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_SCOPE = ['user:email', 'read:org']
|
||||
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SCOPE = ['user:email', 'read:org']
|
||||
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID = ''
|
||||
SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SCOPE = ['user:email', 'read:org']
|
||||
|
||||
SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = ''
|
||||
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = ''
|
||||
|
||||
@@ -655,7 +692,7 @@ AD_HOC_COMMANDS = [
|
||||
'win_user',
|
||||
]
|
||||
|
||||
INV_ENV_VARIABLE_BLOCKED = ("HOME", "USER", "_", "TERM")
|
||||
INV_ENV_VARIABLE_BLOCKED = ("HOME", "USER", "_", "TERM", "PATH")
|
||||
|
||||
# ----------------
|
||||
# -- Amazon EC2 --
|
||||
@@ -718,10 +755,10 @@ TOWER_INSTANCE_ID_VAR = 'remote_tower_id'
|
||||
# ---------------------
|
||||
# ----- Foreman -----
|
||||
# ---------------------
|
||||
SATELLITE6_ENABLED_VAR = 'foreman.enabled'
|
||||
SATELLITE6_ENABLED_VAR = 'foreman_enabled'
|
||||
SATELLITE6_ENABLED_VALUE = 'True'
|
||||
SATELLITE6_EXCLUDE_EMPTY_GROUPS = True
|
||||
SATELLITE6_INSTANCE_ID_VAR = 'foreman.id'
|
||||
SATELLITE6_INSTANCE_ID_VAR = 'foreman_id'
|
||||
# SATELLITE6_GROUP_PREFIX and SATELLITE6_GROUP_PATTERNS defined in source vars
|
||||
|
||||
# ---------------------
|
||||
@@ -759,6 +796,8 @@ TOWER_URL_BASE = "https://towerhost"
|
||||
|
||||
INSIGHTS_URL_BASE = "https://example.org"
|
||||
INSIGHTS_AGENT_MIME = 'application/example'
|
||||
# See https://github.com/ansible/awx-facts-playbooks
|
||||
INSIGHTS_SYSTEM_ID_FILE='/etc/redhat-access-insights/machine-id'
|
||||
|
||||
TOWER_SETTINGS_MANIFEST = {}
|
||||
|
||||
@@ -770,6 +809,7 @@ LOG_AGGREGATOR_LEVEL = 'INFO'
|
||||
LOG_AGGREGATOR_MAX_DISK_USAGE_GB = 1
|
||||
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH = '/var/lib/awx'
|
||||
LOG_AGGREGATOR_RSYSLOGD_DEBUG = False
|
||||
LOG_AGGREGATOR_RSYSLOGD_ERROR_LOG_FILE = '/var/log/tower/rsyslog.err'
|
||||
|
||||
# The number of retry attempts for websocket session establishment
|
||||
# If you're encountering issues establishing websockets in clustered Tower,
|
||||
@@ -808,11 +848,14 @@ LOGGING = {
|
||||
},
|
||||
'dynamic_level_filter': {
|
||||
'()': 'awx.main.utils.filters.DynamicLevelFilter'
|
||||
}
|
||||
},
|
||||
'guid': {
|
||||
'()': 'awx.main.utils.filters.DefaultCorrelationId'
|
||||
},
|
||||
},
|
||||
'formatters': {
|
||||
'simple': {
|
||||
'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s',
|
||||
'format': '%(asctime)s %(levelname)-8s [%(guid)s] %(name)s %(message)s',
|
||||
},
|
||||
'json': {
|
||||
'()': 'awx.main.utils.formatters.LogstashFormatter'
|
||||
@@ -822,14 +865,17 @@ LOGGING = {
|
||||
'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s'
|
||||
},
|
||||
'dispatcher': {
|
||||
'format': '%(asctime)s %(levelname)-8s %(name)s PID:%(process)d %(message)s',
|
||||
'format': '%(asctime)s %(levelname)-8s [%(guid)s] %(name)s PID:%(process)d %(message)s',
|
||||
},
|
||||
'job_lifecycle': {
|
||||
'()': 'awx.main.utils.formatters.JobLifeCycleFormatter',
|
||||
},
|
||||
},
|
||||
'handlers': {
|
||||
'console': {
|
||||
'()': 'logging.StreamHandler',
|
||||
'level': 'DEBUG',
|
||||
'filters': ['require_debug_true_or_test'],
|
||||
'filters': ['require_debug_true_or_test', 'guid'],
|
||||
'formatter': 'simple',
|
||||
},
|
||||
'null': {
|
||||
@@ -849,42 +895,34 @@ LOGGING = {
|
||||
'class': 'awx.main.utils.handlers.RSysLogHandler',
|
||||
'formatter': 'json',
|
||||
'address': '/var/run/awx-rsyslog/rsyslog.sock',
|
||||
'filters': ['external_log_enabled', 'dynamic_level_filter'],
|
||||
'filters': ['external_log_enabled', 'dynamic_level_filter', 'guid'],
|
||||
},
|
||||
'tower_warnings': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter'],
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'tower.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'callback_receiver': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter'],
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'callback_receiver.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'dispatcher': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter'],
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'dispatcher.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'dispatcher',
|
||||
},
|
||||
'wsbroadcast': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter'],
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'wsbroadcast.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'celery.beat': {
|
||||
@@ -898,48 +936,44 @@ LOGGING = {
|
||||
},
|
||||
'task_system': {
|
||||
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter'],
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false', 'dynamic_level_filter', 'guid'],
|
||||
'filename': os.path.join(LOG_ROOT, 'task_system.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'management_playbooks': {
|
||||
'level': 'DEBUG',
|
||||
'class':'logging.handlers.RotatingFileHandler',
|
||||
'class':'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false'],
|
||||
'filename': os.path.join(LOG_ROOT, 'management_playbooks.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'system_tracking_migrations': {
|
||||
'level': 'WARNING',
|
||||
'class':'logging.handlers.RotatingFileHandler',
|
||||
'class':'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false'],
|
||||
'filename': os.path.join(LOG_ROOT, 'tower_system_tracking_migrations.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'rbac_migrations': {
|
||||
'level': 'WARNING',
|
||||
'class':'logging.handlers.RotatingFileHandler',
|
||||
'class':'logging.handlers.WatchedFileHandler',
|
||||
'filters': ['require_debug_false'],
|
||||
'filename': os.path.join(LOG_ROOT, 'tower_rbac_migrations.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'isolated_manager': {
|
||||
'level': 'WARNING',
|
||||
'class':'logging.handlers.RotatingFileHandler',
|
||||
'class':'logging.handlers.WatchedFileHandler',
|
||||
'filename': os.path.join(LOG_ROOT, 'isolated_manager.log'),
|
||||
'maxBytes': 1024 * 1024 * 5, # 5 MB
|
||||
'backupCount': 5,
|
||||
'formatter':'simple',
|
||||
},
|
||||
'job_lifecycle': {
|
||||
'level': 'DEBUG',
|
||||
'class':'logging.handlers.WatchedFileHandler',
|
||||
'filename': os.path.join(LOG_ROOT, 'job_lifecycle.log'),
|
||||
'formatter': 'job_lifecycle',
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
'django': {
|
||||
@@ -1029,6 +1063,16 @@ LOGGING = {
|
||||
'level': 'INFO',
|
||||
'propagate': False
|
||||
},
|
||||
'awx.analytics.performance': {
|
||||
'handlers': ['console', 'file', 'tower_warnings', 'external_logger'],
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'awx.analytics.job_lifecycle': {
|
||||
'handlers': ['console', 'job_lifecycle'],
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'django_auth_ldap': {
|
||||
'handlers': ['console', 'file', 'tower_warnings'],
|
||||
'level': 'DEBUG',
|
||||
@@ -1078,6 +1122,7 @@ AWX_CALLBACK_PROFILE = False
|
||||
AWX_CLEANUP_PATHS = True
|
||||
|
||||
MIDDLEWARE = [
|
||||
'django_guid.middleware.GuidMiddleware',
|
||||
'awx.main.middleware.TimingMiddleware',
|
||||
'django.contrib.sessions.middleware.SessionMiddleware',
|
||||
'awx.main.middleware.MigrationRanCheckMiddleware',
|
||||
@@ -1118,3 +1163,7 @@ BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS = 10
|
||||
|
||||
# How often websocket process will generate stats
|
||||
BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS = 5
|
||||
|
||||
DJANGO_GUID = {
|
||||
'GUID_HEADER_NAME': 'X-API-Request-Id',
|
||||
}
|
||||
|
||||
@@ -21,13 +21,6 @@ from split_settings.tools import optional, include
|
||||
# Load default settings.
|
||||
from .defaults import * # NOQA
|
||||
|
||||
if "pytest" in sys.modules:
|
||||
CACHES = {
|
||||
'default': {
|
||||
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
|
||||
'LOCATION': 'unique-{}'.format(str(uuid.uuid4())),
|
||||
},
|
||||
}
|
||||
|
||||
# awx-manage shell_plus --notebook
|
||||
NOTEBOOK_ARGUMENTS = [
|
||||
@@ -53,6 +46,10 @@ LOGGING['loggers']['awx.isolated.manager.playbooks']['propagate'] = True # noqa
|
||||
|
||||
# celery is annoyingly loud when docker containers start
|
||||
LOGGING['loggers'].pop('celery', None) # noqa
|
||||
# avoid awx.main.dispatch WARNING-level scaling worker up/down messages
|
||||
LOGGING['loggers']['awx.main.dispatch']['level'] = 'ERROR' # noqa
|
||||
# suppress the spamminess of the awx.main.scheduler and .tasks loggers
|
||||
LOGGING['loggers']['awx']['level'] = 'INFO' # noqa
|
||||
|
||||
ALLOWED_HOSTS = ['*']
|
||||
|
||||
@@ -158,11 +155,35 @@ AWX_VENV_PATH = os.path.join(BASE_VENV_PATH, "awx")
|
||||
# default settings for development. If not present, we can still run using
|
||||
# only the defaults.
|
||||
try:
|
||||
include(optional('local_*.py'), scope=locals())
|
||||
if os.getenv('AWX_KUBE_DEVEL', False):
|
||||
include(optional('minikube.py'), scope=locals())
|
||||
else:
|
||||
include(optional('local_*.py'), scope=locals())
|
||||
except ImportError:
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# Use SQLite for unit tests instead of PostgreSQL. If the lines below are
|
||||
# commented out, Django will create the test_awx-dev database in PostgreSQL to
|
||||
# run unit tests.
|
||||
if "pytest" in sys.modules:
|
||||
CACHES = {
|
||||
'default': {
|
||||
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
|
||||
'LOCATION': 'unique-{}'.format(str(uuid.uuid4())),
|
||||
},
|
||||
}
|
||||
DATABASES = {
|
||||
'default': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'), # noqa
|
||||
'TEST': {
|
||||
# Test database cannot be :memory: for inventory tests.
|
||||
'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'), # noqa
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
CELERYBEAT_SCHEDULE.update({ # noqa
|
||||
'isolated_heartbeat': {
|
||||
@@ -174,15 +195,6 @@ CELERYBEAT_SCHEDULE.update({ # noqa
|
||||
|
||||
CLUSTER_HOST_ID = socket.gethostname()
|
||||
|
||||
|
||||
if 'Docker Desktop' in os.getenv('OS', ''):
|
||||
os.environ['SDB_NOTIFY_HOST'] = 'docker.for.mac.host.internal'
|
||||
else:
|
||||
try:
|
||||
os.environ['SDB_NOTIFY_HOST'] = os.popen('ip route').read().split(' ')[2]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
AWX_CALLBACK_PROFILE = True
|
||||
|
||||
if 'sqlite3' not in DATABASES['default']['ENGINE']: # noqa
|
||||
|
||||
awx/settings/minikube.py (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖'
|
||||
BROADCAST_WEBSOCKET_PORT = 8013
|
||||
BROADCAST_WEBSOCKET_VERIFY_CERT = False
|
||||
BROADCAST_WEBSOCKET_PROTOCOL = 'http'
|
||||
awx/sso/conf.py (292 lines changed)
@@ -842,6 +842,298 @@ register(
|
||||
placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
###############################################################################
|
||||
# GITHUB ENTERPRISE OAUTH2 AUTHENTICATION SETTINGS
|
||||
###############################################################################
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_CALLBACK_URL',
|
||||
field_class=fields.CharField,
|
||||
read_only=True,
|
||||
default=SocialAuthCallbackURL('github-enterprise'),
|
||||
label=_('GitHub Enterprise OAuth2 Callback URL'),
|
||||
help_text=_('Provide this URL as the callback URL for your application as part '
|
||||
'of your registration process. Refer to the Ansible Tower '
|
||||
'documentation for more detail.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
depends_on=['TOWER_URL_BASE'],
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_URL',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise URL'),
|
||||
help_text=_('The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise '
|
||||
'documentation for more details.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise API URL'),
|
||||
help_text=_('The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github '
|
||||
'Enterprise documentation for more details.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise OAuth2 Key'),
|
||||
help_text=_('The OAuth2 key (Client ID) from your GitHub Enterprise developer application.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise OAuth2 Secret'),
|
||||
help_text=_('The OAuth2 secret (Client Secret) from your GitHub Enterprise developer application.'),
|
||||
category=_('GitHub OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
encrypted=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORGANIZATION_MAP',
|
||||
field_class=SocialOrganizationMapField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
label=_('GitHub Enterprise OAuth2 Organization Map'),
|
||||
help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT,
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_MAP',
|
||||
field_class=SocialTeamMapField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
label=_('GitHub Enterprise OAuth2 Team Map'),
|
||||
help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT,
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise',
|
||||
placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
###############################################################################
|
||||
# GITHUB ENTERPRISE ORG OAUTH2 AUTHENTICATION SETTINGS
|
||||
###############################################################################
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_CALLBACK_URL',
|
||||
field_class=fields.CharField,
|
||||
read_only=True,
|
||||
default=SocialAuthCallbackURL('github-enterprise-org'),
|
||||
label=_('GitHub Enterprise Organization OAuth2 Callback URL'),
|
||||
help_text=_('Provide this URL as the callback URL for your application as part '
|
||||
'of your registration process. Refer to the Ansible Tower '
|
||||
'documentation for more detail.'),
|
||||
category=_('GitHub Enterprise Organization OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
depends_on=['TOWER_URL_BASE'],
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Organization URL'),
|
||||
help_text=_('The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise '
|
||||
'documentation for more details.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Organization API URL'),
|
||||
help_text=_('The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github '
|
||||
'Enterprise documentation for more details.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_KEY',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Organization OAuth2 Key'),
|
||||
help_text=_('The OAuth2 key (Client ID) from your GitHub Enterprise organization application.'),
|
||||
category=_('GitHub Enterprise Organization OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_SECRET',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Organization OAuth2 Secret'),
|
||||
help_text=_('The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application.'),
|
||||
category=_('GitHub Enterprise Organization OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
encrypted=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Organization Name'),
|
||||
help_text=_('The name of your GitHub Enterprise organization, as used in your '
|
||||
'organization\'s URL: https://github.com/<yourorg>/.'),
|
||||
category=_('GitHub Enterprise Organization OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_ORGANIZATION_MAP',
|
||||
field_class=SocialOrganizationMapField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
label=_('GitHub Enterprise Organization OAuth2 Organization Map'),
|
||||
help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT,
|
||||
category=_('GitHub Enterprise Organization OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_TEAM_MAP',
|
||||
field_class=SocialTeamMapField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
label=_('GitHub Enterprise Organization OAuth2 Team Map'),
|
||||
help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT,
|
||||
category=_('GitHub Enterprise Organization OAuth2'),
|
||||
category_slug='github-enterprise-org',
|
||||
placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
###############################################################################
|
||||
# GITHUB ENTERPRISE TEAM OAUTH2 AUTHENTICATION SETTINGS
|
||||
###############################################################################
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_CALLBACK_URL',
|
||||
field_class=fields.CharField,
|
||||
read_only=True,
|
||||
default=SocialAuthCallbackURL('github-enterprise-team'),
|
||||
label=_('GitHub Enterprise Team OAuth2 Callback URL'),
|
||||
help_text=_('Create an organization-owned application at '
|
||||
'https://github.com/organizations/<yourorg>/settings/applications '
|
||||
'and obtain an OAuth2 key (Client ID) and secret (Client Secret). '
|
||||
'Provide this URL as the callback URL for your application.'),
|
||||
category=_('GitHub Enterprise Team OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
depends_on=['TOWER_URL_BASE'],
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Team URL'),
|
||||
help_text=_('The URL for your Github Enterprise instance, e.g.: http(s)://hostname/. Refer to Github Enterprise '
|
||||
'documentation for more details.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Team API URL'),
|
||||
help_text=_('The API URL for your GitHub Enterprise instance, e.g.: http(s)://hostname/api/v3/. Refer to Github '
|
||||
'Enterprise documentation for more details.'),
|
||||
category=_('GitHub Enterprise OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_KEY',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Team OAuth2 Key'),
|
||||
help_text=_('The OAuth2 key (Client ID) from your GitHub Enterprise organization application.'),
|
||||
category=_('GitHub Enterprise Team OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_SECRET',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Team OAuth2 Secret'),
|
||||
help_text=_('The OAuth2 secret (Client Secret) from your GitHub Enterprise organization application.'),
|
||||
category=_('GitHub Enterprise Team OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
encrypted=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID',
|
||||
field_class=fields.CharField,
|
||||
allow_blank=True,
|
||||
default='',
|
||||
label=_('GitHub Enterprise Team ID'),
|
||||
help_text=_('Find the numeric team ID using the Github Enterprise API: '
|
||||
'http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/.'),
|
||||
category=_('GitHub Enterprise Team OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ORGANIZATION_MAP',
|
||||
field_class=SocialOrganizationMapField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
label=_('GitHub Enterprise Team OAuth2 Organization Map'),
|
||||
help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT,
|
||||
category=_('GitHub Enterprise Team OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
register(
|
||||
'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_TEAM_MAP',
|
||||
field_class=SocialTeamMapField,
|
||||
allow_null=True,
|
||||
default=None,
|
||||
label=_('GitHub Enterprise Team OAuth2 Team Map'),
|
||||
help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT,
|
||||
category=_('GitHub Enterprise Team OAuth2'),
|
||||
category_slug='github-enterprise-team',
|
||||
placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
|
||||
)
|
||||
|
||||
###############################################################################
|
||||
# MICROSOFT AZURE ACTIVE DIRECTORY SETTINGS
|
||||
###############################################################################
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.