mirror of
https://github.com/ansible/awx.git
synced 2026-02-04 19:18:13 -03:30
Compare commits
919 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c5bea2b557 | ||
|
|
6d0c47fdd0 | ||
|
|
54b4acbdfc | ||
|
|
a41766090e | ||
|
|
34fa897dda | ||
|
|
32df114e41 | ||
|
|
018f235a64 | ||
|
|
7e77235d5e | ||
|
|
139d8f0ae2 | ||
|
|
7691365aea | ||
|
|
59f61517d4 | ||
|
|
fa670e2d7f | ||
|
|
a87a044d64 | ||
|
|
381ade1148 | ||
|
|
864a30e3d4 | ||
|
|
5f42db67e6 | ||
|
|
ddf4f288d4 | ||
|
|
e75bc8bc1e | ||
|
|
bb533287b8 | ||
|
|
9979fc659e | ||
|
|
9e5babc093 | ||
|
|
c71e2524ed | ||
|
|
48b4c62186 | ||
|
|
853730acb9 | ||
|
|
f1448fced1 | ||
|
|
7697b6a69b | ||
|
|
22a491c32c | ||
|
|
cbd9dce940 | ||
|
|
a4fdcc1cca | ||
|
|
df95439008 | ||
|
|
acd834df8b | ||
|
|
587f0ecf98 | ||
|
|
5a2091f7bf | ||
|
|
fa7423819a | ||
|
|
fde8af9f11 | ||
|
|
209e7e27b1 | ||
|
|
6c7d29a982 | ||
|
|
282ba36839 | ||
|
|
b727d2c3b3 | ||
|
|
7fc3d5c7c7 | ||
|
|
4e055f46c4 | ||
|
|
f595985b7c | ||
|
|
ea232315bf | ||
|
|
ee251812b5 | ||
|
|
00ba1ea569 | ||
|
|
d91af132c1 | ||
|
|
94e5795dfc | ||
|
|
c4688d6298 | ||
|
|
6763badea3 | ||
|
|
2c4ad6ef0f | ||
|
|
37f44d7214 | ||
|
|
98bbc836a6 | ||
|
|
b59aff50dc | ||
|
|
a70b0c1ddc | ||
|
|
db72c9d5b8 | ||
|
|
4e0d19914f | ||
|
|
6f2307f50e | ||
|
|
dbc2215bb6 | ||
|
|
7c08b29827 | ||
|
|
407194d320 | ||
|
|
853af295d9 | ||
|
|
4738c8333a | ||
|
|
13dcea0afd | ||
|
|
bc2d339981 | ||
|
|
bef9ef10bb | ||
|
|
8645fe5c57 | ||
|
|
b93aa20362 | ||
|
|
4bbfc8a946 | ||
|
|
2c8eef413b | ||
|
|
d5bad1a533 | ||
|
|
f6c0effcb2 | ||
|
|
31a086b11a | ||
|
|
d94f766fcb | ||
|
|
a7113549eb | ||
|
|
bfd811f408 | ||
|
|
030704a9e1 | ||
|
|
c312d9bce3 | ||
|
|
aadcc217eb | ||
|
|
345c1c11e9 | ||
|
|
2c3a7fafc5 | ||
|
|
dbcd32a1d9 | ||
|
|
d45e258a78 | ||
|
|
d16b69a102 | ||
|
|
8b4efbc973 | ||
|
|
4cb061e7db | ||
|
|
31db6a1447 | ||
|
|
ad9d5904d8 | ||
|
|
b837d549ff | ||
|
|
9e22865d2e | ||
|
|
ee3e3e1516 | ||
|
|
4a8f6e45f8 | ||
|
|
6a317cca1b | ||
|
|
d67af79451 | ||
|
|
fe77fda7b2 | ||
|
|
f613b76baa | ||
|
|
054cbe69d7 | ||
|
|
87e9dcb6d7 | ||
|
|
c8829b057e | ||
|
|
a0b376a6ca | ||
|
|
d675207f99 | ||
|
|
20504042c9 | ||
|
|
0e87e97820 | ||
|
|
1f154742df | ||
|
|
85fc81aab1 | ||
|
|
5cfeeb3e87 | ||
|
|
a8c07b06d8 | ||
|
|
53c5feaf6b | ||
|
|
6f57aaa8f5 | ||
|
|
bea74a401d | ||
|
|
54e85813c8 | ||
|
|
b69ed08fe5 | ||
|
|
de25408a23 | ||
|
|
b17f0a188b | ||
|
|
fb860d76ce | ||
|
|
451f20ce0f | ||
|
|
c1dc0c7b86 | ||
|
|
d65ea2a3d5 | ||
|
|
8827ae7554 | ||
|
|
4915262af1 | ||
|
|
d43c91e1a5 | ||
|
|
b470ca32af | ||
|
|
793777bec7 | ||
|
|
6dc4a4508d | ||
|
|
cf09a4220d | ||
|
|
659c3b64de | ||
|
|
37ad690d09 | ||
|
|
7845ec7e01 | ||
|
|
a15bcf1d55 | ||
|
|
7b3fb2c2a8 | ||
|
|
6df47c8449 | ||
|
|
cae42653bf | ||
|
|
da46a29f40 | ||
|
|
0eb465531c | ||
|
|
d0fe0ed796 | ||
|
|
ceafa14c9d | ||
|
|
08e1454098 | ||
|
|
776b661fb3 | ||
|
|
af6ccdbde5 | ||
|
|
559ab3564b | ||
|
|
208ef0ce25 | ||
|
|
c3d9aa54d8 | ||
|
|
66efe7198a | ||
|
|
adf930ee42 | ||
|
|
892410477a | ||
|
|
0d4f653794 | ||
|
|
8de8f6dce2 | ||
|
|
fc9064e27f | ||
|
|
7de350dc3e | ||
|
|
d4bdaad4d8 | ||
|
|
a9b2ffa3e9 | ||
|
|
1b8d409043 | ||
|
|
da2bccf5a8 | ||
|
|
a2f083bd8e | ||
|
|
4d641b6cf5 | ||
|
|
439c3f0c23 | ||
|
|
946bbe3560 | ||
|
|
20f054d600 | ||
|
|
918d5b3565 | ||
|
|
158314af50 | ||
|
|
4754819a09 | ||
|
|
78fc23138a | ||
|
|
014534bfa5 | ||
|
|
2502e7c7d8 | ||
|
|
fb237e3834 | ||
|
|
e4646ae611 | ||
|
|
7dc77546f4 | ||
|
|
f5f85666c8 | ||
|
|
47a061eb39 | ||
|
|
c760577855 | ||
|
|
814ceb0d06 | ||
|
|
f178c84728 | ||
|
|
c0f71801f6 | ||
|
|
4e8e1398d7 | ||
|
|
3d6a8fd4ef | ||
|
|
e873bb1304 | ||
|
|
672f1eb745 | ||
|
|
199507c6f1 | ||
|
|
a176c04c14 | ||
|
|
e3af658f82 | ||
|
|
e8a3b96482 | ||
|
|
c015e8413e | ||
|
|
390c2d8907 | ||
|
|
97605c5f19 | ||
|
|
818c326160 | ||
|
|
c98727d83e | ||
|
|
a138a92e67 | ||
|
|
7aed19ffda | ||
|
|
3bb559dd09 | ||
|
|
389a729b75 | ||
|
|
2f3c9122fd | ||
|
|
733478ee19 | ||
|
|
41c6337fc1 | ||
|
|
7446da1c2f | ||
|
|
c79fca5ceb | ||
|
|
dc5f43927a | ||
|
|
35a5a81e19 | ||
|
|
9dcc11d54c | ||
|
|
74ce21fa54 | ||
|
|
eb93660b36 | ||
|
|
f50e597548 | ||
|
|
817c3b36b9 | ||
|
|
1859a6ae69 | ||
|
|
0645d342dd | ||
|
|
61ec03e540 | ||
|
|
09f0a366bf | ||
|
|
778961d31e | ||
|
|
f962c88df3 | ||
|
|
8db3ffe719 | ||
|
|
cc5d4dd119 | ||
|
|
86204cf23b | ||
|
|
468949b899 | ||
|
|
f1d9966224 | ||
|
|
b022b50966 | ||
|
|
e2f4213839 | ||
|
|
ae1235b223 | ||
|
|
c061f59f1c | ||
|
|
3edaaebba2 | ||
|
|
7cdf1c7f96 | ||
|
|
d558204192 | ||
|
|
d06ce8f911 | ||
|
|
4b6f7e0ebe | ||
|
|
370c567be1 | ||
|
|
9be64f3de5 | ||
|
|
30500e5a95 | ||
|
|
bb323c5710 | ||
|
|
7571df49d5 | ||
|
|
1559c21033 | ||
|
|
d9b81731e9 | ||
|
|
2034cca3a9 | ||
|
|
0b5e59d9cb | ||
|
|
f48b2d1ae5 | ||
|
|
b44bb98c7e | ||
|
|
8cafdf0400 | ||
|
|
3f566c8737 | ||
|
|
c8021a25bf | ||
|
|
934646a0f6 | ||
|
|
9bb97dd658 | ||
|
|
7150f5edc6 | ||
|
|
93da15c0ee | ||
|
|
ab593bda45 | ||
|
|
065bd3ae2a | ||
|
|
8ff7260bc6 | ||
|
|
a635445082 | ||
|
|
949e7efab1 | ||
|
|
615f09226f | ||
|
|
d903c524f5 | ||
|
|
393d9c39c6 | ||
|
|
dfab342bb4 | ||
|
|
12843eccf7 | ||
|
|
dd9160135d | ||
|
|
ad96a92fa7 | ||
|
|
ca8085fe7e | ||
|
|
b076cb00a9 | ||
|
|
ee9eac15dc | ||
|
|
3f2f7b75a6 | ||
|
|
b71645f3b1 | ||
|
|
eb300252b8 | ||
|
|
2e2cd7f2de | ||
|
|
727278aaa3 | ||
|
|
81825ab755 | ||
|
|
7f2a1b6b03 | ||
|
|
1b56d94d30 | ||
|
|
e1e32c971c | ||
|
|
a4a2fabc01 | ||
|
|
b7b7bfa520 | ||
|
|
887604317e | ||
|
|
d35d8b6ed7 | ||
|
|
ec28eff7f7 | ||
|
|
a5d17539c6 | ||
|
|
a49d894cf1 | ||
|
|
b3466d4449 | ||
|
|
237adc6150 | ||
|
|
09b028ee3c | ||
|
|
fb83bfbc31 | ||
|
|
88e406e121 | ||
|
|
59d0bcc63f | ||
|
|
3fb3125bc3 | ||
|
|
d70c6b9474 | ||
|
|
5549516a37 | ||
|
|
14ac91a8a2 | ||
|
|
d5753818a0 | ||
|
|
33010a2e02 | ||
|
|
14454cc670 | ||
|
|
7ab2bca16e | ||
|
|
f0f655f2c3 | ||
|
|
4286d411a7 | ||
|
|
06ad32ed8e | ||
|
|
1ebff23232 | ||
|
|
700de14c76 | ||
|
|
8605e339df | ||
|
|
e50954ce40 | ||
|
|
7caca60308 | ||
|
|
f4e13af056 | ||
|
|
decdb56288 | ||
|
|
bcd4c2e8ef | ||
|
|
d663066ac5 | ||
|
|
1ceebb275c | ||
|
|
f78ba282a6 | ||
|
|
81d88df757 | ||
|
|
0bdb01a9e9 | ||
|
|
cd91fbf59f | ||
|
|
f240e640e5 | ||
|
|
46f489185e | ||
|
|
dbb80fb7e3 | ||
|
|
cb3d357ce1 | ||
|
|
dfa4db9266 | ||
|
|
6906a88dc9 | ||
|
|
1f7be9258c | ||
|
|
dcce024424 | ||
|
|
79d7179c72 | ||
|
|
4d80f886e0 | ||
|
|
5179333185 | ||
|
|
362e11aaf2 | ||
|
|
decff01fa4 | ||
|
|
a14cc8199d | ||
|
|
b6436826f6 | ||
|
|
2109b5039e | ||
|
|
b6f9b73418 | ||
|
|
40a8a3cb2f | ||
|
|
19f80c0a26 | ||
|
|
5d1bb2125e | ||
|
|
99c512bcef | ||
|
|
ed0329f5db | ||
|
|
dd53345397 | ||
|
|
f66cde51d7 | ||
|
|
d1c31687fc | ||
|
|
38424487f1 | ||
|
|
b0565e9937 | ||
|
|
44d85b589c | ||
|
|
46f816e7a4 | ||
|
|
54b32c10f0 | ||
|
|
20202054cc | ||
|
|
e84e2962d0 | ||
|
|
2259047527 | ||
|
|
f429ef6ca7 | ||
|
|
4b637c1319 | ||
|
|
4c41f6b018 | ||
|
|
3ae72219b4 | ||
|
|
402c29dc52 | ||
|
|
8eb4a9a2a0 | ||
|
|
36f3b46726 | ||
|
|
55c6a319dc | ||
|
|
56b6a07f6e | ||
|
|
519fd22bec | ||
|
|
2e5306ae8e | ||
|
|
068e6acbd5 | ||
|
|
f9a23a5645 | ||
|
|
40150a2be8 | ||
|
|
b79aa5b1ed | ||
|
|
b3aeb962ce | ||
|
|
2300b8fddf | ||
|
|
3a3284b5df | ||
|
|
2359004cc1 | ||
|
|
694d7e98e7 | ||
|
|
8c9c02c975 | ||
|
|
8a902debd5 | ||
|
|
6dcaa09dfb | ||
|
|
21fd6af0f9 | ||
|
|
eeae1d59d4 | ||
|
|
a252d0ae33 | ||
|
|
48971411cc | ||
|
|
083c05f12a | ||
|
|
b558397b67 | ||
|
|
904c6001e9 | ||
|
|
818e11dfdc | ||
|
|
7fc13a0569 | ||
|
|
92c693f14e | ||
|
|
f2417f0ed2 | ||
|
|
8f22188116 | ||
|
|
05502c0af8 | ||
|
|
957ce59bf7 | ||
|
|
cc4cc37d46 | ||
|
|
1e254c804c | ||
|
|
1b44bebed3 | ||
|
|
a4cf55bdba | ||
|
|
c333d0e82f | ||
|
|
b093c89a84 | ||
|
|
f98493aa61 | ||
|
|
c36d2b0485 | ||
|
|
8ddb604bf1 | ||
|
|
cd9dd43be7 | ||
|
|
82323390a7 | ||
|
|
4c5ac1d3da | ||
|
|
9c06370e33 | ||
|
|
449b95d1eb | ||
|
|
1712540c8e | ||
|
|
7cf639d8eb | ||
|
|
dbfcc40d7c | ||
|
|
73d2c92ae3 | ||
|
|
24a4242147 | ||
|
|
92ce85b688 | ||
|
|
9531f8377a | ||
|
|
15a16b3dd1 | ||
|
|
a37e7bf147 | ||
|
|
a2fcd2f97a | ||
|
|
c394ffdd19 | ||
|
|
69102cf265 | ||
|
|
a188798543 | ||
|
|
60108ebd10 | ||
|
|
8c7c00451a | ||
|
|
7a1ed406da | ||
|
|
f916ffe1e9 | ||
|
|
901dbd697e | ||
|
|
d8b4a9825e | ||
|
|
6db66c5f81 | ||
|
|
82ad7dcf40 | ||
|
|
93500f9fea | ||
|
|
9ba70c151d | ||
|
|
46dc61253f | ||
|
|
6cb2cd18b0 | ||
|
|
5d1dd8ec41 | ||
|
|
9f69daf787 | ||
|
|
16ece5de7e | ||
|
|
ab0e9265c5 | ||
|
|
04cbbbccfa | ||
|
|
d1cacf64de | ||
|
|
5385eb0fb3 | ||
|
|
7d7503279d | ||
|
|
d860d1d91b | ||
|
|
3a17c45b64 | ||
|
|
bca68bcdf1 | ||
|
|
c32f234ebb | ||
|
|
5cb3d3b078 | ||
|
|
5199cc5246 | ||
|
|
387e877485 | ||
|
|
d54c5934ff | ||
|
|
2fa5116197 | ||
|
|
527755d986 | ||
|
|
f9c0b97c53 | ||
|
|
65655f84de | ||
|
|
9aa3d5584a | ||
|
|
266e31d71a | ||
|
|
a1bbe75aed | ||
|
|
695f1cf892 | ||
|
|
0ab103d8c4 | ||
|
|
9ac1c0f6c2 | ||
|
|
2e168d8177 | ||
|
|
d4f7bfef18 | ||
|
|
985a8d499d | ||
|
|
e3b52f0169 | ||
|
|
f69f600cff | ||
|
|
74cd23be5c | ||
|
|
209747d88e | ||
|
|
d91da39f81 | ||
|
|
5cd029df96 | ||
|
|
5a93a519f6 | ||
|
|
5f5cd960d5 | ||
|
|
42701f32fe | ||
|
|
30d4df788f | ||
|
|
1bcd71a8ac | ||
|
|
43be90f051 | ||
|
|
bb1922cdbb | ||
|
|
403f545071 | ||
|
|
a06a2a883c | ||
|
|
2529fdcfd7 | ||
|
|
19dff9c2d1 | ||
|
|
2a6cf032f8 | ||
|
|
6119b33a50 | ||
|
|
aacf9653c5 | ||
|
|
325f5250db | ||
|
|
b14518c1e5 | ||
|
|
6440e3cb55 | ||
|
|
b5f6aac3aa | ||
|
|
6e5e1c8fff | ||
|
|
bf42c63c12 | ||
|
|
df24cb692b | ||
|
|
0d825a744b | ||
|
|
5e48bf091b | ||
|
|
1294cec92c | ||
|
|
dae12ee1b8 | ||
|
|
b091f6cf79 | ||
|
|
fe564c5fad | ||
|
|
eb3bc84461 | ||
|
|
6aa2997dce | ||
|
|
dd00bbba42 | ||
|
|
fe6bac6d9e | ||
|
|
87abbd4b10 | ||
|
|
fb04e5d9f6 | ||
|
|
478e2cb28d | ||
|
|
2ac304d289 | ||
|
|
3e5851f3af | ||
|
|
adb1b12074 | ||
|
|
8fae20c48a | ||
|
|
ec364cc60e | ||
|
|
1cfd51764e | ||
|
|
0b8fedfd04 | ||
|
|
72a8173462 | ||
|
|
873b1fbe07 | ||
|
|
1f36e84b45 | ||
|
|
8c4bff2b86 | ||
|
|
14f636af84 | ||
|
|
0057c8daf6 | ||
|
|
d8a28b3c06 | ||
|
|
40c2b700fe | ||
|
|
71d548f9e5 | ||
|
|
dd98963f86 | ||
|
|
4b467dfd8d | ||
|
|
456b56778e | ||
|
|
5b3cb20f92 | ||
|
|
d7086a3c88 | ||
|
|
21e7ab078c | ||
|
|
946ca0b3b8 | ||
|
|
b831dbd608 | ||
|
|
943e455f9d | ||
|
|
53bc88abe2 | ||
|
|
3b4d95633e | ||
|
|
93c329d9d5 | ||
|
|
f4c53aaf22 | ||
|
|
333ef76cbd | ||
|
|
fc0b58fd04 | ||
|
|
bef0a8b23a | ||
|
|
a5f33456b6 | ||
|
|
21fb395912 | ||
|
|
44255f378d | ||
|
|
71a6d48612 | ||
|
|
b7e5f5d1e1 | ||
|
|
b6b167627c | ||
|
|
20f5b255c9 | ||
|
|
3bcf46555d | ||
|
|
94703ccf84 | ||
|
|
6cdea1909d | ||
|
|
f133580172 | ||
|
|
4b90a7fcd1 | ||
|
|
95bfedad5b | ||
|
|
1081f2d8e9 | ||
|
|
c4ab54d7f3 | ||
|
|
bcefcd8cf8 | ||
|
|
0bd057529d | ||
|
|
a82c03e2e2 | ||
|
|
447ac77535 | ||
|
|
72d0928f1b | ||
|
|
6d727d4bc4 | ||
|
|
6040e44d9d | ||
|
|
b99ce5cd62 | ||
|
|
ba8a90c55f | ||
|
|
7ee2172517 | ||
|
|
07f49f5925 | ||
|
|
376993077a | ||
|
|
48f586bac4 | ||
|
|
16dab57c63 | ||
|
|
75a71492fd | ||
|
|
e9bd99c1ff | ||
|
|
56878b4910 | ||
|
|
19ca480078 | ||
|
|
64eb963025 | ||
|
|
dc34d0887a | ||
|
|
160634fb6f | ||
|
|
9745058546 | ||
|
|
c97a48b165 | ||
|
|
259bca0113 | ||
|
|
92c2b4e983 | ||
|
|
127a0cff23 | ||
|
|
a0ef25006a | ||
|
|
50c98a52f7 | ||
|
|
4008d72af6 | ||
|
|
e72e9f94b9 | ||
|
|
9d60b0b9c6 | ||
|
|
05b58c4df6 | ||
|
|
b1b960fd17 | ||
|
|
3c8f71e559 | ||
|
|
f5922f76fa | ||
|
|
05582702c6 | ||
|
|
1d340c5b4e | ||
|
|
15925f1416 | ||
|
|
6e06a20cca | ||
|
|
bb3acbb8ad | ||
|
|
a88e47930c | ||
|
|
a0d4515ba4 | ||
|
|
770cc10a78 | ||
|
|
159dd62d84 | ||
|
|
640e5db9c6 | ||
|
|
9ed527eb26 | ||
|
|
29ad6e1eaa | ||
|
|
3e607f8964 | ||
|
|
c9d1a4d063 | ||
|
|
a290b082db | ||
|
|
6d3c22e801 | ||
|
|
1f91773a3c | ||
|
|
7b846e1e49 | ||
|
|
f7a2de8a07 | ||
|
|
194c214f03 | ||
|
|
77e30dd4b2 | ||
|
|
9d7421b9bc | ||
|
|
3b8e662916 | ||
|
|
aa3228eec9 | ||
|
|
7b0598c7d8 | ||
|
|
49832d6379 | ||
|
|
8feeb5f1fa | ||
|
|
56230ba5d1 | ||
|
|
480aaeace5 | ||
|
|
3eaea396be | ||
|
|
deef8669c9 | ||
|
|
63223a2cc7 | ||
|
|
a28bc2eb3f | ||
|
|
09168e5832 | ||
|
|
6df1de4262 | ||
|
|
e072bb7668 | ||
|
|
ec579fd637 | ||
|
|
b95d521162 | ||
|
|
d03a6a809d | ||
|
|
4466976e10 | ||
|
|
5733f78fd8 | ||
|
|
20fc7c702a | ||
|
|
6ce5799689 | ||
|
|
dc81aa46d0 | ||
|
|
ab3ceaecad | ||
|
|
1bb4240a6b | ||
|
|
5e105c2cbd | ||
|
|
cdb4f0b7fd | ||
|
|
cf1e448577 | ||
|
|
224e9e0324 | ||
|
|
660dab439b | ||
|
|
5ce2055431 | ||
|
|
951bd1cc87 | ||
|
|
c9190ebd8f | ||
|
|
eb33973fa3 | ||
|
|
40be2e7b6e | ||
|
|
485813211a | ||
|
|
0a87bf1b5e | ||
|
|
fa0e0b2576 | ||
|
|
1d3b2f57ce | ||
|
|
0577e1ee79 | ||
|
|
470ecc4a4f | ||
|
|
965127637b | ||
|
|
eba130cf41 | ||
|
|
441336301e | ||
|
|
2a0be898e6 | ||
|
|
c47acc5988 | ||
|
|
70ba32b5b2 | ||
|
|
81e06dace2 | ||
|
|
3e8202590c | ||
|
|
ad96a72ebe | ||
|
|
eb0058268b | ||
|
|
2bf6512a8e | ||
|
|
855f61a04e | ||
|
|
532e71ff45 | ||
|
|
b9ea114cac | ||
|
|
e41ad82687 | ||
|
|
3bd25c682e | ||
|
|
7169c75b1a | ||
|
|
fdb359a67b | ||
|
|
ed2a59c1a3 | ||
|
|
906f8a1dce | ||
|
|
6833976c54 | ||
|
|
d15405eafe | ||
|
|
6c3bbfc3be | ||
|
|
2e3e6cbde5 | ||
|
|
54894c14dc | ||
|
|
2a51f23b7d | ||
|
|
80df31fc4e | ||
|
|
8f8462b38e | ||
|
|
0c41abea0e | ||
|
|
3eda1ede8d | ||
|
|
40fca6db57 | ||
|
|
148111a072 | ||
|
|
9cad45feac | ||
|
|
6834568c5d | ||
|
|
f7fdb7fe8d | ||
|
|
d8abd4912b | ||
|
|
4fbdc412ad | ||
|
|
db1af57daa | ||
|
|
ffa59864ee | ||
|
|
b209bc67b4 | ||
|
|
1faea020af | ||
|
|
b55a099620 | ||
|
|
f6dd3cb988 | ||
|
|
c448b87c85 | ||
|
|
4dd823121a | ||
|
|
ec4f10d868 | ||
|
|
2a1dffd363 | ||
|
|
8c7ab8fcf2 | ||
|
|
3de8455960 | ||
|
|
d832e75e99 | ||
|
|
a89e266feb | ||
|
|
8e1516eeb7 | ||
|
|
c7f2fdbe57 | ||
|
|
c75757bf22 | ||
|
|
b8ec7c4072 | ||
|
|
bb1c155bc9 | ||
|
|
4822dd79fc | ||
|
|
4cd90163fc | ||
|
|
8dc6ceffee | ||
|
|
2c7184f9d2 | ||
|
|
5cf93febaa | ||
|
|
284bd8377a | ||
|
|
14992cee17 | ||
|
|
6db663eacb | ||
|
|
87bb70bcc0 | ||
|
|
c2d02841e8 | ||
|
|
e5a6007bf1 | ||
|
|
6f9ea1892b | ||
|
|
abc56305cc | ||
|
|
9bb6786a58 | ||
|
|
aec9a9ca56 | ||
|
|
7e4cf859f5 | ||
|
|
90c3d8a275 | ||
|
|
6d1c8de4ed | ||
|
|
601b62deef | ||
|
|
131dd088cd | ||
|
|
445d892050 | ||
|
|
35a576f2dd | ||
|
|
7838641215 | ||
|
|
ab5cc2e69c | ||
|
|
5a63533967 | ||
|
|
b549ae1efa | ||
|
|
bd0089fd35 | ||
|
|
40d18e95c2 | ||
|
|
191a0f7f2a | ||
|
|
852bb0717c | ||
|
|
98bfe3f43f | ||
|
|
53a7b7818e | ||
|
|
e7c7454a3a | ||
|
|
63e82aa4a3 | ||
|
|
fc1b74aa68 | ||
|
|
ea455df9f4 | ||
|
|
8e2a5ed8ae | ||
|
|
1d7e54bd39 | ||
|
|
83df056f71 | ||
|
|
48edb15a03 | ||
|
|
8ddc19a927 | ||
|
|
b021ad7b28 | ||
|
|
b8ba2feecd | ||
|
|
8cfb704f86 | ||
|
|
efcac860de | ||
|
|
6c5590e0e6 | ||
|
|
0edcd688a2 | ||
|
|
b8c48f7d50 | ||
|
|
07e30a3d5f | ||
|
|
cb5a8aa194 | ||
|
|
8b49f910c7 | ||
|
|
a4f808df34 | ||
|
|
82abd18927 | ||
|
|
5e9d514e5e | ||
|
|
4a34ee1f1e | ||
|
|
3624fe2cac | ||
|
|
0f96d9aca2 | ||
|
|
989b80e771 | ||
|
|
cc64be937d | ||
|
|
94183d602c | ||
|
|
ac4ef141bf | ||
|
|
86f6b54eec | ||
|
|
bd8108b27c | ||
|
|
aed96fb365 | ||
|
|
fe2da52eec | ||
|
|
974465e46a | ||
|
|
c736986023 | ||
|
|
6b381aa79e | ||
|
|
755e55ec70 | ||
|
|
255c2e4172 | ||
|
|
aa8437fd77 | ||
|
|
66f14bfe8f | ||
|
|
721a2002dc | ||
|
|
af39b2cd3f | ||
|
|
cdd48dd7cd | ||
|
|
d3de884baf | ||
|
|
fa8968b95b | ||
|
|
897a19e127 | ||
|
|
4bae961b5f | ||
|
|
900c4fd8f1 | ||
|
|
4d5bbd7065 | ||
|
|
fb8fadc7f9 | ||
|
|
ba99ddfd82 | ||
|
|
9676a95e05 | ||
|
|
36d6ed9cac | ||
|
|
875f1a82e4 | ||
|
|
db71b63829 | ||
|
|
cd4d83acb7 | ||
|
|
7e25a694f3 | ||
|
|
baca43ee62 | ||
|
|
3b69552260 | ||
|
|
f9bd780d62 | ||
|
|
a665d96026 | ||
|
|
e47d30974c | ||
|
|
2b8ed66f3e | ||
|
|
dfe8b3b16b | ||
|
|
c738d0788e | ||
|
|
0c2d589109 | ||
|
|
a47bbb5479 | ||
|
|
4b4b73c02a | ||
|
|
d1d08fe499 | ||
|
|
7e7a9f541c | ||
|
|
98d67e2133 | ||
|
|
7a36041bf2 | ||
|
|
b96564da55 | ||
|
|
044d6bf97c | ||
|
|
d357c1162f | ||
|
|
3c22fc9242 | ||
|
|
8c86092bf5 | ||
|
|
081206965c | ||
|
|
036f85cd80 | ||
|
|
6976ac9273 | ||
|
|
9009a21a32 | ||
|
|
aafd4df288 | ||
|
|
844666df4c | ||
|
|
0ae720244c | ||
|
|
b70fa88b78 | ||
|
|
fbaeb90268 | ||
|
|
2a549c0b23 | ||
|
|
2c320cb16d | ||
|
|
434595481c | ||
|
|
444d05447e | ||
|
|
fbe202bdbf | ||
|
|
d89cad0d9e | ||
|
|
bdfd6f47ff | ||
|
|
ae7be2eea1 | ||
|
|
8957a84738 | ||
|
|
bac124004f | ||
|
|
f46c7452d1 | ||
|
|
098861d906 | ||
|
|
daf39dc77e | ||
|
|
00d8291d40 | ||
|
|
88d1a484fa | ||
|
|
5afdfb1135 | ||
|
|
2f15cc5170 | ||
|
|
f15d40286c | ||
|
|
f58c44590d | ||
|
|
ef99770383 | ||
|
|
84f67c7f82 | ||
|
|
433c28caa8 | ||
|
|
fa05f55512 | ||
|
|
0d5c0bcb91 | ||
|
|
f3fa75d832 | ||
|
|
285b7b0e5f | ||
|
|
08e8147374 | ||
|
|
09bd398a9e | ||
|
|
8d6f50fae8 | ||
|
|
ecfbcb641e | ||
|
|
e434b1e0f3 | ||
|
|
66c3acf777 | ||
|
|
ed1983bd8c | ||
|
|
5c4277958c | ||
|
|
7e4da7efa2 | ||
|
|
7b1cb281c2 | ||
|
|
dee39f3f1c | ||
|
|
ba7f97f84b | ||
|
|
85e7189ee3 | ||
|
|
06430741ab | ||
|
|
cf091d7836 | ||
|
|
a66acd87e6 | ||
|
|
595b4e3876 | ||
|
|
74c46568c1 | ||
|
|
05e9b29460 | ||
|
|
f1196fc019 | ||
|
|
7f020052db | ||
|
|
53260213ba | ||
|
|
7d1ee37689 | ||
|
|
45c13c25a4 | ||
|
|
ba0e9831d2 | ||
|
|
92dce85468 | ||
|
|
77139e4138 | ||
|
|
b28e14c630 | ||
|
|
bf5594e338 | ||
|
|
f012a69c93 | ||
|
|
0fb334e372 | ||
|
|
b7c5cbac3f | ||
|
|
eb7407593f | ||
|
|
287596234c | ||
|
|
ee7b3470da | ||
|
|
0faa1c8a24 | ||
|
|
77175d2862 | ||
|
|
22464a5838 | ||
|
|
3919ea6270 | ||
|
|
9d9f650051 | ||
|
|
66a3cb6b09 | ||
|
|
d282393035 | ||
|
|
6ea3b20912 | ||
|
|
3025ef0dfa | ||
|
|
397d58c459 | ||
|
|
d739a4a90a | ||
|
|
3fe64ad101 | ||
|
|
919d1e5d40 | ||
|
|
7fda4b0675 | ||
|
|
d8af19d169 | ||
|
|
1821e540f7 | ||
|
|
77be6c7495 | ||
|
|
baed869d93 | ||
|
|
b87ff45c07 | ||
|
|
7acc0067f5 | ||
|
|
0a13762f11 | ||
|
|
2c673c8f1f | ||
|
|
8c187c74fc | ||
|
|
2ce9440bab | ||
|
|
765487390f | ||
|
|
086722149c | ||
|
|
c10ada6f44 | ||
|
|
b350cd053d | ||
|
|
d0acb1c53f | ||
|
|
f61b73010a | ||
|
|
adb89cd48f | ||
|
|
3e509b3d55 | ||
|
|
f0badea9d3 | ||
|
|
6a1ec0dc89 | ||
|
|
329fb88bbb | ||
|
|
177f8cb7b2 | ||
|
|
b43107a5e9 | ||
|
|
4857685e1c | ||
|
|
8ba1a2bcf7 | ||
|
|
e7c80fe1e8 | ||
|
|
33f1c35292 | ||
|
|
ba899324f2 | ||
|
|
9c236eb8dd | ||
|
|
36559a4539 | ||
|
|
7a4b3ed139 | ||
|
|
cd5cc64d6a | ||
|
|
71a11ea3ad | ||
|
|
cfbbc4cb92 | ||
|
|
592920ee51 | ||
|
|
b75b84e282 | ||
|
|
f4b80c70e3 | ||
|
|
9870187af5 | ||
|
|
d57f549a4c | ||
|
|
93e6f974f6 | ||
|
|
68b32b9b4f | ||
|
|
105609ec20 | ||
|
|
4a3d437b32 | ||
|
|
184719e9f2 | ||
|
|
b0c416334f | ||
|
|
7c4aedf716 | ||
|
|
76f03b9adc |
3
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
3
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -19,6 +19,8 @@ body:
|
||||
required: true
|
||||
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
|
||||
required: true
|
||||
- label: I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `security@ansible.com` instead.)
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: summary
|
||||
@@ -42,6 +44,7 @@ body:
|
||||
label: Select the relevant components
|
||||
options:
|
||||
- label: UI
|
||||
- label: UI (tech preview)
|
||||
- label: API
|
||||
- label: Docs
|
||||
- label: Collection
|
||||
|
||||
34
.github/actions/awx_devel_image/action.yml
vendored
Normal file
34
.github/actions/awx_devel_image/action.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: Setup images for AWX
|
||||
description: Builds new awx_devel image
|
||||
inputs:
|
||||
github-token:
|
||||
description: GitHub Token for registry access
|
||||
required: true
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Get python version from Makefile
|
||||
shell: bash
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Set lower case owner name
|
||||
shell: bash
|
||||
run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV
|
||||
env:
|
||||
OWNER: '${{ github.repository_owner }}'
|
||||
|
||||
- name: Log in to registry
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull latest devel image to warm cache
|
||||
shell: bash
|
||||
run: docker pull -q ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}
|
||||
|
||||
- name: Build image for current source checkout
|
||||
shell: bash
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
|
||||
COMPOSE_TAG=${{ github.base_ref }} \
|
||||
make docker-compose-build
|
||||
67
.github/actions/run_awx_devel/action.yml
vendored
Normal file
67
.github/actions/run_awx_devel/action.yml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
name: Run AWX docker-compose
|
||||
description: Runs AWX with `make docker-compose`
|
||||
inputs:
|
||||
github-token:
|
||||
description: GitHub Token to pass to awx_devel_image
|
||||
required: true
|
||||
build-ui:
|
||||
description: Should the UI be built?
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
outputs:
|
||||
ip:
|
||||
description: The IP of the tools_awx_1 container
|
||||
value: ${{ steps.data.outputs.ip }}
|
||||
admin-token:
|
||||
description: OAuth token for admin user
|
||||
value: ${{ steps.data.outputs.admin_token }}
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Build awx_devel image for running checks
|
||||
uses: ./.github/actions/awx_devel_image
|
||||
with:
|
||||
github-token: ${{ inputs.github-token }}
|
||||
|
||||
- name: Upgrade ansible-core
|
||||
shell: bash
|
||||
run: python3 -m pip install --upgrade ansible-core
|
||||
|
||||
- name: Install system deps
|
||||
shell: bash
|
||||
run: sudo apt-get install -y gettext
|
||||
|
||||
- name: Start AWX
|
||||
shell: bash
|
||||
run: |
|
||||
DEV_DOCKER_OWNER=${{ github.repository_owner }} \
|
||||
COMPOSE_TAG=${{ github.base_ref }} \
|
||||
COMPOSE_UP_OPTS="-d" \
|
||||
make docker-compose
|
||||
|
||||
- name: Update default AWX password
|
||||
shell: bash
|
||||
run: |
|
||||
SECONDS=0
|
||||
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]; do
|
||||
if [[ $SECONDS -gt 600 ]]; then
|
||||
echo "Timing out, AWX never came up"
|
||||
exit 1
|
||||
fi
|
||||
echo "Waiting for AWX..."
|
||||
sleep 5
|
||||
done
|
||||
echo "AWX is up, updating the password..."
|
||||
docker exec -i tools_awx_1 sh <<-EOSH
|
||||
awx-manage update_password --username=admin --password=password
|
||||
EOSH
|
||||
|
||||
- name: Get instance data
|
||||
id: data
|
||||
shell: bash
|
||||
run: |
|
||||
AWX_IP=$(docker inspect -f '{{.NetworkSettings.Networks.awx.IPAddress}}' tools_awx_1)
|
||||
ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
|
||||
echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
|
||||
echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
|
||||
19
.github/actions/upload_awx_devel_logs/action.yml
vendored
Normal file
19
.github/actions/upload_awx_devel_logs/action.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
name: Upload logs
|
||||
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
|
||||
inputs:
|
||||
log-filename:
|
||||
description: "*Unique* name of the log file"
|
||||
required: true
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Get AWX logs
|
||||
shell: bash
|
||||
run: |
|
||||
docker logs tools_awx_1 > ${{ inputs.log-filename }}
|
||||
|
||||
- name: Upload AWX logs as artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: docker-compose-logs
|
||||
path: ${{ inputs.log-filename }}
|
||||
19
.github/dependabot.yml
vendored
19
.github/dependabot.yml
vendored
@@ -1,19 +1,10 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/awx/ui"
|
||||
- package-ecosystem: "pip"
|
||||
directory: "docs/docsite/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
open-pull-requests-limit: 5
|
||||
allow:
|
||||
- dependency-type: "production"
|
||||
reviewers:
|
||||
- "AlexSCorey"
|
||||
- "keithjgrant"
|
||||
- "kialam"
|
||||
- "mabashian"
|
||||
- "marshmalien"
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 2
|
||||
labels:
|
||||
- "component:ui"
|
||||
- "docs"
|
||||
- "dependencies"
|
||||
target-branch: "devel"
|
||||
|
||||
2
.github/issue_labeler.yml
vendored
2
.github/issue_labeler.yml
vendored
@@ -6,6 +6,8 @@ needs_triage:
|
||||
- "Feature Summary"
|
||||
"component:ui":
|
||||
- "\\[X\\] UI"
|
||||
"component:ui_next":
|
||||
- "\\[X\\] UI \\(tech preview\\)"
|
||||
"component:api":
|
||||
- "\\[X\\] API"
|
||||
"component:docs":
|
||||
|
||||
3
.github/pr_labeler.yml
vendored
3
.github/pr_labeler.yml
vendored
@@ -15,5 +15,4 @@
|
||||
|
||||
"dependencies":
|
||||
- any: ["awx/ui/package.json"]
|
||||
- any: ["awx/requirements/*.txt"]
|
||||
- any: ["awx/requirements/requirements.in"]
|
||||
- any: ["requirements/*"]
|
||||
|
||||
6
.github/triage_replies.md
vendored
6
.github/triage_replies.md
vendored
@@ -1,14 +1,14 @@
|
||||
## General
|
||||
- For the roundup of all the different mailing lists available from AWX, Ansible, and beyond visit: https://docs.ansible.com/ansible/latest/community/communication.html
|
||||
- Hello, we think your question is answered in our FAQ. Does this: https://www.ansible.com/products/awx-project/faq cover your question?
|
||||
- You can find the latest documentation here: https://docs.ansible.com/automation-controller/latest/html/userguide/index.html
|
||||
- You can find the latest documentation here: https://ansible.readthedocs.io/projects/awx/en/latest/userguide/index.html
|
||||
|
||||
|
||||
|
||||
## PRs/Issues
|
||||
|
||||
### Visit our mailing list
|
||||
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
|
||||
### Visit the Forum or Matrix
|
||||
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)?
|
||||
|
||||
### Denied Submission
|
||||
|
||||
|
||||
208
.github/workflows/ci.yml
vendored
208
.github/workflows/ci.yml
vendored
@@ -3,7 +3,7 @@ name: CI
|
||||
env:
|
||||
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
|
||||
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
|
||||
DEV_DOCKER_OWNER: ${{ github.repository_owner }}
|
||||
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
|
||||
on:
|
||||
pull_request:
|
||||
@@ -11,6 +11,7 @@ jobs:
|
||||
common-tests:
|
||||
name: ${{ matrix.tests.name }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
@@ -20,6 +21,8 @@ jobs:
|
||||
tests:
|
||||
- name: api-test
|
||||
command: /start_tests.sh
|
||||
- name: api-migrations
|
||||
command: /start_tests.sh test_migrations
|
||||
- name: api-lint
|
||||
command: /var/lib/awx/venv/awx/bin/tox -e linters
|
||||
- name: api-swagger
|
||||
@@ -35,30 +38,51 @@ jobs:
|
||||
- name: ui-test-general
|
||||
command: make ui-test-general
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: Build awx_devel image for running checks
|
||||
uses: ./.github/actions/awx_devel_image
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run check ${{ matrix.tests.name }}
|
||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
|
||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner
|
||||
|
||||
dev-env:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
build-ui: false
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run smoke test
|
||||
run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||
run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||
|
||||
awx-operator:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
env:
|
||||
DEBUG_OUTPUT_DIR: /tmp/awx_operator_molecule_test
|
||||
steps:
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
path: awx
|
||||
|
||||
- name: Checkout awx-operator
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false\
|
||||
repository: ansible/awx-operator
|
||||
path: awx-operator
|
||||
|
||||
@@ -67,7 +91,7 @@ jobs:
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -78,11 +102,11 @@ jobs:
|
||||
- name: Build AWX image
|
||||
working-directory: awx
|
||||
run: |
|
||||
ansible-playbook -v tools/ansible/build.yml \
|
||||
-e headless=yes \
|
||||
-e awx_image=awx \
|
||||
-e awx_image_tag=ci \
|
||||
-e ansible_python_interpreter=$(which python3)
|
||||
VERSION=`make version-for-buildyml` make awx-kube-build
|
||||
env:
|
||||
COMPOSE_TAG: ci
|
||||
DEV_DOCKER_TAG_BASE: local
|
||||
HEADLESS: yes
|
||||
|
||||
- name: Run test deployment with awx-operator
|
||||
working-directory: awx-operator
|
||||
@@ -91,18 +115,30 @@ jobs:
|
||||
ansible-galaxy collection install -r molecule/requirements.yml
|
||||
sudo rm -f $(which kustomize)
|
||||
make kustomize
|
||||
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind
|
||||
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind -- --skip-tags=replicas
|
||||
env:
|
||||
AWX_TEST_IMAGE: awx
|
||||
AWX_TEST_IMAGE: local/awx
|
||||
AWX_TEST_VERSION: ci
|
||||
AWX_EE_TEST_IMAGE: quay.io/ansible/awx-ee:latest
|
||||
STORE_DEBUG_OUTPUT: true
|
||||
|
||||
- name: Upload debug output
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: awx-operator-debug-output
|
||||
path: ${{ env.DEBUG_OUTPUT_DIR }}
|
||||
|
||||
collection-sanity:
|
||||
name: awx_collection sanity
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
|
||||
- name: Upgrade ansible-core
|
||||
@@ -110,7 +146,143 @@ jobs:
|
||||
|
||||
- name: Run sanity tests
|
||||
run: make test_collection_sanity
|
||||
|
||||
collection-integration:
|
||||
name: awx_collection integration
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target-regex:
|
||||
- name: a-h
|
||||
regex: ^[a-h]
|
||||
- name: i-p
|
||||
regex: ^[i-p]
|
||||
- name: r-z0-9
|
||||
regex: ^[r-z0-9]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
build-ui: false
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install dependencies for running tests
|
||||
run: |
|
||||
python3 -m pip install -e ./awxkit/
|
||||
python3 -m pip install -r awx_collection/requirements.txt
|
||||
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
|
||||
echo '[general]' > ~/.tower_cli.cfg
|
||||
echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
|
||||
echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
|
||||
echo 'verify_ssl = false' >> ~/.tower_cli.cfg
|
||||
TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
|
||||
make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
|
||||
env:
|
||||
# needed due to cgroupsv2. This is fixed, but a stable release
|
||||
# with the fix has not been made yet.
|
||||
ANSIBLE_TEST_PREFER_PODMAN: 1
|
||||
|
||||
# Upload coverage report as artifact
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: coverage-${{ matrix.target-regex.name }}
|
||||
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
log-filename: collection-integration-${{ matrix.target-regex.name }}.log
|
||||
|
||||
collection-integration-coverage-combine:
|
||||
name: combine awx_collection integration coverage
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
needs:
|
||||
- collection-integration
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: Upgrade ansible-core
|
||||
run: python3 -m pip install --upgrade ansible-core
|
||||
|
||||
- name: Download coverage artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: coverage
|
||||
|
||||
- name: Combine coverage
|
||||
run: |
|
||||
make COLLECTION_VERSION=100.100.100-git install_collection
|
||||
mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
|
||||
cd coverage
|
||||
for i in coverage-*; do
|
||||
cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||
done
|
||||
cd ~/.ansible/collections/ansible_collections/awx/awx
|
||||
ansible-test coverage combine --requirements
|
||||
ansible-test coverage html
|
||||
echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
ansible-test coverage report >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
echo >> $GITHUB_STEP_SUMMARY
|
||||
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
|
||||
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
# This is a huge hack, there's no official action for removing artifacts currently.
|
||||
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
|
||||
# steps, so we have to use github-script to get them.
|
||||
#
|
||||
# The advantage of doing this, though, is that we save on artifact storage space.
|
||||
|
||||
- name: Get secret artifact runtime URL
|
||||
uses: actions/github-script@v6
|
||||
id: get-runtime-url
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
const { ACTIONS_RUNTIME_URL } = process.env;
|
||||
return ACTIONS_RUNTIME_URL;
|
||||
|
||||
- name: Get secret artifact runtime token
|
||||
uses: actions/github-script@v6
|
||||
id: get-runtime-token
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
const { ACTIONS_RUNTIME_TOKEN } = process.env;
|
||||
return ACTIONS_RUNTIME_TOKEN;
|
||||
|
||||
- name: Remove intermediary artifacts
|
||||
env:
|
||||
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
|
||||
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
|
||||
run: |
|
||||
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
|
||||
artifacts=$(
|
||||
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
|
||||
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
|
||||
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
|
||||
)
|
||||
|
||||
for artifact in $artifacts; do
|
||||
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
|
||||
done
|
||||
|
||||
- name: Upload coverage report as artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: awx-collection-integration-coverage-html
|
||||
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage
|
||||
|
||||
57
.github/workflows/dab-release.yml
vendored
Normal file
57
.github/workflows/dab-release.yml
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
---
|
||||
name: django-ansible-base requirements update
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 6 * * *' # once an day @ 6 AM
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: write
|
||||
jobs:
|
||||
dab-pin-newest:
|
||||
if: (github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')) || github.event_name != 'schedule'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- id: dab-release
|
||||
name: Get current django-ansible-base release version
|
||||
uses: pozetroninc/github-action-get-latest-release@2a61c339ea7ef0a336d1daa35ef0cb1418e7676c # v0.8.0
|
||||
with:
|
||||
owner: ansible
|
||||
repo: django-ansible-base
|
||||
excludes: prerelease, draft
|
||||
|
||||
- name: Check out respository code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- id: dab-pinned
|
||||
name: Get current django-ansible-base pinned version
|
||||
run:
|
||||
echo "version=$(requirements/django-ansible-base-pinned-version.sh)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Update django-ansible-base pinned version to upstream release
|
||||
run:
|
||||
requirements/django-ansible-base-pinned-version.sh -s ${{ steps.dab-release.outputs.release }}
|
||||
|
||||
- name: Create Pull Request
|
||||
uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6
|
||||
with:
|
||||
base: devel
|
||||
branch: bump-django-ansible-base
|
||||
title: Bump django-ansible-base to ${{ steps.dab-release.outputs.release }}
|
||||
body: |
|
||||
##### SUMMARY
|
||||
Automated .github/workflows/dab-release.yml
|
||||
|
||||
django-ansible-base upstream released version == ${{ steps.dab-release.outputs.release }}
|
||||
requirements_git.txt django-ansible-base pinned version == ${{ steps.dab-pinned.outputs.version }}
|
||||
|
||||
##### ISSUE TYPE
|
||||
- Bug, Docs Fix or other nominal change
|
||||
|
||||
##### COMPONENT NAME
|
||||
- API
|
||||
|
||||
commit-message: |
|
||||
Update django-ansible-base version to ${{ steps.dab-pinned.outputs.version }}
|
||||
add-paths:
|
||||
requirements/requirements_git.txt
|
||||
67
.github/workflows/devel_images.yml
vendored
67
.github/workflows/devel_images.yml
vendored
@@ -2,33 +2,59 @@
|
||||
name: Build/Push Development Images
|
||||
env:
|
||||
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
|
||||
DOCKER_CACHE: "--no-cache" # using the cache will not rebuild git requirements and other things
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- devel
|
||||
- release_*
|
||||
- feature_*
|
||||
jobs:
|
||||
push:
|
||||
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
|
||||
push-development-images:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 120
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
build-targets:
|
||||
- image-name: awx_devel
|
||||
make-target: docker-compose-buildx
|
||||
- image-name: awx_kube_devel
|
||||
make-target: awx-kube-dev-buildx
|
||||
- image-name: awx
|
||||
make-target: awx-kube-buildx
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Set lower case owner name
|
||||
- name: Skipping build of awx image for non-awx repository
|
||||
run: |
|
||||
echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
|
||||
echo "Skipping build of awx image for non-awx repository"
|
||||
exit 0
|
||||
if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Set GITHUB_ENV variables
|
||||
run: |
|
||||
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
|
||||
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
|
||||
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
env:
|
||||
OWNER: '${{ github.repository_owner }}'
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -36,20 +62,17 @@ jobs:
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull image to warm build cache
|
||||
run: |
|
||||
docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
|
||||
docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
|
||||
docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
|
||||
- name: Setup node and npm for the new UI build
|
||||
uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '18'
|
||||
if: matrix.build-targets.image-name == 'awx'
|
||||
|
||||
- name: Build images
|
||||
- name: Prebuild new UI for awx image (to speed up build process)
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
|
||||
make ui-next
|
||||
if: matrix.build-targets.image-name == 'awx'
|
||||
|
||||
- name: Push image
|
||||
- name: Build and push AWX devel images
|
||||
run: |
|
||||
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
|
||||
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
|
||||
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
|
||||
make ${{ matrix.build-targets.make-target }}
|
||||
|
||||
19
.github/workflows/docs.yml
vendored
Normal file
19
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
name: Docsite CI
|
||||
on:
|
||||
pull_request:
|
||||
jobs:
|
||||
docsite-build:
|
||||
name: docsite test build
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: install tox
|
||||
run: pip install tox
|
||||
|
||||
- name: Assure docs can be built
|
||||
run: tox -e docs
|
||||
109
.github/workflows/e2e_test.yml
vendored
109
.github/workflows/e2e_test.yml
vendored
@@ -1,109 +0,0 @@
|
||||
---
|
||||
name: E2E Tests
|
||||
env:
|
||||
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [labeled]
|
||||
jobs:
|
||||
e2e-test:
|
||||
if: contains(github.event.pull_request.labels.*.name, 'qe:e2e')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 40
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
strategy:
|
||||
matrix:
|
||||
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Install system deps
|
||||
run: sudo apt-get install -y gettext
|
||||
|
||||
- name: Log in to registry
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull image to warm build cache
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||
|
||||
- name: Build UI
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
|
||||
|
||||
- name: Start AWX
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
|
||||
|
||||
- name: Pull awx_cypress_base image
|
||||
run: |
|
||||
docker pull quay.io/awx/awx_cypress_base:latest
|
||||
|
||||
- name: Checkout test project
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/tower-qa
|
||||
ssh-key: ${{ secrets.QA_REPO_KEY }}
|
||||
path: tower-qa
|
||||
ref: devel
|
||||
|
||||
- name: Build cypress
|
||||
run: |
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
docker build -t awx-pf-tests .
|
||||
|
||||
- name: Update default AWX password
|
||||
run: |
|
||||
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
|
||||
do
|
||||
echo "Waiting for AWX..."
|
||||
sleep 5;
|
||||
done
|
||||
echo "AWX is up, updating the password..."
|
||||
docker exec -i tools_awx_1 sh <<-EOSH
|
||||
awx-manage update_password --username=admin --password=password
|
||||
EOSH
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
|
||||
run: |
|
||||
export COMMIT_INFO_BRANCH=$GITHUB_HEAD_REF
|
||||
export COMMIT_INFO_AUTHOR=$GITHUB_ACTOR
|
||||
export COMMIT_INFO_SHA=$GITHUB_SHA
|
||||
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
|
||||
printenv > .env
|
||||
echo "Executing tests:"
|
||||
docker run \
|
||||
--network '_sources_default' \
|
||||
--ipc=host \
|
||||
--env-file=.env \
|
||||
-e CYPRESS_baseUrl="https://$AWX_IP:8043" \
|
||||
-e CYPRESS_AWX_E2E_USERNAME=admin \
|
||||
-e CYPRESS_AWX_E2E_PASSWORD='password' \
|
||||
-e COMMAND="npm run cypress-concurrently-gha" \
|
||||
-v /dev/shm:/dev/shm \
|
||||
-v $PWD:/e2e \
|
||||
-w /e2e \
|
||||
awx-pf-tests run --project .
|
||||
|
||||
- name: Save AWX logs
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: AWX-logs-${{ matrix.job }}
|
||||
path: make-docker-compose-output.log
|
||||
13
.github/workflows/feature_branch_deletion.yml
vendored
13
.github/workflows/feature_branch_deletion.yml
vendored
@@ -2,13 +2,12 @@
|
||||
name: Feature branch deletion cleanup
|
||||
env:
|
||||
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
|
||||
on:
|
||||
delete:
|
||||
branches:
|
||||
- feature_**
|
||||
on: delete
|
||||
jobs:
|
||||
push:
|
||||
branch_delete:
|
||||
if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
@@ -21,6 +20,4 @@ jobs:
|
||||
run: |
|
||||
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
|
||||
ansible localhost -c local -m aws_s3 \
|
||||
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"
|
||||
|
||||
|
||||
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read"
|
||||
|
||||
15
.github/workflows/label_issue.yml
vendored
15
.github/workflows/label_issue.yml
vendored
@@ -6,18 +6,19 @@ on:
|
||||
- opened
|
||||
- reopened
|
||||
|
||||
permissions:
|
||||
contents: read # to fetch code
|
||||
issues: write # to label issues
|
||||
permissions:
|
||||
contents: write # to fetch code
|
||||
issues: write # to label issues
|
||||
|
||||
jobs:
|
||||
triage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label Issue
|
||||
|
||||
steps:
|
||||
- name: Label Issue
|
||||
uses: github/issue-labeler@v2.4.1
|
||||
uses: github/issue-labeler@v3.1
|
||||
with:
|
||||
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
not-before: 2021-12-07T07:00:00Z
|
||||
@@ -26,9 +27,13 @@ jobs:
|
||||
|
||||
community:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label Issue - Community
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install python requests
|
||||
run: pip install requests
|
||||
|
||||
9
.github/workflows/label_pr.yml
vendored
9
.github/workflows/label_pr.yml
vendored
@@ -8,12 +8,13 @@ on:
|
||||
- synchronize
|
||||
|
||||
permissions:
|
||||
contents: read # to determine modified files (actions/labeler)
|
||||
contents: write # to determine modified files (actions/labeler)
|
||||
pull-requests: write # to add labels to PRs (actions/labeler)
|
||||
|
||||
jobs:
|
||||
triage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label PR
|
||||
|
||||
steps:
|
||||
@@ -25,9 +26,13 @@ jobs:
|
||||
|
||||
community:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label PR - Community
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install python requests
|
||||
run: pip install requests
|
||||
|
||||
2
.github/workflows/pr_body_check.yml
vendored
2
.github/workflows/pr_body_check.yml
vendored
@@ -7,8 +7,10 @@ on:
|
||||
types: [opened, edited, reopened, synchronize]
|
||||
jobs:
|
||||
pr-check:
|
||||
if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
|
||||
name: Scan PR description for semantic versioning keywords
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
|
||||
68
.github/workflows/promote.yml
vendored
68
.github/workflows/promote.yml
vendored
@@ -7,23 +7,40 @@ env:
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag_name:
|
||||
description: 'Name for the tag of the release.'
|
||||
required: true
|
||||
permissions:
|
||||
contents: read # to fetch code (actions/checkout)
|
||||
|
||||
jobs:
|
||||
promote:
|
||||
if: endsWith(github.repository, '/awx')
|
||||
if: endsWith(github.repository, '/awx')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- name: Set GitHub Env vars for workflow_dispatch event
|
||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
||||
run: |
|
||||
echo "TAG_NAME=${{ github.event.inputs.tag_name }}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set GitHub Env vars if release event
|
||||
if: ${{ github.event_name == 'release' }}
|
||||
run: |
|
||||
echo "TAG_NAME=${{ github.event.release.tag_name }}" >> $GITHUB_ENV
|
||||
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -40,14 +57,23 @@ jobs:
|
||||
if: ${{ github.repository_owner != 'ansible' }}
|
||||
|
||||
- name: Build collection and publish to galaxy
|
||||
env:
|
||||
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
|
||||
COLLECTION_VERSION: ${{ env.TAG_NAME }}
|
||||
COLLECTION_TEMPLATE_VERSION: true
|
||||
run: |
|
||||
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
|
||||
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||
echo "Galaxy release already done"; \
|
||||
else \
|
||||
sudo apt-get install jq
|
||||
make build_collection
|
||||
count=$(curl -s https://galaxy.ansible.com/api/v3/plugin/ansible/search/collection-versions/\?namespace\=${COLLECTION_NAMESPACE}\&name\=awx\&version\=${COLLECTION_VERSION} | jq .meta.count)
|
||||
if [[ "$count" == "1" ]]; then
|
||||
echo "Galaxy release already done";
|
||||
elif [[ "$count" == "0" ]]; then
|
||||
ansible-galaxy collection publish \
|
||||
--token=${{ secrets.GALAXY_TOKEN }} \
|
||||
awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz; \
|
||||
awx_collection_build/${COLLECTION_NAMESPACE}-awx-${COLLECTION_VERSION}.tar.gz;
|
||||
else
|
||||
echo "Unexpected count from galaxy search: $count";
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
- name: Set official pypi info
|
||||
@@ -59,9 +85,11 @@ jobs:
|
||||
if: ${{ github.repository_owner != 'ansible' }}
|
||||
|
||||
- name: Build awxkit and upload to pypi
|
||||
env:
|
||||
SETUPTOOLS_SCM_PRETEND_VERSION: ${{ env.TAG_NAME }}
|
||||
run: |
|
||||
git reset --hard
|
||||
cd awxkit && python3 setup.py bdist_wheel
|
||||
cd awxkit && python3 setup.py sdist bdist_wheel
|
||||
twine upload \
|
||||
-r ${{ env.pypi_repo }} \
|
||||
-u ${{ secrets.PYPI_USERNAME }} \
|
||||
@@ -78,11 +106,15 @@ jobs:
|
||||
|
||||
- name: Re-tag and promote awx image
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }}
|
||||
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
|
||||
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
|
||||
docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
|
||||
docker push quay.io/${{ github.repository }}:latest
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
|
||||
docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
|
||||
docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
|
||||
docker buildx imagetools create \
|
||||
ghcr.io/${{ github.repository }}:${{ env.TAG_NAME }} \
|
||||
--tag quay.io/${{ github.repository }}:${{ env.TAG_NAME }}
|
||||
docker buildx imagetools create \
|
||||
ghcr.io/${{ github.repository }}:${{ env.TAG_NAME }} \
|
||||
--tag quay.io/${{ github.repository }}:latest
|
||||
|
||||
- name: Re-tag and promote awx-ee image
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
ghcr.io/${{ github.repository_owner }}/awx-ee:${{ env.TAG_NAME }} \
|
||||
--tag quay.io/${{ github.repository_owner }}/awx-ee:${{ env.TAG_NAME }}
|
||||
|
||||
107
.github/workflows/stage.yml
vendored
107
.github/workflows/stage.yml
vendored
@@ -23,6 +23,7 @@ jobs:
|
||||
stage:
|
||||
if: endsWith(github.repository, '/awx')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 90
|
||||
permissions:
|
||||
packages: write
|
||||
contents: write
|
||||
@@ -44,68 +45,100 @@ jobs:
|
||||
exit 0
|
||||
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
path: awx
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
- name: Checkout awx-operator
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
show-progress: false
|
||||
repository: ${{ github.repository_owner }}/awx-operator
|
||||
path: awx-operator
|
||||
|
||||
- name: Checkout awx-logos
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
repository: ansible/awx-logos
|
||||
path: awx-logos
|
||||
|
||||
- name: Checkout awx-operator
|
||||
uses: actions/checkout@v2
|
||||
- name: Get python version from Makefile
|
||||
working-directory: awx
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/awx-operator
|
||||
path: awx-operator
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Install playbook dependencies
|
||||
run: |
|
||||
python3 -m pip install docker
|
||||
|
||||
- name: Build and stage AWX
|
||||
- name: Log into registry ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Copy logos for inclusion in sdist for official build
|
||||
working-directory: awx
|
||||
run: |
|
||||
ansible-playbook -v tools/ansible/build.yml \
|
||||
-e registry=ghcr.io \
|
||||
-e registry_username=${{ github.actor }} \
|
||||
-e registry_password=${{ secrets.GITHUB_TOKEN }} \
|
||||
-e awx_image=${{ github.repository }} \
|
||||
-e awx_version=${{ github.event.inputs.version }} \
|
||||
-e ansible_python_interpreter=$(which python3) \
|
||||
-e push=yes \
|
||||
-e awx_official=yes
|
||||
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
|
||||
|
||||
- name: Log in to GHCR
|
||||
run: |
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
- name: Setup node and npm for new UI build
|
||||
uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '18'
|
||||
|
||||
- name: Log in to Quay
|
||||
- name: Prebuild new UI for awx image (to speed up build process)
|
||||
working-directory: awx
|
||||
run: make ui-next
|
||||
|
||||
- name: Set build env variables
|
||||
run: |
|
||||
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
|
||||
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
|
||||
echo "COMPOSE_TAG=${{ github.event.inputs.version }}" >> $GITHUB_ENV
|
||||
echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
|
||||
echo "AWX_TEST_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
|
||||
echo "AWX_TEST_IMAGE=ghcr.io/${OWNER,,}/awx" >> $GITHUB_ENV
|
||||
echo "AWX_EE_TEST_IMAGE=ghcr.io/${OWNER,,}/awx-ee:${{ github.event.inputs.version }}" >> $GITHUB_ENV
|
||||
echo "AWX_OPERATOR_TEST_IMAGE=ghcr.io/${OWNER,,}/awx-operator:${{ github.event.inputs.operator_version }}" >> $GITHUB_ENV
|
||||
env:
|
||||
OWNER: ${{ github.repository_owner }}
|
||||
|
||||
- name: Build and stage AWX
|
||||
working-directory: awx
|
||||
env:
|
||||
DOCKER_BUILDX_PUSH: true
|
||||
HEADLESS: false
|
||||
PLATFORMS: linux/amd64,linux/arm64
|
||||
run: |
|
||||
make awx-kube-buildx
|
||||
|
||||
- name: tag awx-ee:latest with version input
|
||||
run: |
|
||||
docker pull quay.io/ansible/awx-ee:latest
|
||||
docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
|
||||
docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
|
||||
docker buildx imagetools create \
|
||||
quay.io/ansible/awx-ee:latest \
|
||||
--tag ${AWX_EE_TEST_IMAGE}
|
||||
|
||||
- name: Build and stage awx-operator
|
||||
- name: Stage awx-operator image
|
||||
working-directory: awx-operator
|
||||
run: |
|
||||
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
|
||||
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
|
||||
IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
|
||||
VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
|
||||
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \
|
||||
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
|
||||
IMG=${AWX_OPERATOR_TEST_IMAGE} \
|
||||
make docker-buildx
|
||||
|
||||
- name: Pulling images for test deployment with awx-operator
|
||||
# awx-operator molecule tests expect to `kind load` the image, but buildx exports the image to the registry rather than to the local daemon
|
||||
run: |
|
||||
docker pull -q ${AWX_OPERATOR_TEST_IMAGE}
|
||||
docker pull -q ${AWX_EE_TEST_IMAGE}
|
||||
docker pull -q ${AWX_TEST_IMAGE}:${AWX_TEST_VERSION}
|
||||
|
||||
- name: Run test deployment with awx-operator
|
||||
working-directory: awx-operator
|
||||
@@ -115,10 +148,6 @@ jobs:
|
||||
sudo rm -f $(which kustomize)
|
||||
make kustomize
|
||||
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule test -s kind
|
||||
env:
|
||||
AWX_TEST_IMAGE: ${{ github.repository }}
|
||||
AWX_TEST_VERSION: ${{ github.event.inputs.version }}
|
||||
AWX_EE_TEST_IMAGE: ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
|
||||
|
||||
- name: Create draft release for AWX
|
||||
working-directory: awx
|
||||
|
||||
5
.github/workflows/update_dependabot_prs.yml
vendored
@@ -9,10 +9,13 @@ jobs:
|
||||
name: Update Dependabot Prs
|
||||
if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
|
||||
steps:
|
||||
- name: Checkout branch
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: Update PR Body
|
||||
env:
|
||||
|
||||
9
.github/workflows/upload_schema.yml
vendored
@@ -13,17 +13,20 @@ on:
|
||||
jobs:
|
||||
push:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
show-progress: false
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -33,7 +36,7 @@ jobs:
|
||||
|
||||
- name: Pre-pull image to warm build cache
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
|
||||
docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
|
||||
15
.gitignore
vendored
@@ -46,6 +46,11 @@ tools/docker-compose/overrides/
|
||||
tools/docker-compose-minikube/_sources
|
||||
tools/docker-compose/keycloak.awx.realm.json
|
||||
|
||||
!tools/docker-compose/editable_dependencies
|
||||
tools/docker-compose/editable_dependencies/*
|
||||
!tools/docker-compose/editable_dependencies/README.md
|
||||
!tools/docker-compose/editable_dependencies/install.sh
|
||||
|
||||
# Tower setup playbook testing
|
||||
setup/test/roles/postgresql
|
||||
**/provision_docker
|
||||
@@ -157,10 +162,18 @@ use_dev_supervisor.txt
|
||||
*.unison.tmp
|
||||
*.#
|
||||
/awx/ui/.ui-built
|
||||
/Dockerfile
|
||||
/_build/
|
||||
/_build_kube_dev/
|
||||
/Dockerfile
|
||||
/Dockerfile.dev
|
||||
/Dockerfile.kube-dev
|
||||
|
||||
awx/ui_next/src
|
||||
awx/ui_next/build
|
||||
|
||||
# Docs build stuff
|
||||
docs/docsite/build/
|
||||
_readthedocs/
|
||||
|
||||
# Pyenv
|
||||
.python-version
|
||||
|
||||
5
.gitleaks.toml
Normal file
@@ -0,0 +1,5 @@
[allowlist]
description = "Documentation contains example secrets and passwords"
paths = [
    "docs/docsite/rst/administration/oauth2_token_auth.rst",
]
5
.pip-tools.toml
Normal file
@@ -0,0 +1,5 @@
[tool.pip-tools]
resolver = "backtracking"
allow-unsafe = true
strip-extras = true
quiet = true
16
.readthedocs.yaml
Normal file
@@ -0,0 +1,16 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: ubuntu-22.04
  tools:
    python: >-
      3.11
  commands:
    - pip install --user tox
    - python3 -m tox -e docs --notest -v
    - python3 -m tox -e docs --skip-pkg-install -q
    - mkdir -p _readthedocs/html/
    - mv docs/docsite/build/html/* _readthedocs/html/
113
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "run_ws_heartbeat",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_ws_heartbeat"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-ws-heartbeat",
|
||||
"postDebugTask": "start awx-ws-heartbeat"
|
||||
},
|
||||
{
|
||||
"name": "run_cache_clear",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_cache_clear"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-cache-clear",
|
||||
"postDebugTask": "start awx-cache-clear"
|
||||
},
|
||||
{
|
||||
"name": "run_callback_receiver",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_callback_receiver"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-receiver",
|
||||
"postDebugTask": "start awx-receiver"
|
||||
},
|
||||
{
|
||||
"name": "run_dispatcher",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_dispatcher"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-dispatcher",
|
||||
"postDebugTask": "start awx-dispatcher"
|
||||
},
|
||||
{
|
||||
"name": "run_rsyslog_configurer",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_rsyslog_configurer"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-rsyslog-configurer",
|
||||
"postDebugTask": "start awx-rsyslog-configurer"
|
||||
},
|
||||
{
|
||||
"name": "run_cache_clear",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_cache_clear"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-cache-clear",
|
||||
"postDebugTask": "start awx-cache-clear"
|
||||
},
|
||||
{
|
||||
"name": "run_wsrelay",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["run_wsrelay"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-wsrelay",
|
||||
"postDebugTask": "start awx-wsrelay"
|
||||
},
|
||||
{
|
||||
"name": "daphne",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "/var/lib/awx/venv/awx/bin/daphne",
|
||||
"args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-daphne",
|
||||
"postDebugTask": "start awx-daphne"
|
||||
},
|
||||
{
|
||||
"name": "runserver(uwsgi alternative)",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["runserver", "127.0.0.1:8052"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-uwsgi",
|
||||
"postDebugTask": "start awx-uwsgi"
|
||||
},
|
||||
{
|
||||
"name": "runserver_plus(uwsgi alternative)",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["runserver_plus", "127.0.0.1:8052"],
|
||||
"django": true,
|
||||
"preLaunchTask": "stop awx-uwsgi and install Werkzeug",
|
||||
"postDebugTask": "start awx-uwsgi"
|
||||
},
|
||||
{
|
||||
"name": "shell_plus",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "manage.py",
|
||||
"args": ["shell_plus"],
|
||||
"django": true,
|
||||
},
|
||||
]
|
||||
}
|
||||
100
.vscode/tasks.json
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "start awx-cache-clear",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-cache-clear"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-cache-clear",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-cache-clear"
|
||||
},
|
||||
{
|
||||
"label": "start awx-daphne",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-daphne"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-daphne",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-daphne"
|
||||
},
|
||||
{
|
||||
"label": "start awx-dispatcher",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-dispatcher"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-dispatcher",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-dispatcher"
|
||||
},
|
||||
{
|
||||
"label": "start awx-receiver",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-receiver"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-receiver",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-receiver"
|
||||
},
|
||||
{
|
||||
"label": "start awx-rsyslog-configurer",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-rsyslog-configurer"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-rsyslog-configurer",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-rsyslog-configurer"
|
||||
},
|
||||
{
|
||||
"label": "start awx-rsyslogd",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-rsyslogd"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-rsyslogd",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-rsyslogd"
|
||||
},
|
||||
{
|
||||
"label": "start awx-uwsgi",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-uwsgi"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-uwsgi",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-uwsgi"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-uwsgi and install Werkzeug",
|
||||
"type": "shell",
|
||||
"command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi"
|
||||
},
|
||||
{
|
||||
"label": "start awx-ws-heartbeat",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-ws-heartbeat"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-ws-heartbeat",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-ws-heartbeat"
|
||||
},
|
||||
{
|
||||
"label": "start awx-wsrelay",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl start tower-processes:awx-wsrelay"
|
||||
},
|
||||
{
|
||||
"label": "stop awx-wsrelay",
|
||||
"type": "shell",
|
||||
"command": "supervisorctl stop tower-processes:awx-wsrelay"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -10,6 +10,9 @@ ignore: |
|
||||
tools/docker-compose/_sources
|
||||
# django template files
|
||||
awx/api/templates/instance_install_bundle/**
|
||||
.readthedocs.yaml
|
||||
tools/loki
|
||||
tools/otel
|
||||
|
||||
extends: default
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
|
||||
|
||||
#### Frontend Development
|
||||
|
||||
See [the ui development documentation](awx/ui/CONTRIBUTING.md).
|
||||
See [the ansible-ui development documentation](https://github.com/ansible/ansible-ui/blob/main/CONTRIBUTING.md).
|
||||
|
||||
#### Fork and clone the AWX repo
|
||||
|
||||
@@ -121,7 +121,7 @@ If it has someone assigned to it then that person is the person responsible for
|
||||
|
||||
**NOTES**
|
||||
|
||||
> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whomever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
|
||||
> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whomever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.
|
||||
|
||||
|
||||
> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
@@ -132,7 +132,7 @@ If it has someone assigned to it then that person is the person responsible for
|
||||
|
||||
At this time we do not accept PRs for adding additional language translations as we have an automated process for generating our translations. This is because translations require constant care as new strings are added and changed in the code base. Because of this the .po files are overwritten during every translation release cycle. We also can't support a lot of translations on AWX as it's an open source project and each language adds time and cost to maintain. If you would like to see AWX translated into a new language please create an issue and ask others you know to upvote the issue. Our translation team will review the needs of the community and see what they can do about supporting additional languages.
|
||||
|
||||
If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.
|
||||
If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.
|
||||
|
||||
|
||||
## Submitting Pull Requests
|
||||
@@ -161,7 +161,7 @@ Sometimes it might take us a while to fully review your PR. We try to keep the `
|
||||
When your PR is initially submitted the checks will not be run until a maintainer allows them to be. Once a maintainer has done a quick review of your work the PR will have the linter and unit tests run against them via GitHub Actions, and the status reported in the PR.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
|
||||
We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issues, we ask that you please view our [Issues guide](./ISSUES.md).
|
||||
|
||||
## Getting Help
|
||||
|
||||
@@ -4,6 +4,6 @@
|
||||
|
||||
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.
|
||||
|
||||
Users who wish to upgrade modern AWX installations should follow the instructions at:
|
||||
As of version 18.0, `awx-operator` is the preferred install/upgrade method. Users who wish to upgrade modern AWX installations should follow the instructions at:
|
||||
|
||||
https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
|
||||
https://github.com/ansible/awx-operator/blob/devel/docs/upgrade/upgrading.md
|
||||
|
||||
@@ -31,7 +31,7 @@ If your issue isn't considered high priority, then please be patient as it may t
|
||||
|
||||
`state:needs_info` The issue needs more information. This could be more debug output or more specifics about the system, such as version information; any detail that is currently preventing this issue from moving forward. This should be considered a blocked state.
|
||||
|
||||
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familar with an area of the code base the issue is for.
|
||||
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familiar with an area of the code base the issue is for.
|
||||
|
||||
`state:needs_revision` More commonly used on pull requests, this state represents that there are changes that are being waited on.
|
||||
|
||||
@@ -80,7 +80,7 @@ If any of those items are missing your pull request will still get the `needs_tr
|
||||
Currently you can expect awxbot to add common labels such as `state:needs_triage`, `type:bug`, `component:docs`, etc...
|
||||
These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
|
||||
|
||||
The `state:needs_triage` label will will remain on your pull request until a person has looked at it.
|
||||
The `state:needs_triage` label will remain on your pull request until a person has looked at it.
|
||||
|
||||
You can also expect the bot to CC maintainers of specific areas of the code, this will notify them that there is a pull request by placing a comment on the pull request.
|
||||
The comment will look something like `CC @matburt @wwitzel3 ...`.
|
||||
|
||||
@@ -22,7 +22,7 @@ recursive-exclude awx/settings local_settings.py*
|
||||
include tools/scripts/request_tower_configuration.sh
|
||||
include tools/scripts/request_tower_configuration.ps1
|
||||
include tools/scripts/automation-controller-service
|
||||
include tools/scripts/failure-event-handler
|
||||
include tools/scripts/rsyslog-4xx-recovery
|
||||
include tools/scripts/awx-python
|
||||
include awx/playbooks/library/mkfifo.py
|
||||
include tools/sosreport/*
|
||||
|
||||
242
Makefile
@@ -1,14 +1,16 @@
|
||||
-include awx/ui_next/Makefile
|
||||
|
||||
PYTHON ?= python3.9
|
||||
DOCKER_COMPOSE ?= docker-compose
|
||||
PYTHON := $(notdir $(shell for i in python3.11 python3; do command -v $$i; done|sed 1q))
|
||||
SHELL := bash
|
||||
DOCKER_COMPOSE ?= docker compose
|
||||
OFFICIAL ?= no
|
||||
NODE ?= node
|
||||
NPM_BIN ?= npm
|
||||
KIND_BIN ?= $(shell which kind)
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
|
||||
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null)
|
||||
|
||||
# ansible-test requires a semver-compatible version, so we allow overrides to hack it
|
||||
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
|
||||
@@ -27,6 +29,8 @@ COLLECTION_TEMPLATE_VERSION ?= false
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
MAIN_NODE_TYPE ?= hybrid
|
||||
# If set to true docker-compose will also start a pgbouncer instance and use it
|
||||
PGBOUNCER ?= false
|
||||
# If set to true docker-compose will also start a keycloak instance
|
||||
KEYCLOAK ?= false
|
||||
# If set to true docker-compose will also start an ldap instance
|
||||
@@ -37,22 +41,42 @@ SPLUNK ?= false
|
||||
PROMETHEUS ?= false
|
||||
# If set to true docker-compose will also start a grafana instance
|
||||
GRAFANA ?= false
|
||||
# If set to true docker-compose will also start a hashicorp vault instance
|
||||
VAULT ?= false
|
||||
# If set to true docker-compose will also start a hashicorp vault instance with TLS enabled
|
||||
VAULT_TLS ?= false
|
||||
# If set to true docker-compose will also start a tacacs+ instance
|
||||
TACACS ?= false
|
||||
# If set to true docker-compose will also start an OpenTelemetry Collector instance
|
||||
OTEL ?= false
|
||||
# If set to true docker-compose will also start a Loki instance
|
||||
LOKI ?= false
|
||||
# If set to true docker-compose will install editable dependencies
|
||||
EDITABLE_DEPENDENCIES ?= false
|
||||
# If set to true, use tls for postgres connection
|
||||
PG_TLS ?= false
|
||||
|
||||
VENV_BASE ?= /var/lib/awx/venv
|
||||
|
||||
DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
|
||||
DEV_DOCKER_OWNER ?= ansible
|
||||
# Docker will only accept lowercase, so github names like Paul need to be paul
|
||||
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
|
||||
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
|
||||
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
IMAGE_KUBE_DEV=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
|
||||
IMAGE_KUBE=$(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG)
|
||||
|
||||
# Common command to use for running ansible-playbook
|
||||
ANSIBLE_PLAYBOOK ?= ansible-playbook -e ansible_python_interpreter=$(PYTHON)
|
||||
|
||||
RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
|
||||
|
||||
# Python packages to install only from source (not from binary wheels)
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
|
||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||
# to install the actual requirements
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0 cython==0.29.37
|
||||
|
||||
NAME ?= awx
|
||||
|
||||
@@ -64,13 +88,28 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz
|
||||
|
||||
I18N_FLAG_FILE = .i18n_built
|
||||
|
||||
## PLATFORMS defines the target platforms the image is built for, providing support for multiple architectures
|
||||
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
|
||||
|
||||
# Set up cache variables for image builds, allowing control over whether the cache is used, e.g.:
|
||||
# DOCKER_CACHE=--no-cache make docker-compose-build
|
||||
ifeq ($(DOCKER_CACHE),)
|
||||
DOCKER_DEVEL_CACHE_FLAG=--cache-from=$(DEVEL_IMAGE_NAME)
|
||||
DOCKER_KUBE_DEV_CACHE_FLAG=--cache-from=$(IMAGE_KUBE_DEV)
|
||||
DOCKER_KUBE_CACHE_FLAG=--cache-from=$(IMAGE_KUBE)
|
||||
else
|
||||
DOCKER_DEVEL_CACHE_FLAG=$(DOCKER_CACHE)
|
||||
DOCKER_KUBE_DEV_CACHE_FLAG=$(DOCKER_CACHE)
|
||||
DOCKER_KUBE_CACHE_FLAG=$(DOCKER_CACHE)
|
||||
endif
|
||||
|
||||
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
|
||||
develop refresh adduser migrate dbchange \
|
||||
receiver test test_unit test_coverage coverage_html \
|
||||
sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION PYTHON_VERSION docker-compose-sources \
|
||||
.git/hooks/pre-commit github_ci_setup github_ci_runner
|
||||
.git/hooks/pre-commit
|
||||
|
||||
clean-tmp:
|
||||
rm -rf tmp/
|
||||
@@ -202,8 +241,6 @@ collectstatic:
|
||||
fi; \
|
||||
$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
|
||||
|
||||
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
|
||||
|
||||
uwsgi: collectstatic
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
@@ -211,7 +248,7 @@ uwsgi: collectstatic
|
||||
uwsgi /etc/tower/uwsgi.ini
|
||||
|
||||
awx-autoreload:
|
||||
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
|
||||
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx
|
||||
|
||||
daphne:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -264,11 +301,11 @@ run-wsrelay:
|
||||
$(PYTHON) manage.py run_wsrelay
|
||||
|
||||
## Start the heartbeat process in background in development environment.
|
||||
run-heartbeet:
|
||||
run-ws-heartbeat:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_heartbeet
|
||||
$(PYTHON) manage.py run_ws_heartbeat
|
||||
|
||||
reports:
|
||||
mkdir -p $@
|
||||
@@ -291,18 +328,18 @@ swagger: reports
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
|
||||
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
|
||||
|
||||
check: black
|
||||
|
||||
api-lint:
|
||||
BLACK_ARGS="--check" make black
|
||||
BLACK_ARGS="--check" $(MAKE) black
|
||||
flake8 awx
|
||||
yamllint -s .
|
||||
|
||||
## Run egg_info_dev to generate awx.egg-info for development.
|
||||
awx-link:
|
||||
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
|
||||
cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
PYTEST_ARGS ?= -n auto
|
||||
@@ -315,21 +352,16 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||
|
||||
## Login to Github container image registry, pull image, then build image.
|
||||
github_ci_setup:
|
||||
# GITHUB_ACTOR is automatic github actions env var
|
||||
# CI_GITHUB_TOKEN is defined in .github files
|
||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
||||
make docker-compose-build
|
||||
test_migrations:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS)
|
||||
|
||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||
docker-runner:
|
||||
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
||||
|
||||
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
|
||||
github_ci_runner: github_ci_setup docker-runner
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -353,7 +385,7 @@ symlink_collection:
|
||||
ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)
|
||||
|
||||
awx_collection_build: $(shell find awx_collection -type f)
|
||||
ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml \
|
||||
$(ANSIBLE_PLAYBOOK) -i localhost, awx_collection/tools/template_galaxy.yml \
|
||||
-e collection_package=$(COLLECTION_PACKAGE) \
|
||||
-e collection_namespace=$(COLLECTION_NAMESPACE) \
|
||||
-e collection_version=$(COLLECTION_VERSION) \
|
||||
@@ -371,11 +403,11 @@ test_collection_sanity:
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
|
||||
ansible --version
|
||||
COLLECTION_VERSION=1.0.0 make install_collection
|
||||
COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||
|
||||
test_collection_integration: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)
|
||||
|
||||
test_unit:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -470,13 +502,7 @@ ui-test-general:
|
||||
$(NPM_BIN) run --prefix awx/ui pretest
|
||||
$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand
|
||||
|
||||
# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
|
||||
HEADLESS ?= no
|
||||
ifeq ($(HEADLESS), yes)
|
||||
dist/$(SDIST_TAR_FILE):
|
||||
else
|
||||
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
|
||||
endif
|
||||
$(PYTHON) -m build -s
|
||||
ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
|
||||
|
||||
@@ -507,27 +533,45 @@ endif
|
||||
|
||||
docker-compose-sources: .git/hooks/pre-commit
|
||||
@if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
|
||||
ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
|
||||
$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
|
||||
fi;
|
||||
|
||||
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
|
||||
$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
|
||||
-e awx_image=$(DEV_DOCKER_TAG_BASE)/awx_devel \
|
||||
-e awx_image_tag=$(COMPOSE_TAG) \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE) \
|
||||
-e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \
|
||||
-e execution_node_count=$(EXECUTION_NODE_COUNT) \
|
||||
-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
|
||||
-e enable_pgbouncer=$(PGBOUNCER) \
|
||||
-e enable_keycloak=$(KEYCLOAK) \
|
||||
-e enable_ldap=$(LDAP) \
|
||||
-e enable_splunk=$(SPLUNK) \
|
||||
-e enable_prometheus=$(PROMETHEUS) \
|
||||
-e enable_grafana=$(GRAFANA) \
|
||||
-e enable_vault=$(VAULT) \
|
||||
-e vault_tls=$(VAULT_TLS) \
|
||||
-e enable_tacacs=$(TACACS) \
|
||||
$(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
-e enable_otel=$(OTEL) \
|
||||
-e enable_loki=$(LOKI) \
|
||||
-e install_editable_dependencies=$(EDITABLE_DEPENDENCIES) \
|
||||
-e pg_tls=$(PG_TLS) \
|
||||
$(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
|
||||
docker-compose: awx/projects docker-compose-sources
|
||||
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
|
||||
$(ANSIBLE_PLAYBOOK) -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
|
||||
-e enable_vault=$(VAULT) \
|
||||
-e vault_tls=$(VAULT_TLS) \
|
||||
-e enable_ldap=$(LDAP); \
|
||||
$(MAKE) docker-compose-up
|
||||
|
||||
docker-compose-up:
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
|
||||
|
||||
docker-compose-down:
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) down --remove-orphans
|
||||
|
||||
docker-compose-credential-plugins: awx/projects docker-compose-sources
|
||||
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
|
||||
@@ -556,19 +600,42 @@ docker-compose-container-group-clean:
|
||||
fi
|
||||
rm -rf tools/docker-compose-minikube/_sources/
|
||||
|
||||
## Base development image build
|
||||
docker-compose-build:
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
|
||||
DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
.PHONY: Dockerfile.dev
|
||||
## Generate Dockerfile.dev for awx_devel image
|
||||
Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.dev \
|
||||
-e build_dev=True \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE)
|
||||
|
||||
## Build awx_devel image for docker compose development environment
|
||||
docker-compose-build: Dockerfile.dev
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
-f Dockerfile.dev \
|
||||
-t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
$(DOCKER_DEVEL_CACHE_FLAG) .
|
||||
|
||||
.PHONY: docker-compose-buildx
|
||||
## Build awx_devel image for docker compose development environment for multiple architectures
|
||||
docker-compose-buildx: Dockerfile.dev
|
||||
- docker buildx create --name docker-compose-buildx
|
||||
docker buildx use docker-compose-buildx
|
||||
- docker buildx build \
|
||||
--push \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
$(DOCKER_DEVEL_CACHE_FLAG) \
|
||||
--platform=$(PLATFORMS) \
|
||||
--tag $(DEVEL_IMAGE_NAME) \
|
||||
-f Dockerfile.dev .
|
||||
- docker buildx rm docker-compose-buildx
|
||||
|
||||
docker-clean:
|
||||
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
|
||||
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
|
||||
|
||||
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
|
||||
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
|
||||
docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(shell docker volume ls --filter name=tools_redis_socket_ -q)
|
||||
|
||||
docker-refresh: docker-clean docker-compose
|
||||
|
||||
@@ -580,7 +647,7 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-container-group:
|
||||
MINIKUBE_CONTAINER_GROUP=true make docker-compose
|
||||
MINIKUBE_CONTAINER_GROUP=true $(MAKE) docker-compose
|
||||
|
||||
clean-elk:
|
||||
docker stop tools_kibana_1
|
||||
@@ -590,21 +657,59 @@ clean-elk:
|
||||
docker rm tools_elasticsearch_1
|
||||
docker rm tools_kibana_1
|
||||
|
||||
psql-container:
|
||||
docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
|
||||
|
||||
VERSION:
|
||||
@echo "awx: $(VERSION)"
|
||||
|
||||
PYTHON_VERSION:
|
||||
@echo "$(PYTHON)" | sed 's:python::'
|
||||
@echo "$(subst python,,$(PYTHON))"
|
||||
|
||||
.PHONY: version-for-buildyml
|
||||
version-for-buildyml:
|
||||
@echo $(firstword $(subst +, ,$(VERSION)))
|
||||
# version-for-buildyml prints a special version string for build.yml,
|
||||
# chopping off the sha after the '+' sign.
|
||||
# tools/ansible/build.yml was doing this: make print-VERSION | cut -d + -f -1
|
||||
# This does the same thing in native make without
|
||||
# the pipe or the extra processes, and now the playbook does `make version-for-buildyml`
|
||||
# Example:
|
||||
# 22.1.1.dev38+g523c0d9781 becomes 22.1.1.dev38
|
||||
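(Illustrative only, not part of the diff: a minimal Python sketch of the same transformation the make target performs, using the example version string from the comment above.)

# Split on the first '+' and keep the public part, as `make version-for-buildyml` does.
version = "22.1.1.dev38+g523c0d9781"
public_version = version.split("+", 1)[0]
print(public_version)  # -> 22.1.1.dev38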
|
||||
.PHONY: Dockerfile
|
||||
## Generate Dockerfile for awx image
|
||||
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
|
||||
$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE) \
|
||||
-e headless=$(HEADLESS)
|
||||
|
||||
## Build awx image for deployment on Kubernetes environment.
|
||||
awx-kube-build: Dockerfile
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
$(DOCKER_KUBE_CACHE_FLAG) \
|
||||
-t $(IMAGE_KUBE) .
|
||||
|
||||
## Build multi-arch awx image for deployment on Kubernetes environment.
|
||||
awx-kube-buildx: Dockerfile
|
||||
- docker buildx create --name awx-kube-buildx
|
||||
docker buildx use awx-kube-buildx
|
||||
- docker buildx build \
|
||||
--push \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
--platform=$(PLATFORMS) \
|
||||
$(DOCKER_KUBE_CACHE_FLAG) \
|
||||
--tag $(IMAGE_KUBE) \
|
||||
-f Dockerfile .
|
||||
- docker buildx rm awx-kube-buildx
|
||||
|
||||
|
||||
.PHONY: Dockerfile.kube-dev
|
||||
## Generate Dockerfile.kube-dev for awx_kube_devel image
|
||||
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
$(ANSIBLE_PLAYBOOK) tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.kube-dev \
|
||||
-e kube_dev=True \
|
||||
-e template_dest=_build_kube_dev \
|
||||
@@ -614,16 +719,24 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
awx-kube-dev-build: Dockerfile.kube-dev
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
$(DOCKER_KUBE_DEV_CACHE_FLAG) \
|
||||
-t $(IMAGE_KUBE_DEV) .
|
||||
|
||||
## Build awx image for deployment on Kubernetes environment.
|
||||
awx-kube-build: Dockerfile
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
|
||||
## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
|
||||
awx-kube-dev-buildx: Dockerfile.kube-dev
|
||||
- docker buildx create --name awx-kube-dev-buildx
|
||||
docker buildx use awx-kube-dev-buildx
|
||||
- docker buildx build \
|
||||
--push \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
$(DOCKER_KUBE_DEV_CACHE_FLAG) \
|
||||
--platform=$(PLATFORMS) \
|
||||
--tag $(IMAGE_KUBE_DEV) \
|
||||
-f Dockerfile.kube-dev .
|
||||
- docker buildx rm awx-kube-dev-buildx
|
||||
|
||||
kind-dev-load: awx-kube-dev-build
|
||||
$(KIND_BIN) load docker-image $(IMAGE_KUBE_DEV)
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
@@ -631,10 +744,12 @@ awx-kube-build: Dockerfile
|
||||
## generate UI .pot file, an empty template of strings yet to be translated
|
||||
pot: $(UI_BUILD_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean
|
||||
|
||||
## generate UI .po files for each locale (will update translated strings for `en`)
|
||||
po: $(UI_BUILD_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean
|
||||
|
||||
## generate API django .pot .po
|
||||
messages:
|
||||
@@ -643,6 +758,7 @@ messages:
|
||||
fi; \
|
||||
$(PYTHON) manage.py makemessages -l en_us --keep-pot
|
||||
|
||||
.PHONY: print-%
|
||||
print-%:
|
||||
@echo $($*)
|
||||
|
||||
@@ -654,12 +770,12 @@ HELP_FILTER=.PHONY
|
||||
## Display help targets
|
||||
help:
|
||||
@printf "Available targets:\n"
|
||||
@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
|
||||
@$(MAKE) -s help/generate | grep -vE "\w($(HELP_FILTER))"
|
||||
|
||||
## Display help for all targets
|
||||
help/all:
|
||||
@printf "Available targets:\n"
|
||||
@make -s help/generate
|
||||
@$(MAKE) -s help/generate
|
||||
|
||||
## Generate help output from MAKEFILE_LIST
|
||||
help/generate:
|
||||
@@ -683,4 +799,4 @@ help/generate:
|
||||
|
||||
## Display help for ui-next targets
|
||||
help/ui-next:
|
||||
@make -s help MAKEFILE_LIST="awx/ui_next/Makefile"
|
||||
@$(MAKE) -s help MAKEFILE_LIST="awx/ui_next/Makefile"
|
||||
|
||||
10
README.md
@@ -1,5 +1,5 @@
|
||||
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
|
||||
[](https://libera.chat)
|
||||
[](https://chat.ansible.im/#/welcome) [](https://forum.ansible.com)
|
||||
|
||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||
|
||||
@@ -7,7 +7,7 @@ AWX provides a web-based user interface, REST API, and task engine built on top
|
||||
|
||||
To install AWX, please view the [Install guide](./INSTALL.md).
|
||||
|
||||
To learn more about using AWX, and Tower, view the [Tower docs site](http://docs.ansible.com/ansible-tower/index.html).
|
||||
To learn more about using AWX, view the [AWX docs site](https://ansible.readthedocs.io/projects/awx/en/latest/).
|
||||
|
||||
The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
|
||||
|
||||
@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
|
||||
Code of Conduct
|
||||
---------------
|
||||
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
Get Involved
|
||||
------------
|
||||
|
||||
We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
|
||||
|
||||
- Join the `#ansible-awx` channel on irc.libera.chat
|
||||
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
||||
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
|
||||
- Join the [Ansible Community Forum](https://forum.ansible.com)
|
||||
|
||||
@@ -52,39 +52,14 @@ try:
|
||||
except ImportError: # pragma: no cover
|
||||
MODE = 'production'
|
||||
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
import django # noqa: F401
|
||||
|
||||
HAS_DJANGO = True
|
||||
except ImportError:
|
||||
HAS_DJANGO = False
|
||||
pass
|
||||
else:
|
||||
from django.db.backends.base import schema
|
||||
from django.db.models import indexes
|
||||
from django.db.backends.utils import names_digest
|
||||
from django.db import connection
|
||||
|
||||
if HAS_DJANGO is True:
|
||||
# See upgrade blocker note in requirements/README.md
|
||||
try:
|
||||
names_digest('foo', 'bar', 'baz', length=8)
|
||||
except ValueError:
|
||||
|
||||
def names_digest(*args, length):
|
||||
"""
|
||||
Generate a 32-bit digest of a set of arguments that can be used to shorten
|
||||
identifying names. Support for use in FIPS environments.
|
||||
"""
|
||||
h = hashlib.md5(usedforsecurity=False)
|
||||
for arg in args:
|
||||
h.update(arg.encode())
|
||||
return h.hexdigest()[:length]
|
||||
|
||||
schema.names_digest = names_digest
|
||||
indexes.names_digest = names_digest
|
||||
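(Illustrative only: a self-contained Python sketch that mirrors the fallback defined above, showing that it yields a short, deterministic hex digest suitable for shortening identifier names.)

import hashlib

def names_digest(*args, length):
    # Same approach as the fallback above: md5 flagged as non-security use for FIPS hosts.
    h = hashlib.md5(usedforsecurity=False)
    for arg in args:
        h.update(arg.encode())
    return h.hexdigest()[:length]

digest = names_digest('foo', 'bar', 'baz', length=8)
assert len(digest) == 8
assert digest == names_digest('foo', 'bar', 'baz', length=8)  # deterministic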
|
||||
|
||||
def find_commands(management_dir):
|
||||
# Modified version of function from django/core/management/__init__.py.
|
||||
@@ -179,10 +154,12 @@ def manage():
|
||||
from django.conf import settings
|
||||
from django.core.management import execute_from_command_line
|
||||
|
||||
# enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
|
||||
# enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1
|
||||
# In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that.
|
||||
# The return of connection.pg_version is something like 12013
|
||||
if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
|
||||
if (connection.pg_version // 10000) < 12:
|
||||
sys.stderr.write("Postgres version 12 is required\n")
|
||||
sys.stderr.write("At a minimum, postgres version 12 is required\n")
|
||||
sys.exit(1)
|
||||
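(Illustrative only, with hypothetical version numbers: PostgreSQL reports its version packed as major * 10000 + minor, so the integer division above recovers the major version.)

# Hypothetical connection.pg_version values and what the check above derives from them.
for pg_version in (120013, 150004, 110012):
    major = pg_version // 10000
    print(pg_version, "->", major, "ok" if major >= 12 else "refused (exit 1)")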
|
||||
if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover
|
||||
|
||||
@@ -93,6 +93,7 @@ register(
|
||||
default='',
|
||||
label=_('Login redirect override URL'),
|
||||
help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'),
|
||||
warning_text=_('Changing the redirect URL could impact the ability to login if local authentication is also disabled.'),
|
||||
category=_('Authentication'),
|
||||
category_slug='authentication',
|
||||
)
|
||||
|
||||
@@ -1,450 +0,0 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import re
|
||||
import json
|
||||
from functools import reduce
|
||||
|
||||
# Django
|
||||
from django.core.exceptions import FieldError, ValidationError, FieldDoesNotExist
|
||||
from django.db import models
|
||||
from django.db.models import Q, CharField, IntegerField, BooleanField, TextField, JSONField
|
||||
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
|
||||
from django.db.models.functions import Cast
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.contrib.contenttypes.fields import GenericForeignKey
|
||||
from django.utils.encoding import force_str
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import ParseError, PermissionDenied
|
||||
from rest_framework.filters import BaseFilterBackend
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import get_type_for_model, to_python_boolean
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
|
||||
|
||||
class TypeFilterBackend(BaseFilterBackend):
|
||||
"""
|
||||
Filter on type field now returned with all objects.
|
||||
"""
|
||||
|
||||
def filter_queryset(self, request, queryset, view):
|
||||
try:
|
||||
types = None
|
||||
for key, value in request.query_params.items():
|
||||
if key == 'type':
|
||||
if ',' in value:
|
||||
types = value.split(',')
|
||||
else:
|
||||
types = (value,)
|
||||
if types:
|
||||
types_map = {}
|
||||
for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')):
|
||||
ct_model = ct.model_class()
|
||||
if not ct_model:
|
||||
continue
|
||||
ct_type = get_type_for_model(ct_model)
|
||||
types_map[ct_type] = ct.pk
|
||||
model = queryset.model
|
||||
model_type = get_type_for_model(model)
|
||||
if 'polymorphic_ctype' in get_all_field_names(model):
|
||||
types_pks = set([v for k, v in types_map.items() if k in types])
|
||||
queryset = queryset.filter(polymorphic_ctype_id__in=types_pks)
|
||||
elif model_type in types:
|
||||
queryset = queryset
|
||||
else:
|
||||
queryset = queryset.none()
|
||||
return queryset
|
||||
except FieldError as e:
|
||||
# Return a 400 for invalid field names.
|
||||
raise ParseError(*e.args)
|
||||
|
||||
|
||||
def get_fields_from_path(model, path):
|
||||
"""
|
||||
Given a Django ORM lookup path (possibly over multiple models)
|
||||
Returns the fields in the line, and also the revised lookup path
|
||||
ex., given
|
||||
model=Organization
|
||||
path='project__timeout'
|
||||
returns tuple of fields traversed as well and a corrected path,
|
||||
for special cases we do substitutions
|
||||
([<IntegerField for timeout>], 'project__timeout')
|
||||
"""
|
||||
# Store of all the fields used to detect repeats
|
||||
field_list = []
|
||||
new_parts = []
|
||||
for name in path.split('__'):
|
||||
if model is None:
|
||||
raise ParseError(_('No related model for field {}.').format(name))
|
||||
# HACK: Make project and inventory source filtering by old field names work for backwards compatibility.
|
||||
if model._meta.object_name in ('Project', 'InventorySource'):
|
||||
name = {'current_update': 'current_job', 'last_update': 'last_job', 'last_update_failed': 'last_job_failed', 'last_updated': 'last_job_run'}.get(
|
||||
name, name
|
||||
)
|
||||
|
||||
if name == 'type' and 'polymorphic_ctype' in get_all_field_names(model):
|
||||
name = 'polymorphic_ctype'
|
||||
new_parts.append('polymorphic_ctype__model')
|
||||
else:
|
||||
new_parts.append(name)
|
||||
|
||||
if name in getattr(model, 'PASSWORD_FIELDS', ()):
|
||||
raise PermissionDenied(_('Filtering on password fields is not allowed.'))
|
||||
elif name == 'pk':
|
||||
field = model._meta.pk
|
||||
else:
|
||||
name_alt = name.replace("_", "")
|
||||
if name_alt in model._meta.fields_map.keys():
|
||||
field = model._meta.fields_map[name_alt]
|
||||
new_parts.pop()
|
||||
new_parts.append(name_alt)
|
||||
else:
|
||||
field = model._meta.get_field(name)
|
||||
if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False):
|
||||
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
|
||||
elif getattr(field, '__prevent_search__', False):
|
||||
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
|
||||
if field in field_list:
|
||||
# Field traversed twice, could create infinite JOINs, DoSing Tower
|
||||
raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
|
||||
field_list.append(field)
|
||||
model = getattr(field, 'related_model', None)
|
||||
|
||||
return field_list, '__'.join(new_parts)
|
||||
|
||||
|
||||
def get_field_from_path(model, path):
|
||||
"""
|
||||
Given a Django ORM lookup path (possibly over multiple models)
|
||||
Returns the last field in the line, and the revised lookup path
|
||||
ex.
|
||||
(<IntegerField for timeout>, 'project__timeout')
|
||||
"""
|
||||
field_list, new_path = get_fields_from_path(model, path)
|
||||
return (field_list[-1], new_path)
|
||||
|
||||
|
||||
class FieldLookupBackend(BaseFilterBackend):
|
||||
"""
|
||||
Filter using field lookups provided via query string parameters.
|
||||
"""
|
||||
|
||||
RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')
|
||||
|
||||
SUPPORTED_LOOKUPS = (
|
||||
'exact',
|
||||
'iexact',
|
||||
'contains',
|
||||
'icontains',
|
||||
'startswith',
|
||||
'istartswith',
|
||||
'endswith',
|
||||
'iendswith',
|
||||
'regex',
|
||||
'iregex',
|
||||
'gt',
|
||||
'gte',
|
||||
'lt',
|
||||
'lte',
|
||||
'in',
|
||||
'isnull',
|
||||
'search',
|
||||
)
|
||||
|
||||
# A list of fields that we know can be filtered on without the possibility
|
||||
# of introducing duplicates
|
||||
NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)
|
||||
|
||||
def get_fields_from_lookup(self, model, lookup):
|
||||
if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
|
||||
path, suffix = lookup.rsplit('__', 1)
|
||||
else:
|
||||
path = lookup
|
||||
suffix = 'exact'
|
||||
|
||||
if not path:
|
||||
raise ParseError(_('Query string field name not provided.'))
|
||||
|
||||
# FIXME: Could build up a list of models used across relationships, use
|
||||
# those lookups combined with request.user.get_queryset(Model) to make
|
||||
# sure user cannot query using objects he could not view.
|
||||
field_list, new_path = get_fields_from_path(model, path)
|
||||
|
||||
new_lookup = new_path
|
||||
new_lookup = '__'.join([new_path, suffix])
|
||||
return field_list, new_lookup
|
||||
|
||||
def get_field_from_lookup(self, model, lookup):
|
||||
'''Method to match return type of single field, if needed.'''
|
||||
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
|
||||
return (field_list[-1], new_lookup)

    def to_python_related(self, value):
        value = force_str(value)
        if value.lower() in ('none', 'null'):
            return None
        else:
            return int(value)

    def value_to_python_for_field(self, field, value):
        if isinstance(field, models.BooleanField):
            return to_python_boolean(value)
        elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
            try:
                return self.to_python_related(value)
            except ValueError:
                raise ParseError(_('Invalid {field_name} id: {field_id}').format(field_name=getattr(field, 'name', 'related field'), field_id=value))
        else:
            return field.to_python(value)

    def value_to_python(self, model, lookup, value):
        try:
            lookup.encode("ascii")
        except UnicodeEncodeError:
            raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        field = field_list[-1]

        needs_distinct = not all(isinstance(f, self.NO_DUPLICATES_ALLOW_LIST) for f in field_list)

        # Type names are stored without underscores internally, but are presented
        # and serialized over the API containing underscores so we remove `_`
        # for polymorphic_ctype__model lookups.
        if new_lookup.startswith('polymorphic_ctype__model'):
            value = value.replace('_', '')
        elif new_lookup.endswith('__isnull'):
            value = to_python_boolean(value)
        elif new_lookup.endswith('__in'):
            items = []
            if not value:
                raise ValueError('cannot provide empty value for __in')
            for item in value.split(','):
                items.append(self.value_to_python_for_field(field, item))
            value = items
        elif new_lookup.endswith('__regex') or new_lookup.endswith('__iregex'):
            try:
                re.compile(value)
            except re.error as e:
                raise ValueError(e.args[0])
        elif new_lookup.endswith('__iexact'):
            if not isinstance(field, (CharField, TextField)):
                raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
        elif new_lookup.endswith('__search'):
            related_model = getattr(field, 'related_model', None)
            if not related_model:
                raise ValueError('%s is not searchable' % new_lookup[:-8])
            new_lookups = []
            for rm_field in related_model._meta.fields:
                if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
                    new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
            return value, new_lookups, needs_distinct
        else:
            if isinstance(field, JSONField):
                new_lookup = new_lookup.replace(field.name, f'{field.name}_as_txt')
            value = self.value_to_python_for_field(field, value)
        return value, new_lookup, needs_distinct
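    # Sketch of the coercion rules implemented above (model and values are hypothetical):
    #
    #   value_to_python(Host, 'inventory__isnull', 'true')   -> (True, 'inventory__isnull', ...)
    #   value_to_python(Host, 'id__in', '1,2,3')             -> ([1, 2, 3], 'id__in', ...)
    #   value_to_python(Host, 'created_by__search', 'admin') -> a list of
    #       '<path>__<field>__icontains' lookups over the related model's name-like fields.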

    def filter_queryset(self, request, queryset, view):
        try:
            # Apply filters specified via query_params. Each entry in the lists
            # below is (negate, field, value).
            and_filters = []
            or_filters = []
            chain_filters = []
            role_filters = []
            search_filters = {}
            needs_distinct = False
            # Can only have two values: 'AND', 'OR'
            # If 'AND' is used, an item must satisfy all conditions to show up in the results.
            # If 'OR' is used, an item just needs to satisfy one condition to appear in results.
            search_filter_relation = 'OR'
            for key, values in request.query_params.lists():
                if key in self.RESERVED_NAMES:
                    continue

                # HACK: make `created` available via API for the Django User ORM model
                # so it keeps compatibility with other objects which expose the `created` attr.
                if queryset.model._meta.object_name == 'User' and key.startswith('created'):
                    key = key.replace('created', 'date_joined')

                # HACK: Make job event filtering by host name mostly work even
                # when not capturing job event hosts M2M.
                if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'):
                    key = key.replace('hosts__name', 'or__host__name')
                    or_filters.append((False, 'host__name__isnull', True))

                # Custom __int filter suffix (internal use only).
                q_int = False
                if key.endswith('__int'):
                    key = key[:-5]
                    q_int = True

                # RBAC filtering
                if key == 'role_level':
                    role_filters.append(values[0])
                    continue

                # Search across related objects.
                if key.endswith('__search'):
                    if values and ',' in values[0]:
                        search_filter_relation = 'AND'
                        values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
                    for value in values:
                        search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_str(value))
                        assert isinstance(new_keys, list)
                        search_filters[search_value] = new_keys
                    # by definition, search *only* joins across relations,
                    # so it _always_ needs a .distinct()
                    needs_distinct = True
                    continue

                # Custom chain__ and or__ filters, mutually exclusive (both can
                # precede not__).
                q_chain = False
                q_or = False
                if key.startswith('chain__'):
                    key = key[7:]
                    q_chain = True
                elif key.startswith('or__'):
                    key = key[4:]
                    q_or = True

                # Custom not__ filter prefix.
                q_not = False
                if key.startswith('not__'):
                    key = key[5:]
                    q_not = True

                # Convert value(s) to python and add to the appropriate list.
                for value in values:
                    if q_int:
                        value = int(value)
                    value, new_key, distinct = self.value_to_python(queryset.model, key, value)
                    if distinct:
                        needs_distinct = True
                    if '_as_txt' in new_key:
                        fname = next(item for item in new_key.split('__') if item.endswith('_as_txt'))
                        queryset = queryset.annotate(**{fname: Cast(fname[:-7], output_field=TextField())})
                    if q_chain:
                        chain_filters.append((q_not, new_key, value))
                    elif q_or:
                        or_filters.append((q_not, new_key, value))
                    else:
                        and_filters.append((q_not, new_key, value))

            # Now build Q objects for database query filter.
            if and_filters or or_filters or chain_filters or role_filters or search_filters:
                args = []
                for n, k, v in and_filters:
                    if n:
                        args.append(~Q(**{k: v}))
                    else:
                        args.append(Q(**{k: v}))
                for role_name in role_filters:
                    if not hasattr(queryset.model, 'accessible_pk_qs'):
                        raise ParseError(_('Cannot apply role_level filter to this list because its model ' 'does not use roles for access control.'))
                    args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
                if or_filters:
                    q = Q()
                    for n, k, v in or_filters:
                        if n:
                            q |= ~Q(**{k: v})
                        else:
                            q |= Q(**{k: v})
                    args.append(q)
                if search_filters and search_filter_relation == 'OR':
                    q = Q()
                    for term, constrains in search_filters.items():
                        for constrain in constrains:
                            q |= Q(**{constrain: term})
                    args.append(q)
                elif search_filters and search_filter_relation == 'AND':
                    for term, constrains in search_filters.items():
                        q_chain = Q()
                        for constrain in constrains:
                            q_chain |= Q(**{constrain: term})
                        queryset = queryset.filter(q_chain)
                for n, k, v in chain_filters:
                    if n:
                        q = ~Q(**{k: v})
                    else:
                        q = Q(**{k: v})
                    queryset = queryset.filter(q)
                queryset = queryset.filter(*args)
                if needs_distinct:
                    queryset = queryset.distinct()

            return queryset
        except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
            raise ParseError(e.args[0])
        except ValidationError as e:
            raise ParseError(json.dumps(e.messages, ensure_ascii=False))
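# Illustrative sketch (field names are hypothetical) of how the backend above turns a
# query string into Django Q objects:
#
#   ?name__icontains=web&not__enabled=true&or__status=failed&or__status=error
#
# is roughly equivalent to:
#
#   queryset.filter(Q(name__icontains='web'), ~Q(enabled=True), Q(status='failed') | Q(status='error'))
#
# chain__ conditions are instead applied one .filter() call at a time, and .distinct()
# is added whenever a lookup traverses a relation that could duplicate rows.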


class OrderByBackend(BaseFilterBackend):
    """
    Filter to apply ordering based on query string parameters.
    """

    def filter_queryset(self, request, queryset, view):
        try:
            order_by = None
            for key, value in request.query_params.items():
                if key in ('order', 'order_by'):
                    order_by = value
                    if ',' in value:
                        order_by = value.split(',')
                    else:
                        order_by = (value,)
            default_order_by = self.get_default_ordering(view)
            # glue the order by and default order by together so that the default is the backup option
            order_by = list(order_by or []) + list(default_order_by or [])
            if order_by:
                order_by = self._validate_ordering_fields(queryset.model, order_by)
                # Special handling of the type field for ordering. In this
                # case, we're not sorting exactly on the type field, but
                # given the limited number of views with multiple types,
                # sorting on polymorphic_ctype.model is effectively the same.
                new_order_by = []
                if 'polymorphic_ctype' in get_all_field_names(queryset.model):
                    for field in order_by:
                        if field == 'type':
                            new_order_by.append('polymorphic_ctype__model')
                        elif field == '-type':
                            new_order_by.append('-polymorphic_ctype__model')
                        else:
                            new_order_by.append(field)
                else:
                    for field in order_by:
                        if field not in ('type', '-type'):
                            new_order_by.append(field)
                queryset = queryset.order_by(*new_order_by)
            return queryset
        except FieldError as e:
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)

    def get_default_ordering(self, view):
        ordering = getattr(view, 'ordering', None)
        if isinstance(ordering, str):
            return (ordering,)
        return ordering

    def _validate_ordering_fields(self, model, order_by):
        for field_name in order_by:
            # strip off the negation prefix `-` if it exists
            prefix = ''
            path = field_name
            if field_name[0] == '-':
                prefix = field_name[0]
                path = field_name[1:]
            try:
                field, new_path = get_field_from_path(model, path)
                new_path = '{}{}'.format(prefix, new_path)
            except (FieldError, FieldDoesNotExist) as e:
                raise ParseError(e.args[0])
            yield new_path
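# Brief usage sketch for the ordering backend above (field names are illustrative):
#
#   ?order_by=-created        -> queryset.order_by('-created')
#   ?order_by=name,-created   -> queryset.order_by('name', '-created')
#   ?order_by=type            -> queryset.order_by('polymorphic_ctype__model') on models
#                                that expose a polymorphic type column; otherwise 'type'
#                                is dropped from the ordering.
#
# Invalid field names surface as a 400 response via ParseError.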

@@ -5,13 +5,11 @@
import inspect
import logging
import time
import uuid

# Django
from django.conf import settings
from django.contrib.auth import views as auth_views
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, transaction
from django.db.models.fields.related import OneToOneRel
@@ -32,13 +30,21 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import StaticHTMLRenderer
from rest_framework.negotiation import DefaultContentNegotiation

# django-ansible-base
from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend
from ansible_base.lib.utils.models import get_all_field_names
from ansible_base.lib.utils.requests import get_remote_host
from ansible_base.rbac.models import RoleEvaluation, RoleDefinition
from ansible_base.rbac.permission_registry import permission_registry
from ansible_base.jwt_consumer.common.util import validate_x_trusted_proxy_header

# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
from awx.main.access import access_registry
from awx.main.models.rbac import give_creator_permissions
from awx.main.access import optimize_queryset
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name
from awx.main.utils.proxy import is_proxy_in_headers, delete_headers_starting_with_http
from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
from awx.api.versioning import URLPathVersioning
@@ -90,25 +96,31 @@ class LoggedLoginView(auth_views.LoginView):

    def post(self, request, *args, **kwargs):
        ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
        ip = get_remote_host(request)  # request.META.get('REMOTE_ADDR', None)
        if request.user.is_authenticated:
            logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
            ret.set_cookie('userLoggedIn', 'true')
            logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, ip)))
            ret.set_cookie(
                'userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False), samesite=getattr(settings, 'USER_COOKIE_SAMESITE', 'Lax')
            )
            ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))

            return ret
        else:
            if 'username' in self.request.POST:
                logger.warning(smart_str(u"Login failed for user {} from {}".format(self.request.POST.get('username'), request.META.get('REMOTE_ADDR', None))))
                logger.warning(smart_str(u"Login failed for user {} from {}".format(self.request.POST.get('username'), ip)))
            ret.status_code = 401
            return ret


class LoggedLogoutView(auth_views.LogoutView):

    success_url_allowed_hosts = set(settings.LOGOUT_ALLOWED_HOSTS.split(",")) if settings.LOGOUT_ALLOWED_HOSTS else set()

    def dispatch(self, request, *args, **kwargs):
        original_user = getattr(request, 'user', None)
        ret = super(LoggedLogoutView, self).dispatch(request, *args, **kwargs)
        current_user = getattr(request, 'user', None)
        ret.set_cookie('userLoggedIn', 'false')
        ret.set_cookie('userLoggedIn', 'false', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
        if (not current_user or not getattr(current_user, 'pk', True)) and current_user != original_user:
            logger.info("User {} logged out.".format(original_user.username))
        return ret
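# Settings assumed by the two views above (setting names are taken from the diff; the
# values here are only an example, not a recommended configuration):
#
#   SESSION_COOKIE_SECURE = True
#   USER_COOKIE_SAMESITE = 'Lax'
#   SESSION_COOKIE_NAME = 'awx_sessionid'
#   LOGOUT_ALLOWED_HOSTS = 'awx.example.com,tower.example.com'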
|
||||
@@ -143,22 +155,23 @@ class APIView(views.APIView):
|
||||
Store the Django REST Framework Request object as an attribute on the
|
||||
normal Django request, store time the request started.
|
||||
"""
|
||||
remote_headers = ['REMOTE_ADDR', 'REMOTE_HOST']
|
||||
|
||||
self.time_started = time.time()
|
||||
if getattr(settings, 'SQL_DEBUG', False):
|
||||
self.queries_before = len(connection.queries)
|
||||
|
||||
if 'HTTP_X_TRUSTED_PROXY' in request.environ:
|
||||
if validate_x_trusted_proxy_header(request.environ['HTTP_X_TRUSTED_PROXY']):
|
||||
remote_headers = settings.REMOTE_HOST_HEADERS
|
||||
else:
|
||||
logger.warning("Request appeared to be a trusted upstream proxy but failed to provide a matching shared secret.")
|
||||
|
||||
# If there are any custom headers in REMOTE_HOST_HEADERS, make sure
|
||||
# they respect the allowed proxy list
|
||||
if all(
|
||||
[
|
||||
settings.PROXY_IP_ALLOWED_LIST,
|
||||
request.environ.get('REMOTE_ADDR') not in settings.PROXY_IP_ALLOWED_LIST,
|
||||
request.environ.get('REMOTE_HOST') not in settings.PROXY_IP_ALLOWED_LIST,
|
||||
]
|
||||
):
|
||||
for custom_header in settings.REMOTE_HOST_HEADERS:
|
||||
if custom_header.startswith('HTTP_'):
|
||||
request.environ.pop(custom_header, None)
|
||||
if settings.PROXY_IP_ALLOWED_LIST:
|
||||
if not is_proxy_in_headers(self.request, settings.PROXY_IP_ALLOWED_LIST, remote_headers):
|
||||
delete_headers_starting_with_http(request, settings.REMOTE_HOST_HEADERS)
|
||||
|
||||
drf_request = super(APIView, self).initialize_request(request, *args, **kwargs)
|
||||
request.drf_request = drf_request
|
||||
@@ -171,7 +184,7 @@ class APIView(views.APIView):
|
||||
self.__init_request_error__ = exc
|
||||
except UnsupportedMediaType as exc:
|
||||
exc.detail = _(
|
||||
'You did not use correct Content-Type in your HTTP request. ' 'If you are using our REST API, the Content-Type must be application/json'
|
||||
'You did not use correct Content-Type in your HTTP request. If you are using our REST API, the Content-Type must be application/json'
|
||||
)
|
||||
self.__init_request_error__ = exc
|
||||
return drf_request
|
||||
@@ -203,17 +216,21 @@ class APIView(views.APIView):
|
||||
return response
|
||||
|
||||
if response.status_code >= 400:
|
||||
ip = get_remote_host(request) # request.META.get('REMOTE_ADDR', None)
|
||||
msg_data = {
|
||||
'status_code': response.status_code,
|
||||
'user_name': request.user,
|
||||
'url_path': request.path,
|
||||
'remote_addr': request.META.get('REMOTE_ADDR', None),
|
||||
'remote_addr': ip,
|
||||
}
|
||||
|
||||
if type(response.data) is dict:
|
||||
msg_data['error'] = response.data.get('error', response.status_text)
|
||||
elif type(response.data) is list:
|
||||
msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
|
||||
if len(response.data) > 0 and isinstance(response.data[0], str):
|
||||
msg_data['error'] = str(response.data[0])
|
||||
else:
|
||||
msg_data['error'] = ", ".join(list(map(lambda x: x.get('error', response.status_text), response.data)))
|
||||
else:
|
||||
msg_data['error'] = response.status_text
|
||||
|
||||
@@ -234,7 +251,8 @@ class APIView(views.APIView):
|
||||
|
||||
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
|
||||
time_started = getattr(self, 'time_started', None)
|
||||
response['X-API-Product-Version'] = get_awx_version()
|
||||
if request.user.is_authenticated:
|
||||
response['X-API-Product-Version'] = get_awx_version()
|
||||
response['X-API-Product-Name'] = server_product_name()
|
||||
|
||||
response['X-API-Node'] = settings.CLUSTER_HOST_ID
|
||||
@@ -364,12 +382,7 @@ class GenericAPIView(generics.GenericAPIView, APIView):
|
||||
return self.queryset._clone()
|
||||
elif self.model is not None:
|
||||
qs = self.model._default_manager
|
||||
if self.model in access_registry:
|
||||
access_class = access_registry[self.model]
|
||||
if access_class.select_related:
|
||||
qs = qs.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
qs = qs.prefetch_related(*access_class.prefetch_related)
|
||||
qs = optimize_queryset(qs)
|
||||
return qs
|
||||
else:
|
||||
return super(GenericAPIView, self).get_queryset()
|
||||
@@ -477,7 +490,11 @@ class ListAPIView(generics.ListAPIView, GenericAPIView):
|
||||
|
||||
class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView):
|
||||
# Base class for a list view that allows creating new objects.
|
||||
pass
|
||||
def perform_create(self, serializer):
|
||||
super().perform_create(serializer)
|
||||
if serializer.Meta.model in permission_registry.all_registered_models:
|
||||
if self.request and self.request.user:
|
||||
give_creator_permissions(self.request.user, serializer.instance)
|
||||
|
||||
|
||||
class ParentMixin(object):
|
||||
@@ -512,6 +529,9 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
# And optionally (user must have given access permission on parent object
|
||||
# to view sublist):
|
||||
# parent_access = 'read'
|
||||
# filter_read_permission sets whether or not to override the default intersection behavior
|
||||
# implemented here
|
||||
filter_read_permission = True
|
||||
|
||||
def get_description_context(self):
|
||||
d = super(SubListAPIView, self).get_description_context()
|
||||
@@ -526,12 +546,16 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model).distinct()
|
||||
sublist_qs = self.get_sublist_queryset(parent)
|
||||
return qs & sublist_qs
|
||||
if not self.filter_read_permission:
|
||||
return optimize_queryset(self.get_sublist_queryset(parent))
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
if hasattr(self, 'parent_key'):
|
||||
# This is vastly preferable for ReverseForeignKey relationships
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return qs.distinct() & self.get_sublist_queryset(parent).distinct()
|
||||
|
||||
def get_sublist_queryset(self, parent):
|
||||
return getattrd(parent, self.relationship).distinct()
|
||||
return getattrd(parent, self.relationship)
|
||||
|
||||
|
||||
class DestroyAPIView(generics.DestroyAPIView):
|
||||
@@ -580,15 +604,6 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
|
||||
d.update({'parent_key': getattr(self, 'parent_key', None)})
|
||||
return d
|
||||
|
||||
def get_queryset(self):
|
||||
if hasattr(self, 'parent_key'):
|
||||
# Prefer this filtering because ForeignKey allows us more assumptions
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return super(SubListCreateAPIView, self).get_queryset()
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
# If the object ID was not specified, it probably doesn't exist in the
|
||||
# DB yet. We want to see if we can create it. The URL may choose to
|
||||
@@ -799,6 +814,7 @@ class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
|
||||
|
||||
|
||||
class ResourceAccessList(ParentMixin, ListAPIView):
|
||||
deprecated = True
|
||||
serializer_class = ResourceAccessListElementSerializer
|
||||
ordering = ('username',)
|
||||
|
||||
@@ -806,6 +822,15 @@ class ResourceAccessList(ParentMixin, ListAPIView):
|
||||
obj = self.get_parent_object()
|
||||
|
||||
content_type = ContentType.objects.get_for_model(obj)
|
||||
|
||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||
ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True))
|
||||
qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True)
|
||||
auditor_role = RoleDefinition.objects.filter(name="System Auditor").first()
|
||||
if auditor_role:
|
||||
qs |= User.objects.filter(role_assignments__role_definition=auditor_role)
|
||||
return qs.distinct()
|
||||
|
||||
roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id))
|
||||
|
||||
ancestors = set()
|
||||
@@ -965,18 +990,13 @@ class CopyAPIView(GenericAPIView):
|
||||
None, None, self.model, obj, request.user, create_kwargs=create_kwargs, copy_name=serializer.validated_data.get('name', '')
|
||||
)
|
||||
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
|
||||
new_obj.admin_role.members.add(request.user)
|
||||
give_creator_permissions(request.user, new_obj)
|
||||
if sub_objs:
|
||||
# store the copied object dict into cache, because it's
|
||||
# often too large for postgres' notification bus
|
||||
# (which has a default maximum message size of 8k)
|
||||
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
|
||||
cache.set(key, sub_objs, timeout=3600)
|
||||
permission_check_func = None
|
||||
if hasattr(type(self), 'deep_copy_permission_check_func'):
|
||||
permission_check_func = (type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func')
|
||||
trigger_delayed_deep_copy(
|
||||
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, key, permission_check_func=permission_check_func
|
||||
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, permission_check_func=permission_check_func
|
||||
)
|
||||
serializer = self._get_copy_return_serializer(new_obj)
|
||||
headers = {'Location': new_obj.get_absolute_url(request=request)}
|
||||
|
||||
@@ -36,11 +36,13 @@ class Metadata(metadata.SimpleMetadata):
|
||||
field_info = OrderedDict()
|
||||
field_info['type'] = self.label_lookup[field]
|
||||
field_info['required'] = getattr(field, 'required', False)
|
||||
field_info['hidden'] = getattr(field, 'hidden', False)
|
||||
|
||||
text_attrs = [
|
||||
'read_only',
|
||||
'label',
|
||||
'help_text',
|
||||
'warning_text',
|
||||
'min_length',
|
||||
'max_length',
|
||||
'min_value',
|
||||
@@ -71,7 +73,7 @@ class Metadata(metadata.SimpleMetadata):
|
||||
'url': _('URL for this {}.'),
|
||||
'related': _('Data structure with URLs of related resources.'),
|
||||
'summary_fields': _(
|
||||
'Data structure with name/description for related resources. ' 'The output for some objects may be limited for performance reasons.'
|
||||
'Data structure with name/description for related resources. The output for some objects may be limited for performance reasons.'
|
||||
),
|
||||
'created': _('Timestamp when this {} was created.'),
|
||||
'modified': _('Timestamp when this {} was last modified.'),
|
||||
@@ -101,7 +103,7 @@ class Metadata(metadata.SimpleMetadata):
|
||||
default = field.get_default()
|
||||
if type(default) is UUID:
|
||||
default = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
|
||||
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
|
||||
if field.field_name == 'TOWER_URL_BASE' and default == 'https://platformhost':
|
||||
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
|
||||
field_info['default'] = default
|
||||
except serializers.SkipField:
|
||||
|
||||
@@ -6,7 +6,7 @@ import copy
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from collections import Counter, OrderedDict
|
||||
from datetime import timedelta
|
||||
from uuid import uuid4
|
||||
|
||||
@@ -43,9 +43,14 @@ from rest_framework.utils.serializer_helpers import ReturnList
|
||||
# Django-Polymorphic
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.lib.utils.models import get_type_for_model
|
||||
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
|
||||
from ansible_base.rbac import permission_registry
|
||||
|
||||
# AWX
|
||||
from awx.main.access import get_user_capabilities
|
||||
from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE
|
||||
from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE, org_role_to_permission
|
||||
from awx.main.models import (
|
||||
ActivityStream,
|
||||
AdHocCommand,
|
||||
@@ -80,6 +85,7 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
ProjectUpdateEvent,
|
||||
ReceptorAddress,
|
||||
RefreshToken,
|
||||
Role,
|
||||
Schedule,
|
||||
@@ -99,10 +105,9 @@ from awx.main.models import (
|
||||
CLOUD_INVENTORY_SOURCES,
|
||||
)
|
||||
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
|
||||
from awx.main.models.rbac import get_roles_on_resource, role_summary_fields_generator
|
||||
from awx.main.models.rbac import role_summary_fields_generator, give_creator_permissions, get_role_codenames, to_permissions, get_role_from_object_role
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.utils import (
|
||||
get_type_for_model,
|
||||
get_model_for_type,
|
||||
camelcase_to_underscore,
|
||||
getattrd,
|
||||
@@ -189,6 +194,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
|
||||
'credential_type': DEFAULT_SUMMARY_FIELDS,
|
||||
'resource': ('ansible_id', 'resource_type'),
|
||||
}
|
||||
|
||||
|
||||
@@ -220,7 +226,7 @@ class CopySerializer(serializers.Serializer):
|
||||
view = self.context.get('view', None)
|
||||
obj = view.get_object()
|
||||
if name == obj.name:
|
||||
raise serializers.ValidationError(_('The original object is already named {}, a copy from' ' it cannot have the same name.'.format(name)))
|
||||
raise serializers.ValidationError(_('The original object is already named {}, a copy from it cannot have the same name.'.format(name)))
|
||||
return attrs
|
||||
|
||||
|
||||
@@ -635,7 +641,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
|
||||
exclusions = self.get_validation_exclusions(self.instance)
|
||||
obj = self.instance or self.Meta.model()
|
||||
for k, v in attrs.items():
|
||||
if k not in exclusions:
|
||||
if k not in exclusions and k != 'canonical_address_port':
|
||||
setattr(obj, k, v)
|
||||
obj.full_clean(exclude=exclusions)
|
||||
# full_clean may modify values on the instance; copy those changes
|
||||
@@ -760,7 +766,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
class UnifiedJobSerializer(BaseSerializer):
|
||||
show_capabilities = ['start', 'delete']
|
||||
event_processing_finished = serializers.BooleanField(
|
||||
help_text=_('Indicates whether all of the events generated by this ' 'unified job have been saved to the database.'), read_only=True
|
||||
help_text=_('Indicates whether all of the events generated by this unified job have been saved to the database.'), read_only=True
|
||||
)
|
||||
|
||||
class Meta:
|
||||
@@ -954,7 +960,7 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
|
||||
|
||||
|
||||
class UserSerializer(BaseSerializer):
|
||||
password = serializers.CharField(required=False, default='', write_only=True, help_text=_('Write-only field used to change the password.'))
|
||||
password = serializers.CharField(required=False, default='', help_text=_('Field used to change the password.'))
|
||||
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
|
||||
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
|
||||
is_system_auditor = serializers.BooleanField(default=False)
|
||||
@@ -981,7 +987,12 @@ class UserSerializer(BaseSerializer):
|
||||
|
||||
def to_representation(self, obj):
|
||||
ret = super(UserSerializer, self).to_representation(obj)
|
||||
ret.pop('password', None)
|
||||
if self.get_external_account(obj):
|
||||
# If this is an external account it shouldn't have a password field
|
||||
ret.pop('password', None)
|
||||
else:
|
||||
# If its an internal account lets assume there is a password and return $encrypted$ to the user
|
||||
ret['password'] = '$encrypted$'
|
||||
if obj and type(self) is UserSerializer:
|
||||
ret['auth'] = obj.social_auth.values('provider', 'uid')
|
||||
return ret
|
||||
@@ -1019,7 +1030,7 @@ class UserSerializer(BaseSerializer):
|
||||
# For now we're not raising an error, just not saving password for
|
||||
# users managed by LDAP who already have an unusable password set.
|
||||
# Get external password will return something like ldap or enterprise or None if the user isn't external. We only want to allow a password update for a None option
|
||||
if new_password and not self.get_external_account(obj):
|
||||
if new_password and new_password != '$encrypted$' and not self.get_external_account(obj):
|
||||
obj.set_password(new_password)
|
||||
obj.save(update_fields=['password'])
|
||||
|
||||
@@ -1574,7 +1585,7 @@ class ProjectPlaybooksSerializer(ProjectSerializer):
|
||||
|
||||
|
||||
class ProjectInventoriesSerializer(ProjectSerializer):
|
||||
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, ' 'not comprehensive.'))
|
||||
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, not comprehensive.'))
|
||||
|
||||
class Meta:
|
||||
model = Project
|
||||
@@ -1624,8 +1635,8 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
|
||||
fields = ('*', 'host_status_counts', 'playbook_counts')
|
||||
|
||||
def get_playbook_counts(self, obj):
|
||||
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
|
||||
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
|
||||
task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
|
||||
play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()
|
||||
|
||||
data = {'play_count': play_count, 'task_count': task_count}
|
||||
|
||||
@@ -2185,7 +2196,7 @@ class BulkHostCreateSerializer(serializers.Serializer):
|
||||
host_data = []
|
||||
for r in result:
|
||||
item = {k: getattr(r, k) for k in return_keys}
|
||||
if not settings.IS_TESTING_MODE:
|
||||
if settings.DATABASES and ('sqlite3' not in settings.DATABASES.get('default', {}).get('ENGINE')):
|
||||
# sqlite acts different with bulk_create -- it doesn't return the id of the objects
|
||||
# to get it, you have to do an additional query, which is not useful for our tests
|
||||
item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
|
||||
@@ -2196,6 +2207,99 @@ class BulkHostCreateSerializer(serializers.Serializer):
|
||||
return return_data
|
||||
|
||||
|
||||
class BulkHostDeleteSerializer(serializers.Serializer):
|
||||
hosts = serializers.ListField(
|
||||
allow_empty=False,
|
||||
max_length=100000,
|
||||
write_only=True,
|
||||
help_text=_('List of hosts ids to be deleted, e.g. [105, 130, 131, 200]'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Host
|
||||
fields = ('hosts',)
|
||||
|
||||
def validate(self, attrs):
|
||||
request = self.context.get('request', None)
|
||||
max_hosts = settings.BULK_HOST_MAX_DELETE
|
||||
# Validating the number of hosts to be deleted
|
||||
if len(attrs['hosts']) > max_hosts:
|
||||
raise serializers.ValidationError(
|
||||
{
|
||||
"ERROR": 'Number of hosts exceeds system setting BULK_HOST_MAX_DELETE',
|
||||
"BULK_HOST_MAX_DELETE": max_hosts,
|
||||
"Hosts_count": len(attrs['hosts']),
|
||||
}
|
||||
)
|
||||
|
||||
# Getting list of all host objects, filtered by the list of the hosts to delete
|
||||
attrs['host_qs'] = Host.objects.get_queryset().filter(pk__in=attrs['hosts']).only('id', 'inventory_id', 'name')
|
||||
|
||||
# Converting the queryset data in a dict. to reduce the number of queries when
|
||||
# manipulating the data
|
||||
attrs['hosts_data'] = attrs['host_qs'].values()
|
||||
|
||||
if len(attrs['host_qs']) == 0:
|
||||
error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in attrs['hosts']}
|
||||
raise serializers.ValidationError({'hosts': error_hosts})
|
||||
|
||||
if len(attrs['host_qs']) < len(attrs['hosts']):
|
||||
hosts_exists = [host['id'] for host in attrs['hosts_data']]
|
||||
failed_hosts = list(set(attrs['hosts']).difference(hosts_exists))
|
||||
error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in failed_hosts}
|
||||
raise serializers.ValidationError({'hosts': error_hosts})
|
||||
|
||||
# Getting all inventories that the hosts can be in
|
||||
inv_list = list(set([host['inventory_id'] for host in attrs['hosts_data']]))
|
||||
|
||||
# Checking that the user have permission to all inventories
|
||||
errors = dict()
|
||||
for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
|
||||
if request and not request.user.is_superuser:
|
||||
if request.user not in inv.admin_role:
|
||||
errors[inv.name] = "Lack permissions to delete hosts from this inventory."
|
||||
if errors != {}:
|
||||
raise PermissionDenied({"inventories": errors})
|
||||
|
||||
# check the inventory type only if the user have permission to it.
|
||||
errors = dict()
|
||||
for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
|
||||
if inv.kind != '':
|
||||
errors[inv.name] = "Hosts can only be deleted from manual inventories."
|
||||
if errors != {}:
|
||||
raise serializers.ValidationError({"inventories": errors})
|
||||
attrs['inventories'] = inv_list
|
||||
return attrs
|
||||
|
||||
def delete(self, validated_data):
|
||||
result = {"hosts": dict()}
|
||||
changes = {'deleted_hosts': dict()}
|
||||
for inventory in validated_data['inventories']:
|
||||
changes['deleted_hosts'][inventory] = list()
|
||||
|
||||
for host in validated_data['hosts_data']:
|
||||
result["hosts"][host["id"]] = f"The host {host['name']} was deleted"
|
||||
changes['deleted_hosts'][host["inventory_id"]].append({"host_id": host["id"], "host_name": host["name"]})
|
||||
|
||||
try:
|
||||
validated_data['host_qs'].delete()
|
||||
except Exception as e:
|
||||
raise serializers.ValidationError({"detail": _(f"cannot delete hosts, host deletion error {e}")})
|
||||
|
||||
request = self.context.get('request', None)
|
||||
|
||||
for inventory in validated_data['inventories']:
|
||||
activity_entry = ActivityStream.objects.create(
|
||||
operation='update',
|
||||
object1='inventory',
|
||||
changes=json.dumps(changes['deleted_hosts'][inventory]),
|
||||
actor=request.user,
|
||||
)
|
||||
activity_entry.inventory.add(inventory)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
class GroupTreeSerializer(GroupSerializer):
|
||||
children = serializers.SerializerMethodField()
|
||||
|
||||
@@ -2659,6 +2763,30 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
||||
if 'summary_fields' not in ret:
|
||||
ret['summary_fields'] = {}
|
||||
|
||||
team_content_type = ContentType.objects.get_for_model(Team)
|
||||
content_type = ContentType.objects.get_for_model(obj)
|
||||
|
||||
reversed_org_map = {}
|
||||
for k, v in org_role_to_permission.items():
|
||||
reversed_org_map[v] = k
|
||||
reversed_role_map = {}
|
||||
for k, v in to_permissions.items():
|
||||
reversed_role_map[v] = k
|
||||
|
||||
def get_roles_from_perms(perm_list):
|
||||
"""given a list of permission codenames return a list of role names"""
|
||||
role_names = set()
|
||||
for codename in perm_list:
|
||||
action = codename.split('_', 1)[0]
|
||||
if action in reversed_role_map:
|
||||
role_names.add(reversed_role_map[action])
|
||||
elif codename in reversed_org_map:
|
||||
if isinstance(obj, Organization):
|
||||
role_names.add(reversed_org_map[codename])
|
||||
if 'view_organization' not in role_names:
|
||||
role_names.add('read_role')
|
||||
return list(role_names)
|
||||
|
||||
def format_role_perm(role):
|
||||
role_dict = {'id': role.id, 'name': role.name, 'description': role.description}
|
||||
try:
|
||||
@@ -2674,13 +2802,21 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
||||
else:
|
||||
# Singleton roles should not be managed from this view, as per copy/edit rework spec
|
||||
role_dict['user_capabilities'] = {'unattach': False}
|
||||
return {'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
|
||||
|
||||
model_name = content_type.model
|
||||
if isinstance(obj, Organization):
|
||||
descendant_perms = [codename for codename in get_role_codenames(role) if codename.endswith(model_name) or codename.startswith('add_')]
|
||||
else:
|
||||
descendant_perms = [codename for codename in get_role_codenames(role) if codename.endswith(model_name)]
|
||||
|
||||
return {'role': role_dict, 'descendant_roles': get_roles_from_perms(descendant_perms)}
|
||||
|
||||
def format_team_role_perm(naive_team_role, permissive_role_ids):
|
||||
ret = []
|
||||
team = naive_team_role.content_object
|
||||
team_role = naive_team_role
|
||||
if naive_team_role.role_field == 'admin_role':
|
||||
team_role = naive_team_role.content_object.member_role
|
||||
team_role = team.member_role
|
||||
for role in team_role.children.filter(id__in=permissive_role_ids).all():
|
||||
role_dict = {
|
||||
'id': role.id,
|
||||
@@ -2700,13 +2836,87 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
||||
else:
|
||||
# Singleton roles should not be managed from this view, as per copy/edit rework spec
|
||||
role_dict['user_capabilities'] = {'unattach': False}
|
||||
ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
|
||||
|
||||
descendant_perms = list(
|
||||
RoleEvaluation.objects.filter(role__in=team.has_roles.all(), object_id=obj.id, content_type_id=content_type.id)
|
||||
.values_list('codename', flat=True)
|
||||
.distinct()
|
||||
)
|
||||
|
||||
ret.append({'role': role_dict, 'descendant_roles': get_roles_from_perms(descendant_perms)})
|
||||
return ret
|
||||
|
||||
team_content_type = ContentType.objects.get_for_model(Team)
|
||||
content_type = ContentType.objects.get_for_model(obj)
|
||||
gfk_kwargs = dict(content_type_id=content_type.id, object_id=obj.id)
|
||||
direct_permissive_role_ids = Role.objects.filter(**gfk_kwargs).values_list('id', flat=True)
|
||||
|
||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||
ret['summary_fields']['direct_access'] = []
|
||||
ret['summary_fields']['indirect_access'] = []
|
||||
|
||||
new_roles_seen = set()
|
||||
all_team_roles = set()
|
||||
all_permissive_role_ids = set()
|
||||
for evaluation in RoleEvaluation.objects.filter(role__in=user.has_roles.all(), **gfk_kwargs).prefetch_related('role'):
|
||||
new_role = evaluation.role
|
||||
if new_role.id in new_roles_seen:
|
||||
continue
|
||||
new_roles_seen.add(new_role.id)
|
||||
old_role = get_role_from_object_role(new_role)
|
||||
all_permissive_role_ids.add(old_role.id)
|
||||
|
||||
if int(new_role.object_id) == obj.id and new_role.content_type_id == content_type.id:
|
||||
ret['summary_fields']['direct_access'].append(format_role_perm(old_role))
|
||||
elif new_role.content_type_id == team_content_type.id:
|
||||
all_team_roles.add(old_role)
|
||||
else:
|
||||
ret['summary_fields']['indirect_access'].append(format_role_perm(old_role))
|
||||
|
||||
# Lazy role creation gives us a big problem, where some intermediate roles are not easy to find
|
||||
# like when a team has indirect permission, so here we get all roles the users teams have
|
||||
# these contribute to all potential permission-granting roles of the object
|
||||
user_teams_qs = permission_registry.team_model.objects.filter(member_roles__in=ObjectRole.objects.filter(users=user))
|
||||
team_obj_roles = ObjectRole.objects.filter(teams__in=user_teams_qs)
|
||||
for evaluation in RoleEvaluation.objects.filter(role__in=team_obj_roles, **gfk_kwargs).prefetch_related('role'):
|
||||
new_role = evaluation.role
|
||||
if new_role.id in new_roles_seen:
|
||||
continue
|
||||
new_roles_seen.add(new_role.id)
|
||||
old_role = get_role_from_object_role(new_role)
|
||||
all_permissive_role_ids.add(old_role.id)
|
||||
|
||||
# In DAB RBAC, superuser is strictly a user flag, and global roles are not in the RoleEvaluation table
|
||||
if user.is_superuser:
|
||||
ret['summary_fields'].setdefault('indirect_access', [])
|
||||
all_role_names = [field.name for field in obj._meta.get_fields() if isinstance(field, ImplicitRoleField)]
|
||||
ret['summary_fields']['indirect_access'].append(
|
||||
{
|
||||
"role": {
|
||||
"id": None,
|
||||
"name": _("System Administrator"),
|
||||
"description": _("Can manage all aspects of the system"),
|
||||
"user_capabilities": {"unattach": False},
|
||||
},
|
||||
"descendant_roles": all_role_names,
|
||||
}
|
||||
)
|
||||
elif user.is_system_auditor:
|
||||
ret['summary_fields'].setdefault('indirect_access', [])
|
||||
ret['summary_fields']['indirect_access'].append(
|
||||
{
|
||||
"role": {
|
||||
"id": None,
|
||||
"name": _("System Auditor"),
|
||||
"description": _("Can view all aspects of the system"),
|
||||
"user_capabilities": {"unattach": False},
|
||||
},
|
||||
"descendant_roles": ["read_role"],
|
||||
}
|
||||
)
|
||||
|
||||
ret['summary_fields']['direct_access'].extend([y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in all_team_roles) for y in x])
|
||||
|
||||
return ret
|
||||
|
||||
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
|
||||
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
|
||||
|
||||
direct_access_roles = user.roles.filter(id__in=direct_permissive_role_ids).all()
|
||||
@@ -2900,7 +3110,7 @@ class CredentialSerializer(BaseSerializer):
|
||||
):
|
||||
if getattr(self.instance, related_objects).count() > 0:
|
||||
raise ValidationError(
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality' ' of the resources using it.')
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality of the resources using it.')
|
||||
)
|
||||
|
||||
return credential_type
|
||||
@@ -2920,7 +3130,7 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
default=None,
|
||||
write_only=True,
|
||||
allow_null=True,
|
||||
help_text=_('Write-only field used to add user to owner role. If provided, ' 'do not give either team or organization. Only valid for creation.'),
|
||||
help_text=_('Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.'),
|
||||
)
|
||||
team = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Team.objects.all(),
|
||||
@@ -2928,14 +3138,14 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
default=None,
|
||||
write_only=True,
|
||||
allow_null=True,
|
||||
help_text=_('Write-only field used to add team to owner role. If provided, ' 'do not give either user or organization. Only valid for creation.'),
|
||||
help_text=_('Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.'),
|
||||
)
|
||||
organization = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Organization.objects.all(),
|
||||
required=False,
|
||||
default=None,
|
||||
allow_null=True,
|
||||
help_text=_('Inherit permissions from organization roles. If provided on creation, ' 'do not give either user or team.'),
|
||||
help_text=_('Inherit permissions from organization roles. If provided on creation, do not give either user or team.'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
@@ -2957,7 +3167,7 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
if len(owner_fields) > 1:
|
||||
received = ", ".join(sorted(owner_fields))
|
||||
raise serializers.ValidationError(
|
||||
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, " "received {} fields.".format(received))}
|
||||
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, received {} fields.".format(received))}
|
||||
)
|
||||
|
||||
if attrs.get('team'):
|
||||
@@ -2975,7 +3185,7 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
credential = super(CredentialSerializerCreate, self).create(validated_data)
|
||||
|
||||
if user:
|
||||
credential.admin_role.members.add(user)
|
||||
give_creator_permissions(user, credential)
|
||||
if team:
|
||||
if not credential.organization or team.organization.id != credential.organization.id:
|
||||
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
|
||||
@@ -3228,7 +3438,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
if get_field_from_model_or_attrs('host_config_key') and not inventory:
|
||||
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
|
||||
|
||||
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
|
||||
prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
|
||||
if project is None:
|
||||
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
|
||||
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
|
||||
@@ -3617,7 +3827,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
|
||||
try:
|
||||
return obj.result_stdout
|
||||
except StdoutMaxBytesExceeded as e:
|
||||
return _("Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes.").format(
|
||||
return _("Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes.").format(
|
||||
text_size=e.total, supported_size=e.supported
|
||||
)
|
||||
|
||||
@@ -4531,7 +4741,7 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
if cred.unique_hash() in provided_mapping.keys():
|
||||
continue # User replaced credential with new of same type
|
||||
errors.setdefault('credentials', []).append(
|
||||
_('Removing {} credential at launch time without replacement is not supported. ' 'Provided list lacked credential(s): {}.').format(
|
||||
_('Removing {} credential at launch time without replacement is not supported. Provided list lacked credential(s): {}.').format(
|
||||
cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])
|
||||
)
|
||||
)
|
||||
@@ -4681,12 +4891,11 @@ class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
|
||||
# many-to-many fields
|
||||
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
# TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
|
||||
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobNode
|
||||
fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
|
||||
fields = ('*', 'credentials', 'labels', 'instance_groups') # m2m fields are not canonical for WJ nodes
|
||||
|
||||
def validate(self, attrs):
|
||||
return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
|
||||
@@ -4746,21 +4955,21 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
|
||||
requested_use_credentials = set()
|
||||
requested_use_labels = set()
|
||||
# requested_use_instance_groups = set()
|
||||
requested_use_instance_groups = set()
|
||||
for job in attrs['jobs']:
|
||||
for cred in job.get('credentials', []):
|
||||
requested_use_credentials.add(cred)
|
||||
for label in job.get('labels', []):
|
||||
requested_use_labels.add(label)
|
||||
# for instance_group in job.get('instance_groups', []):
|
||||
# requested_use_instance_groups.add(instance_group)
|
||||
for instance_group in job.get('instance_groups', []):
|
||||
requested_use_instance_groups.add(instance_group)
|
||||
|
||||
key_to_obj_map = {
|
||||
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
|
||||
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
|
||||
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
|
||||
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
|
||||
# "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
|
||||
"instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
|
||||
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
|
||||
}
|
||||
|
||||
@@ -4787,7 +4996,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
|
||||
self.check_list_permission(Credential, requested_use_credentials, 'use_role')
|
||||
self.check_list_permission(Label, requested_use_labels)
|
||||
# self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
|
||||
self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
|
||||
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
|
||||
|
||||
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
|
||||
@@ -4834,7 +5043,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
node_m2m_object_types_to_through_model = {
|
||||
'credentials': WorkflowJobNode.credentials.through,
|
||||
'labels': WorkflowJobNode.labels.through,
|
||||
# 'instance_groups': WorkflowJobNode.instance_groups.through,
|
||||
'instance_groups': WorkflowJobNode.instance_groups.through,
|
||||
}
|
||||
node_deferred_attr_names = (
|
||||
'limit',
|
||||
@@ -4887,9 +5096,9 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
|
||||
for label in node_m2m_objects[node_identifier][field_name]:
|
||||
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
# if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
|
||||
# for instance_group in node_m2m_objects[node_identifier][obj_type]:
|
||||
# through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
if field_name in node_m2m_objects[node_identifier] and field_name == 'instance_groups':
|
||||
for instance_group in node_m2m_objects[node_identifier][field_name]:
|
||||
through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
if through_model_objects:
|
||||
through_model.objects.bulk_create(through_model_objects)
|
||||
|
||||
@@ -5014,7 +5223,7 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
for subevent in event_messages:
|
||||
if subevent not in ('running', 'approved', 'timed_out', 'denied'):
|
||||
error_list.append(
|
||||
_("Workflow Approval event '{}' invalid, must be one of " "'running', 'approved', 'timed_out', or 'denied'").format(subevent)
|
||||
_("Workflow Approval event '{}' invalid, must be one of 'running', 'approved', 'timed_out', or 'denied'").format(subevent)
|
||||
)
|
||||
continue
|
||||
subevent_messages = event_messages[subevent]
|
||||
@@ -5070,16 +5279,21 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
body = messages[event].get('body', {})
|
||||
if body:
|
||||
try:
|
||||
rendered_body = (
|
||||
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
|
||||
)
|
||||
potential_body = json.loads(rendered_body)
|
||||
if not isinstance(potential_body, dict):
|
||||
error_list.append(
|
||||
_("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
|
||||
)
|
||||
except json.JSONDecodeError as exc:
|
||||
error_list.append(_("Webhook body for '{}' is not a valid json dictionary ({}).".format(event, exc)))
|
||||
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
|
||||
|
||||
# https://github.com/ansible/awx/issues/14410
|
||||
|
||||
# When rendering something such as "{{ job.id }}"
|
||||
# the return type is not a dict, unlike "{{ job_metadata }}" which is a dict
|
||||
|
||||
# potential_body = json.loads(rendered_body)
|
||||
|
||||
# if not isinstance(potential_body, dict):
|
||||
# error_list.append(
|
||||
# _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
|
||||
# )
|
||||
except Exception as exc:
|
||||
error_list.append(_("Webhook body for '{}' is not valid. The following gave an error ({}).".format(event, exc)))
|
||||
|
||||
if error_list:
|
||||
raise serializers.ValidationError(error_list)
|
||||
@@ -5167,7 +5381,7 @@ class NotificationSerializer(BaseSerializer):
)

def get_body(self, obj):
if obj.notification_type in ('webhook', 'pagerduty'):
if obj.notification_type in ('webhook', 'pagerduty', 'awssns'):
if isinstance(obj.body, dict):
if 'body' in obj.body:
return obj.body['body']
@@ -5189,9 +5403,9 @@ class NotificationSerializer(BaseSerializer):
def to_representation(self, obj):
ret = super(NotificationSerializer, self).to_representation(obj)

if obj.notification_type == 'webhook':
if obj.notification_type in ('webhook', 'awssns'):
ret.pop('subject')
if obj.notification_type not in ('email', 'webhook', 'pagerduty'):
if obj.notification_type not in ('email', 'webhook', 'pagerduty', 'awssns'):
ret.pop('body')
return ret

@@ -5352,10 +5566,24 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer):
class Meta:
model = InstanceLink
fields = ('source', 'target', 'link_state')
fields = ('id', 'related', 'source', 'target', 'target_full_address', 'link_state')

source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())

target = serializers.SerializerMethodField()
target_full_address = serializers.SerializerMethodField()

def get_related(self, obj):
res = super(InstanceLinkSerializer, self).get_related(obj)
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
res['target_address'] = self.reverse('api:receptor_address_detail', kwargs={'pk': obj.target.id})
return res

def get_target(self, obj):
return obj.target.instance.hostname

def get_target_full_address(self, obj):
return obj.target.get_full_address()


class InstanceNodeSerializer(BaseSerializer):
@@ -5364,6 +5592,29 @@ class InstanceNodeSerializer(BaseSerializer):
fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled')


class ReceptorAddressSerializer(BaseSerializer):
full_address = serializers.SerializerMethodField()

class Meta:
model = ReceptorAddress
fields = (
'id',
'url',
'address',
'port',
'protocol',
'websocket_path',
'is_internal',
'canonical',
'instance',
'peers_from_control_nodes',
'full_address',
)

def get_full_address(self, obj):
return obj.get_full_address()


class InstanceSerializer(BaseSerializer):
show_capabilities = ['edit']

@@ -5372,10 +5623,17 @@ class InstanceSerializer(BaseSerializer):
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
health_check_pending = serializers.SerializerMethodField()
peers = serializers.PrimaryKeyRelatedField(
help_text=_('Primary keys of receptor addresses to peer to.'), many=True, required=False, queryset=ReceptorAddress.objects.all()
)
reverse_peers = serializers.SerializerMethodField()
listener_port = serializers.IntegerField(source='canonical_address_port', required=False, allow_null=True)
peers_from_control_nodes = serializers.BooleanField(source='canonical_address_peers_from_control_nodes', required=False)
protocol = serializers.SerializerMethodField()

class Meta:
model = Instance
read_only_fields = ('ip_address', 'uuid', 'version')
read_only_fields = ('ip_address', 'uuid', 'version', 'managed', 'reverse_peers')
fields = (
'id',
'hostname',
@@ -5406,8 +5664,13 @@ class InstanceSerializer(BaseSerializer):
'managed_by_policy',
'node_type',
'node_state',
'managed',
'ip_address',
'peers',
'reverse_peers',
'listener_port',
'peers_from_control_nodes',
'protocol',
)
extra_kwargs = {
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
@@ -5429,16 +5692,54 @@ class InstanceSerializer(BaseSerializer):

def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['receptor_addresses'] = self.reverse('api:instance_receptor_addresses_list', kwargs={'pk': obj.pk})
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed:
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type == 'execution':
res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
return res

def create_or_update(self, validated_data, obj=None, create=True):
# create a managed receptor address if listener port is defined
port = validated_data.pop('listener_port', -1)
peers_from_control_nodes = validated_data.pop('peers_from_control_nodes', -1)

# delete the receptor address if the port is explicitly set to None
if obj and port == None:
obj.receptor_addresses.filter(address=obj.hostname).delete()

if create:
instance = super(InstanceSerializer, self).create(validated_data)
else:
instance = super(InstanceSerializer, self).update(obj, validated_data)
instance.refresh_from_db()  # instance canonical address lookup is deferred, so needs to be reloaded

# only create or update if port is defined in validated_data or already exists in the
# canonical address
# this prevents creating a receptor address if peers_from_control_nodes is in
# validated_data but a port is not set
if (port != None and port != -1) or instance.canonical_address_port:
kwargs = {}
if port != -1:
kwargs['port'] = port
if peers_from_control_nodes != -1:
kwargs['peers_from_control_nodes'] = peers_from_control_nodes
if kwargs:
kwargs['canonical'] = True
instance.receptor_addresses.update_or_create(address=instance.hostname, defaults=kwargs)

return instance

def create(self, validated_data):
return self.create_or_update(validated_data, create=True)

def update(self, obj, validated_data):
return self.create_or_update(validated_data, obj, create=False)

def get_summary_fields(self, obj):
summary = super().get_summary_fields(obj)

@@ -5448,6 +5749,16 @@ class InstanceSerializer(BaseSerializer):

return summary

def get_reverse_peers(self, obj):
return Instance.objects.prefetch_related('peers').filter(peers__in=obj.receptor_addresses.all()).values_list('id', flat=True)

def get_protocol(self, obj):
# note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
for addr in obj.receptor_addresses.all():
if addr.canonical:
return addr.protocol
return ""

def get_consumed_capacity(self, obj):
return obj.consumed_capacity

@@ -5460,22 +5771,30 @@ class InstanceSerializer(BaseSerializer):
def get_health_check_pending(self, obj):
return obj.health_check_pending

def validate(self, data):
if self.instance:
if self.instance.node_type == Instance.Types.HOP:
raise serializers.ValidationError("Hop node instances may not be changed.")
else:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
return data
def validate(self, attrs):
# Oddly, using 'source' on a DRF field populates attrs with the source name, so we should rename it back
if 'canonical_address_port' in attrs:
attrs['listener_port'] = attrs.pop('canonical_address_port')
if 'canonical_address_peers_from_control_nodes' in attrs:
attrs['peers_from_control_nodes'] = attrs.pop('canonical_address_peers_from_control_nodes')

if not self.instance and not settings.IS_K8S:
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))

# cannot enable peers_from_control_nodes if listener_port is not set
if attrs.get('peers_from_control_nodes'):
port = attrs.get('listener_port', -1)  # -1 denotes missing, None denotes explicit null
if (port is None) or (port == -1 and self.instance and self.instance.canonical_address is None):
raise serializers.ValidationError(_("Cannot enable peers_from_control_nodes if listener_port is not set."))

return super().validate(attrs)

def validate_node_type(self, value):
if not self.instance:
if value not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only create execution nodes.")
else:
if self.instance.node_type != value:
raise serializers.ValidationError("Cannot change node type.")
if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]:
raise serializers.ValidationError(_("Can only create execution or hop nodes."))

if self.instance and self.instance.node_type != value:
raise serializers.ValidationError(_("Cannot change node type."))

return value

@@ -5483,30 +5802,71 @@ class InstanceSerializer(BaseSerializer):
if self.instance:
if value != self.instance.node_state:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
if value != Instance.States.DEPROVISIONING:
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
if self.instance.node_type not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only deprovision execution nodes.")
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
if self.instance.managed:
raise serializers.ValidationError(_("Cannot deprovision managed nodes."))
else:
if value and value != Instance.States.INSTALLED:
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))

return value

def validate_hostname(self, value):
"""
- Hostname cannot be "localhost" - but can be something like localhost.domain
- Cannot change the hostname of an-already instantiated & initialized Instance object
Cannot change the hostname
"""
if self.instance and self.instance.hostname != value:
raise serializers.ValidationError("Cannot change hostname.")
raise serializers.ValidationError(_("Cannot change hostname."))

return value

def validate_listener_port(self, value):
if self.instance and self.instance.listener_port != value:
raise serializers.ValidationError("Cannot change listener port.")
"""
Cannot change listener port, unless going from none to integer, and vice versa
If instance is managed, cannot change listener port at all
"""
if self.instance:
canonical_address_port = self.instance.canonical_address_port
if value and canonical_address_port and canonical_address_port != value:
raise serializers.ValidationError(_("Cannot change listener port."))
if self.instance.managed and value != canonical_address_port:
raise serializers.ValidationError(_("Cannot change listener port for managed nodes."))
return value

def validate_peers(self, value):
# cannot peer to an instance more than once
peers_instances = Counter(p.instance_id for p in value)
if any(count > 1 for count in peers_instances.values()):
raise serializers.ValidationError(_("Cannot peer to the same instance more than once."))

if self.instance:
instance_addresses = set(self.instance.receptor_addresses.all())
setting_peers = set(value)
peers_changed = set(self.instance.peers.all()) != setting_peers

if not settings.IS_K8S and peers_changed:
raise serializers.ValidationError(_("Cannot change peers."))

if self.instance.managed and peers_changed:
raise serializers.ValidationError(_("Setting peers manually for managed nodes is not allowed."))

# cannot peer to self
if instance_addresses & setting_peers:
raise serializers.ValidationError(_("Instance cannot peer to its own address."))

# cannot peer to an instance that is already peered to this instance
if instance_addresses:
for p in setting_peers:
if set(p.instance.peers.all()) & instance_addresses:
raise serializers.ValidationError(_(f"Instance {p.instance.hostname} is already peered to this instance."))

return value

def validate_peers_from_control_nodes(self, value):
if self.instance and self.instance.managed and self.instance.canonical_address_peers_from_control_nodes != value:
raise serializers.ValidationError(_("Cannot change peers_from_control_nodes for managed nodes."))

return value

@@ -5514,7 +5874,19 @@ class InstanceSerializer(BaseSerializer):
class InstanceHealthCheckSerializer(BaseSerializer):
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
read_only_fields = (
'uuid',
'hostname',
'ip_address',
'version',
'last_health_check',
'errors',
'cpu',
'memory',
'cpu_capacity',
'mem_capacity',
'capacity',
)
fields = read_only_fields


@@ -5554,7 +5926,7 @@ class InstanceGroupSerializer(BaseSerializer):
instances = serializers.SerializerMethodField()
is_container_group = serializers.BooleanField(
required=False,
help_text=_('Indicates whether instances in this group are containerized.' 'Containerized groups have a designated Openshift or Kubernetes cluster.'),
help_text=_('Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster.'),
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
@@ -5565,7 +5937,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False,
initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to " "this group when new instances come online."),
help_text=_("Minimum percentage of all instances that will be automatically assigned to this group when new instances come online."),
)
policy_instance_minimum = serializers.IntegerField(
default=0,
@@ -5573,7 +5945,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False,
initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
help_text=_("Static minimum number of Instances that will be automatically assign to this group when new instances come online."),
)
max_concurrent_jobs = serializers.IntegerField(
default=0,

@@ -1,16 +1,10 @@
import json
import warnings

from coreapi.document import Object, Link

from rest_framework import exceptions
from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator, AutoSchema as DRFAuthSchema
from rest_framework.views import APIView

from rest_framework_swagger import renderers
from drf_yasg.views import get_schema_view
from drf_yasg import openapi


class SuperUserSchemaGenerator(SchemaGenerator):
@@ -55,43 +49,15 @@ class AutoSchema(DRFAuthSchema):
return description


class SwaggerSchemaView(APIView):
_ignore_model_permissions = True
exclude_from_schema = True
permission_classes = [AllowAny]
renderer_classes = [CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]

def get(self, request):
generator = SuperUserSchemaGenerator(title='Ansible Automation Platform controller API', patterns=None, urlconf=None)
schema = generator.get_schema(request=request)
# python core-api doesn't support the deprecation yet, so track it
# ourselves and return it in a response header
_deprecated = []

# By default, DRF OpenAPI serialization places all endpoints in
# a single node based on their root path (/api). Instead, we want to
# group them by topic/tag so that they're categorized in the rendered
# output
document = schema._data.pop('api')
for path, node in document.items():
if isinstance(node, Object):
for action in node.values():
topic = getattr(action, 'topic', None)
if topic:
schema._data.setdefault(topic, Object())
schema._data[topic]._data[path] = node

if isinstance(action, Object):
for link in action.links.values():
if link.deprecated:
_deprecated.append(link.url)
elif isinstance(node, Link):
topic = getattr(node, 'topic', None)
if topic:
schema._data.setdefault(topic, Object())
schema._data[topic]._data[path] = node

if not schema:
raise exceptions.ValidationError('The schema generator did not return a schema Document')

return Response(schema, headers={'X-Deprecated-Paths': json.dumps(_deprecated)})
schema_view = get_schema_view(
openapi.Info(
title="Snippets API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="contact@snippets.local"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=[AllowAny],
)

awx/api/templates/api/bulk_host_delete_view.md (new file, 22 lines)
@@ -0,0 +1,22 @@
# Bulk Host Delete

This endpoint allows the client to delete multiple hosts from inventories.
They may do this by providing a list of host IDs to be deleted.

Example:

    {
        "hosts": [1, 2, 3, 4, 5]
    }

Return data:

    {
        "hosts": {
            "1": "The host a1 was deleted",
            "2": "The host a2 was deleted",
            "3": "The host a3 was deleted",
            "4": "The host a4 was deleted",
            "5": "The host a5 was deleted",
        }
    }

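A quick way to exercise the endpoint documented above, as a hedged sketch: the controller URL, token, and host IDs below are placeholders, and the path /api/v2/bulk/host_delete/ follows from the URL patterns added later in this changeset.

    import requests

    AWX_URL = "https://awx.example.org"   # placeholder controller URL
    TOKEN = "abc123"                      # placeholder OAuth2 token

    resp = requests.post(
        f"{AWX_URL}/api/v2/bulk/host_delete/",
        json={"hosts": [1, 2, 3, 4, 5]},  # IDs of the hosts to delete
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    print(resp.json()["hosts"])           # per-host deletion messages, as shown above
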
@@ -3,21 +3,34 @@ receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
{% if instance.node_type == "execution" %}
receptor_work_commands:
ansible-runner:
command: ansible-runner
params: worker
allowruntimeparams: true
verifysignature: true
custom_worksign_public_keyfile: receptor/work-public-key.pem
additional_python_packages:
- ansible-runner
{% endif %}
custom_worksign_public_keyfile: receptor/work_public_key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
receptor_protocol: 'tcp'
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
{% if listener_port %}
receptor_protocol: {{ listener_protocol }}
receptor_listener: true
receptor_port: {{ instance.listener_port }}
receptor_dependencies:
- python39-pip
receptor_port: {{ listener_port }}
{% else %}
receptor_listener: false
{% endif %}
{% if peers %}
receptor_peers:
{% for peer in peers %}
- address: {{ peer.address }}
protocol: {{ peer.protocol }}
{% endfor %}
{% endif %}
{% verbatim %}
podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}"

@@ -1,20 +1,22 @@
{% verbatim %}
---
- hosts: all
become: yes
tasks:
- name: Create the receptor group
group:
{% verbatim %}
name: "{{ receptor_group }}"
{% endverbatim %}
state: present
- name: Create the receptor user
user:
{% verbatim %}
name: "{{ receptor_user }}"
{% endverbatim %}
shell: /bin/bash
- name: Enable Copr repo for Receptor
command: dnf copr enable ansible-awx/receptor -y
{% if instance.node_type == "execution" %}
- import_role:
name: ansible.receptor.podman
{% endif %}
- import_role:
name: ansible.receptor.setup
- name: Install ansible-runner
pip:
name: ansible-runner
executable: pip3.9
{% endverbatim %}

@@ -1,4 +1,4 @@
---
collections:
- name: ansible.receptor
version: 1.1.0
version: 2.0.3

@@ -10,6 +10,7 @@ from awx.api.views import (
InstanceInstanceGroupsList,
InstanceHealthCheck,
InstancePeersList,
InstanceReceptorAddressesList,
)
from awx.api.views.instance_install_bundle import InstanceInstallBundle

@@ -21,6 +22,7 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
re_path(r'^(?P<pk>[0-9]+)/receptor_addresses/$', InstanceReceptorAddressesList.as_view(), name='instance_receptor_addresses_list'),
re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
]

awx/api/urls/receptor_address.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import (
ReceptorAddressesList,
ReceptorAddressDetail,
)


urls = [
re_path(r'^$', ReceptorAddressesList.as_view(), name='receptor_addresses_list'),
re_path(r'^(?P<pk>[0-9]+)/$', ReceptorAddressDetail.as_view(), name='receptor_address_detail'),
]

__all__ = ['urls']

@@ -30,12 +30,13 @@ from awx.api.views import (
OAuth2TokenList,
ApplicationOAuth2TokenList,
OAuth2ApplicationDetail,
# HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
HostMetricSummaryMonthlyList,
)

from awx.api.views.bulk import (
BulkView,
BulkHostCreateView,
BulkHostDeleteView,
BulkJobLaunchView,
)

@@ -84,6 +85,7 @@ from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls
from .analytics import urls as analytics_urls
from .receptor_address import urls as receptor_address_urls

v2_urls = [
re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -123,8 +125,7 @@ v2_urls = [
re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
re_path(r'^hosts/', include(host_urls)),
re_path(r'^host_metrics/', include(host_metric_urls)),
# It will be enabled in future version of the AWX
# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
re_path(r'^groups/', include(group_urls)),
re_path(r'^inventory_sources/', include(inventory_source_urls)),
re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -153,7 +154,9 @@ v2_urls = [
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
re_path(r'^receptor_addresses/', include(receptor_address_urls)),
]


@@ -167,10 +170,13 @@ urlpatterns = [
]
if MODE == 'development':
# Only include these if we are in the development environment
from awx.api.swagger import SwaggerSchemaView

urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
from awx.api.swagger import schema_view

from awx.api.urls.debug import urls as debug_urls

urlpatterns += [re_path(r'^debug/', include(debug_urls))]
urlpatterns += [
re_path(r'^swagger(?P<format>\.json|\.yaml)/$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]

@@ -1,10 +1,11 @@
from django.urls import re_path

from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver, BitbucketDcWebhookReceiver


urlpatterns = [
re_path(r'^webhook_key/$', WebhookKeyView.as_view(), name='webhook_key'),
re_path(r'^github/$', GithubWebhookReceiver.as_view(), name='webhook_receiver_github'),
re_path(r'^gitlab/$', GitlabWebhookReceiver.as_view(), name='webhook_receiver_gitlab'),
re_path(r'^bitbucket_dc/$', BitbucketDcWebhookReceiver.as_view(), name='webhook_receiver_bitbucket_dc'),
]

@@ -2,28 +2,21 @@
# All Rights Reserved.

from django.conf import settings
from django.urls import NoReverseMatch

from rest_framework.reverse import _reverse
from rest_framework.reverse import reverse as drf_reverse
from rest_framework.versioning import URLPathVersioning as BaseVersioning


def drf_reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
"""
Copy and monkey-patch `rest_framework.reverse.reverse` to prevent adding unwarranted
query string parameters.
"""
scheme = getattr(request, 'versioning_scheme', None)
if scheme is not None:
try:
url = scheme.reverse(viewname, args, kwargs, request, format, **extra)
except NoReverseMatch:
# In case the versioning scheme reversal fails, fallback to the
# default implementation
url = _reverse(viewname, args, kwargs, request, format, **extra)
else:
url = _reverse(viewname, args, kwargs, request, format, **extra)
def is_optional_api_urlpattern_prefix_request(request):
if settings.OPTIONAL_API_URLPATTERN_PREFIX and request:
if request.path.startswith(f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}"):
return True
return False


def transform_optional_api_urlpattern_prefix_url(request, url):
if is_optional_api_urlpattern_prefix_request(request):
url = url.replace('/api', f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}")
return url


@@ -60,9 +60,14 @@ from oauth2_provider.models import get_access_token_model
|
||||
import pytz
|
||||
from wsgiref.util import FileWrapper
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.lib.utils.requests import get_remote_hosts
|
||||
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
|
||||
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
|
||||
|
||||
# AWX
|
||||
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
|
||||
from awx.main.access import get_user_queryset, HostAccess
|
||||
from awx.main.access import get_user_queryset
|
||||
from awx.api.generics import (
|
||||
APIView,
|
||||
BaseUsersList,
|
||||
@@ -87,6 +92,7 @@ from awx.api.generics import (
|
||||
from awx.api.views.labels import LabelSubListCreateAttachDetachView
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main import models
|
||||
from awx.main.models.rbac import get_role_definition
|
||||
from awx.main.utils import (
|
||||
camelcase_to_underscore,
|
||||
extract_ansible_vars,
|
||||
@@ -124,10 +130,15 @@ from awx.api.views.mixin import (
|
||||
from awx.api.pagination import UnifiedJobEventPagination
|
||||
from awx.main.utils import set_environ
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.api.views')
|
||||
|
||||
|
||||
def unpartitioned_event_horizon(cls):
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
|
||||
if not cursor.fetchone():
|
||||
return 0
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
|
||||
@@ -268,16 +279,24 @@ class DashboardJobsGraphView(APIView):
|
||||
|
||||
success_query = user_unified_jobs.filter(status='successful')
|
||||
failed_query = user_unified_jobs.filter(status='failed')
|
||||
canceled_query = user_unified_jobs.filter(status='canceled')
|
||||
error_query = user_unified_jobs.filter(status='error')
|
||||
|
||||
if job_type == 'inv_sync':
|
||||
success_query = success_query.filter(instance_of=models.InventoryUpdate)
|
||||
failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
|
||||
canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate)
|
||||
error_query = error_query.filter(instance_of=models.InventoryUpdate)
|
||||
elif job_type == 'playbook_run':
|
||||
success_query = success_query.filter(instance_of=models.Job)
|
||||
failed_query = failed_query.filter(instance_of=models.Job)
|
||||
canceled_query = canceled_query.filter(instance_of=models.Job)
|
||||
error_query = error_query.filter(instance_of=models.Job)
|
||||
elif job_type == 'scm_update':
|
||||
success_query = success_query.filter(instance_of=models.ProjectUpdate)
|
||||
failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
|
||||
canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate)
|
||||
error_query = error_query.filter(instance_of=models.ProjectUpdate)
|
||||
|
||||
end = now()
|
||||
interval = 'day'
|
||||
@@ -293,10 +312,12 @@ class DashboardJobsGraphView(APIView):
|
||||
else:
|
||||
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
dashboard_data = {"jobs": {"successful": [], "failed": []}}
|
||||
dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}}
|
||||
|
||||
succ_list = dashboard_data['jobs']['successful']
|
||||
fail_list = dashboard_data['jobs']['failed']
|
||||
canceled_list = dashboard_data['jobs']['canceled']
|
||||
error_list = dashboard_data['jobs']['error']
|
||||
|
||||
qs_s = (
|
||||
success_query.filter(finished__range=(start, end))
|
||||
@@ -314,6 +335,22 @@ class DashboardJobsGraphView(APIView):
|
||||
.annotate(agg=Count('id', distinct=True))
|
||||
)
|
||||
data_f = {item['d']: item['agg'] for item in qs_f}
|
||||
qs_c = (
|
||||
canceled_query.filter(finished__range=(start, end))
|
||||
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
|
||||
.order_by()
|
||||
.values('d')
|
||||
.annotate(agg=Count('id', distinct=True))
|
||||
)
|
||||
data_c = {item['d']: item['agg'] for item in qs_c}
|
||||
qs_e = (
|
||||
error_query.filter(finished__range=(start, end))
|
||||
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
|
||||
.order_by()
|
||||
.values('d')
|
||||
.annotate(agg=Count('id', distinct=True))
|
||||
)
|
||||
data_e = {item['d']: item['agg'] for item in qs_e}
|
||||
|
||||
start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
for d in itertools.count():
|
||||
@@ -322,6 +359,8 @@ class DashboardJobsGraphView(APIView):
|
||||
break
|
||||
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
|
||||
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
|
||||
canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)])
|
||||
error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)])
|
||||
|
||||
return Response(dashboard_data)
|
||||
|
||||
@@ -333,25 +372,34 @@ class InstanceList(ListCreateAPIView):
|
||||
search_fields = ('hostname',)
|
||||
ordering = ('id',)
|
||||
|
||||
def get_queryset(self):
|
||||
qs = super().get_queryset().prefetch_related('receptor_addresses')
|
||||
return qs
|
||||
|
||||
|
||||
class InstanceDetail(RetrieveUpdateAPIView):
|
||||
name = _("Instance Detail")
|
||||
model = models.Instance
|
||||
serializer_class = serializers.InstanceSerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = super().get_queryset().prefetch_related('receptor_addresses')
|
||||
return qs
|
||||
|
||||
def update_raw_data(self, data):
|
||||
# these fields are only valid on creation of an instance, so they unwanted on detail view
|
||||
data.pop('listener_port', None)
|
||||
data.pop('node_type', None)
|
||||
data.pop('hostname', None)
|
||||
data.pop('ip_address', None)
|
||||
return super(InstanceDetail, self).update_raw_data(data)
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
r = super(InstanceDetail, self).update(request, *args, **kwargs)
|
||||
if status.is_success(r.status_code):
|
||||
obj = self.get_object()
|
||||
obj.set_capacity_value()
|
||||
obj.save(update_fields=['capacity'])
|
||||
capacity_changed = obj.set_capacity_value()
|
||||
if capacity_changed:
|
||||
obj.save(update_fields=['capacity'])
|
||||
r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
|
||||
return r
|
||||
|
||||
@@ -370,13 +418,37 @@ class InstanceUnifiedJobsList(SubListAPIView):
|
||||
|
||||
|
||||
class InstancePeersList(SubListAPIView):
|
||||
name = _("Instance Peers")
|
||||
name = _("Peers")
|
||||
model = models.ReceptorAddress
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
parent_model = models.Instance
|
||||
model = models.Instance
|
||||
serializer_class = serializers.InstanceSerializer
|
||||
parent_access = 'read'
|
||||
search_fields = {'hostname'}
|
||||
relationship = 'peers'
|
||||
search_fields = ('address',)
|
||||
|
||||
|
||||
class InstanceReceptorAddressesList(SubListAPIView):
|
||||
name = _("Receptor Addresses")
|
||||
model = models.ReceptorAddress
|
||||
parent_key = 'instance'
|
||||
parent_model = models.Instance
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
search_fields = ('address',)
|
||||
|
||||
|
||||
class ReceptorAddressesList(ListAPIView):
|
||||
name = _("Receptor Addresses")
|
||||
model = models.ReceptorAddress
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
search_fields = ('address',)
|
||||
|
||||
|
||||
class ReceptorAddressDetail(RetrieveAPIView):
|
||||
name = _("Receptor Address Detail")
|
||||
model = models.ReceptorAddress
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
parent_model = models.Instance
|
||||
relationship = 'receptor_addresses'
|
||||
|
||||
|
||||
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
|
||||
@@ -471,6 +543,7 @@ class InstanceGroupAccessList(ResourceAccessList):
|
||||
|
||||
|
||||
class InstanceGroupObjectRolesList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.InstanceGroup
|
||||
@@ -565,7 +638,7 @@ class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
|
||||
if self.relationship not in ask_mapping:
|
||||
return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
|
||||
elif sub.passwords_needed:
|
||||
return {"msg": _("Credential that requires user input on launch " "cannot be used in saved launch configuration.")}
|
||||
return {"msg": _("Credential that requires user input on launch cannot be used in saved launch configuration.")}
|
||||
|
||||
ask_field_name = ask_mapping[self.relationship]
|
||||
|
||||
@@ -640,16 +713,81 @@ class AuthView(APIView):
|
||||
return Response(data)
|
||||
|
||||
|
||||
def immutablesharedfields(cls):
|
||||
'''
|
||||
Class decorator to prevent modifying shared resources when ALLOW_LOCAL_RESOURCE_MANAGEMENT setting is set to False.
|
||||
|
||||
Works by overriding these view methods:
|
||||
- create
|
||||
- delete
|
||||
- perform_update
|
||||
create and delete are overridden to raise a PermissionDenied exception.
|
||||
perform_update is overridden to check if any shared fields are being modified,
|
||||
and raise a PermissionDenied exception if so.
|
||||
'''
|
||||
# create instead of perform_create because some of our views
|
||||
# override create instead of perform_create
|
||||
if hasattr(cls, 'create'):
|
||||
cls.original_create = cls.create
|
||||
|
||||
@functools.wraps(cls.create)
|
||||
def create_wrapper(*args, **kwargs):
|
||||
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||
return cls.original_create(*args, **kwargs)
|
||||
raise PermissionDenied({'detail': _('Creation of this resource is not allowed. Create this resource via the platform ingress.')})
|
||||
|
||||
cls.create = create_wrapper
|
||||
|
||||
if hasattr(cls, 'delete'):
|
||||
cls.original_delete = cls.delete
|
||||
|
||||
@functools.wraps(cls.delete)
|
||||
def delete_wrapper(*args, **kwargs):
|
||||
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||
return cls.original_delete(*args, **kwargs)
|
||||
raise PermissionDenied({'detail': _('Deletion of this resource is not allowed. Delete this resource via the platform ingress.')})
|
||||
|
||||
cls.delete = delete_wrapper
|
||||
|
||||
if hasattr(cls, 'perform_update'):
|
||||
cls.original_perform_update = cls.perform_update
|
||||
|
||||
@functools.wraps(cls.perform_update)
|
||||
def update_wrapper(*args, **kwargs):
|
||||
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||
view, serializer = args
|
||||
instance = view.get_object()
|
||||
if instance:
|
||||
if isinstance(instance, models.Organization):
|
||||
shared_fields = OrganizationType._declared_fields.keys()
|
||||
elif isinstance(instance, models.User):
|
||||
shared_fields = UserType._declared_fields.keys()
|
||||
elif isinstance(instance, models.Team):
|
||||
shared_fields = TeamType._declared_fields.keys()
|
||||
attrs = serializer.validated_data
|
||||
for field in shared_fields:
|
||||
if field in attrs and getattr(instance, field) != attrs[field]:
|
||||
raise PermissionDenied({field: _(f"Cannot change shared field '{field}'. Alter this field via the platform ingress.")})
|
||||
return cls.original_perform_update(*args, **kwargs)
|
||||
|
||||
cls.perform_update = update_wrapper
|
||||
|
||||
return cls
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class TeamList(ListCreateAPIView):
|
||||
model = models.Team
|
||||
serializer_class = serializers.TeamSerializer
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class TeamDetail(RetrieveUpdateDestroyAPIView):
|
||||
model = models.Team
|
||||
serializer_class = serializers.TeamSerializer
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class TeamUsersList(BaseUsersList):
|
||||
model = models.User
|
||||
serializer_class = serializers.UserSerializer
|
||||
@@ -659,6 +797,7 @@ class TeamUsersList(BaseUsersList):
|
||||
|
||||
|
||||
class TeamRolesList(SubListAttachDetachAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializerWithParentAccess
|
||||
metadata_class = RoleMetadata
|
||||
@@ -698,10 +837,12 @@ class TeamRolesList(SubListAttachDetachAPIView):
|
||||
|
||||
|
||||
class TeamObjectRolesList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.Team
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
@@ -719,8 +860,15 @@ class TeamProjectsList(SubListAPIView):
|
||||
self.check_parent_access(team)
|
||||
model_ct = ContentType.objects.get_for_model(self.model)
|
||||
parent_ct = ContentType.objects.get_for_model(self.parent_model)
|
||||
proj_roles = models.Role.objects.filter(Q(ancestors__content_type=parent_ct) & Q(ancestors__object_id=team.pk), content_type=model_ct)
|
||||
return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles])
|
||||
|
||||
rd = get_role_definition(team.member_role)
|
||||
role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first()
|
||||
if role is None:
|
||||
# Team has no permissions, therefore team has no projects
|
||||
return self.model.objects.none()
|
||||
else:
|
||||
project_qs = self.model.accessible_objects(self.request.user, 'read_role')
|
||||
return project_qs.filter(id__in=RoleEvaluation.objects.filter(content_type_id=model_ct.id, role=role).values_list('object_id'))
|
||||
|
||||
|
||||
class TeamActivityStreamList(SubListAPIView):
|
||||
@@ -735,10 +883,23 @@ class TeamActivityStreamList(SubListAPIView):
|
||||
self.check_parent_access(parent)
|
||||
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
|
||||
return qs.filter(
|
||||
Q(team=parent)
|
||||
| Q(project__in=models.Project.accessible_objects(parent, 'read_role'))
|
||||
| Q(credential__in=models.Credential.accessible_objects(parent, 'read_role'))
|
||||
| Q(
|
||||
project__in=RoleEvaluation.objects.filter(
|
||||
role__in=parent.has_roles.all(), content_type_id=ContentType.objects.get_for_model(models.Project).id, codename='view_project'
|
||||
)
|
||||
.values_list('object_id')
|
||||
.distinct()
|
||||
)
|
||||
| Q(
|
||||
credential__in=RoleEvaluation.objects.filter(
|
||||
role__in=parent.has_roles.all(), content_type_id=ContentType.objects.get_for_model(models.Credential).id, codename='view_credential'
|
||||
)
|
||||
.values_list('object_id')
|
||||
.distinct()
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -794,13 +955,7 @@ class ExecutionEnvironmentActivityStreamList(SubListAPIView):
|
||||
parent_model = models.ExecutionEnvironment
|
||||
relationship = 'activitystream_set'
|
||||
search_fields = ('changes',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(execution_environment=parent)
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class ProjectList(ListCreateAPIView):
|
||||
@@ -996,10 +1151,12 @@ class ProjectAccessList(ResourceAccessList):
|
||||
|
||||
|
||||
class ProjectObjectRolesList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.Project
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
@@ -1012,6 +1169,7 @@ class ProjectCopy(CopyAPIView):
|
||||
copy_return_serializer_class = serializers.ProjectSerializer
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class UserList(ListCreateAPIView):
|
||||
model = models.User
|
||||
serializer_class = serializers.UserSerializer
|
||||
@@ -1157,6 +1315,7 @@ class UserTeamsList(SubListAPIView):
|
||||
|
||||
|
||||
class UserRolesList(SubListAttachDetachAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializerWithParentAccess
|
||||
metadata_class = RoleMetadata
|
||||
@@ -1181,7 +1340,16 @@ class UserRolesList(SubListAttachDetachAPIView):
|
||||
user = get_object_or_400(models.User, pk=self.kwargs['pk'])
|
||||
role = get_object_or_400(models.Role, pk=sub_id)
|
||||
|
||||
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
||||
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
|
||||
# Prevent user to be associated with team/org when ALLOW_LOCAL_RESOURCE_MANAGEMENT is False
|
||||
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||
for model in [models.Organization, models.Team]:
|
||||
ct = content_types[model]
|
||||
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
|
||||
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
|
||||
return Response(data, status=status.HTTP_403_FORBIDDEN)
|
||||
|
||||
credential_content_type = content_types[models.Credential]
|
||||
if role.content_type == credential_content_type:
|
||||
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||
data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
|
||||
@@ -1253,6 +1421,7 @@ class UserActivityStreamList(SubListAPIView):
|
||||
return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class UserDetail(RetrieveUpdateDestroyAPIView):
|
||||
model = models.User
|
||||
serializer_class = serializers.UserSerializer
|
||||
@@ -1398,7 +1567,7 @@ class OrganizationCredentialList(SubListCreateAPIView):
|
||||
self.check_parent_access(organization)
|
||||
|
||||
user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all()
|
||||
org_set = models.Credential.accessible_objects(organization.admin_role, 'read_role').all()
|
||||
org_set = models.Credential.objects.filter(organization=organization)
|
||||
|
||||
if self.request.user.is_superuser or self.request.user.is_system_auditor:
|
||||
return org_set
|
||||
@@ -1431,10 +1600,12 @@ class CredentialAccessList(ResourceAccessList):
|
||||
|
||||
|
||||
class CredentialObjectRolesList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.Credential
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
@@ -1570,16 +1741,15 @@ class HostMetricDetail(RetrieveDestroyAPIView):
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
# It will be enabled in future version of the AWX
|
||||
# class HostMetricSummaryMonthlyList(ListAPIView):
|
||||
# name = _("Host Metrics Summary Monthly")
|
||||
# model = models.HostMetricSummaryMonthly
|
||||
# serializer_class = serializers.HostMetricSummaryMonthlySerializer
|
||||
# permission_classes = (IsSystemAdminOrAuditor,)
|
||||
# search_fields = ('date',)
|
||||
#
|
||||
# def get_queryset(self):
|
||||
# return self.model.objects.all()
|
||||
class HostMetricSummaryMonthlyList(ListAPIView):
|
||||
name = _("Host Metrics Summary Monthly")
|
||||
model = models.HostMetricSummaryMonthly
|
||||
serializer_class = serializers.HostMetricSummaryMonthlySerializer
|
||||
permission_classes = (IsSystemAdminOrAuditor,)
|
||||
search_fields = ('date',)
|
||||
|
||||
def get_queryset(self):
|
||||
return self.model.objects.all()
|
||||
|
||||
|
||||
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
@@ -1634,13 +1804,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
|
||||
parent_model = models.Inventory
|
||||
relationship = 'hosts'
|
||||
parent_key = 'inventory'
|
||||
|
||||
def get_queryset(self):
|
||||
inventory = self.get_parent_object()
|
||||
qs = getattrd(inventory, self.relationship).all()
|
||||
# Apply queryset optimizations
|
||||
qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
|
||||
return qs
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class HostGroupsList(SubListCreateAttachDetachAPIView):
|
||||
@@ -2228,12 +2392,13 @@ class JobTemplateList(ListCreateAPIView):
|
||||
serializer_class = serializers.JobTemplateSerializer
|
||||
always_allow_superuser = False
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
ret = super(JobTemplateList, self).post(request, *args, **kwargs)
|
||||
if ret.status_code == 201:
|
||||
job_template = models.JobTemplate.objects.get(id=ret.data['id'])
|
||||
job_template.admin_role.members.add(request.user)
|
||||
return ret
|
||||
def check_permissions(self, request):
|
||||
if request.method == 'POST':
|
||||
can_access, messages = request.user.can_access_with_errors(self.model, 'add', request.data)
|
||||
if not can_access:
|
||||
self.permission_denied(request, message=messages)
|
||||
|
||||
super(JobTemplateList, self).check_permissions(request)
|
||||
|
||||
|
||||
class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
@@ -2513,7 +2678,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
return Response(
|
||||
dict(
|
||||
error=_(
|
||||
"$encrypted$ is a reserved keyword for password question defaults, " "survey question {idx} is type {survey_item[type]}."
|
||||
"$encrypted$ is a reserved keyword for password question defaults, survey question {idx} is type {survey_item[type]}."
|
||||
).format(**context)
|
||||
),
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
@@ -2581,16 +2746,7 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
|
||||
serializer_class = serializers.CredentialSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'credentials'
|
||||
|
||||
def get_queryset(self):
|
||||
# Return the full list of credentials
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
sublist_qs = getattrd(parent, self.relationship)
|
||||
sublist_qs = sublist_qs.prefetch_related(
|
||||
'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
|
||||
)
|
||||
return sublist_qs
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
|
||||
@@ -2623,12 +2779,7 @@ class JobTemplateCallback(GenericAPIView):
|
||||
host for the current request.
|
||||
"""
|
||||
# Find the list of remote host names/IPs to check.
|
||||
remote_hosts = set()
|
||||
for header in settings.REMOTE_HOST_HEADERS:
|
||||
for value in self.request.META.get(header, '').split(','):
|
||||
value = value.strip()
|
||||
if value:
|
||||
remote_hosts.add(value)
|
||||
remote_hosts = set(get_remote_hosts(self.request))
|
||||
# Add the reverse lookup of IP addresses.
|
||||
for rh in list(remote_hosts):
|
||||
try:
|
||||
@@ -2692,7 +2843,10 @@ class JobTemplateCallback(GenericAPIView):
|
||||
# Permission class should have already validated host_config_key.
|
||||
job_template = self.get_object()
|
||||
# Attempt to find matching hosts based on remote address.
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
if job_template.inventory:
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
else:
|
||||
return Response({"msg": _("Cannot start automatically, an inventory is required.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
# If the host is not found, update the inventory before trying to
|
||||
# match again.
|
||||
inventory_sources_already_updated = []
|
||||
@@ -2777,6 +2931,7 @@ class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = serializers.InstanceGroupSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class JobTemplateAccessList(ResourceAccessList):
|
||||
@@ -2785,10 +2940,12 @@ class JobTemplateAccessList(ResourceAccessList):
|
||||
|
||||
|
||||
class JobTemplateObjectRolesList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.JobTemplate
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
@@ -2867,16 +3024,7 @@ class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, Su
|
||||
relationship = ''
|
||||
enforce_parent_relationship = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
'''
|
||||
Limit the set of WorkflowJobTemplateNodes to the related nodes of specified by
|
||||
'relationship'
|
||||
'''
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if created:
|
||||
@@ -2951,14 +3099,7 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
|
||||
parent_model = models.WorkflowJobNode
|
||||
relationship = ''
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
#
|
||||
# Limit the set of WorkflowJobNodes to the related nodes of specified by self.relationship
|
||||
#
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
|
||||
@@ -2978,6 +3119,14 @@ class WorkflowJobTemplateList(ListCreateAPIView):
|
||||
serializer_class = serializers.WorkflowJobTemplateSerializer
|
||||
always_allow_superuser = False
|
||||
|
||||
def check_permissions(self, request):
|
||||
if request.method == 'POST':
|
||||
can_access, messages = request.user.can_access_with_errors(self.model, 'add', request.data)
|
||||
if not can_access:
|
||||
self.permission_denied(request, message=messages)
|
||||
|
||||
super(WorkflowJobTemplateList, self).check_permissions(request)
|
||||
|
||||
|
||||
class WorkflowJobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
model = models.WorkflowJobTemplate
|
||||
@@ -3137,11 +3286,8 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
|
||||
relationship = 'workflow_job_template_nodes'
|
||||
parent_key = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
ordering = ('id',) # assure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobTemplateJobsList(SubListAPIView):
|
||||
@@ -3190,10 +3336,12 @@ class WorkflowJobTemplateAccessList(ResourceAccessList):
|
||||
|
||||
|
||||
class WorkflowJobTemplateObjectRolesList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.WorkflowJobTemplate
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
@@ -3233,11 +3381,8 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
|
||||
relationship = 'workflow_job_nodes'
|
||||
parent_key = 'workflow_job'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
ordering = ('id',) # assure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobCancel(GenericCancelView):
|
||||
@@ -3372,7 +3517,6 @@ class JobLabelList(SubListAPIView):
|
||||
serializer_class = serializers.LabelSerializer
|
||||
parent_model = models.Job
|
||||
relationship = 'labels'
|
||||
parent_key = 'job'
|
||||
|
||||
|
||||
class WorkflowJobLabelList(JobLabelList):
|
||||
@@ -3551,11 +3695,7 @@ class BaseJobHostSummariesList(SubListAPIView):
|
||||
relationship = 'job_host_summaries'
|
||||
name = _('Job Host Summaries List')
|
||||
search_fields = ('host_name',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class HostJobHostSummariesList(BaseJobHostSummariesList):
|
||||
@@ -4099,7 +4239,7 @@ class UnifiedJobStdout(RetrieveAPIView):
|
||||
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
|
||||
except models.StdoutMaxBytesExceeded as e:
|
||||
response_message = _(
|
||||
"Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes."
|
||||
"Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes."
|
||||
).format(text_size=e.total, supported_size=e.supported)
|
||||
if request.accepted_renderer.format == 'json':
|
||||
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
|
||||
@@ -4210,6 +4350,7 @@ class ActivityStreamDetail(RetrieveAPIView):
|
||||
|
||||
|
||||
class RoleList(ListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
permission_classes = (IsAuthenticated,)
|
||||
@@ -4217,11 +4358,13 @@ class RoleList(ListAPIView):
|
||||
|
||||
|
||||
class RoleDetail(RetrieveAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
|
||||
|
||||
class RoleUsersList(SubListAttachDetachAPIView):
|
||||
deprecated = True
|
||||
model = models.User
|
||||
serializer_class = serializers.UserSerializer
|
||||
parent_model = models.Role
|
||||
@@ -4242,7 +4385,15 @@ class RoleUsersList(SubListAttachDetachAPIView):
        user = get_object_or_400(models.User, pk=sub_id)
        role = self.get_parent_object()

        credential_content_type = ContentType.objects.get_for_model(models.Credential)
        content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential)  # dict of {model: content_type}
        if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
            for model in [models.Organization, models.Team]:
                ct = content_types[model]
                if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
                    data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
                    return Response(data, status=status.HTTP_403_FORBIDDEN)

        credential_content_type = content_types[models.Credential]
        if role.content_type == credential_content_type:
            if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
                data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
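The get_for_models() call introduced above replaces per-model ContentType lookups with a single batched query. A small sketch of what it returns (run inside a configured Django/AWX environment; the printed value is illustrative):

# get_for_models() returns a dict keyed by model class, one ContentType per model.
from django.contrib.contenttypes.models import ContentType
from awx.main import models

content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential)
print(content_types[models.Credential].model)  # 'credential'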
@@ -4256,6 +4407,7 @@ class RoleUsersList(SubListAttachDetachAPIView):
|
||||
|
||||
|
||||
class RoleTeamsList(SubListAttachDetachAPIView):
|
||||
deprecated = True
|
||||
model = models.Team
|
||||
serializer_class = serializers.TeamSerializer
|
||||
parent_model = models.Role
|
||||
@@ -4300,10 +4452,12 @@ class RoleTeamsList(SubListAttachDetachAPIView):
|
||||
team.member_role.children.remove(role)
|
||||
else:
|
||||
team.member_role.children.add(role)
|
||||
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
class RoleParentsList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.Role
|
||||
@@ -4317,6 +4471,7 @@ class RoleParentsList(SubListAPIView):
|
||||
|
||||
|
||||
class RoleChildrenList(SubListAPIView):
|
||||
deprecated = True
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.Role
|
||||
|
||||
@@ -48,23 +48,23 @@ class AnalyticsRootView(APIView):

    def get(self, request, format=None):
        data = OrderedDict()
        data['authorized'] = reverse('api:analytics_authorized')
        data['reports'] = reverse('api:analytics_reports_list')
        data['report_options'] = reverse('api:analytics_report_options_list')
        data['adoption_rate'] = reverse('api:analytics_adoption_rate')
        data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
        data['event_explorer'] = reverse('api:analytics_event_explorer')
        data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
        data['host_explorer'] = reverse('api:analytics_host_explorer')
        data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
        data['job_explorer'] = reverse('api:analytics_job_explorer')
        data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
        data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
        data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
        data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
        data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
        data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
        data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
        data['authorized'] = reverse('api:analytics_authorized', request=request)
        data['reports'] = reverse('api:analytics_reports_list', request=request)
        data['report_options'] = reverse('api:analytics_report_options_list', request=request)
        data['adoption_rate'] = reverse('api:analytics_adoption_rate', request=request)
        data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options', request=request)
        data['event_explorer'] = reverse('api:analytics_event_explorer', request=request)
        data['event_explorer_options'] = reverse('api:analytics_event_explorer_options', request=request)
        data['host_explorer'] = reverse('api:analytics_host_explorer', request=request)
        data['host_explorer_options'] = reverse('api:analytics_host_explorer_options', request=request)
        data['job_explorer'] = reverse('api:analytics_job_explorer', request=request)
        data['job_explorer_options'] = reverse('api:analytics_job_explorer_options', request=request)
        data['probe_templates'] = reverse('api:analytics_probe_templates_explorer', request=request)
        data['probe_templates_options'] = reverse('api:analytics_probe_templates_options', request=request)
        data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer', request=request)
        data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options', request=request)
        data['roi_templates'] = reverse('api:analytics_roi_templates_explorer', request=request)
        data['roi_templates_options'] = reverse('api:analytics_roi_templates_options', request=request)
        return Response(data)
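The hunk above swaps every reverse() call to pass the current request. A minimal sketch of the difference (view and comments are illustrative, not taken from the diff): DRF's reverse() returns a relative path by default, but builds a fully qualified URL when it is given the incoming request.

# Minimal sketch, assuming an 'api:analytics_reports_list' URL name is registered.
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView

class ExampleRootView(APIView):
    def get(self, request, format=None):
        relative = reverse('api:analytics_reports_list')                   # e.g. '/api/v2/analytics/reports/'
        absolute = reverse('api:analytics_reports_list', request=request)  # e.g. 'https://awx.example.org/api/v2/analytics/reports/'
        return Response({'relative': relative, 'absolute': absolute})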
@@ -1,5 +1,7 @@
|
||||
from collections import OrderedDict
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from rest_framework.permissions import IsAuthenticated
|
||||
from rest_framework.renderers import JSONRenderer
|
||||
from rest_framework.reverse import reverse
|
||||
@@ -18,6 +20,9 @@ from awx.api import (
|
||||
|
||||
|
||||
class BulkView(APIView):
|
||||
name = _('Bulk')
|
||||
swagger_topic = 'Bulk'
|
||||
|
||||
permission_classes = [IsAuthenticated]
|
||||
renderer_classes = [
|
||||
renderers.BrowsableAPIRenderer,
|
||||
@@ -29,6 +34,7 @@ class BulkView(APIView):
|
||||
'''List top level resources'''
|
||||
data = OrderedDict()
|
||||
data['host_create'] = reverse('api:bulk_host_create', request=request)
|
||||
data['host_delete'] = reverse('api:bulk_host_delete', request=request)
|
||||
data['job_launch'] = reverse('api:bulk_job_launch', request=request)
|
||||
return Response(data)
|
||||
|
||||
@@ -67,3 +73,20 @@ class BulkHostCreateView(GenericAPIView):
            result = serializer.create(serializer.validated_data)
            return Response(result, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class BulkHostDeleteView(GenericAPIView):
    permission_classes = [IsAuthenticated]
    model = Host
    serializer_class = serializers.BulkHostDeleteSerializer
    allowed_methods = ['GET', 'POST', 'OPTIONS']

    def get(self, request):
        return Response({"detail": "Bulk delete hosts with this endpoint"}, status=status.HTTP_200_OK)

    def post(self, request):
        serializer = serializers.BulkHostDeleteSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            result = serializer.delete(serializer.validated_data)
            return Response(result, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
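The new BulkHostDeleteView mirrors the existing bulk host create endpoint. A hypothetical client call follows; the URL path and payload shape are assumed from the serializer name rather than shown in the diff:

# Assumed endpoint and payload; adjust to the actual route exposed under /api/v2/bulk/.
import requests

resp = requests.post(
    "https://awx.example.org/api/v2/bulk/host_delete/",
    json={"hosts": [1, 2, 3]},  # host ids to delete, as the serializer presumably expects
    auth=("admin", "password"),
    timeout=30,
)
print(resp.status_code, resp.json())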
@@ -6,6 +6,8 @@ import io
|
||||
import ipaddress
|
||||
import os
|
||||
import tarfile
|
||||
import time
|
||||
import re
|
||||
|
||||
import asn1
|
||||
from awx.api import serializers
|
||||
@@ -40,6 +42,8 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
|
||||
# │ │ └── receptor.key
|
||||
# │ └── work-public-key.pem
|
||||
# └── requirements.yml
|
||||
|
||||
|
||||
class InstanceInstallBundle(GenericAPIView):
|
||||
name = _('Install Bundle')
|
||||
model = models.Instance
|
||||
@@ -49,56 +53,54 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
def get(self, request, *args, **kwargs):
|
||||
instance_obj = self.get_object()
|
||||
|
||||
if instance_obj.node_type not in ('execution',):
|
||||
if instance_obj.node_type not in ('execution', 'hop'):
|
||||
return Response(
|
||||
data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
|
||||
data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')),
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
with io.BytesIO() as f:
|
||||
with tarfile.open(fileobj=f, mode='w:gz') as tar:
|
||||
# copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
|
||||
tar.add(
|
||||
os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
|
||||
)
|
||||
# copy /etc/receptor/tls/ca/mesh-CA.crt to receptor/tls/ca in the tar file
|
||||
tar.add(os.path.realpath('/etc/receptor/tls/ca/mesh-CA.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/mesh-CA.crt")
|
||||
|
||||
# copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
|
||||
tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")
|
||||
# copy /etc/receptor/work_public_key.pem to receptor/work_public_key.pem
|
||||
tar.add('/etc/receptor/work_public_key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work_public_key.pem")
|
||||
|
||||
# generate and write the receptor key to receptor/tls/receptor.key in the tar file
|
||||
key, cert = generate_receptor_tls(instance_obj)
|
||||
|
||||
def tar_addfile(tarinfo, filecontent):
|
||||
tarinfo.mtime = time.time()
|
||||
tarinfo.size = len(filecontent)
|
||||
tar.addfile(tarinfo, io.BytesIO(filecontent))
|
||||
|
||||
key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
|
||||
key_tarinfo.size = len(key)
|
||||
tar.addfile(key_tarinfo, io.BytesIO(key))
|
||||
tar_addfile(key_tarinfo, key)
|
||||
|
||||
cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
|
||||
cert_tarinfo.size = len(cert)
|
||||
tar.addfile(cert_tarinfo, io.BytesIO(cert))
|
||||
tar_addfile(cert_tarinfo, cert)
|
||||
|
||||
# generate and write install_receptor.yml to the tar file
|
||||
playbook = generate_playbook().encode('utf-8')
|
||||
playbook = generate_playbook(instance_obj).encode('utf-8')
|
||||
playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
|
||||
playbook_tarinfo.size = len(playbook)
|
||||
tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
|
||||
tar_addfile(playbook_tarinfo, playbook)
|
||||
|
||||
# generate and write inventory.yml to the tar file
|
||||
inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
|
||||
inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
|
||||
inventory_yml_tarinfo.size = len(inventory_yml)
|
||||
tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
|
||||
tar_addfile(inventory_yml_tarinfo, inventory_yml)
|
||||
|
||||
# generate and write group_vars/all.yml to the tar file
|
||||
group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
|
||||
group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
|
||||
group_vars_tarinfo.size = len(group_vars)
|
||||
tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
|
||||
tar_addfile(group_vars_tarinfo, group_vars)
|
||||
|
||||
# generate and write requirements.yml to the tar file
|
||||
requirements_yml = generate_requirements_yml().encode('utf-8')
|
||||
requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
|
||||
requirements_yml_tarinfo.size = len(requirements_yml)
|
||||
tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
|
||||
tar_addfile(requirements_yml_tarinfo, requirements_yml)
|
||||
|
||||
# respond with the tarfile
|
||||
f.seek(0)
|
||||
@@ -107,8 +109,10 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
return response
|
||||
|
||||
|
||||
def generate_playbook():
|
||||
return render_to_string("instance_install_bundle/install_receptor.yml")
|
||||
def generate_playbook(instance_obj):
|
||||
playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj))
|
||||
# convert consecutive newlines with a single newline
|
||||
return re.sub(r'\n+', '\n', playbook_yaml)
|
||||
|
||||
|
||||
def generate_requirements_yml():
|
||||
@@ -120,7 +124,21 @@ def generate_inventory_yml(instance_obj):


def generate_group_vars_all_yml(instance_obj):
    return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
    # get peers
    peers = []
    for addr in instance_obj.peers.select_related('instance'):
        peers.append(dict(address=addr.get_full_address(), protocol=addr.protocol))
    context = dict(instance=instance_obj, peers=peers)

    canonical_addr = instance_obj.canonical_address
    if canonical_addr:
        context['listener_port'] = canonical_addr.port
        protocol = canonical_addr.protocol if canonical_addr.protocol != 'wss' else 'ws'
        context['listener_protocol'] = protocol

    all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=context)
    # convert consecutive newlines with a single newline
    return re.sub(r'\n+', '\n', all_yaml)


def generate_receptor_tls(instance_obj):
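Both generate_playbook() and generate_group_vars_all_yml() above squash blank lines left behind by skipped template branches. A quick illustration of that re.sub() call, with made-up rendered output:

# Consecutive newlines collapse to a single newline.
import re

rendered = "receptor_host_identifier: exec1\n\n\nreceptor_listener: true\n\n"
print(re.sub(r'\n+', '\n', rendered))
# receptor_host_identifier: exec1
# receptor_listener: true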
@@ -161,14 +179,14 @@ def generate_receptor_tls(instance_obj):
|
||||
.sign(key, hashes.SHA256())
|
||||
)
|
||||
|
||||
# sign csr with the receptor ca key from /etc/receptor/ca/receptor-ca.key
|
||||
with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
|
||||
# sign csr with the receptor ca key from /etc/receptor/ca/mesh-CA.key
|
||||
with open('/etc/receptor/tls/ca/mesh-CA.key', 'rb') as f:
|
||||
ca_key = serialization.load_pem_private_key(
|
||||
f.read(),
|
||||
password=None,
|
||||
)
|
||||
|
||||
with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
|
||||
with open('/etc/receptor/tls/ca/mesh-CA.crt', 'rb') as f:
|
||||
ca_cert = x509.load_pem_x509_certificate(f.read())
|
||||
|
||||
cert = (
|
||||
|
||||
@@ -152,6 +152,7 @@ class InventoryObjectRolesList(SubListAPIView):
|
||||
serializer_class = RoleSerializer
|
||||
parent_model = Inventory
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
|
||||
@@ -17,7 +17,7 @@ class MeshVisualizer(APIView):
|
||||
def get(self, request, format=None):
|
||||
data = {
|
||||
'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data,
|
||||
'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source'), many=True).data,
|
||||
'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target__instance', 'source'), many=True).data,
|
||||
}
|
||||
|
||||
return Response(data)
|
||||
|
||||
@@ -50,7 +50,7 @@ class UnifiedJobDeletionMixin(object):
|
||||
return Response({"error": _("Job has not finished processing events.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
|
||||
logger.warning('Allowing deletion of {} through the API without all events processed.'.format(obj.log_format))
|
||||
|
||||
# Manually cascade delete events if unpartitioned job
|
||||
if obj.has_unpartitioned_events:
|
||||
|
||||
@@ -53,21 +53,18 @@ from awx.api.serializers import (
|
||||
CredentialSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin
|
||||
from awx.api.views import immutablesharedfields
|
||||
|
||||
logger = logging.getLogger('awx.api.views.organization')
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||
model = Organization
|
||||
serializer_class = OrganizationSerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = Organization.accessible_objects(self.request.user, 'read_role')
|
||||
qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
|
||||
qs = qs.prefetch_related('created_by', 'modified_by')
|
||||
return qs
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
model = Organization
|
||||
serializer_class = OrganizationSerializer
|
||||
@@ -110,6 +107,7 @@ class OrganizationInventoriesList(SubListAPIView):
|
||||
relationship = 'inventories'
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class OrganizationUsersList(BaseUsersList):
|
||||
model = User
|
||||
serializer_class = UserSerializer
|
||||
@@ -118,6 +116,7 @@ class OrganizationUsersList(BaseUsersList):
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class OrganizationAdminsList(BaseUsersList):
|
||||
model = User
|
||||
serializer_class = UserSerializer
|
||||
@@ -156,6 +155,7 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
|
||||
parent_key = 'organization'
|
||||
|
||||
|
||||
@immutablesharedfields
|
||||
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
|
||||
model = Team
|
||||
serializer_class = TeamSerializer
|
||||
@@ -207,6 +207,7 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = InstanceGroupSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
@@ -214,6 +215,7 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
serializer_class = CredentialSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'galaxy_credentials'
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.kind != 'galaxy_api_token':
|
||||
@@ -230,6 +232,7 @@ class OrganizationObjectRolesList(SubListAPIView):
|
||||
serializer_class = RoleSerializer
|
||||
parent_model = Organization
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
|
||||
@@ -13,6 +13,7 @@ from django.utils.decorators import method_decorator
|
||||
from django.views.decorators.csrf import ensure_csrf_cookie
|
||||
from django.template.loader import render_to_string
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.urls import reverse as django_reverse
|
||||
|
||||
from rest_framework.permissions import AllowAny, IsAuthenticated
|
||||
from rest_framework.response import Response
|
||||
@@ -20,13 +21,14 @@ from rest_framework import status
|
||||
|
||||
import requests
|
||||
|
||||
from awx import MODE
|
||||
from awx.api.generics import APIView
|
||||
from awx.conf.registry import settings_registry
|
||||
from awx.main.analytics import all_collectors
|
||||
from awx.main.ha import is_ha_environment
|
||||
from awx.main.utils import get_awx_version, get_custom_venv_choices
|
||||
from awx.main.utils.licensing import validate_entitlement_manifest
|
||||
from awx.api.versioning import reverse, drf_reverse
|
||||
from awx.api.versioning import URLPathVersioning, is_optional_api_urlpattern_prefix_request, reverse, drf_reverse
|
||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
|
||||
from awx.main.utils import set_environ
|
||||
@@ -38,22 +40,24 @@ logger = logging.getLogger('awx.api.views.root')
|
||||
class ApiRootView(APIView):
|
||||
permission_classes = (AllowAny,)
|
||||
name = _('REST API')
|
||||
versioning_class = None
|
||||
versioning_class = URLPathVersioning
|
||||
swagger_topic = 'Versioning'
|
||||
|
||||
@method_decorator(ensure_csrf_cookie)
|
||||
def get(self, request, format=None):
|
||||
'''List supported API versions'''
|
||||
|
||||
v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
|
||||
v2 = reverse('api:api_v2_root_view', request=request, kwargs={'version': 'v2'})
|
||||
data = OrderedDict()
|
||||
data['description'] = _('AWX REST API')
|
||||
data['current_version'] = v2
|
||||
data['available_versions'] = dict(v2=v2)
|
||||
data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
|
||||
if not is_optional_api_urlpattern_prefix_request(request):
|
||||
data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
|
||||
data['custom_logo'] = settings.CUSTOM_LOGO
|
||||
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
|
||||
data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
|
||||
if MODE == 'development':
|
||||
data['swagger'] = drf_reverse('api:schema-swagger-ui')
|
||||
return Response(data)
|
||||
|
||||
|
||||
@@ -81,6 +85,7 @@ class ApiVersionRootView(APIView):
|
||||
data['ping'] = reverse('api:api_v2_ping_view', request=request)
|
||||
data['instances'] = reverse('api:instance_list', request=request)
|
||||
data['instance_groups'] = reverse('api:instance_group_list', request=request)
|
||||
data['receptor_addresses'] = reverse('api:receptor_addresses_list', request=request)
|
||||
data['config'] = reverse('api:api_v2_config_view', request=request)
|
||||
data['settings'] = reverse('api:setting_category_list', request=request)
|
||||
data['me'] = reverse('api:user_me_list', request=request)
|
||||
@@ -104,8 +109,7 @@ class ApiVersionRootView(APIView):
|
||||
data['groups'] = reverse('api:group_list', request=request)
|
||||
data['hosts'] = reverse('api:host_list', request=request)
|
||||
data['host_metrics'] = reverse('api:host_metric_list', request=request)
|
||||
# It will be enabled in future version of the AWX
|
||||
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
|
||||
data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
|
||||
data['job_templates'] = reverse('api:job_template_list', request=request)
|
||||
data['jobs'] = reverse('api:job_list', request=request)
|
||||
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
|
||||
@@ -127,6 +131,10 @@ class ApiVersionRootView(APIView):
|
||||
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
|
||||
data['bulk'] = reverse('api:bulk', request=request)
|
||||
data['analytics'] = reverse('api:analytics_root_view', request=request)
|
||||
data['service_index'] = django_reverse('service-index-root')
|
||||
data['role_definitions'] = django_reverse('roledefinition-list')
|
||||
data['role_user_assignments'] = django_reverse('roleuserassignment-list')
|
||||
data['role_team_assignments'] = django_reverse('roleteamassignment-list')
|
||||
return Response(data)
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
from hashlib import sha1
from hashlib import sha1, sha256
import hmac
import logging
import urllib.parse
@@ -99,14 +99,31 @@ class WebhookReceiverBase(APIView):
    def get_signature(self):
        raise NotImplementedError

    def must_check_signature(self):
        return True

    def is_ignored_request(self):
        return False

    def check_signature(self, obj):
        if not obj.webhook_key:
            raise PermissionDenied
        if not self.must_check_signature():
            logger.debug("skipping signature validation")
            return

        mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
        logger.debug("header signature: %s", self.get_signature())
        hash_alg, expected_digest = self.get_signature()
        if hash_alg == 'sha1':
            mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
        elif hash_alg == 'sha256':
            mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha256)
        else:
            logger.debug("Unsupported signature type, supported: sha1, sha256, received: {}".format(hash_alg))
            raise PermissionDenied

        logger.debug("header signature: %s", expected_digest)
        logger.debug("calculated signature: %s", force_bytes(mac.hexdigest()))
        if not hmac.compare_digest(force_bytes(mac.hexdigest()), self.get_signature()):
        if not hmac.compare_digest(force_bytes(mac.hexdigest()), expected_digest):
            raise PermissionDenied

    @csrf_exempt
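The reworked check_signature() above accepts either sha1 or sha256 digests. A sender-side sketch of the matching header value, assuming the GitHub-style "<alg>=<hexdigest>" format that get_signature() parses:

# Sketch only: computes the digest a webhook sender would place in its signature header.
import hmac
from hashlib import sha256

webhook_key = b"shared-secret"
body = b'{"ref": "refs/heads/main"}'
header_value = "sha256=" + hmac.new(webhook_key, msg=body, digestmod=sha256).hexdigest()
# The receiver recomputes the digest over request.body and compares it with
# hmac.compare_digest() so the check runs in constant time.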
@@ -114,10 +131,14 @@ class WebhookReceiverBase(APIView):
        # Ensure that the full contents of the request are captured for multiple uses.
        request.body

        logger.debug("headers: {}\n" "data: {}\n".format(request.headers, request.data))
        logger.debug("headers: {}\ndata: {}\n".format(request.headers, request.data))
        obj = self.get_object()
        self.check_signature(obj)

        if self.is_ignored_request():
            # This was an ignored request type (e.g. ping), don't act on it
            return Response({'message': _("Webhook ignored")}, status=status.HTTP_200_OK)

        event_type = self.get_event_type()
        event_guid = self.get_event_guid()
        event_ref = self.get_event_ref()
@@ -186,7 +207,7 @@ class GithubWebhookReceiver(WebhookReceiverBase):
        if hash_alg != 'sha1':
            logger.debug("Unsupported signature type, expected: sha1, received: {}".format(hash_alg))
            raise PermissionDenied
        return force_bytes(signature)
        return hash_alg, force_bytes(signature)


class GitlabWebhookReceiver(WebhookReceiverBase):
@@ -214,15 +235,73 @@ class GitlabWebhookReceiver(WebhookReceiverBase):

        return "{}://{}/api/v4/projects/{}/statuses/{}".format(parsed.scheme, parsed.netloc, project['id'], self.get_event_ref())

    def get_signature(self):
        return force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')

    def check_signature(self, obj):
        if not obj.webhook_key:
            raise PermissionDenied

        token_from_request = force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')

        # GitLab only returns the secret token, not an hmac hash. Use
        # the hmac `compare_digest` helper function to prevent timing
        # analysis by attackers.
        if not hmac.compare_digest(force_bytes(obj.webhook_key), self.get_signature()):
        if not hmac.compare_digest(force_bytes(obj.webhook_key), token_from_request):
            raise PermissionDenied
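The comment above explains why the GitLab token is compared with hmac.compare_digest() rather than ==. A tiny illustration of the constant-time property, with made-up token values:

# compare_digest() takes the same time whether the mismatch is in the first or last byte,
# so response timing does not reveal how much of the token an attacker has guessed.
import hmac

stored_token = b"s3cr3t-token"
candidate = b"s3cr3t-tokex"
print(hmac.compare_digest(stored_token, candidate))  # False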


class BitbucketDcWebhookReceiver(WebhookReceiverBase):
    service = 'bitbucket_dc'

    ref_keys = {
        'repo:refs_changed': 'changes.0.toHash',
        'mirror:repo_synchronized': 'changes.0.toHash',
        'pr:opened': 'pullRequest.toRef.latestCommit',
        'pr:from_ref_updated': 'pullRequest.toRef.latestCommit',
        'pr:modified': 'pullRequest.toRef.latestCommit',
    }

    def get_event_type(self):
        return self.request.META.get('HTTP_X_EVENT_KEY')

    def get_event_guid(self):
        return self.request.META.get('HTTP_X_REQUEST_ID')

    def get_event_status_api(self):
        # https://<bitbucket-base-url>/rest/build-status/1.0/commits/<commit-hash>
        if self.get_event_type() not in self.ref_keys.keys():
            return
        if self.get_event_ref() is None:
            return
        any_url = None
        if 'actor' in self.request.data:
            any_url = self.request.data['actor'].get('links', {}).get('self')
        if any_url is None and 'repository' in self.request.data:
            any_url = self.request.data['repository'].get('links', {}).get('self')
        if any_url is None:
            return
        any_url = any_url[0].get('href')
        if any_url is None:
            return
        parsed = urllib.parse.urlparse(any_url)

        return "{}://{}/rest/build-status/1.0/commits/{}".format(parsed.scheme, parsed.netloc, self.get_event_ref())

    def is_ignored_request(self):
        return self.get_event_type() not in [
            'repo:refs_changed',
            'mirror:repo_synchronized',
            'pr:opened',
            'pr:from_ref_updated',
            'pr:modified',
        ]

    def must_check_signature(self):
        # Bitbucket does not sign ping requests...
        return self.get_event_type() != 'diagnostics:ping'

    def get_signature(self):
        header_sig = self.request.META.get('HTTP_X_HUB_SIGNATURE')
        if not header_sig:
            logger.debug("Expected signature missing from header key HTTP_X_HUB_SIGNATURE")
            raise PermissionDenied
        hash_alg, signature = header_sig.split('=')
        return hash_alg, force_bytes(signature)
@@ -14,7 +14,7 @@ class ConfConfig(AppConfig):
    def ready(self):
        self.module.autodiscover()

        if not set(sys.argv) & {'migrate', 'check_migrations'}:
        if not set(sys.argv) & {'migrate', 'check_migrations', 'showmigrations'}:
            from .settings import SettingsWrapper

            SettingsWrapper.initialize()
@@ -55,6 +55,7 @@ register(
|
||||
# Optional; category_slug will be slugified version of category if not
|
||||
# explicitly provided.
|
||||
category_slug='cows',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -61,6 +61,10 @@ class StringListBooleanField(ListField):
|
||||
|
||||
def to_representation(self, value):
|
||||
try:
|
||||
if isinstance(value, str):
|
||||
# https://github.com/encode/django-rest-framework/commit/a180bde0fd965915718b070932418cabc831cee1
|
||||
# DRF changed truthy and falsy lists to be capitalized
|
||||
value = value.lower()
|
||||
if isinstance(value, (list, tuple)):
|
||||
return super(StringListBooleanField, self).to_representation(value)
|
||||
elif value in BooleanField.TRUE_VALUES:
|
||||
@@ -78,6 +82,8 @@ class StringListBooleanField(ListField):
|
||||
|
||||
def to_internal_value(self, data):
|
||||
try:
|
||||
if isinstance(data, str):
|
||||
data = data.lower()
|
||||
if isinstance(data, (list, tuple)):
|
||||
return super(StringListBooleanField, self).to_internal_value(data)
|
||||
elif data in BooleanField.TRUE_VALUES:
|
||||
|
||||
awx/conf/migrations/0010_change_to_JSONField.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# Generated by Django 4.2 on 2023-06-09 19:51

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ('conf', '0009_rename_proot_settings'),
    ]

    operations = [
        migrations.AlterField(
            model_name='setting',
            name='value',
            field=models.JSONField(null=True),
        ),
    ]
@@ -7,9 +7,10 @@ import json
# Django
from django.db import models

from ansible_base.lib.utils.models import prevent_search

# AWX
from awx.main.fields import JSONBlob
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.models.base import CreatedModifiedModel
from awx.main.utils import encrypt_field
from awx.conf import settings_registry

@@ -18,7 +19,7 @@ __all__ = ['Setting']

class Setting(CreatedModifiedModel):
    key = models.CharField(max_length=255)
    value = JSONBlob(null=True)
    value = models.JSONField(null=True)
    user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE))

    def __str__(self):
@@ -127,6 +127,8 @@ class SettingsRegistry(object):
|
||||
encrypted = bool(field_kwargs.pop('encrypted', False))
|
||||
defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
|
||||
unit = field_kwargs.pop('unit', None)
|
||||
hidden = field_kwargs.pop('hidden', False)
|
||||
warning_text = field_kwargs.pop('warning_text', None)
|
||||
if getattr(field_kwargs.get('child', None), 'source', None) is not None:
|
||||
field_kwargs['child'].source = None
|
||||
field_instance = field_class(**field_kwargs)
|
||||
@@ -134,12 +136,14 @@ class SettingsRegistry(object):
|
||||
field_instance.category = category
|
||||
field_instance.depends_on = depends_on
|
||||
field_instance.unit = unit
|
||||
field_instance.hidden = hidden
|
||||
if placeholder is not empty:
|
||||
field_instance.placeholder = placeholder
|
||||
field_instance.defined_in_file = defined_in_file
|
||||
if field_instance.defined_in_file:
|
||||
field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text)
|
||||
field_instance.encrypted = encrypted
|
||||
field_instance.warning_text = warning_text
|
||||
original_field_instance = field_instance
|
||||
if field_class != original_field_class:
|
||||
original_field_instance = original_field_class(**field_kwargs)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Python
|
||||
import contextlib
|
||||
import logging
|
||||
import psycopg
|
||||
import threading
|
||||
import time
|
||||
import os
|
||||
@@ -13,7 +14,7 @@ from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
|
||||
from django.db import transaction, connection
|
||||
from django.db.utils import Error as DBError, ProgrammingError
|
||||
from django.db.utils import DatabaseError, ProgrammingError
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# Django REST Framework
|
||||
@@ -80,18 +81,26 @@ def _ctit_db_wrapper(trans_safe=False):
                logger.debug('Obtaining database settings in spite of broken transaction.')
                transaction.set_rollback(False)
        yield
    except DBError as exc:
    except ProgrammingError as e:
        # Exception raised for programming errors
        # Examples may be table not found or already exists,
        # this generally means we can't fetch Tower configuration
        # because the database hasn't actually finished migrating yet;
        # this is usually a sign that a service in a container (such as ws_broadcast)
        # has come up *before* the database has finished migrating, and
        # especially that the conf.settings table doesn't exist yet
        # syntax error in the SQL statement, wrong number of parameters specified, etc.
        if trans_safe:
            level = logger.warning
            if isinstance(exc, ProgrammingError):
                if 'relation' in str(exc) and 'does not exist' in str(exc):
                    # this generally means we can't fetch Tower configuration
                    # because the database hasn't actually finished migrating yet;
                    # this is usually a sign that a service in a container (such as ws_broadcast)
                    # has come up *before* the database has finished migrating, and
                    # especially that the conf.settings table doesn't exist yet
                    level = logger.debug
            level(f'Database settings are not available, using defaults. error: {str(exc)}')
            logger.debug(f'Database settings are not available, using defaults. error: {str(e)}')
        else:
            logger.exception('Error modifying something related to database settings.')
    except DatabaseError as e:
        if trans_safe:
            cause = e.__cause__
            if cause and hasattr(cause, 'sqlstate'):
                sqlstate = cause.sqlstate
                sqlstate_str = psycopg.errors.lookup(sqlstate)
                logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
        else:
            logger.exception('Error modifying something related to database settings.')
    finally:
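The new DatabaseError branch above logs the PostgreSQL SQLSTATE through psycopg's error registry. A small sketch of that lookup under psycopg 3 (output shown as a comment for illustration):

# psycopg.errors.lookup() maps a five-character SQLSTATE code to its exception class.
import psycopg

print(psycopg.errors.lookup("42P01"))  # <class 'psycopg.errors.UndefinedTable'>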
@@ -418,6 +427,10 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
"""Get value while accepting the in-memory cache if key is available"""
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
return self._get_local(name)
|
||||
# If the last line did not return, that means we hit a database error
|
||||
# in that case, we should not have a local cache value
|
||||
# thus, return empty as a signal to use the default
|
||||
return empty
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
|
||||
@@ -35,7 +35,7 @@ class TestStringListBooleanField:
|
||||
field = StringListBooleanField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
|
||||
|
||||
@pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
|
||||
def test_to_representation_valid(self, value_in, value_known):
|
||||
@@ -48,7 +48,7 @@ class TestStringListBooleanField:
|
||||
field = StringListBooleanField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_representation(value)
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
|
||||
|
||||
|
||||
class TestListTuplesField:
|
||||
@@ -67,7 +67,7 @@ class TestListTuplesField:
|
||||
field = ListTuplesField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 " "but got {} instead.".format(t)
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 but got {} instead.".format(t)
|
||||
|
||||
|
||||
class TestStringListPathField:
|
||||
|
||||
@@ -13,6 +13,7 @@ from unittest import mock
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db.utils import Error as DBError
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import pytest
|
||||
|
||||
@@ -129,9 +130,9 @@ def test_default_setting(settings, mocker):
|
||||
settings.registry.register('AWX_SOME_SETTING', field_class=fields.CharField, category=_('System'), category_slug='system', default='DEFAULT')
|
||||
|
||||
settings_to_cache = mocker.Mock(**{'order_by.return_value': []})
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache):
|
||||
assert settings.AWX_SOME_SETTING == 'DEFAULT'
|
||||
assert settings.cache.get('AWX_SOME_SETTING') == 'DEFAULT'
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache)
|
||||
assert settings.AWX_SOME_SETTING == 'DEFAULT'
|
||||
assert settings.cache.get('AWX_SOME_SETTING') == 'DEFAULT'
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
|
||||
@@ -145,9 +146,9 @@ def test_setting_is_not_from_setting_file(settings, mocker):
|
||||
settings.registry.register('AWX_SOME_SETTING', field_class=fields.CharField, category=_('System'), category_slug='system', default='DEFAULT')
|
||||
|
||||
settings_to_cache = mocker.Mock(**{'order_by.return_value': []})
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache):
|
||||
assert settings.AWX_SOME_SETTING == 'DEFAULT'
|
||||
assert settings.registry.get_setting_field('AWX_SOME_SETTING').defined_in_file is False
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=settings_to_cache)
|
||||
assert settings.AWX_SOME_SETTING == 'DEFAULT'
|
||||
assert settings.registry.get_setting_field('AWX_SOME_SETTING').defined_in_file is False
|
||||
|
||||
|
||||
def test_empty_setting(settings, mocker):
|
||||
@@ -155,10 +156,10 @@ def test_empty_setting(settings, mocker):
|
||||
settings.registry.register('AWX_SOME_SETTING', field_class=fields.CharField, category=_('System'), category_slug='system')
|
||||
|
||||
mocks = mocker.Mock(**{'order_by.return_value': mocker.Mock(**{'__iter__': lambda self: iter([]), 'first.return_value': None})})
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
|
||||
with pytest.raises(AttributeError):
|
||||
settings.AWX_SOME_SETTING
|
||||
assert settings.cache.get('AWX_SOME_SETTING') == SETTING_CACHE_NOTSET
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks)
|
||||
with pytest.raises(AttributeError):
|
||||
settings.AWX_SOME_SETTING
|
||||
assert settings.cache.get('AWX_SOME_SETTING') == SETTING_CACHE_NOTSET
|
||||
|
||||
|
||||
def test_setting_from_db(settings, mocker):
|
||||
@@ -167,9 +168,9 @@ def test_setting_from_db(settings, mocker):
|
||||
|
||||
setting_from_db = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB')
|
||||
mocks = mocker.Mock(**{'order_by.return_value': mocker.Mock(**{'__iter__': lambda self: iter([setting_from_db]), 'first.return_value': setting_from_db})})
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
|
||||
assert settings.AWX_SOME_SETTING == 'FROM_DB'
|
||||
assert settings.cache.get('AWX_SOME_SETTING') == 'FROM_DB'
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks)
|
||||
assert settings.AWX_SOME_SETTING == 'FROM_DB'
|
||||
assert settings.cache.get('AWX_SOME_SETTING') == 'FROM_DB'
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
|
||||
@@ -204,8 +205,8 @@ def test_db_setting_update(settings, mocker):
|
||||
|
||||
existing_setting = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB')
|
||||
setting_list = mocker.Mock(**{'order_by.return_value.first.return_value': existing_setting})
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=setting_list):
|
||||
settings.AWX_SOME_SETTING = 'NEW-VALUE'
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=setting_list)
|
||||
settings.AWX_SOME_SETTING = 'NEW-VALUE'
|
||||
|
||||
assert existing_setting.value == 'NEW-VALUE'
|
||||
existing_setting.save.assert_called_with(update_fields=['value'])
|
||||
@@ -216,8 +217,8 @@ def test_db_setting_deletion(settings, mocker):
|
||||
settings.registry.register('AWX_SOME_SETTING', field_class=fields.CharField, category=_('System'), category_slug='system')
|
||||
|
||||
existing_setting = mocker.Mock(key='AWX_SOME_SETTING', value='FROM_DB')
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=[existing_setting]):
|
||||
del settings.AWX_SOME_SETTING
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=[existing_setting])
|
||||
del settings.AWX_SOME_SETTING
|
||||
|
||||
assert existing_setting.delete.call_count == 1
|
||||
|
||||
@@ -282,10 +283,10 @@ def test_sensitive_cache_data_is_encrypted(settings, mocker):
|
||||
# use its primary key as part of the encryption key
|
||||
setting_from_db = mocker.Mock(pk=123, key='AWX_ENCRYPTED', value='SECRET!')
|
||||
mocks = mocker.Mock(**{'order_by.return_value': mocker.Mock(**{'__iter__': lambda self: iter([setting_from_db]), 'first.return_value': setting_from_db})})
|
||||
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
|
||||
cache.set('AWX_ENCRYPTED', 'SECRET!')
|
||||
assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
|
||||
assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
|
||||
mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks)
|
||||
cache.set('AWX_ENCRYPTED', 'SECRET!')
|
||||
assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
|
||||
assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'
|
||||
|
||||
|
||||
def test_readonly_sensitive_cache_data_is_encrypted(settings):
|
||||
@@ -331,3 +332,18 @@ def test_in_memory_cache_works(settings):
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR=[])
|
||||
def test_getattr_with_database_error(settings):
|
||||
"""
|
||||
If a setting is defined via the registry and has a null-ish default which is not None
|
||||
then referencing that setting during a database outage should give that default
|
||||
this is regression testing for a bug where it would return None
|
||||
"""
|
||||
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
|
||||
mock_ensure.side_effect = DBError('for test')
|
||||
assert settings.AWX_VAR == []
|
||||
|
||||
@@ -20,11 +20,15 @@ from rest_framework.exceptions import ParseError, PermissionDenied
|
||||
# Django OAuth Toolkit
|
||||
from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.lib.utils.validation import to_python_boolean
|
||||
from ansible_base.rbac.models import RoleEvaluation
|
||||
from ansible_base.rbac import permission_registry
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
get_object_or_400,
|
||||
get_pk_from_dict,
|
||||
to_python_boolean,
|
||||
get_licenser,
|
||||
)
|
||||
from awx.main.models import (
|
||||
@@ -56,6 +60,7 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
ProjectUpdateEvent,
|
||||
ReceptorAddress,
|
||||
Role,
|
||||
Schedule,
|
||||
SystemJob,
|
||||
@@ -70,8 +75,6 @@ from awx.main.models import (
|
||||
WorkflowJobTemplateNode,
|
||||
WorkflowApproval,
|
||||
WorkflowApprovalTemplate,
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
)
|
||||
from awx.main.models.mixins import ResourceMixin
|
||||
|
||||
@@ -79,7 +82,6 @@ __all__ = [
|
||||
'get_user_queryset',
|
||||
'check_user_access',
|
||||
'check_user_access_with_errors',
|
||||
'user_accessible_objects',
|
||||
'consumer_access',
|
||||
]
|
||||
|
||||
@@ -136,10 +138,6 @@ def register_access(model_class, access_class):
|
||||
access_registry[model_class] = access_class
|
||||
|
||||
|
||||
def user_accessible_objects(user, role_name):
|
||||
return ResourceMixin._accessible_objects(User, user, role_name)
|
||||
|
||||
|
||||
def get_user_queryset(user, model_class):
|
||||
"""
|
||||
Return a queryset for the given model_class containing only the instances
|
||||
@@ -267,7 +265,11 @@ class BaseAccess(object):
|
||||
return self.can_change(obj, data)
|
||||
|
||||
def can_delete(self, obj):
|
||||
return self.user.is_superuser
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
if obj._meta.model_name in [cls._meta.model_name for cls in permission_registry.all_registered_models]:
|
||||
return self.user.has_obj_perm(obj, 'delete')
|
||||
return False
|
||||
|
||||
def can_copy(self, obj):
|
||||
return self.can_add({'reference_obj': obj})
|
||||
@@ -366,9 +368,9 @@ class BaseAccess(object):
|
||||
report_violation = lambda message: None
|
||||
else:
|
||||
report_violation = lambda message: logger.warning(message)
|
||||
if validation_info.get('trial', False) is True or validation_info['instance_count'] == 10: # basic 10 license
|
||||
if validation_info.get('trial', False) is True:
|
||||
|
||||
def report_violation(message):
|
||||
def report_violation(message): # noqa
|
||||
raise PermissionDenied(message)
|
||||
|
||||
if check_expiration and validation_info.get('time_remaining', None) is None:
|
||||
@@ -596,7 +598,7 @@ class InstanceGroupAccess(BaseAccess):
|
||||
- a superuser
|
||||
- admin role on the Instance group
|
||||
I can add/delete Instance Groups:
|
||||
- a superuser(system administrator)
|
||||
- a superuser(system administrator), because these are not org-scoped
|
||||
I can use Instance Groups when I have:
|
||||
- use_role on the instance group
|
||||
"""
|
||||
@@ -625,7 +627,7 @@ class InstanceGroupAccess(BaseAccess):
|
||||
def can_delete(self, obj):
|
||||
if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
|
||||
return False
|
||||
return self.user.is_superuser
|
||||
return self.user.has_obj_perm(obj, 'delete')
|
||||
|
||||
|
||||
class UserAccess(BaseAccess):
|
||||
@@ -642,7 +644,10 @@ class UserAccess(BaseAccess):
|
||||
"""
|
||||
|
||||
model = User
|
||||
prefetch_related = ('profile',)
|
||||
prefetch_related = (
|
||||
'profile',
|
||||
'resource',
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
|
||||
@@ -651,9 +656,7 @@ class UserAccess(BaseAccess):
|
||||
qs = (
|
||||
User.objects.filter(pk__in=Organization.accessible_objects(self.user, 'read_role').values('member_role__members'))
|
||||
| User.objects.filter(pk=self.user.id)
|
||||
| User.objects.filter(
|
||||
pk__in=Role.objects.filter(singleton_name__in=[ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR]).values('members')
|
||||
)
|
||||
| User.objects.filter(is_superuser=True)
|
||||
).distinct()
|
||||
return qs
|
||||
|
||||
@@ -711,6 +714,15 @@ class UserAccess(BaseAccess):
            if not allow_orphans:
                # in these cases only superusers can modify orphan users
                return False
            if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
                # Permission granted if the user has all permissions that the target user has
                target_perms = set(
                    RoleEvaluation.objects.filter(role__in=obj.has_roles.all()).values_list('object_id', 'content_type_id', 'codename').distinct()
                )
                user_perms = set(
                    RoleEvaluation.objects.filter(role__in=self.user.has_roles.all()).values_list('object_id', 'content_type_id', 'codename').distinct()
                )
                return not (target_perms - user_perms)
            return not obj.roles.all().exclude(ancestors__in=self.user.roles.all()).exists()
        else:
            return self.is_all_org_admin(obj)
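In plain terms, the subset test added above grants access only when every (object_id, content_type_id, codename) permission held by the target user is also held by the acting user. A toy illustration of the set arithmetic, with invented tuples:

# An empty difference means the target's permissions are a subset of the acting user's.
target_perms = {(1, 10, 'view_inventory'), (2, 11, 'execute_jobtemplate')}
user_perms = {(1, 10, 'view_inventory'), (2, 11, 'execute_jobtemplate'), (3, 12, 'change_project')}
print(not (target_perms - user_perms))  # True -> allowed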
@@ -838,6 +850,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
|
||||
prefetch_related = (
|
||||
'created_by',
|
||||
'modified_by',
|
||||
'resource', # dab_resource_registry
|
||||
)
|
||||
# organization admin_role is not a parent of organization auditor_role
|
||||
notification_attach_roles = ['admin_role', 'auditor_role']
|
||||
@@ -948,9 +961,6 @@ class InventoryAccess(BaseAccess):
|
||||
def can_update(self, obj):
|
||||
return self.user in obj.update_role
|
||||
|
||||
def can_delete(self, obj):
|
||||
return self.can_admin(obj, None)
|
||||
|
||||
def can_run_ad_hoc_commands(self, obj):
|
||||
return self.user in obj.adhoc_role
|
||||
|
||||
@@ -1306,6 +1316,7 @@ class TeamAccess(BaseAccess):
|
||||
'created_by',
|
||||
'modified_by',
|
||||
'organization',
|
||||
'resource', # dab_resource_registry
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
@@ -1376,12 +1387,11 @@ class TeamAccess(BaseAccess):
|
||||
class ExecutionEnvironmentAccess(BaseAccess):
|
||||
"""
|
||||
I can see an execution environment when:
|
||||
- I'm a superuser
|
||||
- I'm a member of the same organization
|
||||
- it is a global ExecutionEnvironment
|
||||
- I can see its organization
|
||||
- It is a global ExecutionEnvironment
|
||||
I can create/change an execution environment when:
|
||||
- I'm a superuser
|
||||
- I'm an admin for the organization(s)
|
||||
- I have an organization or object role that gives access
|
||||
"""
|
||||
|
||||
model = ExecutionEnvironment
|
||||
@@ -1390,7 +1400,9 @@ class ExecutionEnvironmentAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
return ExecutionEnvironment.objects.filter(
|
||||
Q(organization__in=Organization.accessible_pk_qs(self.user, 'read_role')) | Q(organization__isnull=True)
|
||||
Q(organization__in=Organization.accessible_pk_qs(self.user, 'read_role'))
|
||||
| Q(organization__isnull=True)
|
||||
| Q(id__in=ExecutionEnvironment.access_ids_qs(self.user, 'change'))
|
||||
).distinct()
|
||||
|
||||
@check_superuser
|
||||
@@ -1403,13 +1415,19 @@ class ExecutionEnvironmentAccess(BaseAccess):
|
||||
def can_change(self, obj, data):
|
||||
if obj and obj.organization_id is None:
|
||||
raise PermissionDenied
|
||||
if self.user not in obj.organization.execution_environment_admin_role:
|
||||
raise PermissionDenied
|
||||
if data and 'organization' in data:
|
||||
new_org = get_object_from_data('organization', Organization, data, obj=obj)
|
||||
if not new_org or self.user not in new_org.execution_environment_admin_role:
|
||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||
if not self.user.has_obj_perm(obj, 'change'):
|
||||
return False
|
||||
return self.check_related('organization', Organization, data, obj=obj, mandatory=True, role_field='execution_environment_admin_role')
|
||||
else:
|
||||
if self.user not in obj.organization.execution_environment_admin_role:
|
||||
raise PermissionDenied
|
||||
if not self.check_related('organization', Organization, data, obj=obj, role_field='execution_environment_admin_role'):
|
||||
return False
|
||||
# Special case that check_related does not catch, org users can not remove the organization from the EE
|
||||
if data and ('organization' in data or 'organization_id' in data):
|
||||
if (not data.get('organization')) and (not data.get('organization_id')):
|
||||
return False
|
||||
return True
|
||||
|
||||
def can_delete(self, obj):
|
||||
if obj.managed:
|
||||
@@ -1581,6 +1599,8 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
|
||||
inventory = get_value(Inventory, 'inventory')
|
||||
if inventory:
|
||||
if self.user not in inventory.use_role:
|
||||
if self.save_messages:
|
||||
self.messages['inventory'] = [_('You do not have use permission on Inventory')]
|
||||
return False
|
||||
|
||||
if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
|
||||
@@ -1589,11 +1609,16 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
|
||||
project = get_value(Project, 'project')
|
||||
# If the user has admin access to the project (as an org admin), should
|
||||
# be able to proceed without additional checks.
|
||||
if project:
|
||||
return self.user in project.use_role
|
||||
else:
|
||||
if not project:
|
||||
return False
|
||||
|
||||
if self.user not in project.use_role:
|
||||
if self.save_messages:
|
||||
self.messages['project'] = [_('You do not have use permission on Project')]
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@check_superuser
|
||||
def can_copy_related(self, obj):
|
||||
"""
|
||||
@@ -2077,11 +2102,23 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
if not data: # So the browseable API will work
|
||||
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
|
||||
|
||||
return bool(
|
||||
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True)
|
||||
and self.check_related('inventory', Inventory, data, role_field='use_role')
|
||||
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
|
||||
)
|
||||
if not self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True):
|
||||
if data.get('organization', None) is None:
|
||||
if self.save_messages:
|
||||
self.messages['organization'] = [_('An organization is required to create a workflow job template for a normal user')]
|
||||
return False
|
||||
|
||||
if not self.check_related('inventory', Inventory, data, role_field='use_role'):
|
||||
if self.save_messages:
|
||||
self.messages['inventory'] = [_('You do not have use_role to the inventory')]
|
||||
return False
|
||||
|
||||
if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
|
||||
if self.save_messages:
|
||||
self.messages['execution_environment'] = [_('You do not have read_role to the execution environment')]
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def can_copy(self, obj):
|
||||
if self.save_messages:
|
||||
@@ -2234,7 +2271,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
if not node_access.can_add({'reference_obj': node}):
|
||||
wj_add_perm = False
|
||||
if not wj_add_perm and self.save_messages:
|
||||
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job ' 'resources required for relaunch.')
|
||||
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job resources required for relaunch.')
|
||||
return wj_add_perm
|
||||
|
||||
def can_cancel(self, obj):
|
||||
@@ -2434,6 +2471,29 @@ class InventoryUpdateEventAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
|
||||
class ReceptorAddressAccess(BaseAccess):
|
||||
"""
|
||||
I can see receptor address records whenever I can access the instance
|
||||
"""
|
||||
|
||||
model = ReceptorAddress
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(Q(instance__in=Instance.accessible_pk_qs(self.user, 'read_role')))
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
return False
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
return False
|
||||
|
||||
@check_superuser
|
||||
def can_delete(self, obj):
|
||||
return False
|
||||
|
||||
|
||||
class SystemJobEventAccess(BaseAccess):
|
||||
"""
|
||||
I can only see/manage System Job events if I'm a superuser
|
||||
@@ -2567,6 +2627,8 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
|
||||
if not JobLaunchConfigAccess(self.user).can_add(data):
|
||||
return False
|
||||
if not data:
|
||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||
return self.user.has_roles.filter(permission_partials__codename__in=['execute_jobtemplate', 'update_project', 'update_inventory']).exists()
|
||||
return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists()
|
||||
|
||||
return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True)
|
||||
@@ -2588,13 +2650,15 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
|
||||
|
||||
class NotificationTemplateAccess(BaseAccess):
|
||||
"""
|
||||
I can see/use a notification_template if I have permission to
|
||||
Run standard logic from DAB RBAC
|
||||
"""
|
||||
|
||||
model = NotificationTemplate
|
||||
prefetch_related = ('created_by', 'modified_by', 'organization')
|
||||
|
||||
def filtered_queryset(self):
|
||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||
return self.model.access_qs(self.user, 'view')
|
||||
return self.model.objects.filter(
|
||||
Q(organization__in=Organization.accessible_objects(self.user, 'notification_admin_role')) | Q(organization__in=self.user.auditor_of_organizations)
|
||||
).distinct()
|
||||
@@ -2607,10 +2671,7 @@ class NotificationTemplateAccess(BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
if obj.organization is None:
|
||||
# only superusers are allowed to edit orphan notification templates
|
||||
return False
|
||||
return self.check_related('organization', Organization, data, obj=obj, role_field='notification_admin_role', mandatory=True)
|
||||
return self.user.has_obj_perm(obj, 'change') and self.check_related('organization', Organization, data, obj=obj, role_field='notification_admin_role')
|
||||
|
||||
def can_admin(self, obj, data):
|
||||
return self.can_change(obj, data)
|
||||
@@ -2620,9 +2681,7 @@ class NotificationTemplateAccess(BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_start(self, obj, validate_license=True):
|
||||
if obj.organization is None:
|
||||
return False
|
||||
return self.user in obj.organization.notification_admin_role
|
||||
return self.can_change(obj, None)
|
||||
|
||||
|
||||
class NotificationAccess(BaseAccess):
|
||||
@@ -2763,7 +2822,7 @@ class ActivityStreamAccess(BaseAccess):
|
||||
| Q(notification_template__organization__in=auditing_orgs)
|
||||
| Q(notification__notification_template__organization__in=auditing_orgs)
|
||||
| Q(label__organization__in=auditing_orgs)
|
||||
| Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
|
||||
| Q(role__in=Role.visible_roles(self.user) if auditing_orgs else [])
|
||||
)
|
||||
|
||||
project_set = Project.accessible_pk_qs(self.user, 'read_role')
|
||||
@@ -2820,13 +2879,10 @@ class RoleAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
result = Role.visible_roles(self.user)
|
||||
# Sanity check: is the requesting user an orphaned non-admin/auditor?
|
||||
# if yes, make system admin/auditor mandatorily visible.
|
||||
if not self.user.is_superuser and not self.user.is_system_auditor and not self.user.organizations.exists():
|
||||
mandatories = ('system_administrator', 'system_auditor')
|
||||
super_qs = Role.objects.filter(singleton_name__in=mandatories)
|
||||
result = result | super_qs
|
||||
return result
|
||||
# Make system admin/auditor mandatorily visible.
|
||||
mandatories = ('system_administrator', 'system_auditor')
|
||||
super_qs = Role.objects.filter(singleton_name__in=mandatories)
|
||||
return result | super_qs
|
||||
|
||||
def can_add(self, obj, data):
|
||||
# Unsupported for now
|
||||
@@ -2952,3 +3008,19 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
|
||||
for cls in BaseAccess.__subclasses__():
|
||||
access_registry[cls.model] = cls
|
||||
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
|
||||
|
||||
|
||||
def optimize_queryset(queryset):
|
||||
"""
|
||||
A utility method in case you already have a queryset and just want to
|
||||
apply the standard optimizations for that model.
|
||||
In other words, use this if you do not want to start from filtered_queryset for some reason.
|
||||
"""
|
||||
if not queryset.model or queryset.model not in access_registry:
|
||||
return queryset
|
||||
access_class = access_registry[queryset.model]
|
||||
if access_class.select_related:
|
||||
queryset = queryset.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
queryset = queryset.prefetch_related(*access_class.prefetch_related)
|
||||
return queryset
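For illustration, a minimal usage sketch of this helper (assuming it lands in awx.main.access alongside the access classes above; the Project filter is a placeholder, not part of this changeset):

from awx.main.access import optimize_queryset  # assumed import path
from awx.main.models import Project

qs = Project.objects.filter(scm_type='git')  # any queryset you already have
qs = optimize_queryset(qs)  # applies the registered select_related/prefetch_related for that model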
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
import logging
|
||||
|
||||
# AWX
|
||||
from awx.main.analytics.subsystem_metrics import Metrics
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
@@ -11,4 +11,5 @@ logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def send_subsystem_metrics():
|
||||
Metrics().send_metrics()
|
||||
DispatcherMetrics().send_metrics()
|
||||
CallbackReceiverMetrics().send_metrics()
|
||||
|
||||
@@ -66,10 +66,8 @@ class FixedSlidingWindow:
|
||||
|
||||
|
||||
class RelayWebsocketStatsManager:
|
||||
def __init__(self, event_loop, local_hostname):
|
||||
def __init__(self, local_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
|
||||
self._event_loop = event_loop
|
||||
self._stats = dict()
|
||||
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
|
||||
|
||||
@@ -94,7 +92,10 @@ class RelayWebsocketStatsManager:
|
||||
self.start()
|
||||
|
||||
def start(self):
|
||||
self.async_task = self._event_loop.create_task(self.run_loop())
|
||||
self.async_task = asyncio.get_running_loop().create_task(
|
||||
self.run_loop(),
|
||||
name='RelayWebsocketStatsManager.run_loop',
|
||||
)
|
||||
return self.async_task
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -399,7 +399,10 @@ def _copy_table(table, query, path):
|
||||
file_path = os.path.join(path, table + '_table.csv')
|
||||
file = FileSplitter(filespec=file_path)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.copy_expert(query, file)
|
||||
with cursor.copy(query) as copy:
|
||||
while data := copy.read():
|
||||
byte_data = bytes(data)
|
||||
file.write(byte_data.decode())
|
||||
return file.file_list()
|
||||
|
||||
|
||||
@@ -416,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
|
||||
resolved_action,
|
||||
resolved_role,
|
||||
-- '-' operator listed here:
|
||||
-- https://www.postgresql.org/docs/12/functions-json.html
|
||||
-- https://www.postgresql.org/docs/15/functions-json.html
|
||||
-- note that operator is only supported by jsonb objects
|
||||
-- https://www.postgresql.org/docs/current/datatype-json.html
|
||||
(CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,
|
||||
@@ -610,3 +613,20 @@ def host_metric_table(since, full_path, until, **kwargs):
|
||||
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
|
||||
)
|
||||
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
|
||||
|
||||
|
||||
@register('host_metric_summary_monthly_table', '1.0', format='csv', description=_('HostMetricSummaryMonthly export, full sync'), expensive=trivial_slicing)
|
||||
def host_metric_summary_monthly_table(since, full_path, **kwargs):
|
||||
query = '''
|
||||
COPY (SELECT main_hostmetricsummarymonthly.id,
|
||||
main_hostmetricsummarymonthly.date,
|
||||
main_hostmetricsummarymonthly.license_capacity,
|
||||
main_hostmetricsummarymonthly.license_consumed,
|
||||
main_hostmetricsummarymonthly.hosts_added,
|
||||
main_hostmetricsummarymonthly.hosts_deleted,
|
||||
main_hostmetricsummarymonthly.indirectly_managed_hosts
|
||||
FROM main_hostmetricsummarymonthly
|
||||
ORDER BY main_hostmetricsummarymonthly.id ASC) TO STDOUT WITH CSV HEADER
|
||||
'''
|
||||
|
||||
return _copy_table(table='host_metric_summary_monthly', query=query, path=full_path)
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import itertools
|
||||
import redis
|
||||
import json
|
||||
import time
|
||||
import logging
|
||||
|
||||
import prometheus_client
|
||||
from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
|
||||
from prometheus_client.registry import CollectorRegistry
|
||||
from django.conf import settings
|
||||
from django.apps import apps
|
||||
from django.http import HttpRequest
|
||||
from rest_framework.request import Request
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.utils import is_testing
|
||||
@@ -13,6 +18,30 @@ root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
class MetricsNamespace:
|
||||
def __init__(self, namespace):
|
||||
self._namespace = namespace
|
||||
|
||||
|
||||
class MetricsServerSettings(MetricsNamespace):
|
||||
def port(self):
|
||||
return settings.METRICS_SUBSYSTEM_CONFIG['server'][self._namespace]['port']
|
||||
|
||||
|
||||
class MetricsServer(MetricsServerSettings):
|
||||
def __init__(self, namespace, registry):
|
||||
MetricsNamespace.__init__(self, namespace)
|
||||
self._registry = registry
|
||||
|
||||
def start(self):
|
||||
try:
|
||||
# TODO: addr for ipv6 ?
|
||||
prometheus_client.start_http_server(self.port(), addr='localhost', registry=self._registry)
|
||||
except Exception:
|
||||
logger.error(f"MetricsServer failed to start for service '{self._namespace}.")
|
||||
raise
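As a rough illustration, MetricsServerSettings.port() expects the setting to be shaped like the sketch below; the namespace keys and port numbers are placeholders, not values taken from this changeset.

METRICS_SUBSYSTEM_CONFIG = {
    'server': {
        'callback_receiver': {'port': 8014},  # placeholder namespace key and port
        'dispatcher': {'port': 8015},         # placeholder namespace key and port
    }
}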
|
||||
|
||||
|
||||
class BaseM:
|
||||
def __init__(self, field, help_text):
|
||||
self.field = field
|
||||
@@ -148,71 +177,40 @@ class HistogramM(BaseM):
|
||||
return output_text
|
||||
|
||||
|
||||
class Metrics:
|
||||
def __init__(self, auto_pipe_execute=False, instance_name=None):
|
||||
class Metrics(MetricsNamespace):
|
||||
# metric name, help_text
|
||||
METRICSLIST = []
|
||||
_METRICSLIST = [
|
||||
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
|
||||
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
|
||||
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
|
||||
]
|
||||
|
||||
def __init__(self, namespace, auto_pipe_execute=False, instance_name=None, metrics_have_changed=True, **kwargs):
|
||||
MetricsNamespace.__init__(self, namespace)
|
||||
|
||||
self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
|
||||
self.conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.last_pipe_execute = time.time()
|
||||
# track if metrics have been modified since last saved to redis
|
||||
# start with True so that we get an initial save to redis
|
||||
self.metrics_have_changed = True
|
||||
self.metrics_have_changed = metrics_have_changed
|
||||
self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS
|
||||
self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS
|
||||
# auto pipe execute will commit transaction of metric data to redis
|
||||
# at a regular interval (pipe_execute_interval). If set to False,
|
||||
# the calling function should call .pipe_execute() explicitly
|
||||
self.auto_pipe_execute = auto_pipe_execute
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
if instance_name:
|
||||
self.instance_name = instance_name
|
||||
elif is_testing():
|
||||
self.instance_name = "awx_testing"
|
||||
else:
|
||||
self.instance_name = Instance.objects.my_hostname()
|
||||
self.instance_name = settings.CLUSTER_HOST_ID # Same as Instance.objects.my_hostname() BUT we do not need to import Instance
|
||||
|
||||
# metric name, help_text
|
||||
METRICSLIST = [
|
||||
SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
|
||||
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
|
||||
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
|
||||
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
|
||||
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
|
||||
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
|
||||
HistogramM(
|
||||
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
|
||||
),
|
||||
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
|
||||
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
|
||||
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
|
||||
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
|
||||
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
|
||||
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
|
||||
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
|
||||
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
|
||||
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
|
||||
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
|
||||
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
|
||||
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
|
||||
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
|
||||
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
|
||||
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
|
||||
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
|
||||
]
|
||||
# turn metric list into dictionary with the metric name as a key
|
||||
self.METRICS = {}
|
||||
for m in METRICSLIST:
|
||||
for m in itertools.chain(self.METRICSLIST, self._METRICSLIST):
|
||||
self.METRICS[m.field] = m
|
||||
|
||||
# track last time metrics were sent to other nodes
|
||||
@@ -225,7 +223,7 @@ class Metrics:
|
||||
m.reset_value(self.conn)
|
||||
self.metrics_have_changed = True
|
||||
self.conn.delete(root_key + "_lock")
|
||||
for m in self.conn.scan_iter(root_key + '_instance_*'):
|
||||
for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'):
|
||||
self.conn.delete(m)
|
||||
|
||||
def inc(self, field, value):
|
||||
@@ -292,7 +290,7 @@ class Metrics:
|
||||
def send_metrics(self):
|
||||
# more than one thread could be calling this at the same time, so should
|
||||
# acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '_lock')
|
||||
lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
|
||||
if not lock.acquire(blocking=False):
|
||||
return
|
||||
try:
|
||||
@@ -302,9 +300,10 @@ class Metrics:
|
||||
payload = {
|
||||
'instance': self.instance_name,
|
||||
'metrics': serialized_metrics,
|
||||
'metrics_namespace': self._namespace,
|
||||
}
|
||||
# store the serialized data locally as well, so that load_other_metrics will read it
|
||||
self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
|
||||
self.conn.set(root_key + '-' + self._namespace + '_instance_' + self.instance_name, serialized_metrics)
|
||||
emit_channel_notification("metrics", payload)
|
||||
|
||||
self.previous_send_metrics.set(current_time)
|
||||
@@ -326,14 +325,14 @@ class Metrics:
|
||||
instances_filter = request.query_params.getlist("node")
|
||||
# get a sorted list of instance names
|
||||
instance_names = [self.instance_name]
|
||||
for m in self.conn.scan_iter(root_key + '_instance_*'):
|
||||
for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'):
|
||||
instance_names.append(m.decode('UTF-8').split('_instance_')[1])
|
||||
instance_names.sort()
|
||||
# load data, including data from this local instance
|
||||
instance_data = {}
|
||||
for instance in instance_names:
|
||||
if len(instances_filter) == 0 or instance in instances_filter:
|
||||
instance_data_from_redis = self.conn.get(root_key + '_instance_' + instance)
|
||||
instance_data_from_redis = self.conn.get(root_key + '-' + self._namespace + '_instance_' + instance)
|
||||
# data from other instances may not be available. That is OK.
|
||||
if instance_data_from_redis:
|
||||
instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8'))
|
||||
@@ -352,6 +351,120 @@ class Metrics:
|
||||
return output_text
|
||||
|
||||
|
||||
class DispatcherMetrics(Metrics):
|
||||
METRICSLIST = [
|
||||
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
|
||||
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
|
||||
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
|
||||
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
|
||||
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
|
||||
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
|
||||
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
|
||||
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
|
||||
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
|
||||
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
|
||||
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
|
||||
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
|
||||
# dispatcher subsystem metrics
|
||||
SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
|
||||
SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
|
||||
SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
|
||||
SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
|
||||
]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(settings.METRICS_SERVICE_DISPATCHER, *args, **kwargs)
|
||||
|
||||
|
||||
class CallbackReceiverMetrics(Metrics):
|
||||
METRICSLIST = [
|
||||
SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
|
||||
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
|
||||
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
|
||||
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
|
||||
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
|
||||
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
|
||||
HistogramM(
|
||||
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
|
||||
),
|
||||
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
|
||||
]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, *args, **kwargs)
|
||||
|
||||
|
||||
def metrics(request):
|
||||
m = Metrics()
|
||||
return m.generate_metrics(request)
|
||||
output_text = ''
|
||||
for m in [DispatcherMetrics(), CallbackReceiverMetrics()]:
|
||||
output_text += m.generate_metrics(request)
|
||||
return output_text
|
||||
|
||||
|
||||
class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
|
||||
"""
|
||||
Takes the metric data from redis -> our custom metric fields -> prometheus
|
||||
library metric fields.
|
||||
|
||||
The plan is to get rid of the use of redis, our custom metric fields, and
|
||||
to switch fully to the prometheus library. At that point, this translation
|
||||
code will be deleted.
|
||||
"""
|
||||
|
||||
def __init__(self, metrics_obj, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._metrics = metrics_obj
|
||||
|
||||
def collect(self):
|
||||
my_hostname = settings.CLUSTER_HOST_ID
|
||||
|
||||
instance_data = self._metrics.load_other_metrics(Request(HttpRequest()))
|
||||
if not instance_data:
|
||||
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
|
||||
return None
|
||||
|
||||
host_metrics = instance_data.get(my_hostname)
|
||||
for _, metric in self._metrics.METRICS.items():
|
||||
entry = host_metrics.get(metric.field)
|
||||
if not entry:
|
||||
logger.debug(f"{self._metrics._namespace} metric '{metric.field}' not found in redis data payload {json.dumps(instance_data, indent=2)}")
|
||||
continue
|
||||
if isinstance(metric, HistogramM):
|
||||
buckets = list(zip(metric.buckets, entry['counts']))
|
||||
buckets = [[str(i[0]), str(i[1])] for i in buckets]
|
||||
yield HistogramMetricFamily(metric.field, metric.help_text, buckets=buckets, sum_value=entry['sum'])
|
||||
else:
|
||||
yield GaugeMetricFamily(metric.field, metric.help_text, value=entry)
|
||||
|
||||
|
||||
class CallbackReceiverMetricsServer(MetricsServer):
|
||||
def __init__(self):
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
registry.register(CustomToPrometheusMetricsCollector(CallbackReceiverMetrics(metrics_have_changed=False)))
|
||||
super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, registry)
|
||||
|
||||
|
||||
class DispatcherMetricsServer(MetricsServer):
|
||||
def __init__(self):
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
registry.register(CustomToPrometheusMetricsCollector(DispatcherMetrics(metrics_have_changed=False)))
|
||||
super().__init__(settings.METRICS_SERVICE_DISPATCHER, registry)
|
||||
|
||||
|
||||
class WebsocketsMetricsServer(MetricsServer):
|
||||
def __init__(self):
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
# registry.register()
|
||||
super().__init__(settings.METRICS_SERVICE_WEBSOCKETS, registry)
|
||||
|
||||
@@ -1,7 +1,40 @@
|
||||
from django.apps import AppConfig
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from awx.main.utils.named_url_graph import _customize_graph, generate_graph
|
||||
from awx.conf import register, fields
|
||||
|
||||
|
||||
class MainConfig(AppConfig):
|
||||
name = 'awx.main'
|
||||
verbose_name = _('Main')
|
||||
|
||||
def load_named_url_feature(self):
|
||||
models = [m for m in self.get_models() if hasattr(m, 'get_absolute_url')]
|
||||
generate_graph(models)
|
||||
_customize_graph()
|
||||
register(
|
||||
'NAMED_URL_FORMATS',
|
||||
field_class=fields.DictField,
|
||||
read_only=True,
|
||||
label=_('Formats of all available named urls'),
|
||||
help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'),
|
||||
category=_('Named URL'),
|
||||
category_slug='named-url',
|
||||
)
|
||||
register(
|
||||
'NAMED_URL_GRAPH_NODES',
|
||||
field_class=fields.DictField,
|
||||
read_only=True,
|
||||
label=_('List of all named url graph nodes.'),
|
||||
help_text=_(
|
||||
'Read-only list of key-value pairs that exposes named URL graph topology.'
|
||||
' Use this list to programmatically generate named URLs for resources'
|
||||
),
|
||||
category=_('Named URL'),
|
||||
category_slug='named-url',
|
||||
)
|
||||
|
||||
def ready(self):
|
||||
super().ready()
|
||||
|
||||
self.load_named_url_feature()
|
||||
|
||||
awx/main/cache.py (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
import functools
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache.backends.base import DEFAULT_TIMEOUT
|
||||
from django.core.cache.backends.redis import RedisCache
|
||||
|
||||
from redis.exceptions import ConnectionError, ResponseError, TimeoutError
|
||||
import socket
|
||||
|
||||
# This list comes from what django-redis ignores and the behavior we are trying
|
||||
# to retain while dropping the dependency on django-redis.
|
||||
IGNORED_EXCEPTIONS = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
|
||||
|
||||
CONNECTION_INTERRUPTED_SENTINEL = object()
|
||||
|
||||
|
||||
def optionally_ignore_exceptions(func=None, return_value=None):
|
||||
if func is None:
|
||||
return functools.partial(optionally_ignore_exceptions, return_value=return_value)
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except IGNORED_EXCEPTIONS as e:
|
||||
if settings.DJANGO_REDIS_IGNORE_EXCEPTIONS:
|
||||
return return_value
|
||||
raise e.__cause__ or e
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class AWXRedisCache(RedisCache):
|
||||
"""
|
||||
We just want to wrap the upstream RedisCache class so that we can ignore
|
||||
the exceptions that it raises when the cache is unavailable.
|
||||
"""
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().add(key, value, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions(return_value=CONNECTION_INTERRUPTED_SENTINEL)
|
||||
def _get(self, key, default=None, version=None):
|
||||
return super().get(key, default, version)
|
||||
|
||||
def get(self, key, default=None, version=None):
|
||||
value = self._get(key, default, version)
|
||||
if value is CONNECTION_INTERRUPTED_SENTINEL:
|
||||
return default
|
||||
return value
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().set(key, value, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().touch(key, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def delete(self, key, version=None):
|
||||
return super().delete(key, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def get_many(self, keys, version=None):
|
||||
return super().get_many(keys, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def has_key(self, key, version=None):
|
||||
return super().has_key(key, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def incr(self, key, delta=1, version=None):
|
||||
return super().incr(key, delta, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().set_many(data, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def delete_many(self, keys, version=None):
|
||||
return super().delete_many(keys, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def clear(self):
|
||||
return super().clear()
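For context, a minimal sketch of how a cache backend like this would typically be wired into Django settings; the dotted path follows the new file location, while LOCATION is illustrative and not taken from this changeset.

CACHES = {
    'default': {
        'BACKEND': 'awx.main.cache.AWXRedisCache',  # assumed dotted path for the new module
        'LOCATION': 'redis://localhost:6379/1',     # placeholder redis URL
    }
}
DJANGO_REDIS_IGNORE_EXCEPTIONS = True  # swallow the redis errors listed in IGNORED_EXCEPTIONS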
|
||||
awx/main/conf.py (142 changed lines)
@@ -2,6 +2,7 @@
|
||||
import logging
|
||||
|
||||
# Django
|
||||
from django.core.checks import Error
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
@@ -92,6 +93,21 @@ register(
|
||||
),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
required=False,
|
||||
)
|
||||
|
||||
register(
|
||||
'CSRF_TRUSTED_ORIGINS',
|
||||
default=[],
|
||||
field_class=fields.StringListField,
|
||||
label=_('CSRF Trusted Origins List'),
|
||||
help_text=_(
|
||||
"If the service is behind a reverse proxy/load balancer, use this setting "
|
||||
"to configure the schema://addresses from which the service should trust "
|
||||
"Origin header values. "
|
||||
),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -680,15 +696,33 @@ register(
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
|
||||
'LOG_AGGREGATOR_ACTION_QUEUE_SIZE',
|
||||
field_class=fields.IntegerField,
|
||||
default=131072,
|
||||
min_value=1,
|
||||
label=_('Maximum number of messages that can be stored in the log action queue'),
|
||||
help_text=_(
|
||||
'Defines how large the rsyslog action queue can grow in number of messages '
|
||||
'stored. This can have an impact on memory utilization. When the queue '
|
||||
'reaches 75% of this number, the queue will start writing to disk '
|
||||
'(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
|
||||
'DEBUG messages will start to be discarded (queue.discardMark with '
|
||||
'queue.discardSeverity=5).'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB',
|
||||
field_class=fields.IntegerField,
|
||||
default=1,
|
||||
min_value=1,
|
||||
label=_('Maximum disk persistance for external log aggregation (in GB)'),
|
||||
label=_('Maximum disk persistence for rsyslogd action queuing (in GB)'),
|
||||
help_text=_(
|
||||
'Amount of data to store (in gigabytes) during an outage of '
|
||||
'the external log aggregator (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting.'
|
||||
'Amount of data to store (in gigabytes) if an rsyslog action takes time '
|
||||
'to process an incoming message (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). '
|
||||
'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
@@ -742,6 +776,7 @@ register(
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
required=False,
|
||||
)
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_ENTRIES',
|
||||
@@ -783,6 +818,7 @@ register(
|
||||
help_text=_('Max jobs to allow bulk jobs to launch'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -793,6 +829,18 @@ register(
|
||||
help_text=_('Max number of hosts to allow to be created in a single bulk action'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'BULK_HOST_MAX_DELETE',
|
||||
field_class=fields.IntegerField,
|
||||
default=250,
|
||||
label=_('Max number of hosts to allow to be deleted in a single bulk action'),
|
||||
help_text=_('Max number of hosts to allow to be deleted in a single bulk action'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -803,6 +851,7 @@ register(
|
||||
help_text=_('Enable preview of new user interface.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -831,6 +880,65 @@ register(
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'HOST_METRIC_SUMMARY_TASK_LAST_TS',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last computing date of HostMetricSummaryMonthly'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_CLEANUP_PATHS',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Enable or Disable tmp dir cleanup'),
|
||||
default=True,
|
||||
help_text=_('Enable or Disable TMP Dir cleanup'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_REQUEST_PROFILE',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Debug Web Requests'),
|
||||
default=False,
|
||||
help_text=_('Debug web request python timing'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
register(
|
||||
'DEFAULT_CONTAINER_RUN_OPTIONS',
|
||||
field_class=fields.StringListField,
|
||||
label=_('Container Run Options'),
|
||||
default=['--network', 'slirp4netns:enable_ipv6=true'],
|
||||
help_text=_("List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug']"),
|
||||
category=('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'RECEPTOR_RELEASE_WORK',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Release Receptor Work'),
|
||||
default=True,
|
||||
help_text=_('Release receptor work'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
register(
|
||||
'RECEPTOR_KEEP_WORK_ON_ERROR',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Keep receptor work on error'),
|
||||
default=False,
|
||||
help_text=_('Prevent receptor work from being released when an error is detected'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
|
||||
@@ -857,3 +965,27 @@ def logging_validate(serializer, attrs):
|
||||
|
||||
|
||||
register_validate('logging', logging_validate)
|
||||
|
||||
|
||||
def csrf_trusted_origins_validate(serializer, attrs):
|
||||
if not serializer.instance or not hasattr(serializer.instance, 'CSRF_TRUSTED_ORIGINS'):
|
||||
return attrs
|
||||
if 'CSRF_TRUSTED_ORIGINS' not in attrs:
|
||||
return attrs
|
||||
errors = []
|
||||
for origin in attrs['CSRF_TRUSTED_ORIGINS']:
|
||||
if "://" not in origin:
|
||||
errors.append(
|
||||
Error(
|
||||
"As of Django 4.0, the values in the CSRF_TRUSTED_ORIGINS "
|
||||
"setting must start with a scheme (usually http:// or "
|
||||
"https://) but found %s. See the release notes for details." % origin,
|
||||
)
|
||||
)
|
||||
if errors:
|
||||
error_messages = [error.msg for error in errors]
|
||||
raise serializers.ValidationError(_('\n'.join(error_messages)))
|
||||
return attrs
|
||||
|
||||
|
||||
register_validate('system', csrf_trusted_origins_validate)
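Illustrative values for the validated setting (placeholders, not taken from this changeset):

CSRF_TRUSTED_ORIGINS = ['https://tower.example.com', 'http://10.0.0.5:8080']  # accepted: every entry has a scheme
# CSRF_TRUSTED_ORIGINS = ['tower.example.com']  # rejected by csrf_trusted_origins_validate: missing scheme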
|
||||
|
||||
@@ -14,7 +14,7 @@ __all__ = [
|
||||
'STANDARD_INVENTORY_UPDATE_ENV',
|
||||
]
|
||||
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform', 'openshift_virtualization')
|
||||
PRIVILEGE_ESCALATION_METHODS = [
|
||||
('sudo', _('Sudo')),
|
||||
('su', _('Su')),
|
||||
@@ -43,6 +43,7 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
||||
}
|
||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||
ACTIVE_STATES = CAN_CANCEL
|
||||
ERROR_STATES = ('error',)
|
||||
MINIMAL_EVENTS = set(['playbook_on_play_start', 'playbook_on_task_start', 'playbook_on_stats', 'EOF'])
|
||||
CENSOR_VALUE = '************'
|
||||
ENV_BLOCKLIST = frozenset(
|
||||
@@ -114,3 +115,28 @@ SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'
|
||||
|
||||
# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
|
||||
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')
|
||||
|
||||
# Data for RBAC compatibility layer
|
||||
role_name_to_perm_mapping = {
|
||||
'adhoc_role': ['adhoc_'],
|
||||
'approval_role': ['approve_'],
|
||||
'auditor_role': ['audit_'],
|
||||
'admin_role': ['change_', 'add_', 'delete_'],
|
||||
'execute_role': ['execute_'],
|
||||
'read_role': ['view_'],
|
||||
'update_role': ['update_'],
|
||||
'member_role': ['member_'],
|
||||
'use_role': ['use_'],
|
||||
}
|
||||
|
||||
org_role_to_permission = {
|
||||
'notification_admin_role': 'add_notificationtemplate',
|
||||
'project_admin_role': 'add_project',
|
||||
'execute_role': 'execute_jobtemplate',
|
||||
'inventory_admin_role': 'add_inventory',
|
||||
'credential_admin_role': 'add_credential',
|
||||
'workflow_admin_role': 'add_workflowjobtemplate',
|
||||
'job_template_admin_role': 'change_jobtemplate', # TODO: this doesn't really work, solution not clear
|
||||
'execution_environment_admin_role': 'add_executionenvironment',
|
||||
'auditor_role': 'view_project', # TODO: also doesn't really work
|
||||
}
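A quick sanity check of the compatibility data defined above:

assert org_role_to_permission['workflow_admin_role'] == 'add_workflowjobtemplate'
assert role_name_to_perm_mapping['use_role'] == ['use_']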
|
||||
|
||||
@@ -106,7 +106,7 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
|
||||
if group == "metrics":
|
||||
message = json.loads(message['text'])
|
||||
conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
|
||||
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "-" + message['metrics_namespace'] + "_instance_" + message['instance'], message['metrics'])
|
||||
else:
|
||||
await self.channel_layer.group_send(group, message)
|
||||
|
||||
|
||||
@@ -58,7 +58,7 @@ aim_inputs = {
|
||||
'id': 'object_property',
|
||||
'label': _('Object Property'),
|
||||
'type': 'string',
|
||||
'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
|
||||
'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'),
|
||||
},
|
||||
{
|
||||
'id': 'reason',
|
||||
@@ -111,8 +111,12 @@ def aim_backend(**kwargs):
|
||||
object_property = 'Content'
|
||||
elif object_property.lower() == 'username':
|
||||
object_property = 'UserName'
|
||||
elif object_property.lower() == 'password':
|
||||
object_property = 'Content'
|
||||
elif object_property.lower() == 'address':
|
||||
object_property = 'Address'
|
||||
elif object_property not in res:
|
||||
raise KeyError('Property {} not found in object'.format(object_property))
|
||||
raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property))
|
||||
else:
|
||||
object_property = object_property.capitalize()
|
||||
|
||||
|
||||
awx/main/credential_plugins/aws_secretsmanager.py (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
secrets_manager_inputs = {
|
||||
'fields': [
|
||||
{
|
||||
'id': 'aws_access_key',
|
||||
'label': _('AWS Access Key'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'aws_secret_key',
|
||||
'label': _('AWS Secret Key'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
},
|
||||
],
|
||||
'metadata': [
|
||||
{
|
||||
'id': 'region_name',
|
||||
'label': _('AWS Secrets Manager Region'),
|
||||
'type': 'string',
|
||||
'help_text': _('Region in which the secrets manager is located'),
|
||||
},
|
||||
{
|
||||
'id': 'secret_name',
|
||||
'label': _('AWS Secret Name'),
|
||||
'type': 'string',
|
||||
},
|
||||
],
|
||||
'required': ['aws_access_key', 'aws_secret_key', 'region_name', 'secret_name'],
|
||||
}
|
||||
|
||||
|
||||
def aws_secretsmanager_backend(**kwargs):
|
||||
secret_name = kwargs['secret_name']
|
||||
region_name = kwargs['region_name']
|
||||
aws_secret_access_key = kwargs['aws_secret_key']
|
||||
aws_access_key_id = kwargs['aws_access_key']
|
||||
|
||||
session = boto3.session.Session()
|
||||
client = session.client(
|
||||
service_name='secretsmanager', region_name=region_name, aws_secret_access_key=aws_secret_access_key, aws_access_key_id=aws_access_key_id
|
||||
)
|
||||
|
||||
try:
|
||||
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
|
||||
except ClientError as e:
|
||||
raise e
|
||||
# Secrets Manager decrypts the secret value using the associated KMS CMK
|
||||
# Depending on whether the secret was a string or binary, only one of these fields will be populated
|
||||
if 'SecretString' in get_secret_value_response:
|
||||
secret = get_secret_value_response['SecretString']
|
||||
|
||||
else:
|
||||
secret = get_secret_value_response['SecretBinary']
|
||||
|
||||
return secret
|
||||
|
||||
|
||||
aws_secretmanager_plugin = CredentialPlugin('AWS Secrets Manager lookup', inputs=secrets_manager_inputs, backend=aws_secretsmanager_backend)
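A minimal usage sketch of the new backend, with placeholder credential values (not taken from this changeset):

secret = aws_secretsmanager_backend(
    aws_access_key='AKIA...',         # placeholder
    aws_secret_key='...',             # placeholder
    region_name='us-east-1',          # placeholder
    secret_name='prod/awx/database',  # placeholder
)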
|
||||
@@ -1,9 +1,10 @@
|
||||
from azure.keyvault.secrets import SecretClient
|
||||
from azure.identity import ClientSecretCredential
|
||||
from msrestazure import azure_cloud
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from msrestazure import azure_cloud
|
||||
|
||||
|
||||
# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
|
||||
@@ -54,22 +55,9 @@ azure_keyvault_inputs = {
|
||||
|
||||
|
||||
def azure_keyvault_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
[cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]
|
||||
|
||||
def auth_callback(server, resource, scope):
|
||||
credentials = ServicePrincipalCredentials(
|
||||
url=url,
|
||||
client_id=kwargs['client'],
|
||||
secret=kwargs['secret'],
|
||||
tenant=kwargs['tenant'],
|
||||
resource=f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
|
||||
)
|
||||
token = credentials.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
kv = KeyVaultClient(KeyVaultAuthentication(auth_callback))
|
||||
return kv.get_secret(url, kwargs['secret_field'], kwargs.get('secret_version', '')).value
|
||||
csc = ClientSecretCredential(tenant_id=kwargs['tenant'], client_id=kwargs['client'], client_secret=kwargs['secret'])
|
||||
kv = SecretClient(credential=csc, vault_url=kwargs['url'])
|
||||
return kv.get_secret(name=kwargs['secret_field'], version=kwargs.get('secret_version', '')).value
|
||||
|
||||
|
||||
azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend)
|
||||
|
||||
@@ -4,6 +4,8 @@ from urllib.parse import urljoin, quote
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import requests
|
||||
import base64
|
||||
import binascii
|
||||
|
||||
|
||||
conjur_inputs = {
|
||||
@@ -50,6 +52,13 @@ conjur_inputs = {
|
||||
}
|
||||
|
||||
|
||||
def _is_base64(s: str) -> bool:
|
||||
try:
|
||||
return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
|
||||
except binascii.Error:
|
||||
return False
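Illustrative behaviour of the helper (example values only):

# _is_base64('dG9rZW4=')  -> True   (already base64, so the token is sent as-is below)
# _is_base64('raw token') -> False  (the token gets base64-encoded before being placed in the header)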
|
||||
|
||||
|
||||
def conjur_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
api_key = kwargs['api_key']
|
||||
@@ -77,7 +86,7 @@ def conjur_backend(**kwargs):
|
||||
token = resp.content.decode('utf-8')
|
||||
|
||||
lookup_kwargs = {
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token)},
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
|
||||
'allow_redirects': False,
|
||||
}
|
||||
|
||||
|
||||
@@ -2,25 +2,29 @@ from .plugin import CredentialPlugin
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from thycotic.secrets.vault import SecretsVault
|
||||
|
||||
from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault
|
||||
from base64 import b64decode
|
||||
|
||||
dsv_inputs = {
|
||||
'fields': [
|
||||
{
|
||||
'id': 'tenant',
|
||||
'label': _('Tenant'),
|
||||
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
|
||||
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'tld',
|
||||
'label': _('Top-level Domain (TLD)'),
|
||||
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
|
||||
'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
|
||||
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'),
|
||||
'choices': ['ca', 'com', 'com.au', 'eu'],
|
||||
'default': 'com',
|
||||
},
|
||||
{'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
|
||||
{
|
||||
'id': 'client_id',
|
||||
'label': _('Client ID'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'client_secret',
|
||||
'label': _('Client Secret'),
|
||||
@@ -41,8 +45,16 @@ dsv_inputs = {
|
||||
'help_text': _('The field to extract from the secret'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'secret_decoding',
|
||||
'label': _('Should the secret be base64 decoded?'),
|
||||
'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
|
||||
'choices': ['No Decoding', 'Decode Base64'],
|
||||
'type': 'string',
|
||||
'default': 'No Decoding',
|
||||
},
|
||||
],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'],
|
||||
}
|
||||
|
||||
if settings.DEBUG:
|
||||
@@ -51,12 +63,32 @@ if settings.DEBUG:
|
||||
'id': 'url_template',
|
||||
'label': _('URL template'),
|
||||
'type': 'string',
|
||||
'default': 'https://{}.secretsvaultcloud.{}/v1',
|
||||
'default': 'https://{}.secretsvaultcloud.{}',
|
||||
}
|
||||
)
|
||||
|
||||
dsv_plugin = CredentialPlugin(
|
||||
'Thycotic DevOps Secrets Vault',
|
||||
dsv_inputs,
|
||||
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip
|
||||
)
|
||||
|
||||
def dsv_backend(**kwargs):
|
||||
tenant_name = kwargs['tenant']
|
||||
tenant_tld = kwargs.get('tld', 'com')
|
||||
tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}')
|
||||
client_id = kwargs['client_id']
|
||||
client_secret = kwargs['client_secret']
|
||||
secret_path = kwargs['path']
|
||||
secret_field = kwargs['secret_field']
|
||||
# providing a default value to remain backward compatible for secrets that have not specified this option
|
||||
secret_decoding = kwargs.get('secret_decoding', 'No Decoding')
|
||||
|
||||
tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))
|
||||
|
||||
authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
|
||||
dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)
|
||||
|
||||
# files can be uploaded to DSV base64-encoded (e.g. SSH keys), so only decode the secret when asked to
|
||||
if secret_decoding == 'Decode Base64':
|
||||
return b64decode(dsv_secret['data'][secret_field]).decode()
|
||||
|
||||
return dsv_secret['data'][secret_field]
|
||||
|
||||
|
||||
dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)
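A minimal usage sketch of the rewritten backend, with placeholder values (not taken from this changeset):

value = dsv_backend(
    tenant='ex',                      # placeholder tenant name
    tld='com',
    client_id='...',                  # placeholder
    client_secret='...',              # placeholder
    path='ci/ssh-key',                # placeholder secret path
    secret_field='private_key',       # placeholder field name
    secret_decoding='Decode Base64',  # decode the stored value before returning it
)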
|
||||
|
||||
@@ -41,6 +41,34 @@ base_inputs = {
|
||||
'secret': True,
|
||||
'help_text': _('The Secret ID for AppRole Authentication'),
|
||||
},
|
||||
{
|
||||
'id': 'client_cert_public',
|
||||
'label': _('Client Certificate'),
|
||||
'type': 'string',
|
||||
'multiline': True,
|
||||
'help_text': _(
|
||||
'The PEM-encoded client certificate used for TLS client authentication.'
|
||||
' This should include the certificate and any intermediate certificates.'
|
||||
),
|
||||
},
|
||||
{
|
||||
'id': 'client_cert_private',
|
||||
'label': _('Client Certificate Key'),
|
||||
'type': 'string',
|
||||
'multiline': True,
|
||||
'secret': True,
|
||||
'help_text': _('The certificate private key used for TLS client authentication.'),
|
||||
},
|
||||
{
|
||||
'id': 'client_cert_role',
|
||||
'label': _('TLS Authentication Role'),
|
||||
'type': 'string',
|
||||
'multiline': False,
|
||||
'help_text': _(
|
||||
'The role configured in Hashicorp Vault for TLS client authentication.'
|
||||
' If not provided, Hashicorp Vault may assign roles based on the certificate used.'
|
||||
),
|
||||
},
|
||||
{
|
||||
'id': 'namespace',
|
||||
'label': _('Namespace name (Vault Enterprise only)'),
|
||||
@@ -59,6 +87,20 @@ base_inputs = {
|
||||
' see https://www.vaultproject.io/docs/auth/kubernetes#configuration'
|
||||
),
|
||||
},
|
||||
{
|
||||
'id': 'username',
|
||||
'label': _('Username'),
|
||||
'type': 'string',
|
||||
'secret': False,
|
||||
'help_text': _('Username for user authentication.'),
|
||||
},
|
||||
{
|
||||
'id': 'password',
|
||||
'label': _('Password'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
'help_text': _('Password for user authentication.'),
|
||||
},
|
||||
{
|
||||
'id': 'default_auth_path',
|
||||
'label': _('Path to Auth'),
|
||||
@@ -157,19 +199,25 @@ hashi_ssh_inputs['required'].extend(['public_key', 'role'])
|
||||
|
||||
def handle_auth(**kwargs):
|
||||
token = None
|
||||
|
||||
if kwargs.get('token'):
|
||||
token = kwargs['token']
|
||||
elif kwargs.get('username') and kwargs.get('password'):
|
||||
token = method_auth(**kwargs, auth_param=userpass_auth(**kwargs))
|
||||
elif kwargs.get('role_id') and kwargs.get('secret_id'):
|
||||
token = method_auth(**kwargs, auth_param=approle_auth(**kwargs))
|
||||
elif kwargs.get('kubernetes_role'):
|
||||
token = method_auth(**kwargs, auth_param=kubernetes_auth(**kwargs))
|
||||
elif kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
|
||||
token = method_auth(**kwargs, auth_param=client_cert_auth(**kwargs))
|
||||
else:
|
||||
raise Exception('Either token or AppRole/Kubernetes authentication parameters must be set')
|
||||
|
||||
raise Exception('Token, Username/Password, AppRole, Kubernetes, or TLS authentication parameters must be set')
|
||||
return token
|
||||
|
||||
|
||||
def userpass_auth(**kwargs):
|
||||
return {'username': kwargs['username'], 'password': kwargs['password']}
|
||||
|
||||
|
||||
def approle_auth(**kwargs):
|
||||
return {'role_id': kwargs['role_id'], 'secret_id': kwargs['secret_id']}
|
||||
|
||||
@@ -181,6 +229,10 @@ def kubernetes_auth(**kwargs):
|
||||
return {'role': kwargs['kubernetes_role'], 'jwt': jwt}
|
||||
|
||||
|
||||
def client_cert_auth(**kwargs):
|
||||
return {'name': kwargs.get('client_cert_role')}
|
||||
|
||||
|
||||
def method_auth(**kwargs):
|
||||
# get auth method specific params
|
||||
request_kwargs = {'json': kwargs['auth_param'], 'timeout': 30}
|
||||
@@ -193,13 +245,25 @@ def method_auth(**kwargs):
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
sess = requests.Session()
|
||||
sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
|
||||
|
||||
# Namespace support
|
||||
if kwargs.get('namespace'):
|
||||
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
|
||||
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
|
||||
if kwargs['auth_param'].get('username'):
|
||||
request_url = request_url + '/' + (kwargs['username'])
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
# TLS client certificate support
|
||||
if kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
|
||||
# Add client cert to requests Session before making call
|
||||
with CertFiles(kwargs['client_cert_public'], key=kwargs['client_cert_private']) as client_cert:
|
||||
sess.cert = client_cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
else:
|
||||
# Make call without client certificate
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
resp.raise_for_status()
|
||||
token = resp.json()['auth']['client_token']
|
||||
return token
|
||||
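The TLS branch above attaches the client certificate to the requests `Session` before posting to the auth mount's login endpoint (the real code writes the PEM inputs to temporary files via AWX's `CertFiles` helper). A hedged sketch of the same pattern with plain `requests`, using placeholder paths and a hypothetical role name:

```python
import requests

# Hypothetical values; the plugin derives these from the credential inputs.
vault_url = 'https://vault.example.com:8200/v1'
auth_path = 'cert'          # default mount point for Vault TLS cert auth
role_name = 'my-tls-role'   # optional; Vault can pick a role from the cert otherwise

sess = requests.Session()
sess.mount(vault_url, requests.adapters.HTTPAdapter(max_retries=5))
# requests accepts a (cert, key) tuple of file paths for TLS client authentication
sess.cert = ('/path/to/client.crt', '/path/to/client.key')

resp = sess.post(
    f'{vault_url}/auth/{auth_path}/login',
    json={'name': role_name},   # matches the payload built by client_cert_auth() above
    verify='/path/to/ca.pem',
    timeout=30,
)
resp.raise_for_status()
token = resp.json()['auth']['client_token']
```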
@@ -220,6 +284,7 @@ def kv_backend(**kwargs):
|
||||
}
|
||||
|
||||
sess = requests.Session()
|
||||
sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
# Compatibility header for older installs of Hashicorp Vault
|
||||
sess.headers['X-Vault-Token'] = token
|
||||
@@ -265,6 +330,8 @@ def kv_backend(**kwargs):
|
||||
|
||||
if secret_key:
|
||||
try:
|
||||
if (secret_key != 'data') and (secret_key not in json['data']) and ('data' in json['data']):
|
||||
return json['data']['data'][secret_key]
|
||||
return json['data'][secret_key]
|
||||
except KeyError:
|
||||
raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path))
|
||||
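The fallback above exists because KV version 2 wraps the secret payload one level deeper than KV version 1. A small illustration of the two response shapes and the key lookup, with made-up secret data:

```python
# KV v1: GET /v1/<mount>/<path> returns key/value pairs directly under 'data'
kv_v1_response = {'data': {'password': 'hunter2'}}

# KV v2: GET /v1/<mount>/data/<path> nests them under 'data' -> 'data'
kv_v2_response = {'data': {'data': {'password': 'hunter2'}, 'metadata': {'version': 3}}}


def extract(resp_json, secret_key):
    # mirrors the lookup above: fall back to the v2 nesting when the key is not at the top level
    if (secret_key != 'data') and (secret_key not in resp_json['data']) and ('data' in resp_json['data']):
        return resp_json['data']['data'][secret_key]
    return resp_json['data'][secret_key]


assert extract(kv_v1_response, 'password') == 'hunter2'
assert extract(kv_v2_response, 'password') == 'hunter2'
```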
@@ -288,6 +355,7 @@ def ssh_backend(**kwargs):
|
||||
request_kwargs['json']['valid_principals'] = kwargs['valid_principals']
|
||||
|
||||
sess = requests.Session()
|
||||
sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
if kwargs.get('namespace'):
|
||||
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
|
||||
|
||||
@@ -1,7 +1,10 @@
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _

from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
try:
from delinea.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
except ImportError:
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret

tss_inputs = {
'fields': [
@@ -50,8 +53,10 @@ tss_inputs = {


def tss_backend(**kwargs):
if 'domain' in kwargs:
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
if kwargs.get("domain"):
authorizer = DomainPasswordGrantAuthorizer(
base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
)
else:
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
secret_server = SecretServer(kwargs['server_url'], authorizer)

@@ -87,7 +87,7 @@ class RecordedQueryLog(object):
)
log.commit()
log.execute(
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) ' 'VALUES (?, ?, ?, ?, ?, ?, ?);',
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) VALUES (?, ?, ?, ?, ?, ?, ?);',
(os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt),
)
log.commit()

@@ -1,6 +1,7 @@
import os
import psycopg2
import psycopg
import select
from copy import deepcopy

from contextlib import contextmanager

@@ -40,8 +41,12 @@ def get_task_queuename():


class PubSub(object):
def __init__(self, conn):
def __init__(self, conn, select_timeout=None):
self.conn = conn
if select_timeout is None:
self.select_timeout = 5
else:
self.select_timeout = select_timeout

def listen(self, channel):
with self.conn.cursor() as cur:
@@ -55,25 +60,63 @@ class PubSub(object):
with self.conn.cursor() as cur:
cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

def events(self, select_timeout=5, yield_timeouts=False):
@staticmethod
def current_notifies(conn):
"""
Altered version of .notifies method from psycopg library
This removes the outer while True loop so that we only process
queued notifications
"""
with conn.lock:
try:
ns = conn.wait(psycopg.generators.notifies(conn.pgconn))
except psycopg.errors._NO_TRACEBACK as ex:
raise ex.with_traceback(None)
enc = psycopg._encodings.pgconn_encoding(conn.pgconn)
for pgn in ns:
n = psycopg.connection.Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
yield n

def events(self, yield_timeouts=False):
if not self.conn.autocommit:
raise RuntimeError('Listening for events can only be done in autocommit mode')

while True:
if select.select([self.conn], [], [], select_timeout) == NOT_READY:
if select.select([self.conn], [], [], self.select_timeout) == NOT_READY:
if yield_timeouts:
yield None
else:
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
notification_generator = self.current_notifies(self.conn)
for notification in notification_generator:
yield notification

def close(self):
self.conn.close()


def create_listener_connection():
conf = deepcopy(settings.DATABASES['default'])
conf['OPTIONS'] = deepcopy(conf.get('OPTIONS', {}))
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')

# Apply overrides specifically for the listener connection
for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
if k != 'OPTIONS':
conf[k] = v
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
conf['OPTIONS'][k] = v

# Allow password-less authentication
if 'PASSWORD' in conf:
conf['OPTIONS']['password'] = conf.pop('PASSWORD')

connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])


@contextmanager
def pg_bus_conn(new_connection=False):
def pg_bus_conn(new_connection=False, select_timeout=None):
'''
Any listeners probably want to establish a new database connection,
separate from the Django connection used for queries, because that will prevent
@@ -85,13 +128,7 @@ def pg_bus_conn(new_connection=False):
'''

if new_connection:
conf = settings.DATABASES['default'].copy()
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
# Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True)
conn = create_listener_connection()
else:
if pg_connection.connection is None:
pg_connection.connect()
@@ -99,7 +136,7 @@ def pg_bus_conn(new_connection=False):
raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
conn = pg_connection.connection

pubsub = PubSub(conn)
pubsub = PubSub(conn, select_timeout=select_timeout)
yield pubsub
if new_connection:
conn.close()

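Putting the pieces above together, a listener that wants its own dedicated connection and a custom poll interval could be driven roughly like this (sketch built on the `pg_bus_conn` / `PubSub` helpers shown in this diff; the channel name is hypothetical):

```python
import json

from awx.main.dispatch import pg_bus_conn

# New connection, 5-second select() timeout so the loop wakes up periodically
with pg_bus_conn(new_connection=True, select_timeout=5) as conn:
    conn.listen('my_channel')  # hypothetical pg_notify channel
    for event in conn.events(yield_timeouts=True):
        if event is None:
            # select() timed out; a real caller would run periodic work here
            continue
        payload = json.loads(event.payload)
        print(payload)
```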
@@ -37,8 +37,14 @@ class Control(object):
def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs)

def cancel(self, task_ids, *args, **kwargs):
return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
def cancel(self, task_ids, with_reply=True):
if with_reply:
return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
else:
self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})

def schedule(self, *args, **kwargs):
return self.control_with_reply('schedule', *args, **kwargs)

@classmethod
def generate_reply_queue_name(cls):
@@ -52,14 +58,14 @@ class Control(object):
if not connection.get_autocommit():
raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')

with pg_bus_conn() as conn:
with pg_bus_conn(select_timeout=timeout) as conn:
conn.listen(reply_queue)
send_data = {'control': command, 'reply_to': reply_queue}
if extra_data:
send_data.update(extra_data)
conn.notify(self.queuename, json.dumps(send_data))

for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
for reply in conn.events(yield_timeouts=True):
if reply is None:
logger.error(f'{self.service} did not reply within {timeout}s')
raise RuntimeError(f"{self.service} did not reply within {timeout}s")

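With the new `with_reply` flag, callers that only need to fire a cancel no longer have to set up and wait on a reply queue. A hedged sketch of both paths (the `Control('dispatcher')` constructor argument and import path are assumed from how the class is used elsewhere in AWX; the task id is made up):

```python
from awx.main.dispatch.control import Control

ctl = Control('dispatcher')  # service/queue name assumed

# Round trip: publishes the control message and blocks until the dispatcher replies
replies = ctl.cancel(['3f0c1d2e-0000-0000-0000-000000000000'])

# Fire and forget: publish the cancel with reply_to=None and return immediately
ctl.cancel(['3f0c1d2e-0000-0000-0000-000000000000'], with_reply=False)
```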
@@ -1,57 +1,142 @@
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from multiprocessing import Process
|
||||
import yaml
|
||||
from datetime import datetime
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connections
|
||||
from schedule import Scheduler
|
||||
from django_guid import set_guid
|
||||
from django_guid.utils import generate_guid
|
||||
|
||||
from awx.main.dispatch.worker import TaskWorker
|
||||
from awx.main.utils.db import set_connection_name
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch.periodic')
|
||||
|
||||
|
||||
class Scheduler(Scheduler):
|
||||
def run_continuously(self):
|
||||
idle_seconds = max(1, min(self.jobs).period.total_seconds() / 2)
|
||||
class ScheduledTask:
|
||||
"""
|
||||
Class representing schedules, very loosely modeled after python schedule library Job
|
||||
the idea of this class is to:
|
||||
- only deal in relative times (time since the scheduler global start)
|
||||
- only deal in integer math for target runtimes, but float for current relative time
|
||||
|
||||
def run():
|
||||
ppid = os.getppid()
|
||||
logger.warning('periodic beat started')
|
||||
Missed schedule policy:
|
||||
Invariant target times are maintained, meaning that if interval=10s offset=0
|
||||
and it runs at t=7s, then it calls for next run in 3s.
|
||||
However, if a complete interval has passed, that is counted as a missed run,
|
||||
and missed runs are abandoned (no catch-up runs).
|
||||
"""
|
||||
|
||||
set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes
|
||||
def __init__(self, name: str, data: dict):
|
||||
# parameters need for schedule computation
|
||||
self.interval = int(data['schedule'].total_seconds())
|
||||
self.offset = 0 # offset relative to start time this schedule begins
|
||||
self.index = 0 # number of periods of the schedule that has passed
|
||||
|
||||
while True:
|
||||
if os.getppid() != ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
pid = os.getpid()
|
||||
logger.warning(f'periodic beat exiting gracefully pid:{pid}')
|
||||
raise SystemExit()
|
||||
try:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
set_guid(generate_guid())
|
||||
self.run_pending()
|
||||
except Exception:
|
||||
logger.exception('encountered an error while scheduling periodic tasks')
|
||||
time.sleep(idle_seconds)
|
||||
# parameters that do not affect scheduling logic
|
||||
self.last_run = None # time of last run, only used for debug
|
||||
self.completed_runs = 0 # number of times schedule is known to run
|
||||
self.name = name
|
||||
self.data = data # used by caller to know what to run
|
||||
|
||||
process = Process(target=run)
|
||||
process.daemon = True
|
||||
process.start()
|
||||
@property
|
||||
def next_run(self):
|
||||
"Time until the next run with t=0 being the global_start of the scheduler class"
|
||||
return (self.index + 1) * self.interval + self.offset
|
||||
|
||||
def due_to_run(self, relative_time):
|
||||
return bool(self.next_run <= relative_time)
|
||||
|
||||
def expected_runs(self, relative_time):
|
||||
return int((relative_time - self.offset) / self.interval)
|
||||
|
||||
def mark_run(self, relative_time):
|
||||
self.last_run = relative_time
|
||||
self.completed_runs += 1
|
||||
new_index = self.expected_runs(relative_time)
|
||||
if new_index > self.index + 1:
|
||||
logger.warning(f'Missed {new_index - self.index - 1} schedules of {self.name}')
|
||||
self.index = new_index
|
||||
|
||||
def missed_runs(self, relative_time):
|
||||
"Number of times job was supposed to ran but failed to, only used for debug"
|
||||
missed_ct = self.expected_runs(relative_time) - self.completed_runs
|
||||
# if this is currently due to run do not count that as a missed run
|
||||
if missed_ct and self.due_to_run(relative_time):
|
||||
missed_ct -= 1
|
||||
return missed_ct
|
||||
|
||||
|
||||
def run_continuously():
|
||||
scheduler = Scheduler()
|
||||
for task in settings.CELERYBEAT_SCHEDULE.values():
|
||||
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
|
||||
total_seconds = task['schedule'].total_seconds()
|
||||
scheduler.every(total_seconds).seconds.do(apply_async)
|
||||
scheduler.run_continuously()
|
||||
class Scheduler:
|
||||
def __init__(self, schedule):
|
||||
"""
|
||||
Expects schedule in the form of a dictionary like
|
||||
{
|
||||
'job1': {'schedule': timedelta(seconds=50), 'other': 'stuff'}
|
||||
}
|
||||
Only the schedule nearest-second value is used for scheduling,
|
||||
the rest of the data is for use by the caller to know what to run.
|
||||
"""
|
||||
self.jobs = [ScheduledTask(name, data) for name, data in schedule.items()]
|
||||
min_interval = min(job.interval for job in self.jobs)
|
||||
num_jobs = len(self.jobs)
|
||||
|
||||
# this is intentionally opinionated against spammy schedules
|
||||
# a core goal is to spread out the scheduled tasks (for worker management)
|
||||
# and high-frequency schedules just do not work with that
|
||||
if num_jobs > min_interval:
|
||||
raise RuntimeError(f'Number of schedules ({num_jobs}) is more than the shortest schedule interval ({min_interval} seconds).')
|
||||
|
||||
# evenly space out jobs over the base interval
|
||||
for i, job in enumerate(self.jobs):
|
||||
job.offset = (i * min_interval) // num_jobs
|
||||
|
||||
# internally times are all referenced relative to startup time, add grace period
|
||||
self.global_start = time.time() + 2.0
|
||||
|
||||
def get_and_mark_pending(self):
|
||||
relative_time = time.time() - self.global_start
|
||||
to_run = []
|
||||
for job in self.jobs:
|
||||
if job.due_to_run(relative_time):
|
||||
to_run.append(job)
|
||||
logger.debug(f'scheduler found {job.name} to run, {relative_time - job.next_run} seconds after target')
|
||||
job.mark_run(relative_time)
|
||||
return to_run
|
||||
|
||||
def time_until_next_run(self):
|
||||
relative_time = time.time() - self.global_start
|
||||
next_job = min(self.jobs, key=lambda j: j.next_run)
|
||||
delta = next_job.next_run - relative_time
|
||||
if delta <= 0.1:
|
||||
# careful not to give 0 or negative values to the select timeout, which has unclear interpretation
|
||||
logger.warning(f'Scheduler next run of {next_job.name} is {-delta} seconds in the past')
|
||||
return 0.1
|
||||
elif delta > 20.0:
|
||||
logger.warning(f'Scheduler next run unexpectedly over 20 seconds in future: {delta}')
|
||||
return 20.0
|
||||
logger.debug(f'Scheduler next run is {next_job.name} in {delta} seconds')
|
||||
return delta
|
||||
|
||||
def debug(self, *args, **kwargs):
|
||||
data = dict()
|
||||
data['title'] = 'Scheduler status'
|
||||
|
||||
now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
relative_time = time.time() - self.global_start
|
||||
data['started_time'] = start_time
|
||||
data['current_time'] = now
|
||||
data['current_time_relative'] = round(relative_time, 3)
|
||||
data['total_schedules'] = len(self.jobs)
|
||||
|
||||
data['schedule_list'] = dict(
|
||||
[
|
||||
(
|
||||
job.name,
|
||||
dict(
|
||||
last_run_seconds_ago=round(relative_time - job.last_run, 3) if job.last_run else None,
|
||||
next_run_in_seconds=round(job.next_run - relative_time, 3),
|
||||
offset_in_seconds=job.offset,
|
||||
completed_runs=job.completed_runs,
|
||||
missed_runs=job.missed_runs(relative_time),
|
||||
),
|
||||
)
|
||||
for job in sorted(self.jobs, key=lambda job: job.interval)
|
||||
]
|
||||
)
|
||||
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
|
||||
|
||||
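A short sketch of how the relative-time scheduler above is driven, using the dictionary shape described in its docstring (the job names and intervals are made up):

```python
import time
from datetime import timedelta

from awx.main.dispatch.periodic import Scheduler

schedule = {
    'job1': {'schedule': timedelta(seconds=30)},
    'job2': {'schedule': timedelta(seconds=50)},
}
scheduler = Scheduler(schedule)

# Minimal driver loop: sleep until something is due, then mark it and "run" it.
for _ in range(3):
    time.sleep(scheduler.time_until_next_run())
    for job in scheduler.get_and_mark_pending():
        print(f'would run {job.name} with data {job.data}')
```

In the dispatcher itself this loop is folded into the pg_notify listen loop: `time_until_next_run()` becomes the `select()` timeout, so scheduled work wakes the process even when no events arrive.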
@@ -339,6 +339,17 @@ class AutoscalePool(WorkerPool):
|
||||
# but if the task takes longer than the time defined here, we will force it to stop here
|
||||
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD
|
||||
|
||||
# initialize some things for subsystem metrics periodic gathering
|
||||
# the AutoscalePool class does not save these to redis directly, but reports via produce_subsystem_metrics
|
||||
self.scale_up_ct = 0
|
||||
self.worker_count_max = 0
|
||||
|
||||
def produce_subsystem_metrics(self, metrics_object):
|
||||
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
|
||||
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
|
||||
metrics_object.set('dispatcher_pool_max_worker_count', self.worker_count_max)
|
||||
self.worker_count_max = len(self.workers)
|
||||
|
||||
@property
|
||||
def should_grow(self):
|
||||
if len(self.workers) < self.min_workers:
|
||||
@@ -406,16 +417,16 @@ class AutoscalePool(WorkerPool):
|
||||
# the task manager to never do more work
|
||||
current_task = w.current_task
|
||||
if current_task and isinstance(current_task, dict):
|
||||
endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
|
||||
endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')
|
||||
current_task_name = current_task.get('task', '')
|
||||
if any(current_task_name.endswith(e) for e in endings):
|
||||
if current_task_name.endswith(endings):
|
||||
if 'started' not in current_task:
|
||||
w.managed_tasks[current_task['uuid']]['started'] = time.time()
|
||||
age = time.time() - current_task['started']
|
||||
w.managed_tasks[current_task['uuid']]['age'] = age
|
||||
if age > self.task_manager_timeout:
|
||||
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
|
||||
os.kill(w.pid, signal.SIGTERM)
|
||||
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGUSR1 to {w.pid}')
|
||||
os.kill(w.pid, signal.SIGUSR1)
|
||||
|
||||
for m in orphaned:
|
||||
# if all the workers are dead, spawn at least one
|
||||
@@ -443,7 +454,12 @@ class AutoscalePool(WorkerPool):
|
||||
idx = random.choice(range(len(self.workers)))
|
||||
return idx, self.workers[idx]
|
||||
else:
|
||||
return super(AutoscalePool, self).up()
|
||||
self.scale_up_ct += 1
|
||||
ret = super(AutoscalePool, self).up()
|
||||
new_worker_ct = len(self.workers)
|
||||
if new_worker_ct > self.worker_count_max:
|
||||
self.worker_count_max = new_worker_ct
|
||||
return ret
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
if 'guid' in body:
|
||||
|
||||
@@ -73,15 +73,15 @@ class task:
|
||||
return cls.apply_async(args, kwargs)
|
||||
|
||||
@classmethod
|
||||
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
|
||||
def get_async_body(cls, args=None, kwargs=None, uuid=None, **kw):
|
||||
"""
|
||||
Get the python dict to become JSON data in the pg_notify message
|
||||
This same message gets passed over the dispatcher IPC queue to workers
|
||||
If a task is submitted to a multiprocessing pool, skipping pg_notify, this might be used directly
|
||||
"""
|
||||
task_id = uuid or str(uuid4())
|
||||
args = args or []
|
||||
kwargs = kwargs or {}
|
||||
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
|
||||
if not queue:
|
||||
msg = f'{cls.name}: Queue value required and may not be None'
|
||||
logger.error(msg)
|
||||
raise ValueError(msg)
|
||||
obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
|
||||
guid = get_guid()
|
||||
if guid:
|
||||
@@ -89,6 +89,16 @@ class task:
|
||||
if bind_kwargs:
|
||||
obj['bind_kwargs'] = bind_kwargs
|
||||
obj.update(**kw)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
|
||||
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
|
||||
if not queue:
|
||||
msg = f'{cls.name}: Queue value required and may not be None'
|
||||
logger.error(msg)
|
||||
raise ValueError(msg)
|
||||
obj = cls.get_async_body(args=args, kwargs=kwargs, uuid=uuid, **kw)
|
||||
if callable(queue):
|
||||
queue = queue()
|
||||
if not is_testing():
|
||||
@@ -116,4 +126,5 @@ class task:
|
||||
setattr(fn, 'name', cls.name)
|
||||
setattr(fn, 'apply_async', cls.apply_async)
|
||||
setattr(fn, 'delay', cls.delay)
|
||||
setattr(fn, 'get_async_body', cls.get_async_body)
|
||||
return fn
|
||||
|
||||
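`get_async_body` factors the message construction out of `apply_async`, so the same dict can either be published over pg_notify or handed straight to a local worker pool (which is how the scheduler dispatches periodic tasks without a round trip through postgres). A hedged sketch with a hypothetical task and queue name; the decorator import path is assumed from how `@task` is used elsewhere in AWX:

```python
from awx.main.dispatch.publish import task


@task(queue='example_queue')  # queue name is illustrative
def add(a, b):
    return a + b


# Build the message body without publishing it anywhere
body = add.get_async_body(args=[2, 3])
# body resembles: {'uuid': '...', 'args': [2, 3], 'kwargs': {}, 'task': '...add', 'time_pub': ...}

# The normal path would be add.apply_async(args=[2, 3]), which builds the same
# body and then publishes it via pg_notify to a running dispatcher.
```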
@@ -7,18 +7,21 @@ import signal
|
||||
import sys
|
||||
import redis
|
||||
import json
|
||||
import psycopg2
|
||||
import psycopg
|
||||
import time
|
||||
from uuid import UUID
|
||||
from queue import Empty as QueueEmpty
|
||||
from datetime import timedelta
|
||||
|
||||
from django import db
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.dispatch.pool import WorkerPool
|
||||
from awx.main.dispatch.periodic import Scheduler
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.utils.common import log_excess_runtime
|
||||
from awx.main.utils.db import set_connection_name
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
@@ -63,10 +66,12 @@ class AWXConsumerBase(object):
|
||||
def control(self, body):
|
||||
logger.warning(f'Received control signal:\n{body}')
|
||||
control = body.get('control')
|
||||
if control in ('status', 'running', 'cancel'):
|
||||
if control in ('status', 'schedule', 'running', 'cancel'):
|
||||
reply_queue = body['reply_to']
|
||||
if control == 'status':
|
||||
msg = '\n'.join([self.listening_on, self.pool.debug()])
|
||||
if control == 'schedule':
|
||||
msg = self.scheduler.debug()
|
||||
elif control == 'running':
|
||||
msg = []
|
||||
for worker in self.pool.workers:
|
||||
@@ -84,24 +89,20 @@ class AWXConsumerBase(object):
|
||||
if task_ids and not msg:
|
||||
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
|
||||
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(reply_queue, json.dumps(msg))
|
||||
if reply_queue is not None:
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(reply_queue, json.dumps(msg))
|
||||
elif control == 'reload':
|
||||
for worker in self.pool.workers:
|
||||
worker.quit()
|
||||
else:
|
||||
logger.error('unrecognized control message: {}'.format(control))
|
||||
|
||||
def process_task(self, body):
|
||||
def dispatch_task(self, body):
|
||||
"""This will place the given body into a worker queue to run method decorated as a task"""
|
||||
if isinstance(body, dict):
|
||||
body['time_ack'] = time.time()
|
||||
|
||||
if 'control' in body:
|
||||
try:
|
||||
return self.control(body)
|
||||
except Exception:
|
||||
logger.exception(f"Exception handling control message: {body}")
|
||||
return
|
||||
if len(self.pool):
|
||||
if "uuid" in body and body['uuid']:
|
||||
try:
|
||||
@@ -115,15 +116,24 @@ class AWXConsumerBase(object):
|
||||
self.pool.write(queue, body)
|
||||
self.total_messages += 1
|
||||
|
||||
def process_task(self, body):
|
||||
"""Routes the task details in body as either a control task or a task-task"""
|
||||
if 'control' in body:
|
||||
try:
|
||||
return self.control(body)
|
||||
except Exception:
|
||||
logger.exception(f"Exception handling control message: {body}")
|
||||
return
|
||||
self.dispatch_task(body)
|
||||
|
||||
@log_excess_runtime(logger)
|
||||
def record_statistics(self):
|
||||
if time.time() - self.last_stats > 1: # buffer stat recording to once per second
|
||||
try:
|
||||
self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
|
||||
self.last_stats = time.time()
|
||||
except Exception:
|
||||
logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
|
||||
self.last_stats = time.time()
|
||||
self.last_stats = time.time()
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
signal.signal(signal.SIGINT, self.stop)
|
||||
@@ -142,29 +152,75 @@ class AWXConsumerRedis(AWXConsumerBase):
|
||||
def run(self, *args, **kwargs):
|
||||
super(AWXConsumerRedis, self).run(*args, **kwargs)
|
||||
self.worker.on_start()
|
||||
logger.info(f'Callback receiver started with pid={os.getpid()}')
|
||||
db.connection.close() # logs use database, so close connection
|
||||
|
||||
while True:
|
||||
logger.debug(f'{os.getpid()} is alive')
|
||||
time.sleep(60)
|
||||
|
||||
|
||||
class AWXConsumerPG(AWXConsumerBase):
|
||||
def __init__(self, *args, **kwargs):
|
||||
def __init__(self, *args, schedule=None, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE
|
||||
self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE)
|
||||
# if no successful loops have run since startup, then we should fail right away
|
||||
self.pg_is_down = True # set so that we fail if we get database errors on startup
|
||||
self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period
|
||||
self.last_cleanup = time.time()
|
||||
init_time = time.time()
|
||||
self.pg_down_time = init_time - self.pg_max_wait # allow no grace period
|
||||
self.last_cleanup = init_time
|
||||
self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False)
|
||||
self.last_metrics_gather = init_time
|
||||
self.listen_cumulative_time = 0.0
|
||||
if schedule:
|
||||
schedule = schedule.copy()
|
||||
else:
|
||||
schedule = {}
|
||||
# add control tasks to be ran at regular schedules
|
||||
# NOTE: if we run out of database connections, it is important to still run cleanup
|
||||
# so that we scale down workers and free up connections
|
||||
schedule['pool_cleanup'] = {'control': self.pool.cleanup, 'schedule': timedelta(seconds=60)}
|
||||
# record subsystem metrics for the dispatcher
|
||||
schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)}
|
||||
self.scheduler = Scheduler(schedule)
|
||||
|
||||
def record_metrics(self):
|
||||
current_time = time.time()
|
||||
self.pool.produce_subsystem_metrics(self.subsystem_metrics)
|
||||
self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
self.listen_cumulative_time = 0.0
|
||||
self.last_metrics_gather = current_time
|
||||
|
||||
def run_periodic_tasks(self):
|
||||
self.record_statistics() # maintains time buffer in method
|
||||
"""
|
||||
Run general periodic logic, and return maximum time in seconds before
|
||||
the next requested run
|
||||
This may be called more often than that when events are consumed
|
||||
so it needs to be very efficient
|
||||
"""
|
||||
try:
|
||||
self.record_statistics() # maintains time buffer in method
|
||||
except Exception as exc:
|
||||
logger.warning(f'Failed to save dispatcher statistics {exc}')
|
||||
|
||||
if time.time() - self.last_cleanup > 60: # same as cluster_node_heartbeat
|
||||
# NOTE: if we run out of database connections, it is important to still run cleanup
|
||||
# so that we scale down workers and free up connections
|
||||
self.pool.cleanup()
|
||||
self.last_cleanup = time.time()
|
||||
for job in self.scheduler.get_and_mark_pending():
|
||||
if 'control' in job.data:
|
||||
try:
|
||||
job.data['control']()
|
||||
except Exception:
|
||||
logger.exception(f'Error running control task {job.data}')
|
||||
elif 'task' in job.data:
|
||||
body = self.worker.resolve_callable(job.data['task']).get_async_body()
|
||||
# bypasses pg_notify for scheduled tasks
|
||||
self.dispatch_task(body)
|
||||
|
||||
if self.pg_is_down:
|
||||
logger.info('Dispatcher listener connection established')
|
||||
self.pg_is_down = False
|
||||
|
||||
self.listen_start = time.time()
|
||||
|
||||
return self.scheduler.time_until_next_run()
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
super(AWXConsumerPG, self).run(*args, **kwargs)
|
||||
@@ -180,17 +236,21 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
if init is False:
|
||||
self.worker.on_start()
|
||||
init = True
|
||||
# run_periodic_tasks run scheduled actions and gives time until next scheduled action
|
||||
# this is saved to the conn (PubSub) object in order to modify read timeout in-loop
|
||||
conn.select_timeout = self.run_periodic_tasks()
|
||||
# this is the main operational loop for awx-manage run_dispatcher
|
||||
for e in conn.events(yield_timeouts=True):
|
||||
self.listen_cumulative_time += time.time() - self.listen_start # for metrics
|
||||
if e is not None:
|
||||
self.process_task(json.loads(e.payload))
|
||||
self.run_periodic_tasks()
|
||||
self.pg_is_down = False
|
||||
conn.select_timeout = self.run_periodic_tasks()
|
||||
if self.should_stop:
|
||||
return
|
||||
except psycopg2.InterfaceError:
|
||||
except psycopg.InterfaceError:
|
||||
logger.warning("Stale Postgres message bus connection, reconnecting")
|
||||
continue
|
||||
except (db.DatabaseError, psycopg2.OperationalError):
|
||||
except (db.DatabaseError, psycopg.OperationalError):
|
||||
# If we have attained steady-state operation, tolerate short-term database hiccups
|
||||
if not self.pg_is_down:
|
||||
logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
|
||||
@@ -199,6 +259,12 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
current_downtime = time.time() - self.pg_down_time
|
||||
if current_downtime > self.pg_max_wait:
|
||||
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
|
||||
# Sending QUIT to multiprocess queue to signal workers to exit
|
||||
for worker in self.pool.workers:
|
||||
try:
|
||||
worker.quit()
|
||||
except Exception:
|
||||
logger.exception(f"Error sending QUIT to worker {worker}")
|
||||
raise
|
||||
# Wait for a second before next attempt, but still listen for any shutdown signals
|
||||
for i in range(10):
|
||||
@@ -210,6 +276,12 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in dispatcher main loop')
|
||||
# Sending QUIT to multiprocess queue to signal workers to exit
|
||||
for worker in self.pool.workers:
|
||||
try:
|
||||
worker.quit()
|
||||
except Exception:
|
||||
logger.exception(f"Error sending QUIT to worker {worker}")
|
||||
raise
|
||||
|
||||
|
||||
@@ -232,8 +304,8 @@ class BaseWorker(object):
|
||||
break
|
||||
except QueueEmpty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
|
||||
except Exception:
|
||||
logger.exception("Exception on worker {}, reconnecting: ".format(idx))
|
||||
continue
|
||||
try:
|
||||
for conn in db.connections.all():
|
||||
|
||||
@@ -72,7 +72,7 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
def __init__(self):
|
||||
self.buff = {}
|
||||
self.redis = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
self.subsystem_metrics = s_metrics.CallbackReceiverMetrics(auto_pipe_execute=False)
|
||||
self.queue_pop = 0
|
||||
self.queue_name = settings.CALLBACK_QUEUE
|
||||
self.prof = AWXProfiler("CallbackBrokerWorker")
|
||||
@@ -191,7 +191,9 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
e._retry_count = retry_count
|
||||
|
||||
# special sanitization logic for postgres treatment of NUL 0x00 char
|
||||
if (retry_count == 1) and isinstance(exc_indv, ValueError) and ("\x00" in e.stdout):
|
||||
# This used to check the class of the exception, but on the psycopg 3 upgrade it could appear
# as either DataError or ValueError, so now let's just check whether the NUL character is there.
|
||||
if (retry_count == 1) and ("\x00" in e.stdout):
|
||||
e.stdout = e.stdout.replace("\x00", "")
|
||||
|
||||
if retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
import copy
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import urllib.parse
|
||||
|
||||
from jinja2 import sandbox, StrictUndefined
|
||||
@@ -67,10 +68,60 @@ def __enum_validate__(validator, enums, instance, schema):
|
||||
Draft4Validator.VALIDATORS['enum'] = __enum_validate__
|
||||
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger('awx.main.fields')
|
||||
|
||||
|
||||
class JSONBlob(JSONField):
# Cringe... a JSONField that is backed by a TextField.
# This field was a legacy custom field type that, tl;dr, was a TextField.
# Over the years, with Django upgrades, we were able to move to a JSONField instead of the custom field.
# However, we didn't want to force large customers with millions of events to convert from text to json during an upgrade,
# so we keep this field type backed by a TextField.
def get_internal_type(self):
|
||||
return "TextField"
|
||||
|
||||
# postgres uses a Jsonb field as the default backend
|
||||
# with psycopg2 it was using a psycopg2._json.Json class internally
|
||||
# with psycopg3 it uses a psycopg.types.json.Jsonb class internally
|
||||
# The binary class was not compatible with a text field, so we are going to override these next two methods and ensure we are using a string
|
||||
|
||||
def from_db_value(self, value, expression, connection):
|
||||
if value is None:
|
||||
return value
|
||||
|
||||
if isinstance(value, str):
|
||||
try:
|
||||
return json.loads(value)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load JSONField {self.name}: {e}")
|
||||
|
||||
return value
|
||||
|
||||
def get_db_prep_value(self, value, connection, prepared=False):
|
||||
if not prepared:
|
||||
value = self.get_prep_value(value)
|
||||
try:
|
||||
# Null characters are not allowed in text fields and JSONBlobs are JSON data but saved as text
|
||||
# So we want to make sure we strip out any null characters also note, these "should" be escaped by the dumps process:
|
||||
# >>> my_obj = { 'test': '\x00' }
|
||||
# >>> import json
|
||||
# >>> json.dumps(my_obj)
|
||||
# '{"test": "\\u0000"}'
|
||||
# But just to be safe, lets remove them if they are there. \x00 and \u0000 are the same:
|
||||
# >>> string = "\x00"
|
||||
# >>> "\u0000" in string
|
||||
# True
|
||||
dumped_value = json.dumps(value)
|
||||
if "\x00" in dumped_value:
|
||||
dumped_value = dumped_value.replace("\x00", '')
|
||||
return dumped_value
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to dump JSONField {self.name}: {e} value: {value}")
|
||||
|
||||
return value
|
||||
|
||||
|
||||
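The reason for the NUL handling in `get_db_prep_value` above: PostgreSQL text columns reject raw 0x00 bytes, and `json.dumps` escapes the character rather than removing it, so the `replace()` is a safety net for anything that slips through. A small illustration:

```python
import json

value = {'stdout': 'before\x00after'}
dumped = json.dumps(value)

# json.dumps escapes the NUL as the six characters \u0000 ...
assert dumped == '{"stdout": "before\\u0000after"}'
# ... so no raw 0x00 byte is normally left in the dumped string
assert '\x00' not in dumped

# The replace() in get_db_prep_value is a belt-and-braces guard for any raw NUL
# that still makes it through, since Postgres text columns reject 0x00.
dumped = dumped.replace('\x00', '')
```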
# Based on AutoOneToOneField from django-annoying:
|
||||
# https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py
|
||||
@@ -201,7 +252,7 @@ class ImplicitRoleField(models.ForeignKey):
|
||||
kwargs.setdefault('related_name', '+')
|
||||
kwargs.setdefault('null', 'True')
|
||||
kwargs.setdefault('editable', False)
|
||||
kwargs.setdefault('on_delete', models.CASCADE)
|
||||
kwargs.setdefault('on_delete', models.SET_NULL)
|
||||
super(ImplicitRoleField, self).__init__(*args, **kwargs)
|
||||
|
||||
def deconstruct(self):
|
||||
@@ -356,11 +407,13 @@ class SmartFilterField(models.TextField):
|
||||
# https://docs.python.org/2/library/stdtypes.html#truth-value-testing
|
||||
if not value:
|
||||
return None
|
||||
value = urllib.parse.unquote(value)
|
||||
try:
|
||||
SmartFilter().query_from_string(value)
|
||||
except RuntimeError as e:
|
||||
raise models.base.ValidationError(e)
|
||||
# avoid doing too much during migrations
|
||||
if 'migrate' not in sys.argv:
|
||||
value = urllib.parse.unquote(value)
|
||||
try:
|
||||
SmartFilter().query_from_string(value)
|
||||
except RuntimeError as e:
|
||||
raise models.base.ValidationError(e)
|
||||
return super(SmartFilterField, self).get_prep_value(value)
|
||||
|
||||
|
||||
@@ -800,7 +853,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
def validate_env_var_allowed(self, env_var):
|
||||
if env_var.startswith('ANSIBLE_'):
|
||||
raise django_exceptions.ValidationError(
|
||||
_('Environment variable {} may affect Ansible configuration so its ' 'use is not allowed in credentials.').format(env_var),
|
||||
_('Environment variable {} may affect Ansible configuration so its use is not allowed in credentials.').format(env_var),
|
||||
code='invalid',
|
||||
params={'value': env_var},
|
||||
)
|
||||
|
||||
awx/main/management/commands/add_receptor_address.py (new file, 53 lines)
@@ -0,0 +1,53 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from django.core.management.base import BaseCommand

from awx.main.models import Instance, ReceptorAddress


def add_address(**kwargs):
try:
instance = Instance.objects.get(hostname=kwargs.pop('instance'))
kwargs['instance'] = instance

if kwargs.get('canonical') and instance.receptor_addresses.filter(canonical=True).exclude(address=kwargs['address']).exists():
print(f"Instance {instance.hostname} already has a canonical address, skipping")
return False
# if ReceptorAddress already exists with address, just update
# otherwise, create new ReceptorAddress
addr, _ = ReceptorAddress.objects.update_or_create(address=kwargs.pop('address'), defaults=kwargs)
print(f"Successfully added receptor address {addr.get_full_address()}")
return True
except Exception as e:
print(f"Error adding receptor address: {e}")
return False


class Command(BaseCommand):
"""
Internal controller command.
Register receptor address to an already-registered instance.
"""

help = "Add receptor address to an instance."

def add_arguments(self, parser):
parser.add_argument('--instance', dest='instance', required=True, type=str, help="Instance hostname this address is added to")
parser.add_argument('--address', dest='address', required=True, type=str, help="Receptor address")
parser.add_argument('--port', dest='port', type=int, help="Receptor listener port")
parser.add_argument('--websocket_path', dest='websocket_path', type=str, default="", help="Path for websockets")
parser.add_argument('--is_internal', action='store_true', help="If true, address only resolvable within the Kubernetes cluster")
parser.add_argument('--protocol', type=str, default='tcp', choices=['tcp', 'ws', 'wss'], help="Protocol to use for the Receptor listener")
parser.add_argument('--canonical', action='store_true', help="If true, address is the canonical address for the instance")
parser.add_argument('--peers_from_control_nodes', action='store_true', help="If true, control nodes will peer to this address")

def handle(self, **options):
address_options = {
k: options[k]
for k in ('instance', 'address', 'port', 'websocket_path', 'is_internal', 'protocol', 'peers_from_control_nodes', 'canonical')
if options[k]
}
changed = add_address(**address_options)
if changed:
print("(changed: True)")

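Since this is a Django management command, it can also be invoked programmatically with `call_command`; a hedged example with made-up hostname and address (option names match the parser above):

```python
from django.core.management import call_command

# Hypothetical values; --instance must name an already-registered Instance
call_command(
    'add_receptor_address',
    instance='awx-task-1',
    address='10.0.0.15',
    port=27199,
    protocol='tcp',
    canonical=True,
    peers_from_control_nodes=True,
)
```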
awx/main/management/commands/check_instance_ready.py (new file, 12 lines)
@@ -0,0 +1,12 @@
from django.core.management.base import BaseCommand, CommandError
from awx.main.models.ha import Instance


class Command(BaseCommand):
help = 'Check if the task manager instance is ready; throws an error if not ready. Can be used as a readiness probe for k8s.'

def handle(self, *args, **options):
if Instance.objects.me().node_state != Instance.States.READY:
raise CommandError('Instance is not ready') # so that return code is not 0

return
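The command exits non-zero via `CommandError` when the local instance is not in the READY state, which is what makes `awx-manage check_instance_ready` usable as a Kubernetes exec readiness probe. Programmatically it behaves like this sketch:

```python
from django.core.management import call_command
from django.core.management.base import CommandError

try:
    call_command('check_instance_ready')
    print('instance is ready')
except CommandError:
    # awx-manage would exit with a non-zero status here
    print('instance is not ready')
```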
@@ -23,7 +23,10 @@ class Command(BaseCommand):

def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batch of X events. Defaults to 500.'
)

def init_logging(self):
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
@@ -48,7 +51,7 @@ class Command(BaseCommand):
else:
pks_to_delete.add(asobj.pk)
# Cleanup objects in batches instead of deleting each one individually.
if len(pks_to_delete) >= 500:
if len(pks_to_delete) >= self.batch_size:
ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
n_deleted_items += len(pks_to_delete)
pks_to_delete.clear()
@@ -63,4 +66,5 @@ class Command(BaseCommand):
self.days = int(options.get('days', 30))
self.cutoff = now() - datetime.timedelta(days=self.days)
self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 500))
self.cleanup_activitystream()

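With the new `--batch-size` option wired into `handle()`, the delete batch threshold is now configurable rather than fixed at 500. For example (values are illustrative):

```python
from django.core.management import call_command

# Delete activity stream entries older than 30 days, 1000 rows per batched DELETE
call_command('cleanup_activitystream', days=30, batch_size=1000)

# Dry run: only report what would be removed
call_command('cleanup_activitystream', days=30, dry_run=True)
```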
@@ -1,22 +1,22 @@
|
||||
from awx.main.models import HostMetric
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from awx.main.tasks.host_metrics import HostMetricTask
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Run soft-deleting of HostMetrics
|
||||
This command provides the cleanup task for the HostMetric model.
There are two modes, which run in the following order:
|
||||
- soft cleanup
|
||||
- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
|
||||
This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
|
||||
- - updates columns delete, deleted_counter and last_deleted
|
||||
- hard cleanup
|
||||
- - Permanently erase from the database all host metrics last automated 36 months ago or before.
|
||||
This operation happens after the soft deletion has finished.
|
||||
"""
|
||||
|
||||
help = 'Run soft-deleting of HostMetrics'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
|
||||
help = 'Run soft and hard-deletion of HostMetrics'
|
||||
|
||||
def handle(self, *args, **options):
|
||||
months_ago = options.get('months-ago') or None
|
||||
|
||||
if not months_ago:
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
|
||||
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
|
||||
|
||||
@@ -9,6 +9,7 @@ import re
|
||||
|
||||
|
||||
# Django
|
||||
from django.apps import apps
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction, connection
|
||||
from django.db.models import Min, Max
|
||||
@@ -17,10 +18,7 @@ from django.utils.timezone import now
|
||||
|
||||
# AWX
|
||||
from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification
|
||||
|
||||
|
||||
def unified_job_class_to_event_table_name(job_class):
|
||||
return f'main_{job_class().event_class.__name__.lower()}'
|
||||
from awx.main.utils import unified_job_class_to_event_table_name
|
||||
|
||||
|
||||
def partition_table_name(job_class, dt):
|
||||
@@ -152,7 +150,10 @@ class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument(
|
||||
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batch of X jobs. Defaults to 100000.'
|
||||
)
|
||||
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
|
||||
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
|
||||
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
|
||||
@@ -198,18 +199,58 @@ class Command(BaseCommand):
|
||||
delete_meta.delete_jobs()
|
||||
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
|
||||
|
||||
def _cascade_delete_job_events(self, model, pk_list):
|
||||
def has_unpartitioned_table(self, model):
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _delete_unpartitioned_table(self, model):
|
||||
"If the unpartitioned table is no longer necessary, it will drop the table"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
if not self.has_unpartitioned_table(model):
|
||||
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
|
||||
return
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
|
||||
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
|
||||
row = cursor.fetchone()
|
||||
last_created = row[0]
|
||||
|
||||
if last_created:
|
||||
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
|
||||
else:
|
||||
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
|
||||
|
||||
if (last_created is None) or (last_created < self.cutoff):
|
||||
self.logger.warning(
|
||||
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
|
||||
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
|
||||
)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')
|
||||
|
||||
def _delete_unpartitioned_events(self, model, pk_list):
|
||||
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
rel_name = model().event_parent_key
|
||||
|
||||
# Bail if the unpartitioned table does not exist anymore
|
||||
if not self.has_unpartitioned_table(model):
|
||||
return
|
||||
|
||||
# Table still exists, delete individual unpartitioned events
|
||||
if pk_list:
|
||||
with connection.cursor() as cursor:
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
|
||||
self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.')
|
||||
pk_list_csv = ','.join(map(str, pk_list))
|
||||
rel_name = model().event_parent_key
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});")
|
||||
|
||||
def cleanup_jobs(self):
|
||||
batch_size = 100000
|
||||
|
||||
# Hack to avoid doing N+1 queries as each item in the Job query set does
|
||||
# an individual query to get the underlying UnifiedJob.
|
||||
Job.polymorphic_super_sub_accessors_replaced = True
|
||||
@@ -224,13 +265,14 @@ class Command(BaseCommand):
|
||||
deleted = 0
|
||||
info = qs.aggregate(min=Min('id'), max=Max('id'))
|
||||
if info['min'] is not None:
|
||||
for start in range(info['min'], info['max'] + 1, batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
|
||||
for start in range(info['min'], info['max'] + 1, self.batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size)
|
||||
pk_list = qs_batch.values_list('id', flat=True)
|
||||
|
||||
_, results = qs_batch.delete()
|
||||
deleted += results['main.Job']
|
||||
self._cascade_delete_job_events(Job, pk_list)
|
||||
# Avoid dropping the job event table in case we have interacted with it already
|
||||
self._delete_unpartitioned_events(Job, pk_list)
|
||||
|
||||
return skipped, deleted
|
||||
|
||||
@@ -253,7 +295,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(AdHocCommand, pk_list)
|
||||
self._delete_unpartitioned_events(AdHocCommand, pk_list)
|
||||
|
||||
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -281,7 +323,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(ProjectUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(ProjectUpdate, pk_list)
|
||||
|
||||
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -309,7 +351,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(InventoryUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(InventoryUpdate, pk_list)
|
||||
|
||||
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -333,7 +375,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(SystemJob, pk_list)
|
||||
self._delete_unpartitioned_events(SystemJob, pk_list)
|
||||
|
||||
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -378,12 +420,12 @@ class Command(BaseCommand):
|
||||
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
self.init_logging()
|
||||
self.days = int(options.get('days', 90))
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.batch_size = int(options.get('batch_size', 100000))
|
||||
try:
|
||||
self.cutoff = now() - datetime.timedelta(days=self.days)
|
||||
except OverflowError:
|
||||
@@ -405,19 +447,29 @@ class Command(BaseCommand):
|
||||
del s.receivers[:]
|
||||
s.sender_receivers_cache.clear()
|
||||
|
||||
for m in model_names:
|
||||
if m not in models_to_cleanup:
|
||||
continue
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
# Deleting unpartitioned tables cannot be done in same transaction as updates to related tables
|
||||
if not self.dry_run:
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
|
||||
unified_job_class = apps.get_model('main', unified_job_class_name)
|
||||
try:
|
||||
unified_job_class().event_class
|
||||
except (NotImplementedError, AttributeError):
|
||||
continue # no need to run this for models without events
|
||||
self._delete_unpartitioned_table(unified_job_class)
|
||||
|
||||
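`cleanup_jobs` gains the same `--batch-size` treatment and now drops the `_unpartitioned_*` event tables in a separate transaction once nothing in them is newer than the cutoff. Illustrative invocations (values are examples):

```python
from django.core.management import call_command

# Remove jobs and their events older than 90 days, 50000 jobs per batch
call_command('cleanup_jobs', days=90, batch_size=50000)

# Preview only; nothing is deleted
call_command('cleanup_jobs', days=90, dry_run=True)
```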
Some files were not shown because too many files have changed in this diff.