Mirror of https://github.com/ansible/awx.git (synced 2026-02-06 03:54:44 -03:30)

Compare commits: 20.1.0...test_cyber (1,022 commits)
*(Commit table: 1,022 commits, c437a37be7 through dfde30798e; the author, date, and message cells were not captured in this snapshot.)*
(file name not captured): 3 lines reduced to 2
@@ -1,3 +1,2 @@
awx/ui/node_modules
Dockerfile
.git
.github/BOTMETA.yml (vendored): 17 lines, file deleted
@@ -1,17 +0,0 @@
---
files:
  awx/ui/:
    labels: component:ui
    maintainers: $team_ui
  awx/api/:
    labels: component:api
    maintainers: $team_api
  awx/main/:
    labels: component:api
    maintainers: $team_api
  installer/:
    labels: component:installer

macros:
  team_api: wwitzel3 matburt chrismeyersfsu cchurch AlanCoding ryanpetrello rooftopcellist
  team_ui: jlmitch5 jaredevantabor mabashian marshmalien benthomasson jakemcdermott
.github/CODEOWNERS (vendored): 1 line, file deleted
@@ -1 +0,0 @@
workflows/e2e_test.yml @tiagodread @shanemcd @jakemcdermott
.github/ISSUE_TEMPLATE.md (vendored): 26 lines changed
@@ -6,17 +6,37 @@ practices regarding responsible disclosure, see
https://www.ansible.com/security
-->

+<!--
+PLEASE DO NOT USE A BLANK TEMPLATE IN THE AWX REPO.
+This is a legacy template used for internal testing ONLY.
+
+Any issues opened with this template will be automatically closed.
+
+Instead, use the bug or feature request templates.
+-->

##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
- Feature Idea
- Documentation
- Breaking Change
- New or Enhanced Feature
- Bug, Docs Fix or other nominal change

##### COMPONENT NAME
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
- API
- UI
- Collection
- Docs
- CLI
- Other

##### SUMMARY
<!-- Briefly describe the problem. -->
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 17 lines changed
@@ -1,13 +1,12 @@
---
name: Bug Report
-description: Create a report to help us improve
+description: "🐞 Create a report to help us improve"
body:
  - type: markdown
    attributes:
      value: |
-        Issues are for **concrete, actionable bugs and feature requests** only. For debugging help or technical support, please use:
-        - The #ansible-awx channel on irc.libera.chat
-        - The awx project mailing list, https://groups.google.com/forum/#!forum/awx-project
+        Bug Report issues are for **concrete, actionable bugs** only.
+        For debugging help or technical support, please see the [Get Involved section of our README](https://github.com/ansible/awx#get-involved)

  - type: checkboxes
    id: terms

@@ -24,7 +23,7 @@ body:
  - type: textarea
    id: summary
    attributes:
-      label: Summary
+      label: Bug Summary
      description: Briefly describe the problem.
    validations:
      required: false

@@ -45,6 +44,9 @@ body:
        - label: UI
        - label: API
+        - label: Docs
+        - label: Collection
+        - label: CLI
        - label: Other

@@ -57,9 +59,8 @@ body:
        - minikube
        - openshift
        - minishift
-        - docker on linux
-        - docker for mac
-        - boot2docker
+        - docker development environment
+        - N/A
    validations:
      required: true
.github/ISSUE_TEMPLATE/config.yml (vendored): new file, 12 lines
@@ -0,0 +1,12 @@
---
blank_issues_enabled: true
contact_links:
  - name: For debugging help or technical support
    url: https://github.com/ansible/awx#get-involved
    about: For general debugging or technical support please see the Get Involved section of our readme.
  - name: 📝 Ansible Code of Conduct
    url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
    about: AWX uses the Ansible Code of Conduct; ❤ Be nice to other members of the community. ☮ Behave.
  - name: 💼 For Enterprise
    url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser
    about: Red Hat offers support for the Ansible Automation Platform
.github/ISSUE_TEMPLATE/feature_request.md (vendored): 17 lines, file deleted
@@ -1,17 +0,0 @@
---
name: "✨ Feature request"
about: Suggest an idea for this project

---
<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:

- http://web.libera.chat/?channels=#ansible-awx
- https://groups.google.com/forum/#!forum/awx-project

We have to limit this because of limited volunteer time to respond to issues! -->

##### ISSUE TYPE
- Feature Idea

##### SUMMARY
<!-- Briefly describe the problem or desired enhancement. -->
.github/ISSUE_TEMPLATE/feature_request.yml (vendored): new file, 88 lines
@@ -0,0 +1,88 @@
---
name: ✨ Feature request
description: Suggest an idea for this project
body:
  - type: markdown
    attributes:
      value: |
        Feature Request issues are for **feature requests** only.
        For debugging help or technical support, please see the [Get Involved section of our README](https://github.com/ansible/awx#get-involved)

  - type: checkboxes
    id: terms
    attributes:
      label: Please confirm the following
      options:
        - label: I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
          required: true
        - label: I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
          required: true
        - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
          required: true

  - type: dropdown
    id: feature-type
    attributes:
      label: Feature type
      description: >-
        What kind of feature is this?
      multiple: false
      options:
        - "New Feature"
        - "Enhancement to Existing Feature"
    validations:
      required: true

  - type: textarea
    id: summary
    attributes:
      label: Feature Summary
      description: Briefly describe the desired enhancement.
    validations:
      required: true

  - type: checkboxes
    id: components
    attributes:
      label: Select the relevant components
      options:
        - label: UI
        - label: API
        - label: Docs
        - label: Collection
        - label: CLI
        - label: Other

  - type: textarea
    id: steps-to-reproduce
    attributes:
      label: Steps to reproduce
      description: >-
        Describe the necessary steps to understand the scenario of the requested enhancement.
        Include all the steps that will help the developer and QE team understand what you are requesting.
    validations:
      required: true

  - type: textarea
    id: current-results
    attributes:
      label: Current results
      description: What is currently happening in the scenario?
    validations:
      required: true

  - type: textarea
    id: suggested-results
    attributes:
      label: Suggested feature result
      description: What is the result this new feature will bring?
    validations:
      required: true

  - type: textarea
    id: additional-information
    attributes:
      label: Additional information
      description: Please provide any other information you think is relevant that could help us understand your feature request.
    validations:
      required: false
(file header not captured: a "🔥 Security bug report" issue template): 9 lines, file deleted
@@ -1,9 +0,0 @@
---
name: "\U0001F525 Security bug report"
about: How to report security vulnerabilities

---

For all security related bugs, email security@ansible.com instead of using this issue tracker and you will receive a prompt response.

For more information on the Ansible community's practices regarding responsible disclosure, see https://www.ansible.com/security
.github/LABEL_MAP.md (vendored): 9 lines, file deleted
@@ -1,9 +0,0 @@
Bug Report: type:bug
Bugfix Pull Request: type:bug
Feature Request: type:enhancement
Feature Pull Request: type:enhancement
UI: component:ui
API: component:api
Installer: component:installer
Docs Pull Request: component:docs
Documentation: component:docs
.github/PULL_REQUEST_TEMPLATE.md (vendored): 17 lines changed
@@ -1,11 +1,3 @@
-<!--- changelog-entry
-# Fill in 'msg' below to have an entry automatically added to the next release changelog.
-# Leaving 'msg' blank will not generate a changelog entry for this PR.
-# Please ensure this is a simple (and readable) one-line string.
----
-msg: ""
--->
-
##### SUMMARY
<!--- Describe the change, including rationale and design decisions -->

@@ -17,15 +9,18 @@ the change does.

##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
-- Feature Pull Request
-- Bugfix Pull Request
-- Docs Pull Request
+- Breaking Change
+- New or Enhanced Feature
+- Bug, Docs Fix or other nominal change

##### COMPONENT NAME
<!--- Name of the module/plugin/module/task -->
- API
- UI
- Collection
- CLI
- Docs
- Other

##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
.github/dependabot.yml (vendored): new file, 19 lines
@@ -0,0 +1,19 @@
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/awx/ui"
    schedule:
      interval: "monthly"
    open-pull-requests-limit: 5
    allow:
      - dependency-type: "production"
    reviewers:
      - "AlexSCorey"
      - "keithjgrant"
      - "kialam"
      - "mabashian"
      - "marshmalien"
    labels:
      - "component:ui"
      - "dependencies"
    target-branch: "devel"
.github/issue_labeler.yml (vendored): 8 lines changed
@@ -1,12 +1,16 @@
needs_triage:
  - '.*'
"type:bug":
  - "Please confirm the following"
  - "Bug Summary"
"type:enhancement":
  - "Feature Idea"
  - "Feature Summary"
"component:ui":
  - "\\[X\\] UI"
"component:api":
  - "\\[X\\] API"
"component:docs":
  - "\\[X\\] Docs"
"component:awx_collection":
  - "\\[X\\] Collection"
"component:cli":
  - "\\[X\\] awxkit"
.github/pr_labeler.yml (vendored): 17 lines changed
@@ -1,14 +1,19 @@
"component:api":
-  - any: ['awx/**/*', '!awx/ui/*']
+  - any: ["awx/**/*", "!awx/ui/**"]

"component:ui":
-  - any: ['awx/ui/**/*']
+  - any: ["awx/ui/**/*"]

"component:docs":
-  - any: ['docs/**/*']
+  - any: ["docs/**/*"]

"component:cli":
-  - any: ['awxkit/**/*']
+  - any: ["awxkit/**/*"]

-"component:collection":
-  - any: ['awx_collection/**/*']
+"component:awx_collection":
+  - any: ["awx_collection/**/*"]

+"dependencies":
+  - any: ["awx/ui/package.json"]
+  - any: ["awx/requirements/*.txt"]
+  - any: ["awx/requirements/requirements.in"]
.github/triage_replies.md (vendored): new file, 114 lines
@@ -0,0 +1,114 @@

## General
- For the roundup of all the different mailing lists available from AWX, Ansible, and beyond, visit: https://docs.ansible.com/ansible/latest/community/communication.html
- Hello, we think your question is answered in our FAQ. Does this: https://www.ansible.com/products/awx-project/faq cover your question?
- You can find the latest documentation here: https://docs.ansible.com/automation-controller/latest/html/userguide/index.html

## PRs/Issues

### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information on ways to connect with us.

### Denied Submission
- Hi! \
\
Thanks very much for your submission to AWX. It means a lot to us that you have taken time to contribute. \
\
At this time we do not want to merge this PR. Our reasons for this are: \
\
(A) INSERT ITEM HERE \
\
Please know that we are always up for discussion but this project is very active. Because of this, we're unlikely to see comments made on closed PRs, and we lock them after some time. If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: \
\
https://github.com/ansible/awx/#get-involved \
\
In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it is not always necessary. \
\
Thank you once again for this and your interest in AWX!

### No Progress Issue
- Hi! \
\
Thank you very much for this issue. It means a lot to us that you have taken time to contribute by opening this report. \
\
On this issue, comments were added, but it has been some time since then without a response. At this time we are closing this issue. If you get time to address the comments, we can reopen the issue; just contact us by using any of the communication methods listed in the page below: \
\
https://github.com/ansible/awx/#get-involved \
\
Thank you once again for this and your interest in AWX!

### No Progress PR
- Hi! \
\
Thank you very much for your submission to AWX. It means a lot to us that you have taken time to contribute. \
\
On this PR, changes were requested, but it has been some time since then. We think this PR has merit, but without the requested changes we are unable to merge it. At this time we are closing your PR. If you get time to address the changes, you are welcome to open another PR, or we can reopen this PR upon request if you contact us by using any of the communication methods listed in the page below: \
\
https://github.com/ansible/awx/#get-involved \
\
Thank you once again for this and your interest in AWX!

## Common

### Give us more info
- Hello, we'd love to help, but we need a little more information about the problem you're having. Screenshots, log outputs, or any reproducers would be very helpful.

### Code of Conduct
- Hello. Please keep in mind that Ansible adheres to a Code of Conduct in its community spaces. The spirit of the code of conduct is to be kind, and this is your friendly reminder to be so. Please see the full code of conduct here if you have questions: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html

### EE Contents / Community General
- Hello. The awx-ee contains the collections and dependencies needed for supported AWX features to function. Anything beyond that (like the community.general package) will require you to build your own EE. For information on how to do that, see https://ansible-builder.readthedocs.io/en/stable/ \
\
The Ansible Community is looking at building an EE that corresponds to all of the collections inside the ansible package. That may help you if and when it happens; see https://github.com/ansible-community/community-topics/issues/31 for details.

## Mailing List Triage

### Create an issue
- Hello, thanks for reaching out on list. We think this merits an issue on our Github, https://github.com/ansible/awx/issues. If you could open an issue up on Github, it will get tagged and integrated into our planning and workflow. All future work will be tracked there. Issues should include as much information as possible, including screenshots, log outputs, or any reproducers.

### Create a Pull Request
- Hello, we think your idea is good! Please consider contributing a PR for this following our contributing guidelines: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md

### Receptor
- You can find the receptor docs here: https://receptor.readthedocs.io/en/latest/
- Hello, your issue seems related to receptor. Could you please open an issue in the receptor repository? https://github.com/ansible/receptor. Thanks!

### Ansible Engine not AWX
- Hello, your question seems to be about Ansible development, not about AWX. Try asking on the Ansible-devel specific mailing list: https://groups.google.com/g/ansible-devel
- Hello, your question seems to be about using Ansible, not about AWX. https://groups.google.com/g/ansible-project is the best place to visit for user questions about Ansible. Thanks!

### Ansible Galaxy not AWX
- Hey there. That sounds like an FAQ question. Did this: https://www.ansible.com/products/awx-project/faq cover your question?

### Contributing Guidelines
- AWX: https://github.com/ansible/awx/blob/devel/CONTRIBUTING.md
- AWX-Operator: https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md

### Oracle AWX
We'd be happy to help if you can reproduce this with AWX, since we do not have Oracle's Linux Automation Manager. If you need help with this specific version of Oracle's Linux Automation Manager, you will need to contact Oracle for support.

### AWX Release
Subject: Announcing AWX Xa.Ya.za and AWX-Operator Xb.Yb.zb

- Hi all, \
\
We're happy to announce that the next release of AWX, version <b>`Xa.Ya.za`</b>, is now available! \
In addition, AWX Operator version <b>`Xb.Yb.zb`</b> has also been released! \
\
Please see the releases pages for more details: \
AWX: https://github.com/ansible/awx/releases/tag/Xa.Ya.za \
Operator: https://github.com/ansible/awx-operator/releases/tag/Xb.Yb.zb \
\
The AWX team.

## Try latest version
- Hello, this issue pertains to an older version of AWX. Try upgrading to the latest version and let us know if that resolves your issue.
.github/workflows/ci.yml (vendored): 9 lines changed
@@ -111,6 +111,15 @@ jobs:
          repository: ansible/awx-operator
          path: awx-operator

+      - name: Get python version from Makefile
+        working-directory: awx
+        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
+
+      - name: Install python ${{ env.py_version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ env.py_version }}
+
      - name: Install playbook dependencies
        run: |
          python3 -m pip install docker
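A side note on the added "Get python version from Makefile" step: appending `key=value` lines to the file behind `$GITHUB_ENV` is how one GitHub Actions step exports variables to later steps. A minimal sketch of the mechanism (the version value here is illustrative, not taken from the Makefile):

```bash
# Runs inside any GitHub Actions 'run:' step; the runner provides $GITHUB_ENV.
echo "py_version=3.9" >> "$GITHUB_ENV"
# Subsequent steps (not the current one) can then read ${{ env.py_version }}.
```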
.github/workflows/feature_branch_deletion.yml (vendored): new file, 24 lines
@@ -0,0 +1,24 @@
---
name: Feature branch deletion cleanup
on:
  delete:
    branches:
      - feature_**
jobs:
  push:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Delete API Schema
        env:
          AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
          AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
          AWS_REGION: 'us-east-1'
        run: |
          ansible localhost -c local -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
          ansible localhost -c local -m aws_s3 \
            -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"
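For reference, the `aws_s3` deletion above maps to a one-liner with the AWS CLI. This is just an illustrative equivalent, not part of the workflow; the branch name below is made up:

```bash
# Assumes AWS credentials are already present in the environment.
BRANCH="feature_example"   # the workflow derives this from ${GITHUB_REF##*/}
aws s3 rm "s3://awx-public-ci-files/${BRANCH}/schema.json"
```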
.github/workflows/label_issue.yml (vendored): 31 lines changed
@@ -19,3 +19,34 @@ jobs:
      not-before: 2021-12-07T07:00:00Z
      configuration-path: .github/issue_labeler.yml
      enable-versioned-regex: 0

+  community:
+    runs-on: ubuntu-latest
+    name: Label Issue - Community
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v4
+      - name: Install python requests
+        run: pip install requests
+      - name: Check if user is a member of Ansible org
+        uses: jannekem/run-python-script-action@v1
+        id: check_user
+        with:
+          script: |
+            import requests
+            headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
+            response = requests.get('${{ fromJson(toJson(github.event.issue.user.url)) }}/orgs?per_page=100', headers=headers)
+            is_member = False
+            for org in response.json():
+                if org['login'] == 'ansible':
+                    is_member = True
+            if is_member:
+                print("User is member")
+            else:
+                print("User is community")
+      - name: Add community label if not a member
+        if: contains(steps.check_user.outputs.stdout, 'community')
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "community"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/label_pr.yml (vendored): 31 lines changed
@@ -18,3 +18,34 @@ jobs:
    with:
      repo-token: "${{ secrets.GITHUB_TOKEN }}"
      configuration-path: .github/pr_labeler.yml

+  community:
+    runs-on: ubuntu-latest
+    name: Label PR - Community
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v4
+      - name: Install python requests
+        run: pip install requests
+      - name: Check if user is a member of Ansible org
+        uses: jannekem/run-python-script-action@v1
+        id: check_user
+        with:
+          script: |
+            import requests
+            headers = {'Accept': 'application/vnd.github+json', 'Authorization': 'token ${{ secrets.GITHUB_TOKEN }}'}
+            response = requests.get('${{ fromJson(toJson(github.event.pull_request.user.url)) }}/orgs?per_page=100', headers=headers)
+            is_member = False
+            for org in response.json():
+                if org['login'] == 'ansible':
+                    is_member = True
+            if is_member:
+                print("User is member")
+            else:
+                print("User is community")
+      - name: Add community label if not a member
+        if: contains(steps.check_user.outputs.stdout, 'community')
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "community"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
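Both labeler jobs embed the same Python script: they ask the GitHub API which organizations the issue or PR author belongs to, and fall back to the `community` label when `ansible` is not among them. A rough command-line equivalent for manual checks (the username is illustrative; an unauthenticated call only sees public org memberships):

```bash
user="octocat"   # illustrative username
if curl -s "https://api.github.com/users/${user}/orgs?per_page=100" \
    | grep -q '"login": "ansible"'; then
  echo "User is member"
else
  echo "User is community"   # the workflow would add the 'community' label here
fi
```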
.github/workflows/pr_body_check.yml (vendored): new file, 37 lines
@@ -0,0 +1,37 @@
---
name: PR Check
env:
  BRANCH: ${{ github.base_ref || 'devel' }}
on:
  pull_request:
    types: [opened, edited, reopened, synchronize]
jobs:
  pr-check:
    name: Scan PR description for semantic versioning keywords
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Check for each of the lines
        env:
          PR_BODY: ${{ github.event.pull_request.body }}
        run: |
          echo $PR_BODY | grep "Bug, Docs Fix or other nominal change" > Z
          echo $PR_BODY | grep "New or Enhanced Feature" > Y
          echo $PR_BODY | grep "Breaking Change" > X
          exit 0
        # We exit 0 and set the shell to prevent the returns from the greps from failing this step
        # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
        shell: bash {0}

      - name: Check for exactly one item
        run: |
          if [ $(cat X Y Z | wc -l) != 1 ] ; then
            echo "The PR body must contain exactly one of [ 'Bug, Docs Fix or other nominal change', 'New or Enhanced Feature', 'Breaking Change' ]"
            echo "We counted $(cat X Y Z | wc -l)"
            echo "See the default PR body for examples"
            exit 255;
          else
            exit 0;
          fi
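The two steps split the work: the first greps each keyword into a file (with `shell: bash {0}` so a non-matching grep's exit status cannot fail the step), and the second requires the three files to hold exactly one matching line in total. A local dry run of the same logic might look like this (the sample body is made up):

```bash
PR_BODY="- New or Enhanced Feature"   # sample body; exactly one keyword present
echo "$PR_BODY" | grep "Bug, Docs Fix or other nominal change" > Z
echo "$PR_BODY" | grep "New or Enhanced Feature" > Y
echo "$PR_BODY" | grep "Breaking Change" > X
if [ "$(cat X Y Z | wc -l)" != 1 ]; then
  echo "PR body must contain exactly one of the three keywords"
else
  echo "ok"
fi
```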
.github/workflows/promote.yml (vendored): 6 lines changed
@@ -21,7 +21,7 @@ jobs:

      - name: Install dependencies
        run: |
-          python${{ env.py_version }} -m pip install wheel twine
+          python${{ env.py_version }} -m pip install wheel twine setuptools-scm

      - name: Set official collection namespace
        run: echo collection_namespace=awx >> $GITHUB_ENV

@@ -33,7 +33,7 @@ jobs:

      - name: Build collection and publish to galaxy
        run: |
-          COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
+          COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
          ansible-galaxy collection publish \
            --token=${{ secrets.GALAXY_TOKEN }} \
            awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz

@@ -70,4 +70,4 @@ jobs:
          docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
          docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
          docker push quay.io/${{ github.repository }}:latest
.github/workflows/stage.yml (vendored): 13 lines changed
@@ -100,23 +100,10 @@ jobs:
          AWX_TEST_IMAGE: ${{ github.repository }}
          AWX_TEST_VERSION: ${{ github.event.inputs.version }}

-      - name: Generate changelog
-        uses: shanemcd/simple-changelog-generator@v1
-        id: changelog
-        with:
-          repo: "${{ github.repository }}"
-
-      - name: Write changelog to file
-        run: |
-          cat << 'EOF' > /tmp/awx-changelog
-          ${{ steps.changelog.outputs.changelog }}
-          EOF
-
      - name: Create draft release for AWX
        working-directory: awx
        run: |
          ansible-playbook -v tools/ansible/stage.yml \
-            -e changelog_path=/tmp/awx-changelog \
            -e repo=${{ github.repository }} \
            -e awx_image=ghcr.io/${{ github.repository }} \
            -e version=${{ github.event.inputs.version }} \
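The "Write changelog to file" step being removed here used a quoted heredoc (`<< 'EOF'`), which is worth noting: quoting the delimiter stops the shell from expanding `$`-variables or backticks that might appear in the generated changelog text. A small standalone illustration of that behavior (file names and variable are ours):

```bash
msg='price is $100'
cat << 'EOF' > /tmp/quoted       # quoted delimiter: body is kept literal
$msg
EOF
cat << EOF > /tmp/unquoted       # unquoted delimiter: $msg is expanded
$msg
EOF
cat /tmp/quoted     # prints: $msg
cat /tmp/unquoted   # prints: price is $100
```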
.github/workflows/update_dependabot_prs.yml (vendored): new file, 29 lines
@@ -0,0 +1,29 @@
---
name: Dependency Pr Update
on:
  pull_request:
    types: [labeled, opened, reopened]

jobs:
  pr-check:
    name: Update Dependabot Prs
    if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
    runs-on: ubuntu-latest

    steps:
      - name: Checkout branch
        uses: actions/checkout@v3

      - name: Update PR Body
        env:
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
          OWNER: ${{ github.repository_owner }}
          REPO: ${{ github.event.repository.name }}
          PR: ${{github.event.pull_request.number}}
          PR_BODY: ${{github.event.pull_request.body}}
        run: |
          gh pr checkout ${{ env.PR }}
          echo "${{ env.PR_BODY }}" > my_pr_body.txt
          echo "" >> my_pr_body.txt
          echo "Bug, Docs Fix or other nominal change" >> my_pr_body.txt
          gh pr edit ${{env.PR}} --body-file my_pr_body.txt
.github/workflows/upload_schema.yml (vendored): 1 line changed
@@ -5,6 +5,7 @@ on:
    branches:
      - devel
      - release_**
+      - feature_**
jobs:
  push:
    runs-on: ubuntu-latest
.gitignore (vendored): 1 line changed
@@ -38,7 +38,6 @@ awx/ui/build
awx/ui/.env.local
awx/ui/instrumented
rsyslog.pid
tools/prometheus/data
tools/docker-compose/ansible/awx_dump.sql
tools/docker-compose/Dockerfile
tools/docker-compose/_build
(file header not captured: a yamllint configuration): 2 lines added
@@ -8,6 +8,8 @@ ignore: |
  awx/ui/test/e2e/tests/smoke-vars.yml
  awx/ui/node_modules
  tools/docker-compose/_sources
+  # django template files
+  awx/api/templates/instance_install_bundle/**

extends: default
(file header not captured: the project's contributing guide)
@@ -19,16 +19,17 @@ Have questions about this document or anything not covered here? Come chat with
- [Purging containers and images](#purging-containers-and-images)
- [Pre commit hooks](#pre-commit-hooks)
- [What should I work on?](#what-should-i-work-on)
+- [Translations](#translations)
- [Submitting Pull Requests](#submitting-pull-requests)
- [PR Checks run by Zuul](#pr-checks-run-by-zuul)
- [Reporting Issues](#reporting-issues)
- [Getting Help](#getting-help)

## Things to know prior to submitting code

- All code submissions are done through pull requests against the `devel` branch.
- You must use `git commit --signoff` for any commit to be merged, and agree that usage of `--signoff` constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
-- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt
+- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see the [git push docs](https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt). An example follows this hunk.
- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.libera.chat, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
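To make the `--force-with-lease` advice concrete, a typical rebase-then-push sequence on a shared branch looks like this (branch names are illustrative):

```bash
git fetch origin
git rebase origin/devel
# Refuses the push if someone else pushed to my-branch since your last fetch:
git push --force-with-lease origin my-branch
```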
@@ -42,8 +43,7 @@ The AWX development environment workflow and toolchain uses Docker and the docke

Prior to starting the development services, you'll need `docker` and `docker-compose`. On Linux, you can generally find these in your distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases.

-For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows)
-respectively.
+For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows) respectively.

For Linux platforms, refer to the following from Docker:
@@ -79,17 +79,13 @@ See the [README.md](./tools/docker-compose/README.md) for docs on how to build t

### Building API Documentation

-AWX includes support for building [Swagger/OpenAPI
-documentation](https://swagger.io). To build the documentation locally, run:
+AWX includes support for building [Swagger/OpenAPI documentation](https://swagger.io). To build the documentation locally, run:

```bash
(container)/awx_devel$ make swagger
```

-This will write a file named `swagger.json` that contains the API specification
-in OpenAPI format. A variety of online tools are available for translating
-this data into more consumable formats (such as HTML). http://editor.swagger.io
-is an example of one such service.
+This will write a file named `swagger.json` that contains the API specification in OpenAPI format. A variety of online tools are available for translating this data into more consumable formats (such as HTML). http://editor.swagger.io is an example of one such service.
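If you just want a quick local sanity check of the generated file before handing it to one of those services, something like the following works (a sketch; it assumes `swagger.json` is in the current directory and, for the second command, that `jq` is installed):

```bash
# Validate that the output is well-formed JSON:
python3 -m json.tool swagger.json > /dev/null && echo "valid JSON"
# With jq installed, count the documented API paths:
jq '.paths | length' swagger.json
```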
### Accessing the AWX web interface
@@ -115,20 +111,30 @@ While you can use environment variables to skip the pre-commit hooks GitHub will

## What should I work on?

We have a ["good first issue" label](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) we put on some issues that might be a good starting point for new contributors.

-Fixing bugs and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
+For feature work, take a look at the current [Enhancements](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3Atype%3Aenhancement).
+
+If an enhancement already has someone assigned to it, that person is responsible for working on it. If you feel like you could contribute, reach out to that person.
+
+Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start. For extra information on debugging tools, see [Debugging](./docs/debugging/).
+
+**NOTES**
+
+> Issue assignment will only be done for maintainers of the project. If you decide to work on an issue, please feel free to add a comment in the issue to let others know that you are working on it; but know that we will accept the first pull request from whomever is able to fix an issue. Once your PR is accepted we can add you as an assignee to an issue upon request.

**NOTE**

> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.libera.chat, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).

**NOTE**

> If you're planning to develop features or fixes for the UI, please review the [UI Developer doc](./awx/ui/README.md).

+### Translations
+
+At this time we do not accept PRs for adding additional language translations, as we have an automated process for generating our translations. This is because translations require constant care as new strings are added and changed in the code base. Because of this, the .po files are overwritten during every translation release cycle. We also can't support a lot of translations on AWX, as it's an open source project and each language adds time and cost to maintain. If you would like to see AWX translated into a new language, please create an issue and ask others you know to upvote the issue. Our translation team will review the needs of the community and see what they can do around supporting additional languages.
+
+If you find an issue with an existing translation, please see the [Reporting Issues](#reporting-issues) section to open an issue and our translation team will work with you on a resolution.

## Submitting Pull Requests

Fixes and Features for AWX will go through the Github pull request process. Submit your pull request (PR) against the `devel` branch.
@@ -152,28 +158,14 @@ We like to keep our commit history clean, and will require resubmission of pull
|
||||
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
|
||||
|
||||
## PR Checks run by Zuul
|
||||
|
||||
Zuul jobs for awx are defined in the [zuul-jobs](https://github.com/ansible/zuul-jobs) repo.
|
||||
|
||||
Zuul runs the following checks that must pass:
|
||||
|
||||
1. `tox-awx-api-lint`
|
||||
2. `tox-awx-ui-lint`
|
||||
3. `tox-awx-api`
|
||||
4. `tox-awx-ui`
|
||||
5. `tox-awx-swagger`
|
||||
|
||||
Zuul runs the following checks that are non-voting (can not pass but serve to inform PR reviewers):
|
||||
|
||||
1. `tox-awx-detect-schema-change`
|
||||
This check generates the schema and diffs it against a reference copy of the `devel` version of the schema.
|
||||
Reviewers should inspect the `job-output.txt.gz` related to the check if their is a failure (grep for `diff -u -b` to find beginning of diff).
|
||||
If the schema change is expected and makes sense in relation to the changes made by the PR, then you are good to go!
|
||||
If not, the schema changes should be fixed, but this decision must be enforced by reviewers.
|
||||
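A minimal sketch of that inspection, assuming you have downloaded the `job-output.txt.gz` artifact from the failed check (the file name comes from the check itself; the exact download location varies per job):

```bash
# Locate the start of the schema diff inside the compressed job output
zgrep -n -- 'diff -u -b' job-output.txt.gz

# Then page through the output from around that line number
zless job-output.txt.gz
```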
When your PR is initially submitted, the checks will not be run until a maintainer allows them to be. Once a maintainer has done a quick review of your work, the PR will have the linter and unit tests run against it via GitHub Actions, and the status reported in the PR.

## Reporting Issues

We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issue, we ask that you please view our [Issues guide](./ISSUES.md).

## Getting Help

If you require additional assistance, please reach out to us at `#ansible-awx` on irc.libera.chat, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).

For extra information on debugging tools, see [Debugging](./docs/debugging/).
@@ -3,7 +3,7 @@ recursive-include awx *.po
recursive-include awx *.mo
recursive-include awx/static *
recursive-include awx/templates *.html
recursive-include awx/api/templates *.md *.html
recursive-include awx/api/templates *.md *.html *.yml
recursive-include awx/ui/build *.html
recursive-include awx/ui/build *
recursive-include awx/playbooks *.yml
Makefile
@@ -5,8 +5,8 @@ NPM_BIN ?= npm
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION := $(shell $(PYTHON) setup.py --version)
COLLECTION_VERSION := $(shell $(PYTHON) setup.py --version | cut -d . -f 1-3)
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
COLLECTION_VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)

# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -15,6 +15,12 @@ MAIN_NODE_TYPE ?= hybrid
KEYCLOAK ?= false
# If set to true docker-compose will also start an ldap instance
LDAP ?= false
# If set to true docker-compose will also start a splunk instance
SPLUNK ?= false
# If set to true docker-compose will also start a prometheus instance
PROMETHEUS ?= false
# If set to true docker-compose will also start a grafana instance
GRAFANA ?= false

VENV_BASE ?= /var/lib/awx/venv

@@ -43,7 +49,7 @@ I18N_FLAG_FILE = .i18n_built
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
    develop refresh adduser migrate dbchange \
    receiver test test_unit test_coverage coverage_html \
    dev_build release_build sdist \
    sdist \
    ui-release ui-devel \
    VERSION PYTHON_VERSION docker-compose-sources \
    .git/hooks/pre-commit
@@ -66,7 +72,7 @@ clean-languages:
    rm -f $(I18N_FLAG_FILE)
    find ./awx/locale/ -type f -regex ".*\.mo$" -delete

# Remove temporary build files, compiled Python files.
## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist
    rm -rf awx/public
    rm -rf awx/lib/site-packages
@@ -79,6 +85,7 @@ clean: clean-ui clean-api clean-awxkit clean-dist

clean-api:
    rm -rf build $(NAME)-$(VERSION) *.egg-info
    rm -rf .tox
    find . -type f -regex ".*\.py[co]$$" -delete
    find . -type d -name "__pycache__" -delete
    rm -f awx/awx_test.sqlite3*
@@ -88,7 +95,7 @@ clean-api:
clean-awxkit:
    rm -rf awxkit/*.egg-info awxkit/.tox awxkit/build/*

# convenience target to assert environment variables are defined
## convenience target to assert environment variables are defined
guard-%:
    @if [ "$${$*}" = "" ]; then \
        echo "The required environment variable '$*' is not set"; \
@@ -111,7 +118,7 @@ virtualenv_awx:
        fi; \
    fi

# Install third-party requirements needed for AWX's environment.
## Install third-party requirements needed for AWX's environment.
# this does not use system site packages intentionally
requirements_awx: virtualenv_awx
    if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -130,7 +137,7 @@ requirements_dev: requirements_awx requirements_awx_dev

requirements_test: requirements

# "Install" awx package in development mode.
## "Install" awx package in development mode.
develop:
    @if [ "$(VIRTUAL_ENV)" ]; then \
        pip uninstall -y awx; \
@@ -147,21 +154,21 @@ version_file:
    fi; \
    $(PYTHON) -c "import awx; print(awx.__version__)" > /var/lib/awx/.awx_version; \

# Refresh development environment after pulling new code.
## Refresh development environment after pulling new code.
refresh: clean requirements_dev version_file develop migrate

# Create Django superuser.
## Create Django superuser.
adduser:
    $(MANAGEMENT_COMMAND) createsuperuser

# Create database tables and apply any new migrations.
## Create database tables and apply any new migrations.
migrate:
    if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    $(MANAGEMENT_COMMAND) migrate --noinput

# Run after making changes to the models to create a new migration.
## Run after making changes to the models to create a new migration.
dbchange:
    $(MANAGEMENT_COMMAND) makemigrations

@@ -175,9 +182,9 @@ collectstatic:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    mkdir -p awx/public/static && $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
    $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1

UWSGI_DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:awx-dispatcher tower-processes:awx-receiver
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*

uwsgi: collectstatic
    @if [ "$(VENV_BASE)" ]; then \
@@ -192,12 +199,13 @@ uwsgi: collectstatic
        --processes=5 \
        --harakiri=120 --master \
        --no-orphans \
        --py-autoreload 1 \
        --max-requests=1000 \
        --stats /tmp/stats.socket \
        --lazy-apps \
        --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" \
        --hook-accepting1="exec: $(UWSGI_DEV_RELOAD_COMMAND)"
        --logformat "%(addr) %(method) %(uri) - %(proto) %(status)"

awx-autoreload:
    @/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"

daphne:
    @if [ "$(VENV_BASE)" ]; then \
@@ -211,7 +219,7 @@ wsbroadcast:
    fi; \
    $(PYTHON) manage.py run_wsbroadcast

# Run to start the background task dispatcher for development.
## Run to start the background task dispatcher for development.
dispatcher:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
@@ -219,7 +227,7 @@ dispatcher:
    $(PYTHON) manage.py run_dispatcher

# Run to start the zeromq callback receiver
## Run to start the zeromq callback receiver
receiver:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
@@ -266,12 +274,12 @@ api-lint:
    yamllint -s .

awx-link:
    [ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/setup.py egg_info_dev
    [ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
    cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link

TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
PYTEST_ARGS ?= -n auto
# Run all API unit tests.
## Run all API unit tests.
test:
    if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
@@ -285,6 +293,7 @@ COLLECTION_TEST_TARGET ?=
COLLECTION_PACKAGE ?= awx
COLLECTION_NAMESPACE ?= awx
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)
COLLECTION_TEMPLATE_VERSION ?= false

test_collection:
    rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
@@ -312,7 +321,7 @@ awx_collection_build: $(shell find awx_collection -type f)
        -e collection_package=$(COLLECTION_PACKAGE) \
        -e collection_namespace=$(COLLECTION_NAMESPACE) \
        -e collection_version=$(COLLECTION_VERSION) \
        -e '{"awx_template_version":false}'
        -e '{"awx_template_version": $(COLLECTION_TEMPLATE_VERSION)}'
    ansible-galaxy collection build awx_collection_build --force --output-path=awx_collection_build

build_collection: awx_collection_build
@@ -333,23 +342,24 @@ test_unit:
    fi; \
    py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit

# Run all API unit tests with coverage enabled.
## Run all API unit tests with coverage enabled.
test_coverage:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
    fi; \
    py.test --create-db --cov=awx --cov-report=xml --junitxml=./reports/junit.xml $(TEST_DIRS)

# Output test coverage as HTML (into htmlcov directory).
## Output test coverage as HTML (into htmlcov directory).
coverage_html:
    coverage html

# Run API unit tests across multiple Python/Django versions with Tox.
## Run API unit tests across multiple Python/Django versions with Tox.
test_tox:
    tox -v

# Make fake data

DATA_GEN_PRESET = ""
## Make fake data
bulk_data:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
@@ -368,24 +378,25 @@ clean-ui:
    rm -rf awx/ui/build
    rm -rf awx/ui/src/locales/_build
    rm -rf $(UI_BUILD_FLAG_FILE)
    # the collectstatic command doesn't like it if this dir doesn't exist.
    mkdir -p awx/ui/build/static

awx/ui/node_modules:
    NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn ci
    NODE_OPTIONS=--max-old-space-size=6144 $(NPM_BIN) --prefix awx/ui --loglevel warn --force ci

$(UI_BUILD_FLAG_FILE): awx/ui/node_modules
$(UI_BUILD_FLAG_FILE):
    $(MAKE) awx/ui/node_modules
    $(PYTHON) tools/scripts/compilemessages.py
    $(NPM_BIN) --prefix awx/ui --loglevel warn run compile-strings
    $(NPM_BIN) --prefix awx/ui --loglevel warn run build
    mkdir -p awx/public/static/css
    mkdir -p awx/public/static/js
    mkdir -p awx/public/static/media
    cp -r awx/ui/build/static/css/* awx/public/static/css
    cp -r awx/ui/build/static/js/* awx/public/static/js
    cp -r awx/ui/build/static/media/* awx/public/static/media
    mkdir -p /var/lib/awx/public/static/css
    mkdir -p /var/lib/awx/public/static/js
    mkdir -p /var/lib/awx/public/static/media
    cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
    cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
    cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
    touch $@

ui-release: $(UI_BUILD_FLAG_FILE)

ui-devel: awx/ui/node_modules
@@ -416,21 +427,13 @@ ui-test-general:
    $(NPM_BIN) run --prefix awx/ui pretest
    $(NPM_BIN) run --prefix awx/ui/ test-general --runInBand

# Build a pip-installable package into dist/ with a timestamped version number.
dev_build:
    $(PYTHON) setup.py dev_build

# Build a pip-installable package into dist/ with the release version number.
release_build:
    $(PYTHON) setup.py release_build

HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
endif
    $(PYTHON) setup.py $(SDIST_COMMAND)
    $(PYTHON) -m build -s
    ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz

sdist: dist/$(SDIST_TAR_FILE)
@@ -451,10 +454,16 @@ COMPOSE_OPTS ?=
CONTROL_PLANE_NODE_COUNT ?= 1
EXECUTION_NODE_COUNT ?= 2
MINIKUBE_CONTAINER_GROUP ?= false
MINIKUBE_SETUP ?= false # if false, run minikube separately
EXTRA_SOURCES_ANSIBLE_OPTS ?=

ifneq ($(ADMIN_PASSWORD),)
EXTRA_SOURCES_ANSIBLE_OPTS := -e admin_password=$(ADMIN_PASSWORD) $(EXTRA_SOURCES_ANSIBLE_OPTS)
endif

docker-compose-sources: .git/hooks/pre-commit
    @if [ $(MINIKUBE_CONTAINER_GROUP) = true ]; then\
        ansible-playbook -i tools/docker-compose/inventory tools/docker-compose-minikube/deploy.yml; \
        ansible-playbook -i tools/docker-compose/inventory -e minikube_setup=$(MINIKUBE_SETUP) tools/docker-compose-minikube/deploy.yml; \
    fi;

    ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/sources.yml \
@@ -465,7 +474,11 @@ docker-compose-sources: .git/hooks/pre-commit
        -e execution_node_count=$(EXECUTION_NODE_COUNT) \
        -e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
        -e enable_keycloak=$(KEYCLOAK) \
        -e enable_ldap=$(LDAP)
        -e enable_ldap=$(LDAP) \
        -e enable_splunk=$(SPLUNK) \
        -e enable_prometheus=$(PROMETHEUS) \
        -e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)

docker-compose: awx/projects docker-compose-sources
@@ -499,7 +512,7 @@ docker-compose-container-group-clean:
    fi
    rm -rf tools/docker-compose-minikube/_sources/

# Base development image build
## Base development image build
docker-compose-build:
    ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
    DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
@@ -513,20 +526,17 @@ docker-clean:
    fi

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
    docker volume rm tools_awx_db
    docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)

docker-refresh: docker-clean docker-compose

# Docker Development Environment with Elastic Stack Connected
## Docker Development Environment with Elastic Stack Connected
docker-compose-elk: awx/projects docker-compose-sources
    docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

docker-compose-cluster-elk: awx/projects docker-compose-sources
    docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

prometheus:
    docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml

docker-compose-container-group:
    MINIKUBE_CONTAINER_GROUP=true make docker-compose

@@ -557,26 +567,34 @@ Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
        -e template_dest=_build_kube_dev \
        -e receptor_image=$(RECEPTOR_IMAGE)

## Build awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-build: Dockerfile.kube-dev
    DOCKER_BUILDKIT=1 docker build -f Dockerfile.kube-dev \
        --build-arg BUILDKIT_INLINE_CACHE=1 \
        --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
        -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .

## Build awx image for deployment on Kubernetes environment.
awx-kube-build: Dockerfile
    DOCKER_BUILDKIT=1 docker build -f Dockerfile \
        --build-arg VERSION=$(VERSION) \
        --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
        --build-arg HEADLESS=$(HEADLESS) \
        -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .

# Translation TASKS
# --------------------------------------

# generate UI .pot file, an empty template of strings yet to be translated
## generate UI .pot file, an empty template of strings yet to be translated
pot: $(UI_BUILD_FLAG_FILE)
    $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean

# generate UI .po files for each locale (will update translated strings for `en`)
## generate UI .po files for each locale (will update translated strings for `en`)
po: $(UI_BUILD_FLAG_FILE)
    $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean

# generate API django .pot .po
LANG = "en-us"
LANG = "en_us"
## generate API django .pot .po
messages:
    @if [ "$(VENV_BASE)" ]; then \
        . $(VENV_BASE)/awx/bin/activate; \
@@ -585,3 +603,38 @@ messages:

print-%:
    @echo $($*)

# HELP related targets
# --------------------------------------

HELP_FILTER=.PHONY

## Display help targets
help:
    @printf "Available targets:\n"
    @make -s help/generate | grep -vE "\w($(HELP_FILTER))"

## Display help for all targets
help/all:
    @printf "Available targets:\n"
    @make -s help/generate

## Generate help output from MAKEFILE_LIST
help/generate:
    @awk '/^[-a-zA-Z_0-9%:\\\.\/]+:/ { \
        helpMessage = match(lastLine, /^## (.*)/); \
        if (helpMessage) { \
            helpCommand = $$1; \
            helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \
            gsub("\\\\", "", helpCommand); \
            gsub(":+$$", "", helpCommand); \
            printf "  \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, helpMessage; \
        } else { \
            helpCommand = $$1; \
            gsub("\\\\", "", helpCommand); \
            gsub(":+$$", "", helpCommand); \
            printf "  \x1b[32;01m%-35s\x1b[0m %s\n", helpCommand, "No help available"; \
        } \
    } \
    { lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
    @printf "\n"
@@ -6,9 +6,40 @@ import os
import sys
import warnings

from pkg_resources import get_distribution

__version__ = get_distribution('awx').version
def get_version():
    version_from_file = get_version_from_file()
    if version_from_file:
        return version_from_file
    else:
        from setuptools_scm import get_version

        version = get_version(root='..', relative_to=__file__)
        return version


def get_version_from_file():
    vf = version_file()
    if vf:
        with open(vf, 'r') as file:
            return file.read().strip()


def version_file():
    current_dir = os.path.dirname(os.path.abspath(__file__))
    version_file = os.path.join(current_dir, '..', 'VERSION')

    if os.path.exists(version_file):
        return version_file


try:
    import pkg_resources

    __version__ = pkg_resources.get_distribution('awx').version
except pkg_resources.DistributionNotFound:
    __version__ = get_version()

__all__ = ['__version__']
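For a quick check of which branch of the fallback above actually resolved, the same one-liner the Makefile's `version_file` target uses works from a shell:

```bash
# Prints the resolved version: the installed 'awx' distribution if found,
# otherwise a VERSION file next to the package, otherwise setuptools_scm.
python -c "import awx; print(awx.__version__)"
```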
@@ -21,7 +52,6 @@ try:
except ImportError:  # pragma: no cover
    MODE = 'production'

import hashlib

try:
@@ -78,9 +108,10 @@ def oauth2_getattribute(self, attr):
    # Custom method to override
    # oauth2_provider.settings.OAuth2ProviderSettings.__getattribute__
    from django.conf import settings
    from oauth2_provider.settings import DEFAULTS

    val = None
    if 'migrate' not in sys.argv:
        if (isinstance(attr, str)) and (attr in DEFAULTS) and (not attr.startswith('_')):
            # certain Django OAuth Toolkit migrations actually reference
            # setting lookups for references to model classes (e.g.,
            # oauth2_settings.REFRESH_TOKEN_MODEL)
@@ -159,7 +190,7 @@ def manage():
        sys.stdout.write('%s\n' % __version__)
    # If running as a user without permission to read settings, display an
    # error message. Allow --help to still work.
    elif settings.SECRET_KEY == 'permission-denied':
    elif not os.getenv('SKIP_SECRET_KEY_CHECK', False) and settings.SECRET_KEY == 'permission-denied':
        if len(sys.argv) == 1 or len(sys.argv) >= 2 and sys.argv[1] in ('-h', '--help', 'help'):
            execute_from_command_line(sys.argv)
            sys.stdout.write('\n')
@@ -157,7 +157,7 @@ class FieldLookupBackend(BaseFilterBackend):

    # A list of fields that we know can be filtered on without the possibility
    # of introducing duplicates
    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField)
    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)

    def get_fields_from_lookup(self, model, lookup):

@@ -232,6 +232,9 @@ class FieldLookupBackend(BaseFilterBackend):
                re.compile(value)
            except re.error as e:
                raise ValueError(e.args[0])
        elif new_lookup.endswith('__iexact'):
            if not isinstance(field, (CharField, TextField)):
                raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
        elif new_lookup.endswith('__search'):
            related_model = getattr(field, 'related_model', None)
            if not related_model:
@@ -258,8 +261,8 @@ class FieldLookupBackend(BaseFilterBackend):
        search_filters = {}
        needs_distinct = False
        # Can only have two values: 'AND', 'OR'
        # If 'AND' is used, an iterm must satisfy all condition to show up in the results.
        # If 'OR' is used, an item just need to satisfy one condition to appear in results.
        # If 'AND' is used, an item must satisfy all conditions to show up in the results.
        # If 'OR' is used, an item just needs to satisfy one condition to appear in results.
        search_filter_relation = 'OR'
        for key, values in request.query_params.lists():
            if key in self.RESERVED_NAMES:
@@ -398,11 +401,11 @@ class OrderByBackend(BaseFilterBackend):
            order_by = value.split(',')
        else:
            order_by = (value,)
        if order_by is None:
            order_by = self.get_default_ordering(view)
        default_order_by = self.get_default_ordering(view)
        # glue the order by and default order by together so that the default is the backup option
        order_by = list(order_by or []) + list(default_order_by or [])
        if order_by:
            order_by = self._validate_ordering_fields(queryset.model, order_by)

        # Special handling of the type field for ordering. In this
        # case, we're not sorting exactly on the type field, but
        # given the limited number of views with multiple types,
@@ -6,7 +6,6 @@ import inspect
import logging
import time
import uuid
import urllib.parse

# Django
from django.conf import settings
@@ -14,7 +13,7 @@ from django.contrib.auth import views as auth_views
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import FieldDoesNotExist
from django.db import connection
from django.db import connection, transaction
from django.db.models.fields.related import OneToOneRel
from django.http import QueryDict
from django.shortcuts import get_object_or_404
@@ -30,7 +29,7 @@ from rest_framework.response import Response
from rest_framework import status
from rest_framework import views
from rest_framework.permissions import AllowAny
from rest_framework.renderers import StaticHTMLRenderer, JSONRenderer
from rest_framework.renderers import StaticHTMLRenderer
from rest_framework.negotiation import DefaultContentNegotiation

# AWX
@@ -41,7 +40,7 @@ from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd,
from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name
from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
from awx.api.versioning import URLPathVersioning
from awx.api.metadata import SublistAttachDetatchMetadata, Metadata
from awx.conf import settings_registry
@@ -63,9 +62,9 @@ __all__ = [
    'SubDetailAPIView',
    'ResourceAccessList',
    'ParentMixin',
    'DeleteLastUnattachLabelMixin',
    'SubListAttachDetachAPIView',
    'CopyAPIView',
    'GenericCancelView',
    'BaseUsersList',
]

@@ -91,14 +90,9 @@ class LoggedLoginView(auth_views.LoginView):

    def post(self, request, *args, **kwargs):
        ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
        current_user = getattr(request, 'user', None)
        if request.user.is_authenticated:
            logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
            ret.set_cookie('userLoggedIn', 'true')
            current_user = UserSerializer(self.request.user)
            current_user = smart_str(JSONRenderer().render(current_user.data))
            current_user = urllib.parse.quote('%s' % current_user, '')
            ret.set_cookie('current_user', current_user, secure=settings.SESSION_COOKIE_SECURE or None)
            ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))

        return ret
@@ -255,7 +249,7 @@ class APIView(views.APIView):
            response['X-API-Query-Time'] = '%0.3fs' % sum(q_times)

        if getattr(self, 'deprecated', False):
            response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."'  # noqa
            response['Warning'] = '299 awx "This resource has been deprecated and will be removed in a future release."'

        return response

@@ -638,6 +632,11 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
    # attaching/detaching them from the parent.

    def is_valid_relation(self, parent, sub, created=False):
        "Override in subclasses to do efficient validation of attaching"
        return None

    def is_valid_removal(self, parent, sub):
        "Same as is_valid_relation but called on disassociation"
        return None

    def get_description_context(self):
@@ -722,6 +721,11 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
        if not request.user.can_access(self.parent_model, 'unattach', parent, sub, self.relationship, request.data):
            raise PermissionDenied()

        # Verify that removing the relationship is valid.
        unattach_errors = self.is_valid_removal(parent, sub)
        if unattach_errors is not None:
            return Response(unattach_errors, status=status.HTTP_400_BAD_REQUEST)

        if parent_key:
            sub.delete()
        else:
@@ -765,28 +769,6 @@ class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView):
        return {'id': None}

class DeleteLastUnattachLabelMixin(object):
    """
    Models for which you want the last instance to be deleted from the database
    when the last disassociate is called should inherit from this class. Further,
    the model should implement is_detached()
    """

    def unattach(self, request, *args, **kwargs):
        (sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
        if res:
            return res

        res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id)

        obj = self.model.objects.get(id=sub_id)

        if obj.is_detached():
            obj.delete()

        return res

class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
    pass

@@ -1004,6 +986,23 @@ class CopyAPIView(GenericAPIView):
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

class GenericCancelView(RetrieveAPIView):
    # In subclass set model, serializer_class
    obj_permission_type = 'cancel'

    @transaction.non_atomic_requests
    def dispatch(self, *args, **kwargs):
        return super(GenericCancelView, self).dispatch(*args, **kwargs)

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.can_cancel:
            obj.cancel()
            return Response(status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)

class BaseUsersList(SubListCreateAttachDetachAPIView):
    def post(self, request, *args, **kwargs):
        ret = super(BaseUsersList, self).post(request, *args, **kwargs)

@@ -24,7 +24,6 @@ __all__ = [
    'InventoryInventorySourcesUpdatePermission',
    'UserPermission',
    'IsSystemAdminOrAuditor',
    'InstanceGroupTowerPermission',
    'WorkflowApprovalPermission',
]
@@ -29,7 +29,7 @@ from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.encoding import force_str
|
||||
from django.utils.text import capfirst
|
||||
from django.utils.timezone import now
|
||||
from django.utils.functional import cached_property
|
||||
from django.core.validators import RegexValidator, MaxLengthValidator
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import ValidationError, PermissionDenied
|
||||
@@ -113,6 +113,7 @@ from awx.main.utils import (
|
||||
)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.named_url_graph import reset_counters
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerInstanceGroups, TaskManagerInstances
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
|
||||
from awx.main.validators import vars_validate_or_raise
|
||||
@@ -120,6 +121,9 @@ from awx.main.validators import vars_validate_or_raise
|
||||
from awx.api.versioning import reverse
|
||||
from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, VerbatimField, DeprecatedCredentialField
|
||||
|
||||
# AWX Utils
|
||||
from awx.api.validators import HostnameRegexValidator
|
||||
|
||||
logger = logging.getLogger('awx.api.serializers')
|
||||
|
||||
# Fields that should be summarized regardless of object type.
|
||||
@@ -154,6 +158,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
||||
'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
|
||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
@@ -614,7 +619,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
|
||||
def validate(self, attrs):
|
||||
attrs = super(BaseSerializer, self).validate(attrs)
|
||||
try:
|
||||
# Create/update a model instance and run it's full_clean() method to
|
||||
# Create/update a model instance and run its full_clean() method to
|
||||
# do any validation implemented on the model class.
|
||||
exclusions = self.get_validation_exclusions(self.instance)
|
||||
obj = self.instance or self.Meta.model()
|
||||
@@ -1470,6 +1475,7 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
'allow_override',
|
||||
'custom_virtualenv',
|
||||
'default_environment',
|
||||
'signature_validation_credential',
|
||||
) + (
|
||||
'last_update_failed',
|
||||
'last_updated',
|
||||
@@ -1606,7 +1612,6 @@ class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
|
||||
|
||||
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
|
||||
|
||||
host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
|
||||
playbook_counts = serializers.SerializerMethodField(help_text=_('A count of all plays and tasks for the job run.'))
|
||||
|
||||
class Meta:
|
||||
@@ -1621,14 +1626,6 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
|
||||
|
||||
return data
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except ProjectUpdateEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
|
||||
class Meta:
|
||||
@@ -1687,6 +1684,7 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
||||
'total_inventory_sources',
|
||||
'inventory_sources_with_failures',
|
||||
'pending_deletion',
|
||||
'prevent_instance_group_fallback',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -2081,7 +2079,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
|
||||
class Meta:
|
||||
model = InventorySource
|
||||
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout', 'source_project', 'update_on_project_update') + (
|
||||
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout', 'source_project') + (
|
||||
'last_update_failed',
|
||||
'last_updated',
|
||||
) # Backwards compatibility.
|
||||
@@ -2144,11 +2142,6 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
|
||||
return value
|
||||
|
||||
def validate_update_on_project_update(self, value):
|
||||
if value and self.instance and self.instance.schedules.exists():
|
||||
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
|
||||
return value
|
||||
|
||||
def validate_inventory(self, value):
|
||||
if value and value.kind == 'smart':
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
|
||||
@@ -2199,7 +2192,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
|
||||
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
|
||||
else:
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'update_on_project_update']))
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path']))
|
||||
if redundant_scm_fields:
|
||||
raise serializers.ValidationError({"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))})
|
||||
|
||||
@@ -2228,6 +2221,15 @@ class InventorySourceUpdateSerializer(InventorySourceSerializer):
|
||||
class Meta:
|
||||
fields = ('can_update',)
|
||||
|
||||
def validate(self, attrs):
|
||||
project = self.instance.source_project
|
||||
if project:
|
||||
failed_reason = project.get_reason_if_failed()
|
||||
if failed_reason:
|
||||
raise serializers.ValidationError(failed_reason)
|
||||
|
||||
return super(InventorySourceUpdateSerializer, self).validate(attrs)
|
||||
|
||||
|
||||
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
|
||||
|
||||
@@ -2244,7 +2246,7 @@ class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSeri
|
||||
'source_project_update',
|
||||
'custom_virtualenv',
|
||||
'instance_group',
|
||||
'-controller_node',
|
||||
'scm_revision',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -2319,7 +2321,6 @@ class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
|
||||
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
|
||||
class Meta:
|
||||
model = InventoryUpdate
|
||||
fields = ('*', '-controller_node') # field removal undone by UJ serializer
|
||||
|
||||
|
||||
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
|
||||
@@ -2672,6 +2673,13 @@ class CredentialSerializer(BaseSerializer):
|
||||
|
||||
return credential_type
|
||||
|
||||
def validate_inputs(self, inputs):
|
||||
if self.instance and self.instance.credential_type.kind == "vault":
|
||||
if 'vault_id' in inputs and inputs['vault_id'] != self.instance.inputs['vault_id']:
|
||||
raise ValidationError(_('Vault IDs cannot be changed once they have been created.'))
|
||||
|
||||
return inputs
|
||||
|
||||
|
||||
class CredentialSerializerCreate(CredentialSerializer):
|
||||
|
||||
@@ -2929,6 +2937,12 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
'ask_verbosity_on_launch',
|
||||
'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch',
|
||||
'ask_execution_environment_on_launch',
|
||||
'ask_labels_on_launch',
|
||||
'ask_forks_on_launch',
|
||||
'ask_job_slice_count_on_launch',
|
||||
'ask_timeout_on_launch',
|
||||
'ask_instance_groups_on_launch',
|
||||
'survey_enabled',
|
||||
'become_enabled',
|
||||
'diff_mode',
|
||||
@@ -2937,6 +2951,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
'job_slice_count',
|
||||
'webhook_service',
|
||||
'webhook_credential',
|
||||
'prevent_instance_group_fallback',
|
||||
)
|
||||
read_only_fields = ('*', 'custom_virtualenv')
|
||||
|
||||
@@ -3106,7 +3121,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
|
||||
class JobDetailSerializer(JobSerializer):
|
||||
|
||||
host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
|
||||
playbook_counts = serializers.SerializerMethodField(help_text=_('A count of all plays and tasks for the job run.'))
|
||||
custom_virtualenv = serializers.ReadOnlyField()
|
||||
|
||||
@@ -3122,14 +3136,6 @@ class JobDetailSerializer(JobSerializer):
|
||||
|
||||
return data
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.get_event_queryset().only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except JobEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
class JobCancelSerializer(BaseSerializer):
|
||||
|
||||
@@ -3200,7 +3206,7 @@ class JobRelaunchSerializer(BaseSerializer):
|
||||
return attrs
|
||||
|
||||
|
||||
class JobCreateScheduleSerializer(BaseSerializer):
|
||||
class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer):
|
||||
|
||||
can_schedule = serializers.SerializerMethodField()
|
||||
prompts = serializers.SerializerMethodField()
|
||||
@@ -3226,14 +3232,17 @@ class JobCreateScheduleSerializer(BaseSerializer):
|
||||
try:
|
||||
config = obj.launch_config
|
||||
ret = config.prompts_dict(display=True)
|
||||
if 'inventory' in ret:
|
||||
ret['inventory'] = self._summarize('inventory', ret['inventory'])
|
||||
if 'credentials' in ret:
|
||||
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
|
||||
ret['credentials'] = all_creds
|
||||
for field_name in ('inventory', 'execution_environment'):
|
||||
if field_name in ret:
|
||||
ret[field_name] = self._summarize(field_name, ret[field_name])
|
||||
for field_name, singular in (('credentials', 'credential'), ('instance_groups', 'instance_group')):
|
||||
if field_name in ret:
|
||||
ret[field_name] = [self._summarize(singular, obj) for obj in ret[field_name]]
|
||||
if 'labels' in ret:
|
||||
ret['labels'] = self._summary_field_labels(config)
|
||||
return ret
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
return {'all': _('Unknown, job may have been ran before launch configurations were saved.')}
|
||||
return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
|
||||
|
||||
|
||||
class AdHocCommandSerializer(UnifiedJobSerializer):
|
||||
@@ -3318,21 +3327,10 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
|
||||
|
||||
|
||||
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
|
||||
|
||||
host_status_counts = serializers.SerializerMethodField(help_text=_('A count of hosts uniquely assigned to each status.'))
|
||||
|
||||
class Meta:
|
||||
model = AdHocCommand
|
||||
fields = ('*', 'host_status_counts')
|
||||
|
||||
def get_host_status_counts(self, obj):
|
||||
try:
|
||||
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
|
||||
except AdHocCommandEvent.DoesNotExist:
|
||||
counts = {}
|
||||
|
||||
return counts
|
||||
|
||||
|
||||
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
|
||||
|
||||
@@ -3414,6 +3412,9 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
|
||||
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobTemplate
|
||||
fields = (
|
||||
@@ -3432,6 +3433,11 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
'webhook_service',
|
||||
'webhook_credential',
|
||||
'-execution_environment',
|
||||
'ask_labels_on_launch',
|
||||
'ask_skip_tags_on_launch',
|
||||
'ask_tags_on_launch',
|
||||
'skip_tags',
|
||||
'job_tags',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -3475,7 +3481,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
|
||||
# process char_prompts, these are not direct fields on the model
|
||||
mock_obj = self.Meta.model()
|
||||
for field_name in ('scm_branch', 'limit'):
|
||||
for field_name in ('scm_branch', 'limit', 'skip_tags', 'job_tags'):
|
||||
if field_name in attrs:
|
||||
setattr(mock_obj, field_name, attrs[field_name])
|
||||
attrs.pop(field_name)
|
||||
@@ -3501,6 +3507,9 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
|
||||
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJob
|
||||
fields = (
|
||||
@@ -3520,6 +3529,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
'webhook_service',
|
||||
'webhook_credential',
|
||||
'webhook_guid',
|
||||
'skip_tags',
|
||||
'job_tags',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
@@ -3636,6 +3647,9 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
|
||||
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
|
||||
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
|
||||
forks = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
|
||||
job_slice_count = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
|
||||
timeout = serializers.IntegerField(required=False, allow_null=True, default=None)
|
||||
exclude_errors = ()
|
||||
|
||||
class Meta:
|
||||
@@ -3651,13 +3665,21 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
'skip_tags',
|
||||
'diff_mode',
|
||||
'verbosity',
|
||||
'execution_environment',
|
||||
'forks',
|
||||
'job_slice_count',
|
||||
'timeout',
|
||||
)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
|
||||
if obj.inventory_id:
|
||||
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
|
||||
if obj.execution_environment_id:
|
||||
res['execution_environment'] = self.reverse('api:execution_environment_detail', kwargs={'pk': obj.execution_environment_id})
|
||||
res['labels'] = self.reverse('api:{}_labels_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
||||
res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
||||
res['instance_groups'] = self.reverse('api:{}_instance_groups_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
|
||||
return res
|
||||
|
||||
def _build_mock_obj(self, attrs):
|
||||
@@ -3737,7 +3759,11 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
|
||||
# Build unsaved version of this config, use it to detect prompts errors
|
||||
mock_obj = self._build_mock_obj(attrs)
|
||||
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
|
||||
if set(list(ujt.get_ask_mapping().keys()) + ['extra_data']) & set(attrs.keys()):
|
||||
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
|
||||
else:
|
||||
# Only perform validation of prompts if prompts fields are provided
|
||||
errors = {}
|
||||
|
||||
# Remove all unprocessed $encrypted$ strings, indicating default usage
|
||||
if 'extra_data' in attrs and password_dict:
|
||||
@@ -4109,7 +4135,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
|
||||
|
||||
|
||||
class JobLaunchSerializer(BaseSerializer):
|
||||
|
||||
# Representational fields
|
||||
passwords_needed_to_start = serializers.ReadOnlyField()
|
||||
can_start_without_user_input = serializers.BooleanField(read_only=True)
|
||||
@@ -4132,6 +4157,12 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
|
||||
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
|
||||
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
|
||||
forks = serializers.IntegerField(required=False, write_only=True, min_value=0)
|
||||
job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0)
|
||||
timeout = serializers.IntegerField(required=False, write_only=True)
|
||||
instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)
|
||||
|
||||
class Meta:
|
||||
model = JobTemplate
|
||||
@@ -4159,6 +4190,12 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
'ask_verbosity_on_launch',
|
||||
'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch',
|
||||
'ask_execution_environment_on_launch',
|
||||
'ask_labels_on_launch',
|
||||
'ask_forks_on_launch',
|
||||
'ask_job_slice_count_on_launch',
|
||||
'ask_timeout_on_launch',
|
||||
'ask_instance_groups_on_launch',
|
||||
'survey_enabled',
|
||||
'variables_needed_to_start',
|
||||
'credential_needed_to_start',
|
||||
@@ -4166,6 +4203,12 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
'job_template_data',
|
||||
'defaults',
|
||||
'verbosity',
|
||||
'execution_environment',
|
||||
'labels',
|
||||
'forks',
|
||||
'job_slice_count',
|
||||
'timeout',
|
||||
'instance_groups',
|
||||
)
|
||||
read_only_fields = (
|
||||
'ask_scm_branch_on_launch',
|
||||
@@ -4178,6 +4221,12 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
'ask_verbosity_on_launch',
|
||||
'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch',
|
||||
'ask_execution_environment_on_launch',
|
||||
'ask_labels_on_launch',
|
||||
'ask_forks_on_launch',
|
||||
'ask_job_slice_count_on_launch',
|
||||
'ask_timeout_on_launch',
|
||||
'ask_instance_groups_on_launch',
|
||||
)
|
||||
|
||||
def get_credential_needed_to_start(self, obj):
|
||||
@@ -4202,6 +4251,17 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
|
||||
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
|
||||
defaults_dict.setdefault(field_name, []).append(cred_dict)
|
||||
elif field_name == 'execution_environment':
|
||||
if obj.execution_environment_id:
|
||||
defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name}
|
||||
else:
|
||||
defaults_dict[field_name] = {}
|
||||
elif field_name == 'labels':
|
||||
for label in obj.labels.all():
|
||||
label_dict = {'id': label.id, 'name': label.name}
|
||||
defaults_dict.setdefault(field_name, []).append(label_dict)
|
||||
elif field_name == 'instance_groups':
|
||||
defaults_dict[field_name] = []
|
||||
else:
|
||||
defaults_dict[field_name] = getattr(obj, field_name)
|
||||
return defaults_dict
|
||||
@@ -4221,8 +4281,10 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
# Basic validation - cannot run a playbook without a playbook
|
||||
if not template.project:
|
||||
errors['project'] = _("A project is required to run a job.")
|
||||
elif template.project.status in ('error', 'failed'):
|
||||
errors['playbook'] = _("Missing a revision to run due to failed project update.")
|
||||
else:
|
||||
failure_reason = template.project.get_reason_if_failed()
|
||||
if failure_reason:
|
||||
errors['playbook'] = failure_reason
|
||||
|
||||
# cannot run a playbook without an inventory
|
||||
if template.inventory and template.inventory.pending_deletion is True:
|
||||
@@ -4300,6 +4362,10 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||
workflow_job_template_data = serializers.SerializerMethodField()
|
||||
|
||||
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
|
||||
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobTemplate
|
||||
fields = (
|
||||
@@ -4319,8 +4385,22 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
'workflow_job_template_data',
|
||||
'survey_enabled',
|
||||
'ask_variables_on_launch',
|
||||
'ask_labels_on_launch',
|
||||
'labels',
|
||||
'ask_skip_tags_on_launch',
|
||||
'ask_tags_on_launch',
|
||||
'skip_tags',
|
||||
'job_tags',
|
||||
)
|
||||
read_only_fields = (
|
||||
'ask_inventory_on_launch',
|
||||
'ask_variables_on_launch',
|
||||
'ask_skip_tags_on_launch',
|
||||
'ask_labels_on_launch',
|
||||
'ask_limit_on_launch',
|
||||
'ask_scm_branch_on_launch',
|
||||
'ask_tags_on_launch',
|
||||
)
|
||||
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
|
||||
|
||||
def get_survey_enabled(self, obj):
|
||||
if obj:
|
||||
@@ -4328,10 +4408,15 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
return False
|
||||
|
||||
def get_defaults(self, obj):
|
||||
|
||||
defaults_dict = {}
|
||||
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
|
||||
if field_name == 'inventory':
|
||||
defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None))
|
||||
elif field_name == 'labels':
|
||||
for label in obj.labels.all():
|
||||
label_dict = {"id": label.id, "name": label.name}
|
||||
defaults_dict.setdefault(field_name, []).append(label_dict)
|
||||
else:
|
||||
defaults_dict[field_name] = getattr(obj, field_name)
|
||||
return defaults_dict
|
||||
@@ -4340,6 +4425,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
return dict(name=obj.name, id=obj.id, description=obj.description)
|
||||
|
||||
def validate(self, attrs):
|
||||
|
||||
template = self.instance
|
||||
|
||||
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
|
||||
@@ -4357,6 +4443,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
WFJT_inventory = template.inventory
|
||||
WFJT_limit = template.limit
|
||||
WFJT_scm_branch = template.scm_branch
|
||||
|
||||
super(WorkflowJobLaunchSerializer, self).validate(attrs)
|
||||
template.extra_vars = WFJT_extra_vars
|
||||
template.inventory = WFJT_inventory
|
||||
@@ -4501,7 +4588,10 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
body = messages[event].get('body', {})
|
||||
if body:
|
||||
try:
|
||||
potential_body = json.loads(body)
|
||||
rendered_body = (
|
||||
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
|
||||
)
|
||||
potential_body = json.loads(rendered_body)
|
||||
if not isinstance(potential_body, dict):
|
||||
error_list.append(
|
||||
_("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
|
||||
@@ -4644,69 +4734,74 @@ class SchedulePreviewSerializer(BaseSerializer):

    # We reject rrules if:
    # - DTSTART is not include
    # - INTERVAL is not included
    # - SECONDLY is used
    # - TZID is used
    # - BYDAY prefixed with a number (MO is good but not 20MO)
    # - BYYEARDAY
    # - BYWEEKNO
    # - Multiple DTSTART or RRULE elements
    # - Can't contain both COUNT and UNTIL
    # - COUNT > 999
    # - Multiple DTSTART
    # - At least one of RRULE is not included
    # - EXDATE or RDATE is included
    # For any rule in the ruleset:
    # - INTERVAL is not included
    # - SECONDLY is used
    # - BYDAY prefixed with a number (MO is good but not 20MO)
    # - Can't contain both COUNT and UNTIL
    # - COUNT > 999
    def validate_rrule(self, value):
        rrule_value = value
        multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
        multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
        by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
        match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
        match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
        match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
        match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
        match_multiple_rrule = re.findall(r".*?(RRULE\:[^\s]*)", rrule_value)
        errors = []
        if not len(match_multiple_dtstart):
            raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
            errors.append(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
        if len(match_native_dtstart):
            raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
            errors.append(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
        if len(match_multiple_dtstart) > 1:
            raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
        if not len(match_multiple_rrule):
            raise serializers.ValidationError(_('RRULE required in rrule.'))
        if len(match_multiple_rrule) > 1:
            raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
        if 'interval' not in rrule_value.lower():
            raise serializers.ValidationError(_('INTERVAL required in rrule.'))
        if 'secondly' in rrule_value.lower():
            raise serializers.ValidationError(_('SECONDLY is not supported.'))
        if re.match(multi_by_month_day, rrule_value):
            raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
        if re.match(multi_by_month, rrule_value):
            raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
        if re.match(by_day_with_numeric_prefix, rrule_value):
            raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
        if 'byyearday' in rrule_value.lower():
            raise serializers.ValidationError(_("BYYEARDAY not supported."))
        if 'byweekno' in rrule_value.lower():
            raise serializers.ValidationError(_("BYWEEKNO not supported."))
        if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
            raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
        if match_count:
            count_val = match_count.groups()[0].strip().split("=")
            if int(count_val[1]) > 999:
                raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
            errors.append(_('Multiple DTSTART is not supported.'))
        if "rrule:" not in rrule_value.lower():
            errors.append(_('One or more rule required in rrule.'))
        if "exdate:" in rrule_value.lower():
            raise serializers.ValidationError(_('EXDATE not allowed in rrule.'))
        if "rdate:" in rrule_value.lower():
            raise serializers.ValidationError(_('RDATE not allowed in rrule.'))
        for a_rule in match_multiple_rrule:
            if 'interval' not in a_rule.lower():
                errors.append("{0}: {1}".format(_('INTERVAL required in rrule'), a_rule))
            elif 'secondly' in a_rule.lower():
                errors.append("{0}: {1}".format(_('SECONDLY is not supported'), a_rule))
            if re.match(by_day_with_numeric_prefix, a_rule):
                errors.append("{0}: {1}".format(_("BYDAY with numeric prefix not supported"), a_rule))
            if 'COUNT' in a_rule and 'UNTIL' in a_rule:
                errors.append("{0}: {1}".format(_("RRULE may not contain both COUNT and UNTIL"), a_rule))
            match_count = re.match(r".*?(COUNT\=[0-9]+)", a_rule)
            if match_count:
                count_val = match_count.groups()[0].strip().split("=")
                if int(count_val[1]) > 999:
                    errors.append("{0}: {1}".format(_("COUNT > 999 is unsupported"), a_rule))

        try:
            Schedule.rrulestr(rrule_value)
        except Exception as e:
            import traceback

            logger.error(traceback.format_exc())
            raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
            errors.append(_("rrule parsing failed validation: {}").format(e))

        if errors:
            raise serializers.ValidationError(errors)

        return value

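For reference, a value that satisfies these checks pairs one timezone-aware DTSTART with one or more RRULE clauses on a single line; this sample is illustrative and not taken from the changeset:

```
DTSTART;TZID=America/New_York:20220310T150000 RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=MO,WE,FR;COUNT=10
```

The reworked validator also collects per-rule problems into `errors` and raises them together, instead of failing on the first offending clause.
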
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
    show_capabilities = ['edit', 'delete']

    timezone = serializers.SerializerMethodField()
    until = serializers.SerializerMethodField()
    timezone = serializers.SerializerMethodField(
        help_text=_(
            'The timezone this schedule runs in. This field is extracted from the RRULE. If the timezone in the RRULE is a link to another timezone, the link will be reflected in this field.'
        ),
    )
    until = serializers.SerializerMethodField(
        help_text=_('The date this schedule will end. This field is computed from the RRULE. If the schedule does not end an empty string will be returned'),
    )

    class Meta:
        model = Schedule
@@ -4740,6 +4835,8 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
        if isinstance(obj.unified_job_template, SystemJobTemplate):
            summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type

        # We are not showing instance groups on summary fields because JTs don't either

        if 'inventory' in summary_fields:
            return summary_fields

@@ -4760,13 +4857,6 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
            raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
        elif type(value) == Project and value.scm_type == '':
            raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
        elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
            raise serializers.ValidationError(
                _(
                    'Inventory sources with `update_on_project_update` cannot be scheduled. '
                    'Schedule its source project `{}` instead.'.format(value.source_project.name)
                )
            )
        return value

    def validate(self, attrs):
@@ -4781,7 +4871,7 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer):
    class Meta:
        model = InstanceLink
        fields = ('source', 'target')
        fields = ('source', 'target', 'link_state')

    source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
    target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
@@ -4790,63 +4880,93 @@ class InstanceLinkSerializer(BaseSerializer):
class InstanceNodeSerializer(BaseSerializer):
    class Meta:
        model = Instance
        fields = ('id', 'hostname', 'node_type', 'node_state')

    node_state = serializers.SerializerMethodField()

    def get_node_state(self, obj):
        if not obj.enabled:
            return "disabled"
        return "error" if obj.errors else "healthy"
        fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled')


class InstanceSerializer(BaseSerializer):
    show_capabilities = ['edit']

    consumed_capacity = serializers.SerializerMethodField()
    percent_capacity_remaining = serializers.SerializerMethodField()
    jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that ' 'are targeted for this instance'), read_only=True)
    jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
    jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
    health_check_pending = serializers.SerializerMethodField()

    class Meta:
        model = Instance
        read_only_fields = ('uuid', 'hostname', 'version', 'node_type')
        read_only_fields = ('ip_address', 'uuid', 'version')
        fields = (
            "id",
            "type",
            "url",
            "related",
            "uuid",
            "hostname",
            "created",
            "modified",
            "last_seen",
            "last_health_check",
            "errors",
            'id',
            'hostname',
            'type',
            'url',
            'related',
            'summary_fields',
            'uuid',
            'created',
            'modified',
            'last_seen',
            'health_check_started',
            'health_check_pending',
            'last_health_check',
            'errors',
            'capacity_adjustment',
            "version",
            "capacity",
            "consumed_capacity",
            "percent_capacity_remaining",
            "jobs_running",
            "jobs_total",
            "cpu",
            "memory",
            "cpu_capacity",
            "mem_capacity",
            "enabled",
            "managed_by_policy",
            "node_type",
            'version',
            'capacity',
            'consumed_capacity',
            'percent_capacity_remaining',
            'jobs_running',
            'jobs_total',
            'cpu',
            'memory',
            'cpu_capacity',
            'mem_capacity',
            'enabled',
            'managed_by_policy',
            'node_type',
            'node_state',
            'ip_address',
            'listener_port',
        )
        extra_kwargs = {
            'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
            'node_state': {'initial': Instance.States.INSTALLED, 'default': Instance.States.INSTALLED},
            'hostname': {
                'validators': [
                    MaxLengthValidator(limit_value=250),
                    validators.UniqueValidator(queryset=Instance.objects.all()),
                    RegexValidator(
                        regex=r'^localhost$|^127(?:\.[0-9]+){0,2}\.[0-9]+$|^(?:0*\:)*?:?0*1$',
                        flags=re.IGNORECASE,
                        inverse_match=True,
                        message="hostname cannot be localhost or 127.0.0.1",
                    ),
                    HostnameRegexValidator(),
                ],
            },
        }

    def get_related(self, obj):
        res = super(InstanceSerializer, self).get_related(obj)
        res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
        res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
        if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
            res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
        res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
        if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
            if obj.node_type != 'hop':
            if obj.node_type == 'execution':
                res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
        return res

    def get_summary_fields(self, obj):
        summary = super().get_summary_fields(obj)

        # use this handle to distinguish between a listView and a detailView
        if self.is_detail_view:
            summary['links'] = InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source').filter(source=obj), many=True).data

        return summary

    def get_consumed_capacity(self, obj):
        return obj.consumed_capacity

@@ -4856,10 +4976,58 @@ class InstanceSerializer(BaseSerializer):
        else:
            return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))

    def validate(self, attrs):
        if self.instance.node_type == 'hop':
            raise serializers.ValidationError(_('Hop node instances may not be changed.'))
        return attrs
    def get_health_check_pending(self, obj):
        return obj.health_check_pending

    def validate(self, data):
        if self.instance:
            if self.instance.node_type == Instance.Types.HOP:
                raise serializers.ValidationError("Hop node instances may not be changed.")
        else:
            if not settings.IS_K8S:
                raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
        return data

    def validate_node_type(self, value):
        if not self.instance:
            if value not in (Instance.Types.EXECUTION,):
                raise serializers.ValidationError("Can only create execution nodes.")
        else:
            if self.instance.node_type != value:
                raise serializers.ValidationError("Cannot change node type.")

        return value

    def validate_node_state(self, value):
        if self.instance:
            if value != self.instance.node_state:
                if not settings.IS_K8S:
                    raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
                if value != Instance.States.DEPROVISIONING:
                    raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
                if self.instance.node_type not in (Instance.Types.EXECUTION,):
                    raise serializers.ValidationError("Can only deprovision execution nodes.")
        else:
            if value and value != Instance.States.INSTALLED:
                raise serializers.ValidationError("Can only create instances in the 'installed' state.")

        return value

    def validate_hostname(self, value):
        """
        - Hostname cannot be "localhost" - but can be something like localhost.domain
        - Cannot change the hostname of an already-instantiated & initialized Instance object
        """
        if self.instance and self.instance.hostname != value:
            raise serializers.ValidationError("Cannot change hostname.")

        return value

    def validate_listener_port(self, value):
        if self.instance and self.instance.listener_port != value:
            raise serializers.ValidationError("Cannot change listener port.")

        return value


class InstanceHealthCheckSerializer(BaseSerializer):
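Taken together, these validators mean a new instance can only be an execution node in the 'installed' state, created on a Kubernetes/OpenShift (IS_K8S) deployment, and its hostname and listener port are frozen afterwards. A hypothetical create request against the instances list endpoint (values are placeholders) might look like:

```
POST /api/v2/instances/
{
    "hostname": "execution-node-01.example.org",
    "node_type": "execution",
    "node_state": "installed",
    "listener_port": 27199
}
```
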
@@ -4873,7 +5041,6 @@ class InstanceGroupSerializer(BaseSerializer):

    show_capabilities = ['edit', 'delete']

    committed_capacity = serializers.SerializerMethodField()
    consumed_capacity = serializers.SerializerMethodField()
    percent_capacity_remaining = serializers.SerializerMethodField()
    jobs_running = serializers.IntegerField(
@@ -4922,7 +5089,6 @@ class InstanceGroupSerializer(BaseSerializer):
            "created",
            "modified",
            "capacity",
            "committed_capacity",
            "consumed_capacity",
            "percent_capacity_remaining",
            "jobs_running",
@@ -5003,30 +5169,29 @@ class InstanceGroupSerializer(BaseSerializer):

        return attrs

    def get_capacity_dict(self):
    def get_ig_mgr(self):
        # Store capacity values (globally computed) in the context
        if 'capacity_map' not in self.context:
            ig_qs = None
        if 'task_manager_igs' not in self.context:
            instance_groups_queryset = None
            jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
            if self.parent:  # Is ListView:
                ig_qs = self.parent.instance
            self.context['capacity_map'] = InstanceGroup.objects.capacity_values(qs=ig_qs, tasks=jobs_qs, breakdown=True)
        return self.context['capacity_map']
                instance_groups_queryset = self.parent.instance

            instances = TaskManagerInstances(jobs_qs)
            instance_groups = TaskManagerInstanceGroups(instances_by_hostname=instances, instance_groups_queryset=instance_groups_queryset)

            self.context['task_manager_igs'] = instance_groups
        return self.context['task_manager_igs']

    def get_consumed_capacity(self, obj):
        return self.get_capacity_dict()[obj.name]['running_capacity']

    def get_committed_capacity(self, obj):
        return self.get_capacity_dict()[obj.name]['committed_capacity']
        ig_mgr = self.get_ig_mgr()
        return ig_mgr.get_consumed_capacity(obj.name)

    def get_percent_capacity_remaining(self, obj):
        if not obj.capacity:
            return 0.0
        consumed = self.get_consumed_capacity(obj)
        if consumed >= obj.capacity:
            return 0.0
        else:
            return float("{0:.2f}".format(((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100))
        ig_mgr = self.get_ig_mgr()
        return float("{0:.2f}".format((float(ig_mgr.get_remaining_capacity(obj.name)) / (float(obj.capacity))) * 100))

    def get_instances(self, obj):
        return obj.instances.count()
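A minimal sketch of the same arithmetic, with `percent_remaining` as a hypothetical stand-alone helper (the serializer obtains the remaining value from the task-manager instance groups rather than taking it as a parameter):

```python
def percent_remaining(capacity, remaining):
    # Mirror get_percent_capacity_remaining: guard against zero capacity,
    # then express the remaining capacity as a percentage rounded to 2 places.
    if not capacity:
        return 0.0
    return float("{0:.2f}".format((float(remaining) / float(capacity)) * 100))


assert percent_remaining(100, 37) == 37.0
assert percent_remaining(0, 0) == 0.0
```
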
@@ -5038,8 +5203,7 @@ class ActivityStreamSerializer(BaseSerializer):
    object_association = serializers.SerializerMethodField(help_text=_("When present, shows the field name of the role or relationship that changed."))
    object_type = serializers.SerializerMethodField(help_text=_("When present, shows the model on which the role or relationship was defined."))

    @cached_property
    def _local_summarizable_fk_fields(self):
    def _local_summarizable_fk_fields(self, obj):
        summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
        # Special requests
        summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
@@ -5059,7 +5223,13 @@ class ActivityStreamSerializer(BaseSerializer):
            ('workflow_approval', ('id', 'name', 'unified_job_id')),
            ('instance', ('id', 'hostname')),
        ]
        return field_list
        # Optimization - do not attempt to summarize all fields, pare down to only relations that exist
        if not obj:
            return field_list
        existing_association_types = [obj.object1, obj.object2]
        if 'user' in existing_association_types:
            existing_association_types.append('role')
        return [entry for entry in field_list if entry[0] in existing_association_types]

    class Meta:
        model = ActivityStream
@@ -5143,7 +5313,7 @@ class ActivityStreamSerializer(BaseSerializer):
        data = {}
        if obj.actor is not None:
            data['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
        for fk, __ in self._local_summarizable_fk_fields:
        for fk, __ in self._local_summarizable_fk_fields(obj):
            if not hasattr(obj, fk):
                continue
            m2m_list = self._get_related_objects(obj, fk)
@@ -5200,7 +5370,7 @@ class ActivityStreamSerializer(BaseSerializer):

    def get_summary_fields(self, obj):
        summary_fields = OrderedDict()
        for fk, related_fields in self._local_summarizable_fk_fields:
        for fk, related_fields in self._local_summarizable_fk_fields(obj):
            try:
                if not hasattr(obj, fk):
                    continue

awx/api/templates/api/job_job_events_children_summary.md (new file, 102 lines)
@@ -0,0 +1,102 @@
# View a summary of children events

Special view to facilitate processing job output in the UI.
In order to collapse events and their children, the UI needs to know how
many children exist for a given event.
The UI also needs to know the order of the event (0 based index), which
usually matches the counter, but not always.
This view returns a JSON object where the key is the event counter, and the value
includes the number of children (and grandchildren) events.
Only events with children are included in the output.

## Example

e.g. Demo Job Template job
tuple(event counter, uuid, parent_uuid)

```
(1, '4598d19e-93b4-4e33-a0ae-b387a7348964', '')
(2, 'aae0d189-e3cb-102a-9f00-000000000006', '4598d19e-93b4-4e33-a0ae-b387a7348964')
(3, 'aae0d189-e3cb-102a-9f00-00000000000c', 'aae0d189-e3cb-102a-9f00-000000000006')
(4, 'f4194f14-e406-4124-8519-0fdb08b18f4b', 'aae0d189-e3cb-102a-9f00-00000000000c')
(5, '39f7ad99-dbf3-41e0-93f8-9999db4004f2', 'aae0d189-e3cb-102a-9f00-00000000000c')
(6, 'aae0d189-e3cb-102a-9f00-000000000008', 'aae0d189-e3cb-102a-9f00-000000000006')
(7, '39a49992-5ca4-4b6c-b178-e56d0b0333da', 'aae0d189-e3cb-102a-9f00-000000000008')
(8, '504f3b28-3ea8-4f6f-bd82-60cf8e807cc0', 'aae0d189-e3cb-102a-9f00-000000000008')
(9, 'a242be54-ebe6-4021-afab-f2878bff2e9f', '4598d19e-93b4-4e33-a0ae-b387a7348964')
```

output

```
{
    "1": {
        "rowNumber": 0,
        "numChildren": 8
    },
    "2": {
        "rowNumber": 1,
        "numChildren": 6
    },
    "3": {
        "rowNumber": 2,
        "numChildren": 2
    },
    "6": {
        "rowNumber": 5,
        "numChildren": 2
    },
    "meta_event_nested_parent_uuid": {}
}
```

counter 1 is event 0, and has 8 children
counter 2 is event 1, and has 6 children
etc.

The UI also needs to be able to collapse over "meta" events -- events that
show up due to verbosity or warnings from the system while the play is running.
These events have a 0 level event, with no parent uuid.

```
playbook_on_start
    verbose
    playbook_on_play_start
        playbook_on_task_start
            runner_on_start      <- level 3
verbose                          <- jump to level 0
verbose
            runner_on_ok         <- jump back to level 3
        playbook_on_task_start
            runner_on_start
            runner_on_ok
verbose
verbose
    playbook_on_stats
```

These verbose statements that fall in the middle of a series of children events
are problematic for the UI.
To help, this view will attempt to place the events into the hierarchy, without
the event level jumps.

```
playbook_on_start
    verbose
    playbook_on_play_start
        playbook_on_task_start
            runner_on_start      <- A
            verbose              <- this maps to the uuid of A
            verbose
            runner_on_ok
        playbook_on_task_start   <- B
            runner_on_start
            runner_on_ok
            verbose              <- this maps to the uuid of B
            verbose
    playbook_on_stats
```

The output will include a JSON object where the key is the event counter,
and the value is the assigned nested uuid.
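A sketch of how a client might consume this view; the host, job id, and credentials are placeholders, and the URL is the one registered later in this changeset:

```python
import requests

resp = requests.get(
    "https://awx.example.org/api/v2/jobs/42/job_events/children_summary/",
    auth=("admin", "password"),
)
data = resp.json()
# per the template above: counter -> assigned nested uuid for "meta" events
meta_map = data.pop("meta_event_nested_parent_uuid", {})
for counter, info in data.items():
    print(counter, info["rowNumber"], info["numChildren"])
```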
@@ -1,5 +1,5 @@
Launch a Job Template:

{% ifmeth GET %}
Make a GET request to this resource to determine if the job_template can be
launched and whether any passwords are required to launch the job_template.
The response will include the following fields:
@@ -29,8 +29,8 @@ The response will include the following fields:
* `inventory_needed_to_start`: Flag indicating the presence of an inventory
  associated with the job template. If not then one should be supplied when
  launching the job (boolean, read-only)

Make a POST request to this resource to launch the job_template. If any
{% endifmeth %}
{% ifmeth POST %}Make a POST request to this resource to launch the job_template. If any
passwords, inventory, or extra variables (extra_vars) are required, they must
be passed via POST data, with extra_vars given as a YAML or JSON string and
escaped parentheses. If the `inventory_needed_to_start` is `True` then the
@@ -41,3 +41,4 @@ are not provided, a 400 status code will be returned. If the job cannot be
launched, a 405 status code will be returned. If the provided credential or
inventory are not allowed to be used by the user, then a 403 status code will
be returned.
{% endifmeth %}
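An illustrative POST of that shape (ids and variables are placeholders; which fields are accepted depends on the template's ask_*_on_launch flags):

```
POST /api/v2/job_templates/7/launch/
{
    "inventory": 3,
    "extra_vars": "{\"target\": \"webservers\"}"
}
```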
awx/api/templates/instance_install_bundle/group_vars/all.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_work_commands:
  ansible-runner:
    command: ansible-runner
    params: worker
    allowruntimeparams: true
    verifysignature: true
custom_worksign_public_keyfile: receptor/work-public-key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
receptor_protocol: 'tcp'
receptor_listener: true
receptor_port: {{ instance.listener_port }}
receptor_dependencies:
  - python39-pip
{% verbatim %}
podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}"
{% endverbatim %}
@@ -0,0 +1,20 @@
{% verbatim %}
---
- hosts: all
  become: yes
  tasks:
    - name: Create the receptor user
      user:
        name: "{{ receptor_user }}"
        shell: /bin/bash
    - name: Enable Copr repo for Receptor
      command: dnf copr enable ansible-awx/receptor -y
    - import_role:
        name: ansible.receptor.podman
    - import_role:
        name: ansible.receptor.setup
    - name: Install ansible-runner
      pip:
        name: ansible-runner
        executable: pip3.9
{% endverbatim %}
awx/api/templates/instance_install_bundle/inventory.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
all:
  hosts:
    remote-execution:
      ansible_host: {{ instance.hostname }}
      ansible_user: <username>  # user provided
      ansible_ssh_private_key_file: ~/.ssh/id_rsa
@@ -0,0 +1,4 @@
---
collections:
  - name: ansible.receptor
    version: 1.1.0
awx/api/urls/debug.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from django.urls import re_path

from awx.api.views.debug import (
    DebugRootView,
    TaskManagerDebugView,
    DependencyManagerDebugView,
    WorkflowManagerDebugView,
)

urls = [
    re_path(r'^$', DebugRootView.as_view(), name='debug'),
    re_path(r'^task_manager/$', TaskManagerDebugView.as_view(), name='task_manager'),
    re_path(r'^dependency_manager/$', DependencyManagerDebugView.as_view(), name='dependency_manager'),
    re_path(r'^workflow_manager/$', WorkflowManagerDebugView.as_view(), name='workflow_manager'),
]

__all__ = ['urls']
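As wired up later in this changeset, these routes are only included when MODE == 'development', so in a development environment the following GETs would resolve:

```
GET /api/debug/
GET /api/debug/task_manager/
GET /api/debug/dependency_manager/
GET /api/debug/workflow_manager/
```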
@@ -3,7 +3,15 @@

from django.urls import re_path

from awx.api.views import InstanceList, InstanceDetail, InstanceUnifiedJobsList, InstanceInstanceGroupsList, InstanceHealthCheck
from awx.api.views import (
    InstanceList,
    InstanceDetail,
    InstanceUnifiedJobsList,
    InstanceInstanceGroupsList,
    InstanceHealthCheck,
    InstancePeersList,
)
from awx.api.views.instance_install_bundle import InstanceInstallBundle


urls = [
@@ -12,6 +20,8 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceUnifiedJobsList.as_view(), name='instance_unified_jobs_list'),
    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
    re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
    re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
    re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
]

__all__ = ['urls']

@@ -3,26 +3,28 @@

from django.urls import re_path

from awx.api.views import (
from awx.api.views.inventory import (
    InventoryList,
    InventoryDetail,
    InventoryHostsList,
    InventoryGroupsList,
    InventoryRootGroupsList,
    InventoryVariableData,
    InventoryScriptView,
    InventoryTreeView,
    InventoryInventorySourcesList,
    InventoryInventorySourcesUpdate,
    InventoryActivityStreamList,
    InventoryJobTemplateList,
    InventoryAdHocCommandsList,
    InventoryAccessList,
    InventoryObjectRolesList,
    InventoryInstanceGroupsList,
    InventoryLabelList,
    InventoryCopy,
)
from awx.api.views import (
    InventoryHostsList,
    InventoryGroupsList,
    InventoryInventorySourcesList,
    InventoryInventorySourcesUpdate,
    InventoryAdHocCommandsList,
    InventoryRootGroupsList,
    InventoryScriptView,
    InventoryTreeView,
    InventoryVariableData,
)


urls = [

@@ -3,6 +3,9 @@

from django.urls import re_path

from awx.api.views.inventory import (
    InventoryUpdateEventsList,
)
from awx.api.views import (
    InventoryUpdateList,
    InventoryUpdateDetail,
@@ -10,7 +13,6 @@ from awx.api.views import (
    InventoryUpdateStdout,
    InventoryUpdateNotificationsList,
    InventoryUpdateCredentialsList,
    InventoryUpdateEventsList,
)


@@ -10,6 +10,7 @@ from awx.api.views import (
    JobRelaunch,
    JobCreateSchedule,
    JobJobHostSummariesList,
    JobJobEventsChildrenSummary,
    JobJobEventsList,
    JobActivityStreamList,
    JobStdout,
@@ -27,6 +28,7 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/create_schedule/$', JobCreateSchedule.as_view(), name='job_create_schedule'),
    re_path(r'^(?P<pk>[0-9]+)/job_host_summaries/$', JobJobHostSummariesList.as_view(), name='job_job_host_summaries_list'),
    re_path(r'^(?P<pk>[0-9]+)/job_events/$', JobJobEventsList.as_view(), name='job_job_events_list'),
    re_path(r'^(?P<pk>[0-9]+)/job_events/children_summary/$', JobJobEventsChildrenSummary.as_view(), name='job_job_events_children_summary'),
    re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', JobActivityStreamList.as_view(), name='job_activity_stream_list'),
    re_path(r'^(?P<pk>[0-9]+)/stdout/$', JobStdout.as_view(), name='job_stdout'),
    re_path(r'^(?P<pk>[0-9]+)/notifications/$', JobNotificationsList.as_view(), name='job_notifications_list'),

@@ -3,7 +3,7 @@

from django.urls import re_path

from awx.api.views import LabelList, LabelDetail
from awx.api.views.labels import LabelList, LabelDetail


urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')]

@@ -10,7 +10,7 @@ from oauthlib import oauth2
from oauth2_provider import views

from awx.main.models import RefreshToken
from awx.api.views import ApiOAuthAuthorizationRootView
from awx.api.views.root import ApiOAuthAuthorizationRootView


class TokenView(views.TokenView):

@@ -3,7 +3,7 @@

from django.urls import re_path

from awx.api.views import (
from awx.api.views.organization import (
    OrganizationList,
    OrganizationDetail,
    OrganizationUsersList,
@@ -14,7 +14,6 @@ from awx.api.views import (
    OrganizationJobTemplatesList,
    OrganizationWorkflowJobTemplatesList,
    OrganizationTeamsList,
    OrganizationCredentialList,
    OrganizationActivityStreamList,
    OrganizationNotificationTemplatesList,
    OrganizationNotificationTemplatesErrorList,
@@ -25,8 +24,8 @@ from awx.api.views import (
    OrganizationGalaxyCredentialsList,
    OrganizationObjectRolesList,
    OrganizationAccessList,
    OrganizationApplicationList,
)
from awx.api.views import OrganizationCredentialList, OrganizationApplicationList


urls = [

@@ -3,7 +3,7 @@

from django.urls import re_path

from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList
from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList, ScheduleLabelsList, ScheduleInstanceGroupList


urls = [
@@ -11,6 +11,8 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'),
    re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'),
    re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'),
    re_path(r'^(?P<pk>[0-9]+)/labels/$', ScheduleLabelsList.as_view(), name='schedule_labels_list'),
    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', ScheduleInstanceGroupList.as_view(), name='schedule_instance_groups_list'),
]

__all__ = ['urls']

@@ -2,17 +2,19 @@
# All Rights Reserved.

from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.urls import include, re_path

from awx import MODE
from awx.api.generics import LoggedLoginView, LoggedLogoutView
from awx.api.views import (
from awx.api.views.root import (
    ApiRootView,
    ApiV2RootView,
    ApiV2PingView,
    ApiV2ConfigView,
    ApiV2SubscriptionView,
    ApiV2AttachView,
)
from awx.api.views import (
    AuthView,
    UserMeList,
    DashboardView,
@@ -28,8 +30,8 @@ from awx.api.views import (
    OAuth2TokenList,
    ApplicationOAuth2TokenList,
    OAuth2ApplicationDetail,
    MeshVisualizer,
)
from awx.api.views.mesh_visualizer import MeshVisualizer

from awx.api.views.metrics import MetricsView

@@ -145,7 +147,12 @@ urlpatterns = [
    re_path(r'^logout/$', LoggedLogoutView.as_view(next_page='/api/', redirect_field_name='next'), name='logout'),
    re_path(r'^o/', include(oauth2_root_urls)),
]
if settings.SETTINGS_MODULE == 'awx.settings.development':
if MODE == 'development':
    # Only include these if we are in the development environment
    from awx.api.swagger import SwaggerSchemaView

    urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]

    from awx.api.urls.debug import urls as debug_urls

    urlpatterns += [re_path(r'^debug/', include(debug_urls))]

@@ -1,6 +1,6 @@
from django.urls import re_path

from awx.api.views import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver


urlpatterns = [

@@ -10,6 +10,8 @@ from awx.api.views import (
    WorkflowJobNodeFailureNodesList,
    WorkflowJobNodeAlwaysNodesList,
    WorkflowJobNodeCredentialsList,
    WorkflowJobNodeLabelsList,
    WorkflowJobNodeInstanceGroupsList,
)


@@ -20,6 +22,8 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'),
    re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'),
    re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'),
    re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobNodeLabelsList.as_view(), name='workflow_job_node_labels_list'),
    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobNodeInstanceGroupsList.as_view(), name='workflow_job_node_instance_groups_list'),
]

__all__ = ['urls']

@@ -11,6 +11,8 @@ from awx.api.views import (
    WorkflowJobTemplateNodeAlwaysNodesList,
    WorkflowJobTemplateNodeCredentialsList,
    WorkflowJobTemplateNodeCreateApproval,
    WorkflowJobTemplateNodeLabelsList,
    WorkflowJobTemplateNodeInstanceGroupsList,
)


@@ -21,6 +23,8 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'),
    re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'),
    re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'),
    re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobTemplateNodeLabelsList.as_view(), name='workflow_job_template_node_labels_list'),
    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobTemplateNodeInstanceGroupsList.as_view(), name='workflow_job_template_node_instance_groups_list'),
    re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'),
]


awx/api/validators.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import re

from django.core.validators import RegexValidator, validate_ipv46_address
from django.core.exceptions import ValidationError


class HostnameRegexValidator(RegexValidator):
    """
    Fully validates a domain name that is compliant with norms in Linux/RHEL
    - Cannot start with a hyphen
    - Cannot begin with, or end with a "."
    - Cannot contain any whitespaces
    - Entire hostname is max 255 chars (including dots)
    - Each domain/label is between 1 and 63 characters, except top level domain, which must be at least 2 characters
    - Supports ipv4, ipv6, simple hostnames and FQDNs
    - Follows RFC 9210 (modern RFC 1123, 1178) requirements

    Accepts an IP Address or Hostname as the argument
    """

    regex = '^[a-z0-9][-a-z0-9]*$|^([a-z0-9][-a-z0-9]{0,62}[.])*[a-z0-9][-a-z0-9]{1,62}$'
    flags = re.IGNORECASE

    def __call__(self, value):
        regex_matches, err = self.__validate(value)
        invalid_input = regex_matches if self.inverse_match else not regex_matches
        if invalid_input:
            if err is None:
                err = ValidationError(self.message, code=self.code, params={"value": value})
            raise err

    def __str__(self):
        return f"regex={self.regex}, message={self.message}, code={self.code}, inverse_match={self.inverse_match}, flags={self.flags}"

    def __validate(self, value):

        if ' ' in value:
            return False, ValidationError("whitespaces in hostnames are illegal")

        """
        If we have an IP address, try and validate it.
        """
        try:
            validate_ipv46_address(value)
            return True, None
        except ValidationError:
            pass

        """
        By this point in the code, we probably have a simple hostname, FQDN or a strange hostname like "192.localhost.domain.101"
        """
        if not self.regex.match(value):
            return False, ValidationError(f"illegal characters detected in hostname={value}. Please verify.")

        return True, None
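A minimal usage sketch of the validator (sample values are illustrative):

```python
from django.core.exceptions import ValidationError

from awx.api.validators import HostnameRegexValidator

validate_hostname = HostnameRegexValidator()

for candidate in ("node01.example.org", "10.0.0.12", "bad host name"):
    try:
        validate_hostname(candidate)  # IPs short-circuit; names go through the regex
        print(candidate, "-> ok")
    except ValidationError as exc:
        print(candidate, "-> rejected:", exc)
```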
@@ -22,6 +22,7 @@ from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from django.utils.timezone import now
@@ -68,7 +69,7 @@ from awx.api.generics import (
    APIView,
    BaseUsersList,
    CopyAPIView,
    DeleteLastUnattachLabelMixin,
    GenericCancelView,
    GenericAPIView,
    ListAPIView,
    ListCreateAPIView,
@@ -85,6 +86,7 @@ from awx.api.generics import (
    SubListCreateAttachDetachAPIView,
    SubListDestroyAPIView,
)
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse
from awx.main import models
from awx.main.utils import (
@@ -93,7 +95,7 @@ from awx.main.utils import (
    get_object_or_400,
    getattrd,
    get_pk_from_dict,
    schedule_task_manager,
    ScheduleWorkflowManager,
    ignore_inventory_computed_fields,
)
from awx.main.utils.encryption import encrypt_value
@@ -115,66 +117,15 @@ from awx.api.metadata import RoleMetadata
from awx.main.constants import ACTIVE_STATES, SURVEY_TYPE_MAPPING
from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.api.views.mixin import (
    ControlledByScmMixin,
    InstanceGroupMembershipMixin,
    OrganizationCountsMixin,
    RelatedJobsPreventDeleteMixin,
    UnifiedJobDeletionMixin,
    NoTruncateMixin,
)
from awx.api.views.organization import (  # noqa
    OrganizationList,
    OrganizationDetail,
    OrganizationInventoriesList,
    OrganizationUsersList,
    OrganizationAdminsList,
    OrganizationExecutionEnvironmentsList,
    OrganizationProjectsList,
    OrganizationJobTemplatesList,
    OrganizationWorkflowJobTemplatesList,
    OrganizationTeamsList,
    OrganizationActivityStreamList,
    OrganizationNotificationTemplatesList,
    OrganizationNotificationTemplatesAnyList,
    OrganizationNotificationTemplatesErrorList,
    OrganizationNotificationTemplatesStartedList,
    OrganizationNotificationTemplatesSuccessList,
    OrganizationNotificationTemplatesApprovalList,
    OrganizationInstanceGroupsList,
    OrganizationGalaxyCredentialsList,
    OrganizationAccessList,
    OrganizationObjectRolesList,
)
from awx.api.views.inventory import (  # noqa
    InventoryList,
    InventoryDetail,
    InventoryUpdateEventsList,
    InventoryList,
    InventoryDetail,
    InventoryActivityStreamList,
    InventoryInstanceGroupsList,
    InventoryAccessList,
    InventoryObjectRolesList,
    InventoryJobTemplateList,
    InventoryLabelList,
    InventoryCopy,
)
from awx.api.views.mesh_visualizer import MeshVisualizer  # noqa
from awx.api.views.root import (  # noqa
    ApiRootView,
    ApiOAuthAuthorizationRootView,
    ApiVersionRootView,
    ApiV2RootView,
    ApiV2PingView,
    ApiV2ConfigView,
    ApiV2SubscriptionView,
    ApiV2AttachView,
)
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver  # noqa
from awx.api.pagination import UnifiedJobEventPagination
from awx.main.utils import set_environ


logger = logging.getLogger('awx.api.views')


@@ -359,12 +310,13 @@ class DashboardJobsGraphView(APIView):
        return Response(dashboard_data)


class InstanceList(ListAPIView):
class InstanceList(ListCreateAPIView):

    name = _("Instances")
    model = models.Instance
    serializer_class = serializers.InstanceSerializer
    search_fields = ('hostname',)
    ordering = ('id',)


class InstanceDetail(RetrieveUpdateAPIView):
@@ -397,6 +349,17 @@ class InstanceUnifiedJobsList(SubListAPIView):
        return qs


class InstancePeersList(SubListAPIView):

    name = _("Instance Peers")
    parent_model = models.Instance
    model = models.Instance
    serializer_class = serializers.InstanceSerializer
    parent_access = 'read'
    search_fields = {'hostname'}
    relationship = 'peers'


class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):

    name = _("Instance's Instance Groups")
@@ -409,7 +372,15 @@ class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAtta
        if parent.node_type == 'control':
            return {'msg': _(f"Cannot change instance group membership of control-only node: {parent.hostname}.")}
        if parent.node_type == 'hop':
            return {'msg': _(f"Cannot change instance group membership of hop node: {parent.hostname}.")}
            return {'msg': _(f"Cannot change instance group membership of hop node : {parent.hostname}.")}
        return None

    def is_valid_removal(self, parent, sub):
        res = self.is_valid_relation(parent, sub)
        if res:
            return res
        if sub.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME and parent.node_type == 'hybrid':
            return {'msg': _(f"Cannot disassociate hybrid instance {parent.hostname} from {sub.name}.")}
        return None


@@ -421,8 +392,8 @@ class InstanceHealthCheck(GenericAPIView):
    permission_classes = (IsSystemAdminOrAuditor,)

    def get_queryset(self):
        return super().get_queryset().filter(node_type='execution')
        # FIXME: For now, we don't have a good way of checking the health of a hop node.
        return super().get_queryset().exclude(node_type='hop')

    def get(self, request, *args, **kwargs):
        obj = self.get_object()
@@ -431,40 +402,22 @@ class InstanceHealthCheck(GenericAPIView):

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.health_check_pending:
            return Response({'msg': f"Health check was already in progress for {obj.hostname}."}, status=status.HTTP_200_OK)

        if obj.node_type == 'execution':
            # Note: hop nodes are already excluded by the get_queryset method
            obj.health_check_started = now()
            obj.save(update_fields=['health_check_started'])
        if obj.node_type == models.Instance.Types.EXECUTION:
            from awx.main.tasks.system import execution_node_health_check

            runner_data = execution_node_health_check(obj.hostname)
            obj.refresh_from_db()
            data = self.get_serializer(data=request.data).to_representation(obj)
            # Add in some extra unsaved fields
            for extra_field in ('transmit_timing', 'run_timing'):
                if extra_field in runner_data:
                    data[extra_field] = runner_data[extra_field]
            execution_node_health_check.apply_async([obj.hostname])
        else:
            from awx.main.tasks.system import cluster_node_health_check

            if settings.CLUSTER_HOST_ID == obj.hostname:
                cluster_node_health_check(obj.hostname)
            else:
                cluster_node_health_check.apply_async([obj.hostname], queue=obj.hostname)
            start_time = time.time()
            prior_check_time = obj.last_health_check
            while time.time() - start_time < 50.0:
                obj.refresh_from_db(fields=['last_health_check'])
                if obj.last_health_check != prior_check_time:
                    break
                if time.time() - start_time < 1.0:
                    time.sleep(0.1)
                else:
                    time.sleep(1.0)
        else:
            obj.mark_offline(errors=_('Health check initiated by user determined this instance to be unresponsive'))
            obj.refresh_from_db()
            data = self.get_serializer(data=request.data).to_representation(obj)

        return Response(data, status=status.HTTP_200_OK)
            return Response(
                {"error": f"Cannot run a health check on instances of type {obj.node_type}. Health checks can only be run on execution nodes."},
                status=status.HTTP_400_BAD_REQUEST,
            )
        return Response({'msg': f"Health check is running for {obj.hostname}."}, status=status.HTTP_200_OK)


class InstanceGroupList(ListCreateAPIView):
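With this change, the POST no longer blocks while the check runs: it dispatches the task and reports status. An illustrative exchange, with the instance id and hostname as placeholders:

```
POST /api/v2/instances/5/health_check/
200 {"msg": "Health check is running for execution-node-01."}

POST /api/v2/instances/5/health_check/   (again, while still pending)
200 {"msg": "Health check was already in progress for execution-node-01."}
```
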
@@ -511,7 +464,15 @@ class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetac
        if sub.node_type == 'control':
            return {'msg': _(f"Cannot change instance group membership of control-only node: {sub.hostname}.")}
        if sub.node_type == 'hop':
            return {'msg': _(f"Cannot change instance group membership of hop node: {sub.hostname}.")}
            return {'msg': _(f"Cannot change instance group membership of hop node : {sub.hostname}.")}
        return None

    def is_valid_removal(self, parent, sub):
        res = self.is_valid_relation(parent, sub)
        if res:
            return res
        if sub.node_type == 'hybrid' and parent.name == settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME:
            return {'msg': _(f"Cannot disassociate hybrid node {sub.hostname} from {parent.name}.")}
        return None


@@ -520,6 +481,7 @@ class ScheduleList(ListCreateAPIView):
    name = _("Schedules")
    model = models.Schedule
    serializer_class = serializers.ScheduleSerializer
    ordering = ('id',)


class ScheduleDetail(RetrieveUpdateDestroyAPIView):
@@ -560,8 +522,7 @@ class ScheduleZoneInfo(APIView):
    swagger_topic = 'System Configuration'

    def get(self, request):
        zones = [{'name': zone} for zone in models.Schedule.get_zoneinfo()]
        return Response(zones)
        return Response({'zones': models.Schedule.get_zoneinfo(), 'links': models.Schedule.get_zoneinfo_links()})


class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
@@ -601,6 +562,19 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase):
    parent_model = models.Schedule


class ScheduleLabelsList(LabelSubListCreateAttachDetachView):

    parent_model = models.Schedule


class ScheduleInstanceGroupList(SubListAttachDetachAPIView):

    model = models.InstanceGroup
    serializer_class = serializers.InstanceGroupSerializer
    parent_model = models.Schedule
    relationship = 'instance_groups'


class ScheduleUnifiedJobsList(SubListAPIView):

    model = models.UnifiedJob
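The reworked ScheduleZoneInfo response bundles the zone names with the timezone link map in a single object; an abbreviated, illustrative response (entries are examples, not an exhaustive list):

```
GET /api/v2/schedules/zoneinfo/
{
    "zones": ["America/Argentina/Buenos_Aires", "America/New_York", "UTC"],
    "links": {"US/Eastern": "America/New_York"}
}
```
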
@@ -1004,20 +978,11 @@ class SystemJobEventsList(SubListAPIView):
        return job.get_event_queryset()


class ProjectUpdateCancel(RetrieveAPIView):
class ProjectUpdateCancel(GenericCancelView):

    model = models.ProjectUpdate
    obj_permission_type = 'cancel'
    serializer_class = serializers.ProjectUpdateCancelSerializer

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.can_cancel:
            obj.cancel()
            return Response(status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)


class ProjectUpdateNotificationsList(SubListAPIView):

@@ -1658,7 +1623,7 @@ class HostList(HostRelatedSearchMixin, ListCreateAPIView):
        return Response(dict(error=_(str(e))), status=status.HTTP_400_BAD_REQUEST)


class HostDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):

    always_allow_superuser = False
    model = models.Host
@@ -1692,7 +1657,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
        return qs


class HostGroupsList(ControlledByScmMixin, SubListCreateAttachDetachAPIView):
class HostGroupsList(SubListCreateAttachDetachAPIView):
    '''the list of groups a host is directly a member of'''

    model = models.Group
@@ -1808,7 +1773,7 @@ class EnforceParentRelationshipMixin(object):
        return super(EnforceParentRelationshipMixin, self).create(request, *args, **kwargs)


class GroupChildrenList(ControlledByScmMixin, EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
class GroupChildrenList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):

    model = models.Group
    serializer_class = serializers.GroupSerializer
@@ -1854,7 +1819,7 @@ class GroupPotentialChildrenList(SubListAPIView):
        return qs.exclude(pk__in=except_pks)


class GroupHostsList(HostRelatedSearchMixin, ControlledByScmMixin, SubListCreateAttachDetachAPIView):
class GroupHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
    '''the list of hosts directly below a group'''

    model = models.Host
@@ -1918,7 +1883,7 @@ class GroupActivityStreamList(SubListAPIView):
        return qs.filter(Q(group=parent) | Q(host__in=parent.hosts.all()))


class GroupDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
class GroupDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):

    model = models.Group
    serializer_class = serializers.GroupSerializer
@@ -2256,6 +2221,8 @@ class InventorySourceUpdateView(RetrieveAPIView):

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        serializer = self.get_serializer(instance=obj, data=request.data)
        serializer.is_valid(raise_exception=True)
        if obj.can_update:
            update = obj.update()
            if not update:
@@ -2290,20 +2257,11 @@ class InventoryUpdateCredentialsList(SubListAPIView):
    relationship = 'credentials'


class InventoryUpdateCancel(RetrieveAPIView):
class InventoryUpdateCancel(GenericCancelView):

    model = models.InventoryUpdate
    obj_permission_type = 'cancel'
    serializer_class = serializers.InventoryUpdateCancelSerializer

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.can_cancel:
            obj.cancel()
            return Response(status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)


class InventoryUpdateNotificationsList(SubListAPIView):

@@ -2365,10 +2323,13 @@ class JobTemplateLaunch(RetrieveAPIView):
|
||||
for field, ask_field_name in modified_ask_mapping.items():
|
||||
if not getattr(obj, ask_field_name):
|
||||
data.pop(field, None)
|
||||
elif field == 'inventory':
|
||||
elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
|
||||
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
|
||||
elif field == 'credentials':
|
||||
data[field] = [cred.id for cred in obj.credentials.all()]
|
||||
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
|
||||
if field == 'instance_groups':
|
||||
data[field] = []
|
||||
continue
|
||||
data[field] = [item.id for item in getattr(obj, field).all()]
|
||||
else:
|
||||
data[field] = getattr(obj, field)
|
||||
return data
|
||||
@@ -2381,9 +2342,8 @@ class JobTemplateLaunch(RetrieveAPIView):
|
||||
"""
|
||||
modern_data = data.copy()
|
||||
|
||||
id_fd = '{}_id'.format('inventory')
|
||||
if 'inventory' not in modern_data and id_fd in modern_data:
|
||||
modern_data['inventory'] = modern_data[id_fd]
|
||||
if 'inventory' not in modern_data and 'inventory_id' in modern_data:
|
||||
modern_data['inventory'] = modern_data['inventory_id']
|
||||
|
||||
# credential passwords were historically provided as top-level attributes
|
||||
if 'credential_passwords' not in modern_data:
|
||||
@@ -2703,28 +2663,9 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
        return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)


-class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
+class JobTemplateLabelList(LabelSubListCreateAttachDetachView):

-   model = models.Label
-   serializer_class = serializers.LabelSerializer
    parent_model = models.JobTemplate
-   relationship = 'labels'
-
-   def post(self, request, *args, **kwargs):
-       # If a label already exists in the database, attach it instead of erroring out
-       # that it already exists
-       if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
-           existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
-           if existing.exists():
-               existing = existing[0]
-               request.data['id'] = existing.id
-               del request.data['name']
-               del request.data['organization']
-       if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
-           return Response(
-               dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
-           )
-       return super(JobTemplateLabelList, self).post(request, *args, **kwargs)
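# Behavior sketch for the shared attach-or-create logic removed above (it is
# centralized in LabelSubListCreateAttachDetachView, introduced later in this diff).
# Host, ids and token are hypothetical:

import requests

resp = requests.post(
    'https://awx.example.org/api/v2/job_templates/42/labels/',
    json={'name': 'nightly', 'organization': 1},
    headers={'Authorization': 'Bearer TOKEN'},
)
# If Label(name='nightly', organization=1) already exists, the view rewrites the
# request body to {'id': <existing pk>} and handles it as a plain attach instead of
# failing with a duplicate-name error; once the parent already carries more than
# 100 labels the request is rejected with HTTP 400.
print(resp.status_code)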
class JobTemplateCallback(GenericAPIView):

@@ -2950,6 +2891,22 @@ class WorkflowJobNodeCredentialsList(SubListAPIView):
    relationship = 'credentials'


+class WorkflowJobNodeLabelsList(SubListAPIView):
+
+   model = models.Label
+   serializer_class = serializers.LabelSerializer
+   parent_model = models.WorkflowJobNode
+   relationship = 'labels'
+
+
+class WorkflowJobNodeInstanceGroupsList(SubListAttachDetachAPIView):
+
+   model = models.InstanceGroup
+   serializer_class = serializers.InstanceGroupSerializer
+   parent_model = models.WorkflowJobNode
+   relationship = 'instance_groups'
+
+
class WorkflowJobTemplateNodeList(ListCreateAPIView):

    model = models.WorkflowJobTemplateNode

@@ -2968,6 +2925,19 @@ class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
    parent_model = models.WorkflowJobTemplateNode


+class WorkflowJobTemplateNodeLabelsList(LabelSubListCreateAttachDetachView):
+
+   parent_model = models.WorkflowJobTemplateNode
+
+
+class WorkflowJobTemplateNodeInstanceGroupsList(SubListAttachDetachAPIView):
+
+   model = models.InstanceGroup
+   serializer_class = serializers.InstanceGroupSerializer
+   parent_model = models.WorkflowJobTemplateNode
+   relationship = 'instance_groups'
+
+
class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):

    model = models.WorkflowJobTemplateNode

@@ -3066,8 +3036,7 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
    search_fields = ('unified_job_template__name', 'unified_job_template__description')

    #
-   # Limit the set of WorkflowJobeNodes to the related nodes of specified by
-   #'relationship'
+   # Limit the set of WorkflowJobNodes to the related nodes specified by self.relationship
    #
    def get_queryset(self):
        parent = self.get_parent_object()

@@ -3180,13 +3149,17 @@ class WorkflowJobTemplateLaunch(RetrieveAPIView):
        data['extra_vars'] = extra_vars
-       for field_name, ask_field_name in obj.get_ask_mapping().items():
+       modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping()
+       modified_ask_mapping.pop('extra_vars')
+
+       for field, ask_field_name in modified_ask_mapping.items():
            if not getattr(obj, ask_field_name):
-               data.pop(field_name, None)
-           elif field_name == 'inventory':
-               data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None)
+               data.pop(field, None)
+           elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
+               data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
+           elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
+               data[field] = [item.id for item in getattr(obj, field).all()]
            else:
-               data[field_name] = getattr(obj, field_name)
+               data[field] = getattr(obj, field)

        return data

    def post(self, request, *args, **kwargs):

@@ -3365,20 +3338,15 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
        return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')


-class WorkflowJobCancel(RetrieveAPIView):
+class WorkflowJobCancel(GenericCancelView):

    model = models.WorkflowJob
    obj_permission_type = 'cancel'
    serializer_class = serializers.WorkflowJobCancelSerializer

    def post(self, request, *args, **kwargs):
-       obj = self.get_object()
-       if obj.can_cancel:
-           obj.cancel()
-           schedule_task_manager()
-           return Response(status=status.HTTP_202_ACCEPTED)
-       else:
-           return self.http_method_not_allowed(request, *args, **kwargs)
+       r = super().post(request, *args, **kwargs)
+       ScheduleWorkflowManager().schedule()
+       return r


class WorkflowJobNotificationsList(SubListAPIView):

@@ -3534,20 +3502,11 @@ class JobActivityStreamList(SubListAPIView):
    search_fields = ('changes',)


-class JobCancel(RetrieveAPIView):
+class JobCancel(GenericCancelView):

    model = models.Job
    obj_permission_type = 'cancel'
    serializer_class = serializers.JobCancelSerializer

-   def post(self, request, *args, **kwargs):
-       obj = self.get_object()
-       if obj.can_cancel:
-           obj.cancel()
-           return Response(status=status.HTTP_202_ACCEPTED)
-       else:
-           return self.http_method_not_allowed(request, *args, **kwargs)


class JobRelaunch(RetrieveAPIView):

@@ -3673,15 +3632,21 @@ class JobCreateSchedule(RetrieveAPIView):
                extra_data=config.extra_data,
                survey_passwords=config.survey_passwords,
                inventory=config.inventory,
+               execution_environment=config.execution_environment,
                char_prompts=config.char_prompts,
                credentials=set(config.credentials.all()),
+               labels=set(config.labels.all()),
+               instance_groups=list(config.instance_groups.all()),
            )
            if not request.user.can_access(models.Schedule, 'add', schedule_data):
                raise PermissionDenied()

-           creds_list = schedule_data.pop('credentials')
+           related_fields = ('credentials', 'labels', 'instance_groups')
+           related = [schedule_data.pop(relationship) for relationship in related_fields]
            schedule = models.Schedule.objects.create(**schedule_data)
-           schedule.credentials.add(*creds_list)
+           for relationship, items in zip(related_fields, related):
+               for item in items:
+                   getattr(schedule, relationship).add(item)

            data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data
            data.serializer.instance = None  # hack to avoid permissions.py assuming this is Job model

@@ -3823,7 +3788,113 @@ class JobJobEventsList(BaseJobEventsList):
    def get_queryset(self):
        job = self.get_parent_object()
        self.check_parent_access(job)
-       return job.get_event_queryset().select_related('host').order_by('start_line')
+       return job.get_event_queryset().prefetch_related('job__job_template', 'host').order_by('start_line')
+class JobJobEventsChildrenSummary(APIView):
+
+    renderer_classes = [JSONRenderer]
+    meta_events = ('debug', 'verbose', 'warning', 'error', 'system_warning', 'deprecated')
+
+    def get(self, request, **kwargs):
+        resp = dict(children_summary={}, meta_event_nested_uuid={}, event_processing_finished=False, is_tree=True)
+        job = get_object_or_404(models.Job, pk=kwargs['pk'])
+        if not job.event_processing_finished:
+            return Response(resp)
+        else:
+            resp["event_processing_finished"] = True
+
+        events = list(job.get_event_queryset().values('counter', 'uuid', 'parent_uuid', 'event').order_by('counter'))
+        if len(events) == 0:
+            return Response(resp)
+
+        # key is counter, value is number of total children (including children of children, etc.)
+        map_counter_children_tally = {i['counter']: {"rowNumber": 0, "numChildren": 0} for i in events}
+        # key is uuid, value is counter
+        map_uuid_counter = {i['uuid']: i['counter'] for i in events}
+        # key is uuid, value is parent uuid. Used as a quick lookup
+        map_uuid_puuid = {i['uuid']: i['parent_uuid'] for i in events}
+        # key is counter of meta events (i.e. verbose), value is uuid of the assigned parent
+        map_meta_counter_nested_uuid = {}
+
+        # The collapsible tree view in the UI only makes sense for a tree-like
+        # hierarchy. If Ansible is run with a strategy like free or host_pinned,
+        # events can arrive out of sequential order and no longer follow a tree structure:
+        # E1
+        #   E2
+        #     E3
+        #       E4 <- parent is E3
+        #   E5 <- parent is E1
+        # in the above, there is no clear way to collapse E1, because E5 comes after
+        # E3, which occurs after E1. Thus the tree view should be disabled.
+
+        # mark the last seen uuid at a given level (0-3)
+        # if a parent uuid is not in this list, then we know the events are not tree-like
+        # and return a response with is_tree: False
+        level_current_uuid = [None, None, None, None]
+
+        prev_non_meta_event = events[0]
+        for i, e in enumerate(events):
+            if not e['event'] in JobJobEventsChildrenSummary.meta_events:
+                prev_non_meta_event = e
+            if not e['uuid']:
+                continue
+
+            if not e['event'] in JobJobEventsChildrenSummary.meta_events:
+                level = models.JobEvent.LEVEL_FOR_EVENT[e['event']]
+                level_current_uuid[level] = e['uuid']
+                # if setting level 1, for example, set levels 2 and 3 back to None
+                for u in range(level + 1, len(level_current_uuid)):
+                    level_current_uuid[u] = None
+
+            puuid = e['parent_uuid']
+            if puuid and puuid not in level_current_uuid:
+                # improper tree detected, so bail out early
+                resp['is_tree'] = False
+                return Response(resp)
+
+            # if event is verbose (or debug, etc), we need to "assign" it a
+            # parent. This code looks at the event level of the previous
+            # non-verbose event, and the level of the next (by looking ahead)
+            # non-verbose event. The verbose event is assigned the same parent
+            # uuid of the higher level event.
+            # e.g.
+            # E1
+            #   E2
+            # verbose
+            # verbose <- we are on this event currently
+            #     E4
+            # We'll compare E2 and E4, and the verbose event
+            # will be assigned the parent uuid of E4 (higher event level)
+            if e['event'] in JobJobEventsChildrenSummary.meta_events:
+                event_level_before = models.JobEvent.LEVEL_FOR_EVENT[prev_non_meta_event['event']]
+                # find next non meta event
+                z = i
+                next_non_meta_event = events[-1]
+                while z < len(events):
+                    if events[z]['event'] not in JobJobEventsChildrenSummary.meta_events:
+                        next_non_meta_event = events[z]
+                        break
+                    z += 1
+                event_level_after = models.JobEvent.LEVEL_FOR_EVENT[next_non_meta_event['event']]
+                if event_level_after and event_level_after > event_level_before:
+                    puuid = next_non_meta_event['parent_uuid']
+                else:
+                    puuid = prev_non_meta_event['parent_uuid']
+                if puuid:
+                    map_meta_counter_nested_uuid[e['counter']] = puuid
+            map_counter_children_tally[e['counter']]['rowNumber'] = i
+            if not puuid:
+                continue
+            # now traverse up the parent, grandparent, etc. events and tally those
+            while puuid:
+                map_counter_children_tally[map_uuid_counter[puuid]]['numChildren'] += 1
+                puuid = map_uuid_puuid.get(puuid, None)
+
+        # create new dictionary, dropping events with 0 children
+        resp["children_summary"] = {k: v for k, v in map_counter_children_tally.items() if v['numChildren'] != 0}
+        resp["meta_event_nested_uuid"] = map_meta_counter_nested_uuid
+        return Response(resp)
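# A small worked example of the children_summary payload produced by the view above.
# Counters are hypothetical; the structure mirrors what the code returns. For a
# strictly nested event chain 1 <- 2 <- 3 <- 4 (each event the parent of the next),
# every ancestor's numChildren counts all of its descendants:
#
#   {
#       "children_summary": {
#           "1": {"rowNumber": 0, "numChildren": 3},
#           "2": {"rowNumber": 1, "numChildren": 2},
#           "3": {"rowNumber": 2, "numChildren": 1}
#       },
#       "meta_event_nested_uuid": {},
#       "event_processing_finished": true,
#       "is_tree": true
#   }
#
# The leaf event 4 is absent because entries with numChildren == 0 are filtered out
# before the response is built.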
class AdHocCommandList(ListCreateAPIView):

@@ -3906,20 +3977,11 @@ class AdHocCommandDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    serializer_class = serializers.AdHocCommandDetailSerializer


-class AdHocCommandCancel(RetrieveAPIView):
+class AdHocCommandCancel(GenericCancelView):

    model = models.AdHocCommand
    obj_permission_type = 'cancel'
    serializer_class = serializers.AdHocCommandCancelSerializer

-   def post(self, request, *args, **kwargs):
-       obj = self.get_object()
-       if obj.can_cancel:
-           obj.cancel()
-           return Response(status=status.HTTP_202_ACCEPTED)
-       else:
-           return self.http_method_not_allowed(request, *args, **kwargs)


class AdHocCommandRelaunch(GenericAPIView):

@@ -4054,20 +4116,11 @@ class SystemJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    serializer_class = serializers.SystemJobSerializer


-class SystemJobCancel(RetrieveAPIView):
+class SystemJobCancel(GenericCancelView):

    model = models.SystemJob
    obj_permission_type = 'cancel'
    serializer_class = serializers.SystemJobCancelSerializer

-   def post(self, request, *args, **kwargs):
-       obj = self.get_object()
-       if obj.can_cancel:
-           obj.cancel()
-           return Response(status=status.HTTP_202_ACCEPTED)
-       else:
-           return self.http_method_not_allowed(request, *args, **kwargs)


class SystemJobNotificationsList(SubListAPIView):

@@ -4306,18 +4359,6 @@ class NotificationDetail(RetrieveAPIView):
    serializer_class = serializers.NotificationSerializer


-class LabelList(ListCreateAPIView):
-
-   model = models.Label
-   serializer_class = serializers.LabelSerializer
-
-
-class LabelDetail(RetrieveUpdateAPIView):
-
-   model = models.Label
-   serializer_class = serializers.LabelSerializer


class ActivityStreamList(SimpleListAPIView):

    model = models.ActivityStream
awx/api/views/debug.py (new file, 68 lines)
@@ -0,0 +1,68 @@
from collections import OrderedDict

from django.conf import settings

from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from awx.api.generics import APIView

from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager


class TaskManagerDebugView(APIView):
    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]
    prefix = 'Task'

    def get(self, request):
        TaskManager().schedule()
        if not settings.AWX_DISABLE_TASK_MANAGERS:
            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
        else:
            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
        return Response(msg)


class DependencyManagerDebugView(APIView):
    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]
    prefix = 'Dependency'

    def get(self, request):
        DependencyManager().schedule()
        if not settings.AWX_DISABLE_TASK_MANAGERS:
            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
        else:
            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
        return Response(msg)


class WorkflowManagerDebugView(APIView):
    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]
    prefix = 'Workflow'

    def get(self, request):
        WorkflowManager().schedule()
        if not settings.AWX_DISABLE_TASK_MANAGERS:
            msg = f"Running {self.prefix} manager. To disable other triggers to the {self.prefix} manager, set AWX_DISABLE_TASK_MANAGERS to True"
        else:
            msg = f"AWX_DISABLE_TASK_MANAGERS is True, this view is the only way to trigger the {self.prefix} manager"
        return Response(msg)


class DebugRootView(APIView):
    _ignore_model_permissions = True
    exclude_from_schema = True
    permission_classes = [AllowAny]

    def get(self, request, format=None):
        '''List of available debug urls'''
        data = OrderedDict()
        data['task_manager'] = '/api/debug/task_manager/'
        data['dependency_manager'] = '/api/debug/dependency_manager/'
        data['workflow_manager'] = '/api/debug/workflow_manager/'
        return Response(data)
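# Usage sketch for the debug endpoints above, assuming the URL layout advertised by
# DebugRootView and a deployment where these views are wired up. Host and TLS
# handling are illustrative only:

import requests

BASE = 'https://awx.example.org'  # hypothetical AWX host

# discover the available debug endpoints
root = requests.get(f'{BASE}/api/debug/', verify=False).json()

# manually trigger one task manager run; with AWX_DISABLE_TASK_MANAGERS = True this
# view becomes the only trigger, which makes scheduler behavior reproducible in tests
print(requests.get(f'{BASE}{root["task_manager"]}', verify=False).json())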
awx/api/views/instance_install_bundle.py (new file, 199 lines)
@@ -0,0 +1,199 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.

import datetime
import io
import ipaddress
import os
import tarfile

import asn1
from awx.api import serializers
from awx.api.generics import GenericAPIView, Response
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.main import models
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509 import DNSName, IPAddress, ObjectIdentifier, OtherName
from cryptography.x509.oid import NameOID
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from rest_framework import status

# Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.
RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"


# generate install bundle for the instance
# install bundle directory structure
# ├── install_receptor.yml (playbook)
# ├── inventory.yml
# ├── group_vars
# │   └── all.yml
# ├── receptor
# │   ├── tls
# │   │   ├── ca
# │   │   │   └── receptor-ca.crt
# │   │   ├── receptor.crt
# │   │   └── receptor.key
# │   └── work-public-key.pem
# └── requirements.yml
class InstanceInstallBundle(GenericAPIView):

    name = _('Install Bundle')
    model = models.Instance
    serializer_class = serializers.InstanceSerializer
    permission_classes = (IsSystemAdminOrAuditor,)

    def get(self, request, *args, **kwargs):
        instance_obj = self.get_object()

        if instance_obj.node_type not in ('execution',):
            return Response(
                data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
                status=status.HTTP_400_BAD_REQUEST,
            )

        with io.BytesIO() as f:
            with tarfile.open(fileobj=f, mode='w:gz') as tar:
                # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
                tar.add(
                    os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
                )

                # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
                tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")

                # generate and write the receptor key to receptor/tls/receptor.key in the tar file
                key, cert = generate_receptor_tls(instance_obj)

                key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
                key_tarinfo.size = len(key)
                tar.addfile(key_tarinfo, io.BytesIO(key))

                cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
                cert_tarinfo.size = len(cert)
                tar.addfile(cert_tarinfo, io.BytesIO(cert))

                # generate and write install_receptor.yml to the tar file
                playbook = generate_playbook().encode('utf-8')
                playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
                playbook_tarinfo.size = len(playbook)
                tar.addfile(playbook_tarinfo, io.BytesIO(playbook))

                # generate and write inventory.yml to the tar file
                inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
                inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
                inventory_yml_tarinfo.size = len(inventory_yml)
                tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))

                # generate and write group_vars/all.yml to the tar file
                group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
                group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
                group_vars_tarinfo.size = len(group_vars)
                tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))

                # generate and write requirements.yml to the tar file
                requirements_yml = generate_requirements_yml().encode('utf-8')
                requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
                requirements_yml_tarinfo.size = len(requirements_yml)
                tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))

            # respond with the tarfile
            f.seek(0)
            response = HttpResponse(f.read(), status=status.HTTP_200_OK)
            response['Content-Disposition'] = f"attachment; filename={instance_obj.hostname}_install_bundle.tar.gz"
            return response


def generate_playbook():
    return render_to_string("instance_install_bundle/install_receptor.yml")


def generate_requirements_yml():
    return render_to_string("instance_install_bundle/requirements.yml")


def generate_inventory_yml(instance_obj):
    return render_to_string("instance_install_bundle/inventory.yml", context=dict(instance=instance_obj))


def generate_group_vars_all_yml(instance_obj):
    return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))


def generate_receptor_tls(instance_obj):
    # generate private key for the receptor
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

    # encode receptor hostname to asn1
    hostname = instance_obj.hostname
    encoder = asn1.Encoder()
    encoder.start()
    encoder.write(hostname.encode(), nr=asn1.Numbers.UTF8String)
    hostname_asn1 = encoder.output()

    san_params = [
        DNSName(hostname),
        OtherName(ObjectIdentifier(RECEPTOR_OID), hostname_asn1),
    ]

    try:
        san_params.append(IPAddress(ipaddress.IPv4Address(hostname)))
    except ipaddress.AddressValueError:
        pass

    # generate certificate signing request for the receptor
    csr = (
        x509.CertificateSigningRequestBuilder()
        .subject_name(
            x509.Name(
                [
                    x509.NameAttribute(NameOID.COMMON_NAME, hostname),
                ]
            )
        )
        .add_extension(
            x509.SubjectAlternativeName(san_params),
            critical=False,
        )
        .sign(key, hashes.SHA256())
    )

    # sign the csr with the receptor ca key from /etc/receptor/tls/ca/receptor-ca.key
    with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
        ca_key = serialization.load_pem_private_key(
            f.read(),
            password=None,
        )

    with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
        ca_cert = x509.load_pem_x509_certificate(f.read())

    cert = (
        x509.CertificateBuilder()
        .subject_name(csr.subject)
        .issuer_name(ca_cert.issuer)
        .public_key(csr.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
        .add_extension(
            csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,
            critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,
        )
        .sign(ca_key, hashes.SHA256())
    )

    key = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )

    cert = cert.public_bytes(
        encoding=serialization.Encoding.PEM,
    )

    return key, cert
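# A quick verification sketch for generate_receptor_tls() above, assuming any object
# carrying a .hostname attribute and the CA material present at the hard-coded paths.
# It checks that the issued certificate carries the hostname both as a DNS SAN and as
# the Receptor OtherName OID:

from types import SimpleNamespace

from cryptography import x509
from cryptography.x509 import DNSName, OtherName

fake_instance = SimpleNamespace(hostname='exec-node-01.example.org')  # hypothetical
key_pem, cert_pem = generate_receptor_tls(fake_instance)

cert = x509.load_pem_x509_certificate(cert_pem)
san = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
assert 'exec-node-01.example.org' in san.get_values_for_type(DNSName)
assert any(o.type_id.dotted_string == RECEPTOR_OID for o in san.get_values_for_type(OtherName))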
@@ -18,8 +18,6 @@ from rest_framework import status
# AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate

-from awx.main.models.label import Label
-
from awx.api.generics import (
    ListCreateAPIView,
    RetrieveUpdateDestroyAPIView,

@@ -27,9 +25,8 @@ from awx.api.generics import (
    SubListAttachDetachAPIView,
    ResourceAccessList,
    CopyAPIView,
-   DeleteLastUnattachLabelMixin,
    SubListCreateAttachDetachAPIView,
)
+from awx.api.views.labels import LabelSubListCreateAttachDetachView


from awx.api.serializers import (

@@ -39,9 +36,8 @@ from awx.api.serializers import (
    InstanceGroupSerializer,
    InventoryUpdateEventSerializer,
    JobTemplateSerializer,
-   LabelSerializer,
)
-from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, ControlledByScmMixin
+from awx.api.views.mixin import RelatedJobsPreventDeleteMixin

from awx.api.pagination import UnifiedJobEventPagination

@@ -75,7 +71,7 @@ class InventoryList(ListCreateAPIView):
    serializer_class = InventorySerializer


-class InventoryDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
+class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):

    model = Inventory
    serializer_class = InventorySerializer

@@ -157,28 +153,9 @@ class InventoryJobTemplateList(SubListAPIView):
        return qs.filter(inventory=parent)


-class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView):
+class InventoryLabelList(LabelSubListCreateAttachDetachView):

-   model = Label
-   serializer_class = LabelSerializer
    parent_model = Inventory
-   relationship = 'labels'
-
-   def post(self, request, *args, **kwargs):
-       # If a label already exists in the database, attach it instead of erroring out
-       # that it already exists
-       if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
-           existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
-           if existing.exists():
-               existing = existing[0]
-               request.data['id'] = existing.id
-               del request.data['name']
-               del request.data['organization']
-       if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() > 100:
-           return Response(
-               dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
-           )
-       return super(InventoryLabelList, self).post(request, *args, **kwargs)


class InventoryCopy(CopyAPIView):
awx/api/views/labels.py (new file, 71 lines)
@@ -0,0 +1,71 @@
# AWX
from awx.api.generics import SubListCreateAttachDetachAPIView, RetrieveUpdateAPIView, ListCreateAPIView
from awx.main.models import Label
from awx.api.serializers import LabelSerializer

# Django
from django.utils.translation import gettext_lazy as _

# Django REST Framework
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST


class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView):
    """
    For related labels lists like /api/v2/inventories/N/labels/

    We want the last instance to be deleted from the database
    when the last disassociate happens.

    Subclasses need to define parent_model
    """

    model = Label
    serializer_class = LabelSerializer
    relationship = 'labels'

    def unattach(self, request, *args, **kwargs):
        (sub_id, res) = super().unattach_validate(request)
        if res:
            return res

        res = super().unattach_by_id(request, sub_id)

        obj = self.model.objects.get(id=sub_id)

        if obj.is_detached():
            obj.delete()

        return res

    def post(self, request, *args, **kwargs):
        # If a label already exists in the database, attach it instead of erroring out
        # that it already exists
        if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
            existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
            if existing.exists():
                existing = existing[0]
                request.data['id'] = existing.id
                del request.data['name']
                del request.data['organization']

        # Give a 400 error if we have attached too many labels to this object
        label_filter = self.parent_model._meta.get_field(self.relationship).remote_field.name
        if Label.objects.filter(**{label_filter: self.kwargs['pk']}).count() > 100:
            return Response(dict(msg=_(f'Maximum number of labels for {self.parent_model._meta.verbose_name_raw} reached.')), status=HTTP_400_BAD_REQUEST)

        return super().post(request, *args, **kwargs)
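# A standalone illustration of the reverse-name lookup used in post() above. For a
# ManyToManyField, _meta.get_field(name).remote_field.name is the query name the
# related model (Label, in AWX's case) uses to filter back to the parent, so one
# expression replaces the hard-coded 'unifiedjobtemplate_labels' / 'inventory_labels'
# filters that the old per-view post() methods carried. Model names are hypothetical:

from django.db import models as dj_models


class Tag(dj_models.Model):
    class Meta:
        app_label = 'example'


class Board(dj_models.Model):
    tags = dj_models.ManyToManyField(Tag, related_name='boards')

    class Meta:
        app_label = 'example'


reverse_name = Board._meta.get_field('tags').remote_field.name
# Tag.objects.filter(**{reverse_name: board_pk}) now selects this board's tags
print(reverse_name)  # -> 'boards'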
class LabelDetail(RetrieveUpdateAPIView):

    model = Label
    serializer_class = LabelSerializer


class LabelList(ListCreateAPIView):

    name = _("Labels")
    model = Label
    serializer_class = LabelSerializer

@@ -10,13 +10,12 @@ from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _

-from rest_framework.permissions import SAFE_METHODS
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status

from awx.main.constants import ACTIVE_STATES
-from awx.main.utils import get_object_or_400, parse_yaml_or_json
+from awx.main.utils import get_object_or_400
from awx.main.models.ha import Instance, InstanceGroup
from awx.main.models.organization import Team
from awx.main.models.projects import Project

@@ -68,49 +67,27 @@ class InstanceGroupMembershipMixin(object):
    membership.
    """

    def attach_validate(self, request):
        parent = self.get_parent_object()
        sub_id, res = super().attach_validate(request)
        if res:  # handle an error
            return sub_id, res
        sub = get_object_or_400(self.model, pk=sub_id)
        attach_errors = self.is_valid_relation(parent, sub)
        if attach_errors:
            return sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST)
        return sub_id, res

    def attach(self, request, *args, **kwargs):
        response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
        sub_id, res = self.attach_validate(request)
        if status.is_success(response.status_code):
            sub_id = request.data.get('id', None)
            if self.parent_model is Instance:
                inst_name = self.get_parent_object().hostname
            else:
                inst_name = get_object_or_400(self.model, pk=sub_id).hostname
            with transaction.atomic():
-               ig_qs = InstanceGroup.objects.select_for_update()
+               instance_groups_queryset = InstanceGroup.objects.select_for_update()
                if self.parent_model is Instance:
-                   ig_obj = get_object_or_400(ig_qs, pk=sub_id)
+                   ig_obj = get_object_or_400(instance_groups_queryset, pk=sub_id)
                else:
                    # similar to get_parent_object, but selected for update
                    parent_filter = {self.lookup_field: self.kwargs.get(self.lookup_field, None)}
-                   ig_obj = get_object_or_404(ig_qs, **parent_filter)
+                   ig_obj = get_object_or_404(instance_groups_queryset, **parent_filter)
                if inst_name not in ig_obj.policy_instance_list:
                    ig_obj.policy_instance_list.append(inst_name)
                    ig_obj.save(update_fields=['policy_instance_list'])
        return response

    def unattach_validate(self, request):
        parent = self.get_parent_object()
        (sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
        if res:
            return (sub_id, res)
        sub = get_object_or_400(self.model, pk=sub_id)
        attach_errors = self.is_valid_relation(parent, sub)
        if attach_errors:
            return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
        return (sub_id, res)

    def unattach(self, request, *args, **kwargs):
        response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
        if status.is_success(response.status_code):

@@ -120,13 +97,13 @@ class InstanceGroupMembershipMixin(object):
            else:
                inst_name = get_object_or_400(self.model, pk=sub_id).hostname
            with transaction.atomic():
-               ig_qs = InstanceGroup.objects.select_for_update()
+               instance_groups_queryset = InstanceGroup.objects.select_for_update()
                if self.parent_model is Instance:
-                   ig_obj = get_object_or_400(ig_qs, pk=sub_id)
+                   ig_obj = get_object_or_400(instance_groups_queryset, pk=sub_id)
                else:
                    # similar to get_parent_object, but selected for update
                    parent_filter = {self.lookup_field: self.kwargs.get(self.lookup_field, None)}
-                   ig_obj = get_object_or_404(ig_qs, **parent_filter)
+                   ig_obj = get_object_or_404(instance_groups_queryset, **parent_filter)
                if inst_name in ig_obj.policy_instance_list:
                    ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
                    ig_obj.save(update_fields=['policy_instance_list'])

@@ -208,35 +185,6 @@ class OrganizationCountsMixin(object):
        return full_context


-class ControlledByScmMixin(object):
-   """
-   Special method to reset SCM inventory commit hash
-   if anything that it manages changes.
-   """
-
-   def _reset_inv_src_rev(self, obj):
-       if self.request.method in SAFE_METHODS or not obj:
-           return
-       project_following_sources = obj.inventory_sources.filter(update_on_project_update=True, source='scm')
-       if project_following_sources:
-           # Allow inventory changes unrelated to variables
-           if self.model == Inventory and (
-               not self.request or not self.request.data or parse_yaml_or_json(self.request.data.get('variables', '')) == parse_yaml_or_json(obj.variables)
-           ):
-               return
-           project_following_sources.update(scm_last_revision='')
-
-   def get_object(self):
-       obj = super(ControlledByScmMixin, self).get_object()
-       self._reset_inv_src_rev(obj)
-       return obj
-
-   def get_parent_object(self):
-       obj = super(ControlledByScmMixin, self).get_parent_object()
-       self._reset_inv_src_rev(obj)
-       return obj


class NoTruncateMixin(object):
    def get_serializer_context(self):
        context = super().get_serializer_context()
@@ -204,7 +204,7 @@ class GitlabWebhookReceiver(WebhookReceiverBase):
        return h.hexdigest()

    def get_event_status_api(self):
-       if self.get_event_type() != 'Merge Request Hook':
+       if self.get_event_type() not in self.ref_keys.keys():
            return
        project = self.request.data.get('project', {})
        repo_url = project.get('web_url')

@@ -1,7 +1,6 @@
# Python
import contextlib
import logging
-import sys
import threading
import time
import os

@@ -31,7 +30,7 @@ from awx.conf.models import Setting

logger = logging.getLogger('awx.conf.settings')

-SETTING_MEMORY_TTL = 5 if 'callback_receiver' in ' '.join(sys.argv) else 0
+SETTING_MEMORY_TTL = 5

# Store a special value to indicate when a setting is not set in the database.
SETTING_CACHE_NOTSET = '___notset___'

@@ -81,17 +80,16 @@ def _ctit_db_wrapper(trans_safe=False):
        yield
    except DBError as exc:
        if trans_safe:
-           if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
-               level = logger.exception
-               if isinstance(exc, ProgrammingError):
-                   if 'relation' in str(exc) and 'does not exist' in str(exc):
-                       # this generally means we can't fetch Tower configuration
-                       # because the database hasn't actually finished migrating yet;
-                       # this is usually a sign that a service in a container (such as ws_broadcast)
-                       # has come up *before* the database has finished migrating, and
-                       # especially that the conf.settings table doesn't exist yet
-                       level = logger.debug
-               level('Database settings are not available, using defaults.')
+           level = logger.warning
+           if isinstance(exc, ProgrammingError):
+               if 'relation' in str(exc) and 'does not exist' in str(exc):
+                   # this generally means we can't fetch Tower configuration
+                   # because the database hasn't actually finished migrating yet;
+                   # this is usually a sign that a service in a container (such as ws_broadcast)
+                   # has come up *before* the database has finished migrating, and
+                   # especially that the conf.settings table doesn't exist yet
+                   level = logger.debug
+           level(f'Database settings are not available, using defaults. error: {str(exc)}')
        else:
            logger.exception('Error modifying something related to database settings.')
    finally:

@@ -235,6 +233,8 @@ class SettingsWrapper(UserSettingsHolder):
        self.__dict__['_awx_conf_init_readonly'] = False
        self.__dict__['cache'] = EncryptedCacheProxy(cache, registry)
        self.__dict__['registry'] = registry
+       self.__dict__['_awx_conf_memoizedcache'] = cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL)
+       self.__dict__['_awx_conf_memoizedcache_lock'] = threading.Lock()

        # record the current pid so we compare it post-fork for
        # processes like the dispatcher and callback receiver

@@ -397,12 +397,20 @@ class SettingsWrapper(UserSettingsHolder):
    def SETTINGS_MODULE(self):
        return self._get_default('SETTINGS_MODULE')

-   @cachetools.cached(cache=cachetools.TTLCache(maxsize=2048, ttl=SETTING_MEMORY_TTL))
+   @cachetools.cachedmethod(
+       cache=lambda self: self.__dict__['_awx_conf_memoizedcache'],
+       key=lambda *args, **kwargs: SettingsWrapper.hashkey(*args, **kwargs),
+       lock=lambda self: self.__dict__['_awx_conf_memoizedcache_lock'],
+   )
    def _get_local_with_cache(self, name):
        """Get value while accepting the in-memory cache if key is available"""
        with _ctit_db_wrapper(trans_safe=True):
            return self._get_local(name)

    def __getattr__(self, name):
        value = empty
        if name in self.all_supported_settings:
-           with _ctit_db_wrapper(trans_safe=True):
-               value = self._get_local(name)
+           value = self._get_local_with_cache(name)
            if value is not empty:
                return value
        return self._get_default(name)

@@ -476,6 +484,23 @@ class SettingsWrapper(UserSettingsHolder):
        set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
        return set_locally or set_on_default

+   @classmethod
+   def hashkey(cls, *args, **kwargs):
+       """
+       Usage of @cachetools.cached has changed to @cachetools.cachedmethod.
+       The previous cachetools decorator called the hash function and passed in (self, key).
+       The new cachetools decorator calls the hash function with just (key).
+       Ideally, we would continue to pass self; however, the cachetools decorator interface
+       does not allow us to.
+
+       This hashkey function maintains that the generated key looks like
+       ('<SettingsWrapper>', key). The thought is that maybe it is important to namespace
+       our cache to the SettingsWrapper scope in case some other usage of this cache exists.
+       I cannot think of how any other system could and would use our private cache, but
+       for safety's sake we are ensuring the key schema does not change.
+       """
+       return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)
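# A self-contained sketch of the per-instance cachedmethod pattern adopted above,
# using the cachetools API (cachetools.cachedmethod accepts cache and lock callables
# that receive self, and a key callable applied to the method arguments). Class and
# method names here are illustrative:

import threading

import cachetools
import cachetools.keys


class ConfigSource:
    def __init__(self):
        self._cache = cachetools.TTLCache(maxsize=128, ttl=5)
        self._lock = threading.Lock()
        self.lookups = 0

    @classmethod
    def hashkey(cls, *args, **kwargs):
        # namespace keys the same way SettingsWrapper.hashkey does
        return cachetools.keys.hashkey(f"<{cls.__name__}>", *args, **kwargs)

    @cachetools.cachedmethod(
        cache=lambda self: self._cache,
        key=lambda *args, **kwargs: ConfigSource.hashkey(*args, **kwargs),
        lock=lambda self: self._lock,
    )
    def get(self, name):
        self.lookups += 1  # stands in for an expensive database read
        return name.upper()


src = ConfigSource()
assert src.get('timeout') == 'TIMEOUT'
assert src.get('timeout') == 'TIMEOUT'  # served from the TTL cache this time
assert src.lookups == 1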
    def __getattr_without_cache__(self, name):
        # Django 1.10 added an optimization to settings lookup:

@@ -28,6 +28,9 @@ def handle_setting_change(key, for_delete=False):
    cache_keys = {Setting.get_cache_key(k) for k in setting_keys}
    cache.delete_many(cache_keys)

+   # if we have changed a setting, we want to avoid mucking with the in-memory cache entirely
+   settings._awx_conf_memoizedcache.clear()
+
    # Send setting_changed signal with new value for each setting.
    for setting_key in setting_keys:
        setting_changed.send(sender=Setting, setting=setting_key, value=getattr(settings, setting_key, None), enter=not bool(for_delete))

@@ -8,6 +8,8 @@ import codecs
from uuid import uuid4
import time

+from unittest import mock
+
from django.conf import LazySettings
from django.core.cache.backends.locmem import LocMemCache
from django.core.exceptions import ImproperlyConfigured

@@ -299,3 +301,33 @@ def test_readonly_sensitive_cache_data_is_encrypted(settings):
    cache.set('AWX_ENCRYPTED', 'SECRET!')
    assert cache.get('AWX_ENCRYPTED') == 'SECRET!'
    assert native_cache.get('AWX_ENCRYPTED') == 'FRPERG!'


+@pytest.mark.defined_in_file(AWX_VAR='DEFAULT')
+def test_in_memory_cache_only_for_registered_settings(settings):
+    "Test that we only make use of the in-memory TTL cache for registered settings"
+    settings._awx_conf_memoizedcache.clear()
+    settings.MIDDLEWARE
+    assert len(settings._awx_conf_memoizedcache) == 0  # does not cache MIDDLEWARE
+    settings.registry.register('AWX_VAR', field_class=fields.CharField, category=_('System'), category_slug='system')
+    settings._wrapped.__dict__['all_supported_settings'] = ['AWX_VAR']  # because it is cached_property
+    settings._awx_conf_memoizedcache.clear()
+    assert settings.AWX_VAR == 'DEFAULT'
+    assert len(settings._awx_conf_memoizedcache) == 1  # caches registered settings
+
+
+@pytest.mark.defined_in_file(AWX_VAR='DEFAULT')
+def test_in_memory_cache_works(settings):
+    settings._awx_conf_memoizedcache.clear()
+    settings.registry.register('AWX_VAR', field_class=fields.CharField, category=_('System'), category_slug='system')
+    settings._wrapped.__dict__['all_supported_settings'] = ['AWX_VAR']
+
+    settings._awx_conf_memoizedcache.clear()
+
+    with mock.patch('awx.conf.settings.SettingsWrapper._get_local', return_value='DEFAULT') as mock_get:
+        assert settings.AWX_VAR == 'DEFAULT'
+        mock_get.assert_called_once_with('AWX_VAR')
+
+    with mock.patch.object(settings, '_get_local') as mock_get:
+        assert settings.AWX_VAR == 'DEFAULT'
+        mock_get.assert_not_called()
@@ -1,3 +1,6 @@
# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""

@@ -8,8 +11,7 @@ msgstr ""
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
+"Language: es \n"
"MIME-Version: 1.0\n"
-"Language: \n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

@@ -21,9 +23,7 @@ msgstr "Tiempo de inactividad fuerza desconexión"
msgid ""
"Number of seconds that a user is inactive before they will need to login "
"again."
-msgstr ""
-"Número de segundos que un usuario es inactivo antes de que ellos vuelvan a "
-"conectarse de nuevo."
+msgstr "Número de segundos que un usuario es inactivo antes de que ellos vuelvan a conectarse de nuevo."

#: awx/api/conf.py:21 awx/api/conf.py:31 awx/api/conf.py:42 awx/api/conf.py:50
#: awx/api/conf.py:70 awx/api/conf.py:85 awx/api/conf.py:96 awx/sso/conf.py:105

@@ -45,9 +45,7 @@ msgstr "Número máximo de sesiones activas en simultáneo"
msgid ""
"Maximum number of simultaneous logged in sessions a user may have. To "
"disable enter -1."
-msgstr ""
-"Número máximo de sesiones activas en simultáneo que un usuario puede tener. "
-"Para deshabilitar, introduzca -1."
+msgstr "Número máximo de sesiones activas en simultáneo que un usuario puede tener. Para deshabilitar, introduzca -1."

#: awx/api/conf.py:37
msgid "Disable the built-in authentication system"

@@ -58,10 +56,7 @@ msgid ""
"Controls whether users are prevented from using the built-in authentication "
"system. You probably want to do this if you are using an LDAP or SAML "
"integration."
-msgstr ""
-"Controla si se impide que los usuarios utilicen el sistema de autenticación "
-"integrado. Probablemente desee hacer esto si está usando una integración de "
-"LDAP o SAML."
+msgstr "Controla si se impide que los usuarios utilicen el sistema de autenticación integrado. Probablemente desee hacer esto si está usando una integración de LDAP o SAML."

#: awx/api/conf.py:48
msgid "Enable HTTP Basic Auth"

@@ -83,13 +78,7 @@ msgid ""
"authorization codes in the number of seconds, and "
"`REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after "
"expired access tokens, in the number of seconds."
-msgstr ""
-"Diccionario para personalizar los tiempos de espera de OAuth2; los elementos "
-"disponibles son `ACCESS_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de "
-"acceso en cantidad de segundos; `AUTHORIZATION_CODE_EXPIRE_SECONDS`, la "
-"duración de los códigos de autorización en cantidad de segundos; y "
-"`REFRESH_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de actualización, "
-"después de los tokens de acceso expirados, en cantidad de segundos."
+msgstr "Diccionario para personalizar los tiempos de espera de OAuth2; los elementos disponibles son `ACCESS_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de acceso en cantidad de segundos; `AUTHORIZATION_CODE_EXPIRE_SECONDS`, la duración de los códigos de autorización en cantidad de segundos; y `REFRESH_TOKEN_EXPIRE_SECONDS`, la duración de los tokens de actualización, después de los tokens de acceso expirados, en cantidad de segundos."

#: awx/api/conf.py:78
msgid "Allow External Users to Create OAuth2 Tokens"

@@ -101,11 +90,7 @@ msgid ""
"Radius, and others) are not allowed to create OAuth2 tokens. To change this "
"behavior, enable this setting. Existing tokens will not be deleted when this "
"setting is toggled off."
-msgstr ""
-"Por motivos de seguridad, los usuarios de proveedores de autenticación "
-"externos (LDAP, SAML, SSO, Radius y otros) no tienen permitido crear tokens "
-"de OAuth2. Habilite este ajuste para cambiar este comportamiento. Los tokens "
-"existentes no se eliminarán cuando se desactive este ajuste."
+msgstr "Por motivos de seguridad, los usuarios de proveedores de autenticación externos (LDAP, SAML, SSO, Radius y otros) no tienen permitido crear tokens de OAuth2. Habilite este ajuste para cambiar este comportamiento. Los tokens existentes no se eliminarán cuando se desactive este ajuste."

#: awx/api/conf.py:94
msgid "Login redirect override URL"

@@ -115,10 +100,7 @@ msgstr "URL de invalidación de redireccionamiento de inicio de sesión"
msgid ""
"URL to which unauthorized users will be redirected to log in. If blank, "
"users will be sent to the login page."
-msgstr ""
-"URL a la que los usuarios no autorizados serán redirigidos para iniciar "
-"sesión. Si está en blanco, los usuarios serán enviados a la página de inicio "
-"de sesión."
+msgstr "URL a la que los usuarios no autorizados serán redirigidos para iniciar sesión. Si está en blanco, los usuarios serán enviados a la página de inicio de sesión."

#: awx/api/conf.py:114
msgid "There are no remote authentication systems configured."

@@ -167,17 +149,13 @@ msgstr "ID{field_name} no válido: {field_id}"
msgid ""
"Cannot apply role_level filter to this list because its model does not use "
"roles for access control."
-msgstr ""
-"No se puede aplicar el filtro role_level a esta lista debido a que su modelo "
-"no usa roles para el control de acceso."
+msgstr "No se puede aplicar el filtro role_level a esta lista debido a que su modelo no usa roles para el control de acceso."

#: awx/api/generics.py:179
msgid ""
"You did not use correct Content-Type in your HTTP request. If you are using "
"our REST API, the Content-Type must be application/json"
-msgstr ""
-"No utilizó el Tipo de contenido correcto en su solicitud HTTP. Si está "
-"usando nuestra API REST, el Tipo de contenido debe ser aplicación/json."
+msgstr "No utilizó el Tipo de contenido correcto en su solicitud HTTP. Si está usando nuestra API REST, el Tipo de contenido debe ser aplicación/json."

#: awx/api/generics.py:220
msgid " To establish a login session, visit"

@@ -223,9 +201,7 @@ msgstr "Estructura de datos con URLs de recursos relacionados."
msgid ""
"Data structure with name/description for related resources. The output for "
"some objects may be limited for performance reasons."
-msgstr ""
-"Estructura de datos con nombre/descripción de los recursos relacionados. La "
-"salida de algunos objetos puede estar limitada por motivos de rendimiento."
+msgstr "Estructura de datos con nombre/descripción de los recursos relacionados. La salida de algunos objetos puede estar limitada por motivos de rendimiento."

#: awx/api/metadata.py:75
msgid "Timestamp when this {} was created."

@@ -248,17 +224,14 @@ msgstr "Error de análisis JSON; no es un objeto JSON"
msgid ""
"JSON parse error - %s\n"
"Possible cause: trailing comma."
-msgstr ""
-"Error de análisis JSON - %s\n"
+msgstr "Error de análisis JSON - %s\n"
"Posible causa: coma final."

#: awx/api/serializers.py:205
msgid ""
"The original object is already named {}, a copy from it cannot have the same "
"name."
-msgstr ""
-"El objeto original ya tiene el nombre {}, por lo que una copia de este no "
-"puede tener el mismo nombre."
+msgstr "El objeto original ya tiene el nombre {}, por lo que una copia de este no puede tener el mismo nombre."
#: awx/api/serializers.py:334
#, python-format

@@ -301,9 +274,7 @@ msgstr "Plantilla de trabajo"
msgid ""
"Indicates whether all of the events generated by this unified job have been "
"saved to the database."
-msgstr ""
-"Indica si todos los eventos generados por esta tarea unificada se guardaron "
-"en la base de datos."
+msgstr "Indica si todos los eventos generados por esta tarea unificada se guardaron en la base de datos."

#: awx/api/serializers.py:939
msgid "Write-only field used to change the password."

@@ -324,8 +295,7 @@ msgstr "No se puede cambiar %s en el usuario gestionado por LDAP."

#: awx/api/serializers.py:1153
msgid "Must be a simple space-separated string with allowed scopes {}."
-msgstr ""
-"Debe ser una cadena simple separada por espacios con alcances permitidos {}."
+msgstr "Debe ser una cadena simple separada por espacios con alcances permitidos {}."

#: awx/api/serializers.py:1238
msgid "Authorization Grant Type"

@@ -378,9 +348,7 @@ msgstr "SCM track_submodules solo puede usarse con proyectos git."
msgid ""
"Only Container Registry credentials can be associated with an Execution "
"Environment"
-msgstr ""
-"Solo las credenciales del registro de contenedores pueden asociarse a un "
-"entorno de ejecución"
+msgstr "Solo las credenciales del registro de contenedores pueden asociarse a un entorno de ejecución"

#: awx/api/serializers.py:1440
msgid "Cannot change the organization of an execution environment"

@@ -390,9 +358,7 @@ msgstr "No se puede modificar la organización de un entorno de ejecución"
msgid ""
"One or more job templates depend on branch override behavior for this "
"project (ids: {})."
-msgstr ""
-"Una o más plantillas de trabajo dependen del comportamiento de invalidación "
-"de ramas para este proyecto (ids: {})."
+msgstr "Una o más plantillas de trabajo dependen del comportamiento de invalidación de ramas para este proyecto (ids: {})."

#: awx/api/serializers.py:1530
msgid "Update options must be set to false for manual projects."

@@ -406,9 +372,7 @@ msgstr "Colección de playbooks disponibles dentro de este proyecto."
msgid ""
"Array of inventory files and directories available within this project, not "
"comprehensive."
-msgstr ""
-"Colección de archivos de inventario y directorios disponibles dentro de este "
-"proyecto, no global."
+msgstr "Colección de archivos de inventario y directorios disponibles dentro de este proyecto, no global."

#: awx/api/serializers.py:1599 awx/api/serializers.py:3098
#: awx/api/serializers.py:3311

@@ -560,7 +524,7 @@ msgstr "Playbook no encontrado para el proyecto."

#: awx/api/serializers.py:2842
msgid "Must select playbook for project."
-msgstr "Debe seleccionar un playbook para el proyecto"
+msgstr "Debe seleccionar un playbook para el proyecto."

#: awx/api/serializers.py:2844 awx/api/serializers.py:2846
msgid "Project does not allow overriding branch."

@@ -893,8 +857,7 @@ msgid "Containerized instances may not be managed via the API"
msgstr "Las instancias contenedorizadas no pueden ser gestionadas a través de la API."

#: awx/api/serializers.py:4919 awx/api/serializers.py:4922
-#, fuzzy, python-format
-#| msgid "tower instance group name may not be changed."
+#, python-format
msgid "%s instance group name may not be changed."
msgstr "El nombre del grupo de instancia %s no puede modificarse."

@@ -1049,8 +1012,6 @@ msgid ""
msgstr "No puede asignar credenciales de acceso a un equipo cuando el campo de organización no está establecido o pertenezca a una organización diferente."

#: awx/api/views/__init__.py:720
-#, fuzzy
-#| msgid "The instance that managed the execution environment."
msgid "Only the 'pull' field can be edited for managed execution environments."
msgstr "Sólo se puede editar el campo \"pull\" para los entornos de ejecución gestionados."

@@ -2087,8 +2048,6 @@ msgid "Unique identifier for an installation"
msgstr "Identificador único para una instalación"

#: awx/main/conf.py:183
-#, fuzzy
-#| msgid "The Instance group the job was run under"
msgid "The instance group where control plane tasks run"
msgstr "Grupo de instancias donde se ejecutan las tareas del plano de control"

@@ -2796,8 +2755,6 @@ msgid "The identifier for the secret e.g., /some/identifier"
msgstr "El identificador para el secreto; por ejemplo, /some/identifier"

#: awx/main/credential_plugins/dsv.py:12
-#, fuzzy
-#| msgid "Tenant ID"
msgid "Tenant"
msgstr "Inquilino"

@@ -2816,8 +2773,6 @@ msgid ""
msgstr "El TLD del inquilino, por ejemplo, \"com\" cuando la URL es https://ex.secretservercloud.com"

#: awx/main/credential_plugins/dsv.py:34
-#, fuzzy
-#| msgid "Secret Name"
msgid "Secret Path"
msgstr "Ruta secreta"

@@ -2826,8 +2781,6 @@ msgid "The secret path e.g. /test/secret1"
msgstr "La ruta secreta, por ejemplo, /test/secret1"

#: awx/main/credential_plugins/dsv.py:46
-#, fuzzy
-#| msgid "Job Template"
msgid "URL template"
msgstr "Plantilla URL"

@@ -2960,8 +2913,6 @@ msgid ""
msgstr "Principales válidos (ya sea nombres de usuario o nombres de host) para los que se debería firmar el certificado:"

#: awx/main/credential_plugins/tss.py:10
-#, fuzzy
-#| msgid "Auth Server URL"
msgid "Secret Server URL"
msgstr "URL del servidor secreto"

@@ -2972,8 +2923,6 @@ msgid ""
msgstr "La URL base del servidor secreto, por ejemplo, https://myserver/SecretServer o https://mytenant.secretservercloud.com"

#: awx/main/credential_plugins/tss.py:17
-#, fuzzy
-#| msgid "Red Hat customer username"
msgid "The (Application) user username"
msgstr "El nombre de usuario (de la aplicación)"

@@ -2995,20 +2944,14 @@ msgid "The corresponding password"
msgstr "La contraseña correspondiente"

#: awx/main/credential_plugins/tss.py:31
-#, fuzzy
-#| msgid "Secret Key"
msgid "Secret ID"
msgstr "Identificación secreta"

#: awx/main/credential_plugins/tss.py:32
-#, fuzzy
-#| msgid "The name of the secret to look up."
msgid "The integer ID of the secret"
msgstr "El ID entero del secreto"

#: awx/main/credential_plugins/tss.py:37
-#, fuzzy
-#| msgid "Secret Key"
msgid "Secret Field"
msgstr "Campo secreto"

@@ -3568,22 +3511,14 @@ msgstr "Ruta de archivo absoluta al archivo CA por usar (opcional)"

#: awx/main/models/credential/__init__.py:1023
#: awx/main/models/credential/__init__.py:1029 awx/main/models/inventory.py:813
-#, fuzzy
-#| msgid "Gather data for Insights for Ansible Automation Platform"
msgid "Red Hat Ansible Automation Platform"
msgstr "Plataforma Red Hat Ansible Automation"

#: awx/main/models/credential/__init__.py:1031
-#, fuzzy
-#| msgid "The Ansible Tower base URL to authenticate with."
msgid "Red Hat Ansible Automation Platform base URL to authenticate with."
msgstr "URL base de Red Hat Ansible Automation Platform para autenticar."
|
||||
|
||||
#: awx/main/models/credential/__init__.py:1038
|
||||
#, fuzzy
|
||||
#| msgid ""
|
||||
#| "The Ansible Tower user to authenticate as.This should not be set if an "
|
||||
#| "OAuth token is being used."
|
||||
msgid ""
|
||||
"Red Hat Ansible Automation Platform username id to authenticate as.This "
|
||||
"should not be set if an OAuth token is being used."
|
||||
@@ -4488,7 +4423,7 @@ msgstr "El número de segundos desde de que la última actualización del proyec
|
||||
msgid ""
|
||||
"Allow changing the SCM branch or revision in a job template that uses this "
|
||||
"project."
|
||||
msgstr "Permitir el cambio de la rama o revisión del SCM en una plantilla de trabajo que utilice este proyecto."
|
||||
msgstr "Permitir el cambio de la rama o revisión de SCM en una plantilla de trabajo que utilice este proyecto."
|
||||
|
||||
#: awx/main/models/projects.py:294
|
||||
msgid "The last revision fetched by a project update"
|
||||
@@ -4925,7 +4860,7 @@ msgid "Exception connecting to PagerDuty: {}"
|
||||
msgstr "Excepción conectando a PagerDuty: {}"
|
||||
|
||||
#: awx/main/notifications/pagerduty_backend.py:87
|
||||
#: awx/main/notifications/slack_backend.py:48
|
||||
#: awx/main/notifications/slack_backend.py:49
|
||||
#: awx/main/notifications/twilio_backend.py:47
|
||||
msgid "Exception sending messages: {}"
|
||||
msgstr "Excepción enviando mensajes: {}"
|
||||
@@ -5181,7 +5116,7 @@ msgstr "Al menos un certificado es necesario."
|
||||
msgid ""
|
||||
"At least %(min_certs)d certificates are required, only %(cert_count)d "
|
||||
"provided."
|
||||
msgstr "Se requieren al menos %(min_certs)d certificados, sólo %(cert_count)d proporcionados."
|
||||
msgstr "Al menos %(min_certs)d certificados son necesarios, solo se proporcionó %(cert_count)d."
|
||||
|
||||
#: awx/main/validators.py:152
|
||||
#, python-format
|
||||
@@ -5195,8 +5130,7 @@ msgid ""
|
||||
msgstr "No se permiten más de %(max_certs)d certificados, %(cert_count)d proporcionado."
|
||||
|
||||
#: awx/main/validators.py:289
|
||||
#, fuzzy, python-brace-format
|
||||
#| msgid "The container image to be used for execution."
|
||||
#, python-brace-format
|
||||
msgid "The container image name {value} is not valid"
|
||||
msgstr "El nombre de la imagen del contenedor {value} no es válido"
|
||||
|
||||
@@ -5666,17 +5600,17 @@ msgstr "La clave secreta OAuth2 (Clave secreta del cliente) from your GitHub org
|
||||
|
||||
#: awx/sso/conf.py:751
|
||||
msgid "GitHub Organization Name"
|
||||
msgstr "Nombre de la organización de GitHub"
|
||||
msgstr "Nombre para la organización GitHub"
|
||||
|
||||
#: awx/sso/conf.py:752
|
||||
msgid ""
|
||||
"The name of your GitHub organization, as used in your organization's URL: "
|
||||
"https://github.com/<yourorg>/."
|
||||
msgstr "El nombre de su organización de GitHub, como se utiliza en la URL de su organización: https://github.com/<votreorg>/."
|
||||
msgstr "El nombre de su organización de GitHub, como se utiliza en la URL de su organización: https://github.com/<yourorg>/."
|
||||
|
||||
#: awx/sso/conf.py:762
|
||||
msgid "GitHub Organization OAuth2 Organization Map"
|
||||
msgstr "Mapa de organización de OAuth2 de la organización de GitHub"
|
||||
msgstr "Mapa de organización OAuth2 para organizaciones GitHub"
|
||||
|
||||
#: awx/sso/conf.py:774
|
||||
msgid "GitHub Organization OAuth2 Team Map"
|
||||
@@ -5684,7 +5618,7 @@ msgstr "Mapa de equipos OAuth2 para equipos GitHub"
|
||||
|
||||
#: awx/sso/conf.py:790
|
||||
msgid "GitHub Team OAuth2 Callback URL"
|
||||
msgstr "URL de devolución de OAuth2 del equipo de GitHub"
|
||||
msgstr "URL callback OAuth2 para los equipos GitHub"
|
||||
|
||||
#: awx/sso/conf.py:792 awx/sso/conf.py:1060
|
||||
msgid ""
|
||||
@@ -5692,12 +5626,12 @@ msgid ""
|
||||
"<yourorg>/settings/applications and obtain an OAuth2 key (Client ID) and "
|
||||
"secret (Client Secret). Provide this URL as the callback URL for your "
|
||||
"application."
|
||||
msgstr "Cree una aplicación propiedad de la organización en https://github.com/organizations/<votreorg>/settings/applications y obtenga una clave de OAuth2 (ID del cliente) y secreta (clave secreta de cliente). Proporcione esta URL como URL de devolución para su aplicación."
|
||||
msgstr "Cree una aplicación propiedad de la organización en https://github.com/organizations/<yourorg>/settings/applications y obtenga una clave de OAuth2 (ID del cliente) y secreta (clave secreta de cliente). Proporcione esta URL como URL de devolución para su aplicación."
|
||||
|
||||
#: awx/sso/conf.py:797 awx/sso/conf.py:809 awx/sso/conf.py:820
|
||||
#: awx/sso/conf.py:832 awx/sso/conf.py:843 awx/sso/conf.py:855
|
||||
msgid "GitHub Team OAuth2"
|
||||
msgstr "OAuth2 de equipo de GitHub"
|
||||
msgstr "OAuth2 para equipos GitHub"
|
||||
|
||||
#: awx/sso/conf.py:807
|
||||
msgid "GitHub Team OAuth2 Key"
|
||||
@@ -5828,7 +5762,7 @@ msgstr "Nombre de la organización de GitHub Enterprise"
|
||||
msgid ""
|
||||
"The name of your GitHub Enterprise organization, as used in your "
|
||||
"organization's URL: https://github.com/<yourorg>/."
|
||||
msgstr "El nombre de su organización de GitHub Enterprise, como se utiliza en la URL de su organización: https://github.com/<votreorg>/."
|
||||
msgstr "El nombre de su organización de GitHub Enterprise, como se utiliza en la URL de su organización: https://github.com/<yourorg>/."
|
||||
|
||||
#: awx/sso/conf.py:1030
|
||||
msgid "GitHub Enterprise Organization OAuth2 Organization Map"
|
||||
@@ -6303,68 +6237,4 @@ msgstr "%s se está actualizando."
|
||||
|
||||
#: awx/ui/urls.py:24
|
||||
msgid "This page will refresh when complete."
|
||||
msgstr "Esta página se actualizará cuando se complete."
|
||||
|
||||
#~ msgid "SSLError while trying to connect to {}"
|
||||
#~ msgstr "SSLError al intentar conectarse a {}"
|
||||
|
||||
#~ msgid "Request to {} timed out."
|
||||
#~ msgstr "El tiempo de solicitud {} caducó."
|
||||
|
||||
#~ msgid "Unknown exception {} while trying to GET {}"
|
||||
#~ msgstr "Excepción desconocida {} al intentar usar GET {}"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Unauthorized access. Please check your Insights Credential username and "
|
||||
#~ "password."
|
||||
#~ msgstr ""
|
||||
#~ "Acceso no autorizado. Verifique su nombre de usuario y contraseña de "
|
||||
#~ "Insights."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Failed to access the Insights API at URL {}. Server responded with {} "
|
||||
#~ "status code and message {}"
|
||||
#~ msgstr ""
|
||||
#~ "No se pudo acceder a la API de Insights en la URL {}. El servidor "
|
||||
#~ "respondió con el código de estado {} y el mensaje {}"
|
||||
|
||||
#~ msgid "Expected JSON response from Insights at URL {} but instead got {}"
|
||||
#~ msgstr ""
|
||||
#~ "Respuesta JSON esperada de Insights en la URL {}; en cambio, se recibió {}"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Could not translate Insights system ID {} into an Insights platform ID."
|
||||
#~ msgstr ""
|
||||
#~ "No se pudo traducir el ID del sistema Insights {} en un ID de plataforma "
|
||||
#~ "de Insights."
|
||||
|
||||
#~ msgid "This host is not recognized as an Insights host."
|
||||
#~ msgstr "Este host no se reconoce como un host de Insights."
|
||||
|
||||
#~ msgid "The Insights Credential for \"{}\" was not found."
|
||||
#~ msgstr "No se encontró la credencial de Insights para \"{}\"."
|
||||
|
||||
#~ msgid ""
|
||||
#~ "The path to the secret stored in the secret backend e.g, /some/secret/"
|
||||
#~ msgstr ""
|
||||
#~ "La ruta al secreto almacenado en el backend de secretos; por ejemplo, /"
|
||||
#~ "some/secret/"
|
||||
|
||||
#~ msgid "Ansible Tower"
|
||||
#~ msgstr "Ansible Tower"
|
||||
|
||||
#~ msgid "Ansible Tower Hostname"
|
||||
#~ msgstr "Nombre de host de Ansible Tower"
|
||||
|
||||
#~ msgid ""
|
||||
#~ "Credentials to be used by hosts belonging to this inventory when "
|
||||
#~ "accessing Red Hat Insights API."
|
||||
#~ msgstr ""
|
||||
#~ "Credenciales que utilizarán los hosts que pertenecen a este inventario "
|
||||
#~ "cuando accedan a la API de Red Hat Insights."
|
||||
|
||||
#~ msgid "Assignment not allowed for Smart Inventory"
|
||||
#~ msgstr "Tarea no permitida para el inventario inteligente"
|
||||
|
||||
#~ msgid "Red Hat Insights host unique identifier."
|
||||
#~ msgstr "Identificador único de host de Red Hat Insights."
|
||||
msgstr "Esta página se actualizará cuando se complete."
|
||||
|
||||
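Throughout these locale hunks the change is mechanical: multi-line msgstr continuations are collapsed onto one line, stale "#, fuzzy" / "#| msgid" marker pairs are dropped, and obsolete "#~" entries are deleted. Both msgstr forms are equivalent to gettext, which concatenates adjacent string fragments. A small sketch with the third-party polib library (an assumption for illustration; nothing in this commit depends on it) that verifies the equivalence and counts obsolete entries:

# Sketch only: 'polib' is assumed for illustration and is not used by this repo.
import polib

po = polib.pofile('awx/locale/es/LC_MESSAGES/django.po')

# Wrapped and single-line msgstr values parse to the same Python string, so
# collapsing the continuations is a formatting-only change.
entry = po.find('Must be a simple space-separated string with allowed scopes {}.')
if entry is not None:
    print(repr(entry.msgstr))

# '#~' entries parse as obsolete; this commit removes them outright.
print(len(po.obsolete_entries()), 'obsolete entries')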
File diff suppressed because it is too large
File diff suppressed because it is too large
awx/locale/ko/LC_MESSAGES/django.po (normal file, 6240 lines; diff suppressed because it is too large)
@@ -1,3 +1,6 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""

@@ -8,8 +11,7 @@ msgstr ""
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: nl \n"
"MIME-Version: 1.0\n"
"Language: \n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

@@ -21,9 +23,7 @@ msgstr "Niet-actieve tijd voor forceren van afmelding"
msgid ""
"Number of seconds that a user is inactive before they will need to login "
"again."
msgstr ""
"Maximumaantal seconden dat een gebruiker niet-actief is voordat deze zich "
"opnieuw moet aanmelden."
msgstr "Maximumaantal seconden dat een gebruiker niet-actief is voordat deze zich opnieuw moet aanmelden."

#: awx/api/conf.py:21 awx/api/conf.py:31 awx/api/conf.py:42 awx/api/conf.py:50
#: awx/api/conf.py:70 awx/api/conf.py:85 awx/api/conf.py:96 awx/sso/conf.py:105

@@ -45,9 +45,7 @@ msgstr "Maximumaantal gelijktijdige aangemelde sessies"
msgid ""
"Maximum number of simultaneous logged in sessions a user may have. To "
"disable enter -1."
msgstr ""
"Maximumaantal gelijktijdige aangemelde sessies dat een gebruiker kan hebben. "
"Voer -1 in om dit uit te schakelen."
msgstr "Maximumaantal gelijktijdige aangemelde sessies dat een gebruiker kan hebben. Voer -1 in om dit uit te schakelen."

#: awx/api/conf.py:37
msgid "Disable the built-in authentication system"

@@ -58,10 +56,7 @@ msgid ""
"Controls whether users are prevented from using the built-in authentication "
"system. You probably want to do this if you are using an LDAP or SAML "
"integration."
msgstr ""
"Bepaalt of gebruikers het ingebouwde authenticatiesysteem niet mogen "
"gebruiken. U wilt dit waarschijnlijk doen als u een LDAP- of SAML-integratie "
"gebruikt."
msgstr "Bepaalt of gebruikers het ingebouwde authenticatiesysteem niet mogen gebruiken. U wilt dit waarschijnlijk doen als u een LDAP- of SAML-integratie gebruikt."

#: awx/api/conf.py:48
msgid "Enable HTTP Basic Auth"

@@ -83,13 +78,7 @@ msgid ""
"authorization codes in the number of seconds, and "
"`REFRESH_TOKEN_EXPIRE_SECONDS`, the duration of refresh tokens, after "
"expired access tokens, in the number of seconds."
msgstr ""
"Termenlijst voor het aanpassen van OAuth 2-time-outs. Beschikbare items zijn "
"`ACCESS_TOKEN_EXPIRE_SECONDS`, de duur van de toegangstokens in het aantal "
"seconden, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, de duur van de "
"autorisatiecodes in het aantal seconden. en `REFRESH_TOKEN_EXPIRE_SECONDS`, "
"de duur van de verversingstokens na verlopen toegangstokens, in het aantal "
"seconden."
msgstr "Termenlijst voor het aanpassen van OAuth 2-time-outs. Beschikbare items zijn `ACCESS_TOKEN_EXPIRE_SECONDS`, de duur van de toegangstokens in het aantal seconden, `AUTHORIZATION_CODE_EXPIRE_SECONDS`, de duur van de autorisatiecodes in het aantal seconden. en `REFRESH_TOKEN_EXPIRE_SECONDS`, de duur van de verversingstokens na verlopen toegangstokens, in het aantal seconden."

#: awx/api/conf.py:78
msgid "Allow External Users to Create OAuth2 Tokens"

@@ -476,7 +465,7 @@ msgstr "'ask_at_runtime' wordt niet ondersteund voor aangepaste referenties."

#: awx/api/serializers.py:2547
msgid "Credential Type"
msgstr "Type toegangsgegevens"
msgstr "Soort toegangsgegevens"

#: awx/api/serializers.py:2614
msgid "Modifications not allowed for managed credentials"

@@ -868,8 +857,7 @@ msgid "Containerized instances may not be managed via the API"
msgstr "Geclusterde instanties worden mogelijk niet beheerd via de API"

#: awx/api/serializers.py:4919 awx/api/serializers.py:4922
#, fuzzy, python-format
#| msgid "tower instance group name may not be changed."
#, python-format
msgid "%s instance group name may not be changed."
msgstr "Naam van de %s-instantiegroep mag niet worden gewijzigd."

@@ -1024,8 +1012,6 @@ msgid ""
msgstr "U kunt een team geen referentietoegang verlenen wanneer het veld Organisatie niet is ingesteld of behoort tot een andere organisatie"

#: awx/api/views/__init__.py:720
#, fuzzy
#| msgid "The instance that managed the execution environment."
msgid "Only the 'pull' field can be edited for managed execution environments."
msgstr "Alleen het veld \"pull\" kan worden bewerkt voor beheerde uitvoeringsomgevingen."

@@ -2062,8 +2048,6 @@ msgid "Unique identifier for an installation"
msgstr "Unieke identificatiecode voor installatie"

#: awx/main/conf.py:183
#, fuzzy
#| msgid "The Instance group the job was run under"
msgid "The instance group where control plane tasks run"
msgstr "De instantiegroep waar control plane-taken worden uitgevoerd"

@@ -2637,7 +2621,7 @@ msgstr "Vault-URL (DNS-naam)"
#: awx/main/credential_plugins/dsv.py:23
#: awx/main/models/credential/__init__.py:895
msgid "Client ID"
msgstr "Client-id"
msgstr "Klant-ID"

#: awx/main/credential_plugins/azure_kv.py:29
#: awx/main/models/credential/__init__.py:902

@@ -2771,8 +2755,6 @@ msgid "The identifier for the secret e.g., /some/identifier"
msgstr "De identificatiecode voor het geheim, bijv. /some/identifier"

#: awx/main/credential_plugins/dsv.py:12
#, fuzzy
#| msgid "Tenant ID"
msgid "Tenant"
msgstr "Tenant"

@@ -2791,8 +2773,6 @@ msgid ""
msgstr "Het TLD van de tenant, bv. \"com\" wanneer de URL https://ex.secretservercloud.com is"

#: awx/main/credential_plugins/dsv.py:34
#, fuzzy
#| msgid "Secret Name"
msgid "Secret Path"
msgstr "Geheim pad"

@@ -2801,8 +2781,6 @@ msgid "The secret path e.g. /test/secret1"
msgstr "Het geheime pad, bv. /test/secret1"

#: awx/main/credential_plugins/dsv.py:46
#, fuzzy
#| msgid "Job Template"
msgid "URL template"
msgstr "URL-sjabloon"

@@ -2935,8 +2913,6 @@ msgid ""
msgstr "Geldige principes (gebruikersnamen of hostnamen) waarvoor het certificaat moet worden ondertekend."

#: awx/main/credential_plugins/tss.py:10
#, fuzzy
#| msgid "Auth Server URL"
msgid "Secret Server URL"
msgstr "Geheime-server-URL"

@@ -2947,8 +2923,6 @@ msgid ""
msgstr "De basis-URL van de geheime server, bv. https://myserver/SecretServer of https://mytenant.secretservercloud.com"

#: awx/main/credential_plugins/tss.py:17
#, fuzzy
#| msgid "Red Hat customer username"
msgid "The (Application) user username"
msgstr "De gebruikersnaam van de (applicatie) gebruiker"

@@ -2970,20 +2944,14 @@ msgid "The corresponding password"
msgstr "Het bijbehorende wachtwoord"

#: awx/main/credential_plugins/tss.py:31
#, fuzzy
#| msgid "Secret Key"
msgid "Secret ID"
msgstr "Geheime id"

#: awx/main/credential_plugins/tss.py:32
#, fuzzy
#| msgid "The name of the secret to look up."
msgid "The integer ID of the secret"
msgstr "De id van het geheim als geheel getal"

#: awx/main/credential_plugins/tss.py:37
#, fuzzy
#| msgid "Secret Key"
msgid "Secret Field"
msgstr "Geheim veld"

@@ -3543,22 +3511,14 @@ msgstr "Absoluut bestandspad naar het CA-bestand om te gebruiken (optioneel)"

#: awx/main/models/credential/__init__.py:1023
#: awx/main/models/credential/__init__.py:1029 awx/main/models/inventory.py:813
#, fuzzy
#| msgid "Gather data for Insights for Ansible Automation Platform"
msgid "Red Hat Ansible Automation Platform"
msgstr "Automatiseringsplatform voor Red Hat Ansible"

#: awx/main/models/credential/__init__.py:1031
#, fuzzy
#| msgid "The Ansible Tower base URL to authenticate with."
msgid "Red Hat Ansible Automation Platform base URL to authenticate with."
msgstr "De basis-URL van het automatiseringsplatform voor Red Hat Ansible voor authenticatie."

#: awx/main/models/credential/__init__.py:1038
#, fuzzy
#| msgid ""
#| "The Ansible Tower user to authenticate as.This should not be set if an "
#| "OAuth token is being used."
msgid ""
"Red Hat Ansible Automation Platform username id to authenticate as.This "
"should not be set if an OAuth token is being used."

@@ -4463,7 +4423,7 @@ msgstr "Het aantal seconden na uitvoering van de laatste projectupdate waarna ee
msgid ""
"Allow changing the SCM branch or revision in a job template that uses this "
"project."
msgstr "Wijzigen van de SCM-vertakking of de revisie toelaten in een taaksjabloon die gebruik maakt van dit project."
msgstr "Maak het mogelijk om de SCM-tak of de revisie te wijzigen in een taaksjabloon die gebruik maakt van dit project."

#: awx/main/models/projects.py:294
msgid "The last revision fetched by a project update"

@@ -4900,7 +4860,7 @@ msgid "Exception connecting to PagerDuty: {}"
msgstr "Uitzondering bij het maken van de verbinding met PagerDuty: {}"

#: awx/main/notifications/pagerduty_backend.py:87
#: awx/main/notifications/slack_backend.py:48
#: awx/main/notifications/slack_backend.py:49
#: awx/main/notifications/twilio_backend.py:47
msgid "Exception sending messages: {}"
msgstr "Uitzondering bij het verzenden van berichten: {}"

@@ -5170,8 +5130,7 @@ msgid ""
msgstr "Niet meer dan %(max_certs)d certificaten zijn toegestaan, %(cert_count)d geleverd."

#: awx/main/validators.py:289
#, fuzzy, python-brace-format
#| msgid "The container image to be used for execution."
#, python-brace-format
msgid "The container image name {value} is not valid"
msgstr "De naam van de containerafbeelding {value} is ongeldig"

@@ -6278,68 +6237,4 @@ msgstr "Er wordt momenteel een upgrade van%s geïnstalleerd."

#: awx/ui/urls.py:24
msgid "This page will refresh when complete."
msgstr "Deze pagina wordt vernieuwd als hij klaar is."

#~ msgid "SSLError while trying to connect to {}"
#~ msgstr "SSLError tijdens poging om verbinding te maken met {}"

#~ msgid "Request to {} timed out."
#~ msgstr "Er is een time-out opgetreden voor de aanvraag naar {}"

#~ msgid "Unknown exception {} while trying to GET {}"
#~ msgstr "Onbekende uitzondering {} tijdens poging tot OPHALEN {}"

#~ msgid ""
#~ "Unauthorized access. Please check your Insights Credential username and "
#~ "password."
#~ msgstr ""
#~ "Geen toegang. Controleer uw Insights Credential gebruikersnaam en "
#~ "wachtwoord."

#~ msgid ""
#~ "Failed to access the Insights API at URL {}. Server responded with {} "
#~ "status code and message {}"
#~ msgstr ""
#~ "Openen van Insights API via URL {} mislukt. Server reageerde met {} "
#~ "statuscode en de melding {}"

#~ msgid "Expected JSON response from Insights at URL {} but instead got {}"
#~ msgstr ""
#~ "Verwachte JSON-reactie van Insights via URL {}, maar in plaats daarvan {} "
#~ "verkregen."

#~ msgid ""
#~ "Could not translate Insights system ID {} into an Insights platform ID."
#~ msgstr ""
#~ "Omzetten van Insights systeem-ID {} naar een Insights platform-ID mislukt."

#~ msgid "This host is not recognized as an Insights host."
#~ msgstr "Deze host wordt niet herkend als een Insights-host."

#~ msgid "The Insights Credential for \"{}\" was not found."
#~ msgstr "De Insights-referentie voor ‘{}‘ is niet gevonden."

#~ msgid ""
#~ "The path to the secret stored in the secret backend e.g, /some/secret/"
#~ msgstr ""
#~ "De pad naar het geheim dat in de geheime back-end is opgeslagen, bijv. /"
#~ "some/secret/"

#~ msgid "Ansible Tower"
#~ msgstr "Ansible Tower"

#~ msgid "Ansible Tower Hostname"
#~ msgstr "Hostnaam Ansible Tower"

#~ msgid ""
#~ "Credentials to be used by hosts belonging to this inventory when "
#~ "accessing Red Hat Insights API."
#~ msgstr ""
#~ "Referenties die worden gebruikt door hosts die behoren tot deze "
#~ "inventaris bij toegang tot de Red Hat Insights API."

#~ msgid "Assignment not allowed for Smart Inventory"
#~ msgstr "Toewijzing niet toegestaan voor Smart-inventaris"

#~ msgid "Red Hat Insights host unique identifier."
#~ msgstr "Unieke id van Red Hat Insights-host."
awx/locale/translations/es/django.po (normal file, 6241 lines; diff suppressed because it is too large)
awx/locale/translations/es/messages.po (normal file, 10833 lines; diff suppressed because it is too large)
awx/locale/translations/fr/django.po (normal file, 6243 lines; diff suppressed because it is too large)
awx/locale/translations/fr/messages.po (normal file, 10713 lines; diff suppressed because it is too large)
awx/locale/translations/ja/django.po (normal file, 6240 lines; diff suppressed because it is too large)
awx/locale/translations/ja/messages.po (normal file, 10739 lines; diff suppressed because it is too large)
awx/locale/translations/ko/django.po (normal file, 6240 lines; diff suppressed because it is too large)
awx/locale/translations/ko/messages.po (normal file, 10700 lines; diff suppressed because it is too large)
awx/locale/translations/nl/django.po (normal file, 6241 lines; diff suppressed because it is too large)
awx/locale/translations/nl/messages.po (normal file, 10725 lines; diff suppressed because it is too large)
awx/locale/translations/zh/django.po (normal file, 6242 lines; diff suppressed because it is too large)
awx/locale/translations/zh/messages.po (normal file, 10698 lines; diff suppressed because it is too large)
File diff suppressed because it is too large
@@ -12,7 +12,7 @@ from django.conf import settings
from django.db.models import Q, Prefetch
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist

# Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied

@@ -281,13 +281,23 @@ class BaseAccess(object):
        """
        return True

    def assure_relationship_exists(self, obj, relationship):
        if '.' in relationship:
            return  # not attempting validation for complex relationships now
        try:
            obj._meta.get_field(relationship)
        except FieldDoesNotExist:
            raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}')

    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        self.assure_relationship_exists(obj, relationship)
        if skip_sub_obj_read_check:
            return self.can_change(obj, None)
        else:
            return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj))

    def can_unattach(self, obj, sub_obj, relationship, data=None):
        self.assure_relationship_exists(obj, relationship)
        return self.can_change(obj, data)
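The guard added here fails fast on a mistyped relationship name instead of letting the attach silently validate nothing. A self-contained sketch of the same idea, with a plain set standing in for Django's _meta field registry (the class below is illustrative, not AWX code):

# Illustrative stand-in for the _meta.get_field() check above.
class RelationshipGuard:
    def __init__(self, known_fields):
        self.known_fields = set(known_fields)

    def assure_relationship_exists(self, relationship):
        if '.' in relationship:
            return  # dotted relationships are skipped, as in the real method
        if relationship not in self.known_fields:
            raise NotImplementedError(f'The relationship {relationship} does not exist')


guard = RelationshipGuard(['credentials', 'labels', 'instance_groups'])
guard.assure_relationship_exists('credentials')        # silently passes
guard.assure_relationship_exists('credentials.owner')  # complex path, skipped
try:
    guard.assure_relationship_exists('credental')      # typo caught immediately
except NotImplementedError as exc:
    print(exc)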

    def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False):

@@ -328,6 +338,8 @@ class BaseAccess(object):
        role = getattr(resource, role_field, None)
        if role is None:
            # Handle special case where resource does not have direct roles
            if role_field == 'read_role':
                return self.user.can_access(type(resource), 'read', resource)
            access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
            return self.user.can_access(type(resource), access_method_type, resource, None)
        return self.user in role
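The new read_role branch covers resources whose read access is computed rather than stored as a direct role, falling back to can_access(..., 'read', ...) before the existing admin/execute dispatch. A runnable sketch of the dispatch order with plain data in place of the ORM (all names below are hypothetical):

# Hypothetical stand-in showing the dispatch order of the hunk above.
def resolve_access(role_field, direct_roles, can_read, allowed_actions):
    if role_field in direct_roles:      # resource carries the role directly
        return True
    if role_field == 'read_role':       # new special case: computed read access
        return can_read
    access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
    return access_method_type in allowed_actions

print(resolve_access('read_role', set(), True, set()))        # True via the new branch
print(resolve_access('admin_role', set(), False, {'change'}))  # True via 'change'
print(resolve_access('execute_role', set(), False, set()))     # False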

@@ -499,6 +511,21 @@ class BaseAccess(object):
        return False


class UnifiedCredentialsMixin(BaseAccess):
    """
    The credentials many-to-many is a standard relationship for JT, jobs, and others
    Permission to attach is always use permission, and permission to unattach is admin to the parent object
    """

    @check_superuser
    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if relationship == 'credentials':
            if not isinstance(sub_obj, Credential):
                raise RuntimeError(f'Can only attach credentials to credentials relationship, got {type(sub_obj)}')
            return self.can_change(obj, None) and (self.user in sub_obj.use_role)
        return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
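The mixin centralizes what several classes below previously hand-rolled: attaching to credentials needs change permission on the parent plus use_role on the credential, while unattaching (inherited from BaseAccess.can_unattach) only needs change on the parent. A minimal sketch of that asymmetry, with sets standing in for roles:

# Plain-Python sketch of the attach/unattach asymmetry described above.
def can_attach_credential(user, parent_admins, credential_users):
    # attach: change on the parent AND use_role on the credential
    return user in parent_admins and user in credential_users

def can_unattach_credential(user, parent_admins):
    # unattach: change on the parent is enough
    return user in parent_admins

print(can_attach_credential('alice', {'alice'}, {'alice'}))  # True
print(can_attach_credential('bob', {'bob'}, {'alice'}))      # False, lacks use_role
print(can_unattach_credential('bob', {'bob'}))               # True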


class NotificationAttachMixin(BaseAccess):
    """For models that can have notifications attached

@@ -552,7 +579,7 @@ class InstanceAccess(BaseAccess):
        return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, data=data)

    def can_add(self, data):
        return False
        return self.user.is_superuser

    def can_change(self, obj, data):
        return False

@@ -965,9 +993,6 @@ class HostAccess(BaseAccess):
        if data and 'name' in data:
            self.check_license(add_host_name=data['name'])

        # Check the per-org limit
        self.check_org_host_limit({'inventory': obj.inventory}, add_host_name=data['name'])

        # Checks for admin or change permission on inventory, controls whether
        # the user can edit variable data.
        return obj and self.user in obj.inventory.admin_role

@@ -1031,7 +1056,7 @@ class GroupAccess(BaseAccess):
        return bool(obj and self.user in obj.inventory.admin_role)


class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
    """
    I can see inventory sources whenever I can see their inventory.
    I can change inventory sources whenever I can change their inventory.

@@ -1075,18 +1100,6 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
            return self.user in obj.inventory.update_role
        return False

    @check_superuser
    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if relationship == 'credentials' and isinstance(sub_obj, Credential):
            return obj and obj.inventory and self.user in obj.inventory.admin_role and self.user in sub_obj.use_role
        return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)

    @check_superuser
    def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
        if relationship == 'credentials' and isinstance(sub_obj, Credential):
            return obj and obj.inventory and self.user in obj.inventory.admin_role
        return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)


class InventoryUpdateAccess(BaseAccess):
    """

@@ -1485,7 +1498,7 @@ class ProjectUpdateAccess(BaseAccess):
        return obj and self.user in obj.project.admin_role


class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
    """
    I can see job templates when:
    - I have read role for the job template.

@@ -1549,8 +1562,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
        if self.user not in inventory.use_role:
            return False

        ee = get_value(ExecutionEnvironment, 'execution_environment')
        if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
        if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
            return False

        project = get_value(Project, 'project')

@@ -1600,10 +1612,8 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
        if self.changes_are_non_sensitive(obj, data):
            return True

        if data.get('execution_environment'):
            ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
            if not self.user.can_access(ExecutionEnvironment, 'read', ee):
                return False
        if not self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'):
            return False

        for required_field, cls in (('inventory', Inventory), ('project', Project)):
            is_mandatory = True

@@ -1667,17 +1677,13 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
            if not obj.organization:
                return False
            return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
        if relationship == 'credentials' and isinstance(sub_obj, Credential):
            return self.user in obj.admin_role and self.user in sub_obj.use_role
        return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)

    @check_superuser
    def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
        if relationship == "instance_groups":
            return self.can_attach(obj, sub_obj, relationship, *args, **kwargs)
        if relationship == 'credentials' and isinstance(sub_obj, Credential):
            return self.user in obj.admin_role
        return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
        return super(JobTemplateAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs)


class JobAccess(BaseAccess):

@@ -1824,7 +1830,7 @@ class SystemJobAccess(BaseAccess):
        return False  # no relaunching of system jobs


class JobLaunchConfigAccess(BaseAccess):
class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
    """
    Launch configs must have permissions checked for
    - relaunching

@@ -1832,63 +1838,69 @@

    In order to create a new object with a copy of this launch config, I need:
    - use access to related inventory (if present)
    - read access to Execution Environment (if present), unless the specified ee is already in the template
    - use role to many-related credentials (if any present)
    - read access to many-related labels (if any present), unless the specified label is already in the template
    - read access to many-related instance groups (if any present), unless the specified instance group is already in the template
    """

    model = JobLaunchConfig
    select_related = 'job'
    prefetch_related = ('credentials', 'inventory')

    def _unusable_creds_exist(self, qs):
        return qs.exclude(pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')).exists()
    M2M_CHECKS = {'credentials': Credential, 'labels': Label, 'instance_groups': InstanceGroup}

    def has_credentials_access(self, obj):
        # user has access if no related credentials exist that the user lacks use role for
        return not self._unusable_creds_exist(obj.credentials)
    def _related_filtered_queryset(self, cls):
        if cls is Label:
            return LabelAccess(self.user).filtered_queryset()
        elif cls is InstanceGroup:
            return InstanceGroupAccess(self.user).filtered_queryset()
        else:
            return cls._accessible_pk_qs(cls, self.user, 'use_role')

    def has_obj_m2m_access(self, obj):
        for relationship, cls in self.M2M_CHECKS.items():
            if getattr(obj, relationship).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
                return False
        return True
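has_obj_m2m_access grants access only when nothing related to the object falls outside the user's accessible set for that relationship. The same screen expressed with sets instead of querysets (a sketch, not the real exclude-based query):

# Set-based sketch of the queryset screen above.
M2M_CHECKS = ('credentials', 'labels', 'instance_groups')

def has_obj_m2m_access(related_by_rel, accessible_by_rel):
    for relationship in M2M_CHECKS:
        related = related_by_rel.get(relationship, set())
        if related - accessible_by_rel.get(relationship, set()):
            return False  # something related is outside the accessible set
    return True

obj = {'credentials': {1, 2}, 'labels': {7}}
accessible = {'credentials': {1, 2, 3}, 'labels': {7}, 'instance_groups': set()}
print(has_obj_m2m_access(obj, accessible))   # True
accessible['credentials'] = {1}
print(has_obj_m2m_access(obj, accessible))   # False, credential 2 is not usable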

    @check_superuser
    def can_add(self, data, template=None):
        # This is a special case, we don't check related many-to-many elsewhere
        # launch RBAC checks use this
        if 'credentials' in data and data['credentials'] or 'reference_obj' in data:
            if 'reference_obj' in data:
                prompted_cred_qs = data['reference_obj'].credentials.all()
            else:
                # If given model objects, only use the primary key from them
                cred_pks = [cred.pk for cred in data['credentials']]
                if template:
                    for cred in template.credentials.all():
                        if cred.pk in cred_pks:
                            cred_pks.remove(cred.pk)
                prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
            if self._unusable_creds_exist(prompted_cred_qs):
        if 'reference_obj' in data:
            if not self.has_obj_m2m_access(data['reference_obj']):
                return False
            return self.check_related('inventory', Inventory, data, role_field='use_role')
        else:
            for relationship, cls in self.M2M_CHECKS.items():
                if relationship in data and data[relationship]:
                    # If given model objects, only use the primary key from them
                    sub_obj_pks = [sub_obj.pk for sub_obj in data[relationship]]
                    if template:
                        for sub_obj in getattr(template, relationship).all():
                            if sub_obj.pk in sub_obj_pks:
                                sub_obj_pks.remove(sub_obj.pk)
                    if cls.objects.filter(pk__in=sub_obj_pks).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
                        return False
        return self.check_related('inventory', Inventory, data, role_field='use_role') and self.check_related(
            'execution_environment', ExecutionEnvironment, data, role_field='read_role'
        )
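The trickiest part of can_add is the dedup pass: prompted sub-objects that are already attached to the template are exempt, so only genuinely new prompts face the access check. A worked sketch of that exemption:

# Worked sketch of the template-exemption loop above.
def pks_requiring_check(prompted_pks, template_pks):
    pks = list(prompted_pks)
    for pk in template_pks:
        if pk in pks:
            pks.remove(pk)  # already on the template, no new grant implied
    return pks

template_credentials = [10, 11]   # already attached to the job template
prompted_credentials = [11, 12]   # supplied at launch time
print(pks_requiring_check(prompted_credentials, template_credentials))  # [12]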

    @check_superuser
    def can_use(self, obj):
        return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj)
        return (
            self.has_obj_m2m_access(obj)
            and self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True)
            and self.check_related('execution_environment', ExecutionEnvironment, {}, obj=obj, role_field='read_role')
        )

    def can_change(self, obj, data):
        return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role')

    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if isinstance(sub_obj, Credential) and relationship == 'credentials':
            return self.user in sub_obj.use_role
        else:
            raise NotImplementedError('Only credentials can be attached to launch configurations.')

    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if isinstance(sub_obj, Credential) and relationship == 'credentials':
            if skip_sub_obj_read_check:
                return True
            else:
                return self.user in sub_obj.read_role
        else:
            raise NotImplementedError('Only credentials can be attached to launch configurations.')
        return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') and self.check_related(
            'execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'
        )


class WorkflowJobTemplateNodeAccess(BaseAccess):
class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
    """
    I can see/use a WorkflowJobTemplateNode if I have read permission
    to associated Workflow Job Template

@@ -1911,7 +1923,7 @@
    """

    model = WorkflowJobTemplateNode
    prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'credentials', 'workflow_job_template')
    prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'workflow_job_template')

    def filtered_queryset(self):
        return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role'))

@@ -1923,7 +1935,8 @@
        return (
            self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True)
            and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role')
            and JobLaunchConfigAccess(self.user).can_add(data)
            and self.check_related('inventory', Inventory, data, role_field='use_role')
            and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
        )

    def wfjt_admin(self, obj):

@@ -1932,17 +1945,14 @@
        else:
            return self.user in obj.workflow_job_template.admin_role

    def ujt_execute(self, obj):
    def ujt_execute(self, obj, data=None):
        if not obj.unified_job_template:
            return True
        return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj, role_field='execute_role', mandatory=True)
        return self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True)

    def can_change(self, obj, data):
        if not data:
            return True

        # should not be able to edit the prompts if lacking access to UJT or WFJT
        return self.ujt_execute(obj) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)
        return self.ujt_execute(obj, data=data) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)

    def can_delete(self, obj):
        return self.wfjt_admin(obj)

@@ -1955,29 +1965,14 @@
        return True

    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if not self.wfjt_admin(obj):
            return False
        if relationship == 'credentials':
            # Need permission to related template to attach a credential
            if not self.ujt_execute(obj):
                return False
            return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
        elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
            return self.check_same_WFJT(obj, sub_obj)
        else:
            raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
        if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
            return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj)
        return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)

    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if not self.wfjt_admin(obj):
            return False
        if relationship == 'credentials':
            if not self.ujt_execute(obj):
                return False
            return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
        elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
            return self.check_same_WFJT(obj, sub_obj)
        else:
            raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
    def can_unattach(self, obj, sub_obj, relationship, data=None):
        if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
            return self.wfjt_admin(obj)
        return super().can_unattach(obj, sub_obj, relationship, data=None)
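After the rewrite, only the workflow graph edges keep bespoke handling: linking success/failure/always nodes requires workflow admin plus both nodes belonging to the same workflow, and every other relationship defers to UnifiedCredentialsMixin. A compact sketch of the edge rule (dicts stand in for node models):

# Sketch of the graph-edge rule above.
GRAPH_EDGES = ('success_nodes', 'failure_nodes', 'always_nodes')

def can_link_nodes(user, relationship, node, sub_node, wfjt_admins):
    if relationship in GRAPH_EDGES:
        return user in wfjt_admins and node['wfjt_id'] == sub_node['wfjt_id']
    return None  # in the real class this falls through to the mixin

a, b, c = {'wfjt_id': 5}, {'wfjt_id': 5}, {'wfjt_id': 6}
print(can_link_nodes('alice', 'success_nodes', a, b, {'alice'}))  # True
print(can_link_nodes('alice', 'always_nodes', a, c, {'alice'}))   # False, other workflow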


class WorkflowJobNodeAccess(BaseAccess):

@@ -2052,13 +2047,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
        if not data:  # So the browseable API will work
            return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()

        if data.get('execution_environment'):
            ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
            if not self.user.can_access(ExecutionEnvironment, 'read', ee):
                return False

        return self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and self.check_related(
            'inventory', Inventory, data, role_field='use_role'
        return bool(
            self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True)
            and self.check_related('inventory', Inventory, data, role_field='use_role')
            and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
        )

    def can_copy(self, obj):

@@ -2104,14 +2096,10 @@
        if self.user.is_superuser:
            return True

        if data and data.get('execution_environment'):
            ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
            if not self.user.can_access(ExecutionEnvironment, 'read', ee):
                return False

        return (
            self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj)
            and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj)
            and self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role')
            and self.user in obj.admin_role
        )

@@ -2518,7 +2506,7 @@ class UnifiedJobAccess(BaseAccess):
        return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True)


class ScheduleAccess(BaseAccess):
class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
    """
    I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access
    """

@@ -2559,12 +2547,6 @@
    def can_delete(self, obj):
        return self.can_change(obj, {})

    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)

    def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)


class NotificationTemplateAccess(BaseAccess):
    """
@@ -12,12 +12,11 @@ from django.contrib.sessions.models import Session
from django.utils.timezone import now, timedelta
from django.utils.translation import gettext_lazy as _

from psycopg2.errors import UntranslatableCharacter

from awx.conf.license import get_license
from awx.main.utils import get_awx_version, camelcase_to_underscore, datetime_hook
from awx.main import models
from awx.main.analytics import register
from awx.main.scheduler.task_manager_models import TaskManagerInstances

"""
This module is used to define metrics collected by awx.main.analytics.gather()

@@ -84,7 +83,7 @@ def _identify_lower(key, since, until, last_gather):
    return lower, last_entries


@register('config', '1.3', description=_('General platform configuration.'))
@register('config', '1.4', description=_('General platform configuration.'))
def config(since, **kwargs):
    license_info = get_license()
    install_type = 'traditional'

@@ -104,6 +103,22 @@ def config(since, **kwargs):
        'tower_url_base': settings.TOWER_URL_BASE,
        'tower_version': get_awx_version(),
        'license_type': license_info.get('license_type', 'UNLICENSED'),
        'license_date': license_info.get('license_date'),
        'subscription_name': license_info.get('subscription_name'),
        'sku': license_info.get('sku'),
        'support_level': license_info.get('support_level'),
        'product_name': license_info.get('product_name'),
        'valid_key': license_info.get('valid_key'),
        'satellite': license_info.get('satellite'),
        'pool_id': license_info.get('pool_id'),
        'current_instances': license_info.get('current_instances'),
        'automated_instances': license_info.get('automated_instances'),
        'automated_since': license_info.get('automated_since'),
        'trial': license_info.get('trial'),
        'grace_period_remaining': license_info.get('grace_period_remaining'),
        'compliant': license_info.get('compliant'),
        'date_warning': license_info.get('date_warning'),
        'date_expired': license_info.get('date_expired'),
        'free_instances': license_info.get('free_instances', 0),
        'total_licensed_instances': license_info.get('instance_count', 0),
        'license_expiry': license_info.get('time_remaining', 0),

@@ -115,7 +130,7 @@ def config(since, **kwargs):
    }
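Note the pattern: extending the config payload with new license fields comes with a version bump from '1.3' to '1.4' on the @register decorator, so downstream consumers can key their parsing off the schema version. An illustrative registry with that shape (the decorator below is a stand-in, not the real awx.main.analytics.register):

# Illustrative stand-in for the @register versioning pattern above.
COLLECTORS = {}

def register(key, version, description='', **kwargs):
    def decorate(func):
        COLLECTORS[key] = {'version': version, 'func': func, 'description': description}
        return func
    return decorate

@register('config', '1.4', description='General platform configuration.')
def config(since, **kwargs):
    # fields new in 1.4, such as 'license_date' and 'sku', ride along here
    return {'license_type': 'UNLICENSED', 'license_date': None, 'sku': None}

print(COLLECTORS['config']['version'])  # consumers branch on '1.3' vs '1.4'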


@register('counts', '1.1', description=_('Counts of objects such as organizations, inventories, and projects'))
@register('counts', '1.2', description=_('Counts of objects such as organizations, inventories, and projects'))
def counts(since, **kwargs):
    counts = {}
    for cls in (

@@ -158,6 +173,13 @@ def counts(since, **kwargs):
        .count()
    )
    counts['pending_jobs'] = models.UnifiedJob.objects.exclude(launch_type='sync').filter(status__in=('pending',)).count()
    if connection.vendor == 'postgresql':
        with connection.cursor() as cursor:
            cursor.execute(f"select count(*) from pg_stat_activity where datname=\'{connection.settings_dict['NAME']}\'")
            counts['database_connections'] = cursor.fetchone()[0]
    else:
        # We should be using postgresql, but if that ever changes we should change the below value
        counts['database_connections'] = 1
    return counts
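The connection count comes straight from pg_stat_activity, filtered to the configured database. For illustration, the same query with a bound parameter rather than f-string interpolation (an alternative sketch, not the committed code):

# Sketch: equivalent count with a bound parameter. Assumes a configured Django
# PostgreSQL connection, matching the vendor check above.
from django.db import connection

def database_connections():
    if connection.vendor != 'postgresql':
        return 1  # mirrors the non-postgres fallback above
    with connection.cursor() as cursor:
        cursor.execute(
            "SELECT count(*) FROM pg_stat_activity WHERE datname = %s",
            [connection.settings_dict['NAME']],
        )
        return cursor.fetchone()[0]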


@@ -214,25 +236,25 @@ def projects_by_scm_type(since, **kwargs):
@register('instance_info', '1.2', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs):
    info = {}
    instances = models.Instance.objects.values_list('hostname').values(
        'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'enabled'
    )
    for instance in instances:
        consumed_capacity = sum(x.task_impact for x in models.UnifiedJob.objects.filter(execution_node=instance['hostname'], status__in=('running', 'waiting')))
    # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
    active_tasks = models.UnifiedJob.objects.filter(status__in=['running', 'waiting']).only('task_impact', 'controller_node', 'execution_node')
    tm_instances = TaskManagerInstances(active_tasks, instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
    for tm_instance in tm_instances.instances_by_hostname.values():
        instance = tm_instance.obj
        instance_info = {
            'uuid': instance['uuid'],
            'version': instance['version'],
            'capacity': instance['capacity'],
            'cpu': instance['cpu'],
            'memory': instance['memory'],
            'managed_by_policy': instance['managed_by_policy'],
            'enabled': instance['enabled'],
            'consumed_capacity': consumed_capacity,
            'remaining_capacity': instance['capacity'] - consumed_capacity,
            'uuid': instance.uuid,
            'version': instance.version,
            'capacity': instance.capacity,
            'cpu': instance.cpu,
            'memory': instance.memory,
            'managed_by_policy': instance.managed_by_policy,
            'enabled': instance.enabled,
            'consumed_capacity': tm_instance.consumed_capacity,
            'remaining_capacity': instance.capacity - tm_instance.consumed_capacity,
        }
        if include_hostnames is True:
            instance_info['hostname'] = instance['hostname']
        info[instance['uuid']] = instance_info
            instance_info['hostname'] = instance.hostname
        info[instance.uuid] = instance_info
    return info
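The rewrite replaces one UnifiedJob query per instance with a single pass over all active tasks, the same trick the task manager uses. The aggregation itself reduces to a grouped sum, sketched here without the ORM:

# Self-contained sketch of the one-pass capacity aggregation above.
from collections import defaultdict

active_tasks = [
    {'execution_node': 'node1', 'task_impact': 3},
    {'execution_node': 'node1', 'task_impact': 2},
    {'execution_node': 'node2', 'task_impact': 5},
]

consumed = defaultdict(int)
for task in active_tasks:
    consumed[task['execution_node']] += task['task_impact']  # one scan, all nodes

capacity = {'node1': 10, 'node2': 8}
remaining = {node: cap - consumed[node] for node, cap in capacity.items()}
print(dict(consumed))  # {'node1': 5, 'node2': 5}
print(remaining)       # {'node1': 5, 'node2': 3}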


@@ -338,6 +360,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
                {tbl}.event,
                task_action,
                resolved_action,
                resolved_role,
                -- '-' operator listed here:
                -- https://www.postgresql.org/docs/12/functions-json.html
                -- note that operator is only supported by jsonb objects

@@ -357,27 +380,24 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
                x.duration AS duration,
                x.res->'warnings' AS warnings,
                x.res->'deprecations' AS deprecations
            FROM {tbl}, jsonb_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "resolved_action" text, "start" text, "end" text)
            FROM {tbl}, jsonb_to_record({event_data}) AS x("res" json, "duration" text, "task_action" text, "resolved_action" text, "resolved_role" text, "start" text, "end" text)
            WHERE ({tbl}.{where_column} > '{since.isoformat()}' AND {tbl}.{where_column} <= '{until.isoformat()}')) TO STDOUT WITH CSV HEADER'''
        return query

    try:
        return _copy_table(table='events', query=query(f"{tbl}.event_data::jsonb"), path=full_path)
    except UntranslatableCharacter:
        return _copy_table(table='events', query=query(f"replace({tbl}.event_data::text, '\\u0000', '')::jsonb"), path=full_path)
    return _copy_table(table='events', query=query(fr"replace({tbl}.event_data, '\u', '\u005cu')::jsonb"), path=full_path)
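The old fallback only scrubbed \u0000; the new query rewrites every \u in the raw event text to \u005cu before the jsonb cast. Since JSON decodes \u005c to a backslash, each \uXXXX escape survives as literal text instead of becoming a possibly untranslatable code point. The equivalent transformation in Python:

# Sketch of the SQL replace() above, applied in Python for clarity.
import json

raw = r'{"stdout": "bad escape \u0000 here"}'   # \u0000 would be rejected as jsonb
neutralized = raw.replace('\\u', '\\u005cu')    # \uXXXX becomes the literal text \uXXXX
print(json.loads(neutralized)['stdout'])        # bad escape \u0000 here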
|
||||
|
||||
|
||||
-@register('events_table', '1.4', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
+@register('events_table', '1.5', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
 def events_table_unpartitioned(since, full_path, until, **kwargs):
     return _events_table(since, full_path, until, '_unpartitioned_main_jobevent', 'created', **kwargs)


-@register('events_table', '1.4', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
+@register('events_table', '1.5', format='csv', description=_('Automation task records'), expensive=four_hour_slicing)
 def events_table_partitioned_modified(since, full_path, until, **kwargs):
     return _events_table(since, full_path, until, 'main_jobevent', 'modified', project_job_created=True, **kwargs)
-@register('unified_jobs_table', '1.3', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
+@register('unified_jobs_table', '1.4', format='csv', description=_('Data on jobs run'), expensive=four_hour_slicing)
 def unified_jobs_table(since, full_path, until, **kwargs):
     unified_job_query = '''COPY (SELECT main_unifiedjob.id,
                  main_unifiedjob.polymorphic_ctype_id,

@@ -403,7 +423,8 @@ def unified_jobs_table(since, full_path, until, **kwargs):
                  main_unifiedjob.job_explanation,
                  main_unifiedjob.instance_group_id,
                  main_unifiedjob.installed_collections,
-                 main_unifiedjob.ansible_version
+                 main_unifiedjob.ansible_version,
+                 main_job.forks
                  FROM main_unifiedjob
                  JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
+                 LEFT JOIN main_job ON main_unifiedjob.id = main_job.unifiedjob_ptr_id
@@ -177,7 +177,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti

     if collection_type != 'dry-run':
         if not settings.INSIGHTS_TRACKING_STATE:
-            logger.log(log_level, "Insights for Ansible Automation Platform not enabled. Use --dry-run to gather locally without sending.")
+            logger.log(log_level, "Automation Analytics not enabled. Use --dry-run to gather locally without sending.")
             return None

         if not (settings.AUTOMATION_ANALYTICS_URL and settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD):

@@ -332,10 +332,10 @@ def ship(path):
     Ship gathered metrics to the Insights API
     """
     if not path:
-        logger.error('Insights for Ansible Automation Platform TAR not found')
+        logger.error('Automation Analytics TAR not found')
         return False
     if not os.path.exists(path):
-        logger.error('Insights for Ansible Automation Platform TAR {} not found'.format(path))
+        logger.error('Automation Analytics TAR {} not found'.format(path))
         return False
     if "Error:" in str(path):
         return False
@@ -3,6 +3,7 @@ from prometheus_client import CollectorRegistry, Gauge, Info, generate_latest

 from awx.conf.license import get_license
 from awx.main.utils import get_awx_version
+from awx.main.models import UnifiedJob
 from awx.main.analytics.collectors import (
     counts,
     instance_info,

@@ -126,6 +127,8 @@ def metrics():
     LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
     LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)

+    DATABASE_CONNECTIONS = Gauge('awx_database_connections_total', 'Number of connections to database', registry=REGISTRY)
+
     license_info = get_license()
     SYSTEM_INFO.info(
         {

@@ -163,10 +166,13 @@ def metrics():
     USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
     USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])

+    DATABASE_CONNECTIONS.set(current_counts['database_connections'])
+
     all_job_data = job_counts(None)
     statuses = all_job_data.get('status', {})
-    for status, value in statuses.items():
-        STATUS.labels(status=status).set(value)
+    states = set(dict(UnifiedJob.STATUS_CHOICES).keys()) - set(['new'])
+    for state in states:
+        STATUS.labels(status=state).set(statuses.get(state, 0))

     RUNNING_JOBS.set(current_counts['running_jobs'])
     PENDING_JOBS.set(current_counts['pending_jobs'])
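The switch from iterating observed statuses to iterating UnifiedJob.STATUS_CHOICES means every status label is always emitted, defaulting to 0, so scrapers see a stable label set even when no job is currently in a given state. A minimal standalone illustration with prometheus_client (not AWX code; metric and state names are illustrative):

    from prometheus_client import CollectorRegistry, Gauge, generate_latest

    registry = CollectorRegistry()
    status = Gauge('awx_status_total', 'Job counts by status', ['status'], registry=registry)

    observed = {'successful': 12}  # counts actually found in the db
    for state in ('successful', 'failed', 'canceled'):  # full set of possible states
        status.labels(status=state).set(observed.get(state, 0))

    print(generate_latest(registry).decode())  # failed/canceled appear as 0 instead of disappearing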
@@ -8,7 +8,7 @@ from django.apps import apps
 from awx.main.consumers import emit_channel_notification

 root_key = 'awx_metrics'
-logger = logging.getLogger('awx.main.wsbroadcast')
+logger = logging.getLogger('awx.main.analytics')
 class BaseM:
@@ -16,16 +16,22 @@ class BaseM:
         self.field = field
         self.help_text = help_text
         self.current_value = 0
+        self.metric_has_changed = False

-    def clear_value(self, conn):
+    def reset_value(self, conn):
         conn.hset(root_key, self.field, 0)
         self.current_value = 0

     def inc(self, value):
         self.current_value += value
+        self.metric_has_changed = True

     def set(self, value):
         self.current_value = value
+        self.metric_has_changed = True

+    def get(self):
+        return self.current_value
+
     def decode(self, conn):
         value = conn.hget(root_key, self.field)

@@ -34,7 +40,9 @@ class BaseM:
     def to_prometheus(self, instance_data):
         output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
         for instance in instance_data:
-            output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
+            if self.field in instance_data[instance]:
+                # on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
+                output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
         return output_text
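For a metric recorded on two nodes, to_prometheus renders the standard Prometheus text exposition format, e.g. (illustrative output, hostnames and values invented):

    # HELP callback_receiver_events_insert_db Number of events batch inserted into database
    # TYPE callback_receiver_events_insert_db gauge
    callback_receiver_events_insert_db{node="awx-1"} 420
    callback_receiver_events_insert_db{node="awx-2"} 17

The new membership guard simply skips a node whose stored hash predates the metric, instead of raising KeyError during an upgrade.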
@@ -46,8 +54,10 @@ class FloatM(BaseM):
         return 0.0

     def store_value(self, conn):
-        conn.hincrbyfloat(root_key, self.field, self.current_value)
-        self.current_value = 0
+        if self.metric_has_changed:
+            conn.hincrbyfloat(root_key, self.field, self.current_value)
+            self.current_value = 0
+            self.metric_has_changed = False


 class IntM(BaseM):
@@ -58,8 +68,10 @@ class IntM(BaseM):
         return 0

     def store_value(self, conn):
-        conn.hincrby(root_key, self.field, self.current_value)
-        self.current_value = 0
+        if self.metric_has_changed:
+            conn.hincrby(root_key, self.field, self.current_value)
+            self.current_value = 0
+            self.metric_has_changed = False


 class SetIntM(BaseM):
@@ -70,10 +82,9 @@ class SetIntM(BaseM):
         return 0

     def store_value(self, conn):
-        # do not set value if it has not changed since last time this was called
-        if self.current_value is not None:
+        if self.metric_has_changed:
             conn.hset(root_key, self.field, self.current_value)
-            self.current_value = None
+            self.metric_has_changed = False


 class SetFloatM(SetIntM):
@@ -94,13 +105,13 @@ class HistogramM(BaseM):
         self.sum = IntM(field + '_sum', '')
         super(HistogramM, self).__init__(field, help_text)

-    def clear_value(self, conn):
+    def reset_value(self, conn):
         conn.hset(root_key, self.field, 0)
-        self.inf.clear_value(conn)
-        self.sum.clear_value(conn)
+        self.inf.reset_value(conn)
+        self.sum.reset_value(conn)
         for b in self.buckets_to_keys.values():
-            b.clear_value(conn)
-        super(HistogramM, self).clear_value(conn)
+            b.reset_value(conn)
+        super(HistogramM, self).reset_value(conn)

     def observe(self, value):
         for b in self.buckets:
@@ -136,7 +147,7 @@ class HistogramM(BaseM):


 class Metrics:
-    def __init__(self, auto_pipe_execute=True, instance_name=None):
+    def __init__(self, auto_pipe_execute=False, instance_name=None):
         self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
         self.conn = redis.Redis.from_url(settings.BROKER_URL)
         self.last_pipe_execute = time.time()

@@ -152,8 +163,10 @@ class Metrics:
         Instance = apps.get_model('main', 'Instance')
         if instance_name:
             self.instance_name = instance_name
+        elif settings.IS_TESTING():
+            self.instance_name = "awx_testing"
         else:
-            self.instance_name = Instance.objects.me().hostname
+            self.instance_name = Instance.objects.my_hostname()

         # metric name, help_text
         METRICSLIST = [
@@ -161,15 +174,39 @@ class Metrics:
             IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
             IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
             IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
-            FloatM('callback_receiver_events_insert_db_seconds', 'Time spent saving events to database'),
+            FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
             IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
+            IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
             HistogramM(
                 'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
             ),
+            SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
             FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
             IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
             FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
+            SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
+            SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
+            SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
+            SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
+            SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetIntM('task_manager_tasks_started', 'Number of tasks started'),
+            SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
+            SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
+            SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
+            SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
+            SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
+            SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
+            SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
+            SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+            IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+            SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+            SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
+            SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
         ]
         # turn metric list into dictionary with the metric name as a key
         self.METRICS = {}
@@ -179,29 +216,39 @@ class Metrics:
         # track last time metrics were sent to other nodes
         self.previous_send_metrics = SetFloatM('send_metrics_time', 'Timestamp of previous send_metrics call')

-    def clear_values(self):
+    def reset_values(self):
         # intended to be called once on app startup to reset all metric
         # values to 0
         for m in self.METRICS.values():
-            m.clear_value(self.conn)
+            m.reset_value(self.conn)
+        self.metrics_have_changed = True
+        self.conn.delete(root_key + "_lock")
+        for m in self.conn.scan_iter(root_key + '_instance_*'):
+            self.conn.delete(m)

     def inc(self, field, value):
         if value != 0:
             self.METRICS[field].inc(value)
             self.metrics_have_changed = True
-            if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
+            if self.auto_pipe_execute is True:
                 self.pipe_execute()

     def set(self, field, value):
         self.METRICS[field].set(value)
         self.metrics_have_changed = True
-        if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
+        if self.auto_pipe_execute is True:
             self.pipe_execute()

+    def get(self, field):
+        return self.METRICS[field].get()
+
     def decode(self, field):
         return self.METRICS[field].decode(self.conn)

     def observe(self, field, value):
         self.METRICS[field].observe(value)
         self.metrics_have_changed = True
-        if self.auto_pipe_execute is True and self.should_pipe_execute() is True:
+        if self.auto_pipe_execute is True:
            self.pipe_execute()

     def serialize_local_metrics(self):
@@ -249,8 +296,8 @@ class Metrics:

     def send_metrics(self):
-        # more than one thread could be calling this at the same time, so should
-        # get acquire redis lock before sending metrics
-        lock = self.conn.lock(root_key + '_lock', thread_local=False)
+        # acquire redis lock before sending metrics
+        lock = self.conn.lock(root_key + '_lock')
         if not lock.acquire(blocking=False):
             return
         try:

@@ -266,7 +313,12 @@ class Metrics:
             self.previous_send_metrics.set(current_time)
             self.previous_send_metrics.store_value(self.conn)
         finally:
-            lock.release()
+            try:
+                lock.release()
+            except Exception as exc:
+                # After system failures, we might throw redis.exceptions.LockNotOwnedError
+                # this is to avoid print a Traceback, and importantly, avoid raising an exception into parent context
+                logger.warning(f'Error releasing subsystem metrics redis lock, error: {str(exc)}')
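A minimal standalone sketch of the non-blocking lock pattern used above, including the tolerant release (standard redis-py API; the key name is illustrative):

    import redis

    conn = redis.Redis()
    lock = conn.lock('awx_metrics_lock')  # illustrative key
    if lock.acquire(blocking=False):  # another node is already sending? skip this cycle
        try:
            ...  # send metrics
        finally:
            try:
                lock.release()
            except redis.exceptions.LockError:
                # lock expired or ownership was lost after a stall; log and move on
                pass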
     def load_other_metrics(self, request):
         # data received from other nodes are stored in their own keys
@@ -112,7 +112,7 @@ register(
     encrypted=False,
     read_only=False,
     label=_('Red Hat customer username'),
-    help_text=_('This username is used to send data to Insights for Ansible Automation Platform'),
+    help_text=_('This username is used to send data to Automation Analytics'),
     category=_('System'),
     category_slug='system',
 )

@@ -125,7 +125,7 @@ register(
     encrypted=True,
     read_only=False,
     label=_('Red Hat customer password'),
-    help_text=_('This password is used to send data to Insights for Ansible Automation Platform'),
+    help_text=_('This password is used to send data to Automation Analytics'),
     category=_('System'),
     category_slug='system',
 )

@@ -162,8 +162,8 @@ register(
     default='https://example.com',
     schemes=('http', 'https'),
     allow_plain_hostname=True,  # Allow hostname only without TLD.
-    label=_('Insights for Ansible Automation Platform upload URL'),
-    help_text=_('This setting is used to to configure the upload URL for data collection for Red Hat Insights.'),
+    label=_('Automation Analytics upload URL'),
+    help_text=_('This setting is used to to configure the upload URL for data collection for Automation Analytics.'),
     category=_('System'),
     category_slug='system',
 )

@@ -282,12 +282,25 @@ register(
     placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
 )

+register(
+    'GALAXY_TASK_ENV',
+    field_class=fields.KeyValueField,
+    label=_('Environment Variables for Galaxy Commands'),
+    help_text=_(
+        'Additional environment variables set for invocations of ansible-galaxy within project updates. '
+        'Useful if you must use a proxy server for ansible-galaxy but not git.'
+    ),
+    category=_('Jobs'),
+    category_slug='jobs',
+    placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
+)
+
 register(
     'INSIGHTS_TRACKING_STATE',
     field_class=fields.BooleanField,
     default=False,
-    label=_('Gather data for Insights for Ansible Automation Platform'),
-    help_text=_('Enables the service to gather data on automation and send it to Red Hat Insights.'),
+    label=_('Gather data for Automation Analytics'),
+    help_text=_('Enables the service to gather data on automation and send it to Automation Analytics.'),
     category=_('System'),
     category_slug='system',
 )

@@ -433,7 +446,7 @@ register(
     label=_('Default Job Idle Timeout'),
     help_text=_(
         'If no output is detected from ansible in this number of seconds the execution will be terminated. '
-        'Use value of 0 to used default idle_timeout is 600s.'
+        'Use value of 0 to indicate that no idle timeout should be imposed.'
     ),
     category=_('Jobs'),
     category_slug='jobs',

@@ -714,7 +727,7 @@ register(
 register(
     'AUTOMATION_ANALYTICS_LAST_GATHER',
     field_class=fields.DateTimeField,
-    label=_('Last gather date for Insights for Ansible Automation Platform.'),
+    label=_('Last gather date for Automation Analytics.'),
     allow_null=True,
     category=_('System'),
     category_slug='system',

@@ -722,7 +735,7 @@ register(
 register(
     'AUTOMATION_ANALYTICS_LAST_ENTRIES',
     field_class=fields.CharField,
-    label=_('Last gathered entries from the data collection service of Insights for Ansible Automation Platform'),
+    label=_('Last gathered entries from the data collection service of Automation Analytics'),
     default='',
     allow_blank=True,
     category=_('System'),

@@ -733,7 +746,7 @@ register(
 register(
     'AUTOMATION_ANALYTICS_GATHER_INTERVAL',
     field_class=fields.IntegerField,
-    label=_('Insights for Ansible Automation Platform Gather Interval'),
+    label=_('Automation Analytics Gather Interval'),
     help_text=_('Interval (in seconds) between data gathering.'),
     default=14400,  # every 4 hours
     min_value=1800,  # every 30 minutes
@@ -100,3 +100,9 @@ JOB_VARIABLE_PREFIXES = [
     'awx',
     'tower',
 ]
+
+# Note, the \u001b[... are ansi color codes. We don't currenly import any of the python modules which define the codes.
+# Importing a library just for this message seemed like overkill
+ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
+    '\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
+)
@@ -9,10 +9,16 @@ aim_inputs = {
     'fields': [
         {
             'id': 'url',
-            'label': _('CyberArk AIM URL'),
+            'label': _('CyberArk CCP URL'),
             'type': 'string',
             'format': 'url',
         },
+        {
+            'id': 'webservice_id',
+            'label': _('Web Service ID'),
+            'type': 'string',
+            'help_text': _('The CCP Web Service ID. Leave blank to default to AIMWebService.'),
+        },
         {
             'id': 'app_id',
             'label': _('Application ID'),

@@ -64,10 +70,13 @@ def aim_backend(**kwargs):
     client_cert = kwargs.get('client_cert', None)
     client_key = kwargs.get('client_key', None)
     verify = kwargs['verify']
+    webservice_id = kwargs['webservice_id']
     app_id = kwargs['app_id']
     object_query = kwargs['object_query']
     object_query_format = kwargs['object_query_format']
     reason = kwargs.get('reason', None)
+    if webservice_id == '':
+        webservice_id = 'AIMWebService'

     query_params = {
         'AppId': app_id,

@@ -78,7 +87,7 @@ def aim_backend(**kwargs):
         query_params['reason'] = reason

     request_qs = '?' + urlencode(query_params, quote_via=quote)
-    request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
+    request_url = urljoin(url, '/'.join([webservice_id, 'api', 'Accounts']))

     with CertFiles(client_cert, client_key) as cert:
         res = requests.get(

@@ -92,4 +101,4 @@ def aim_backend(**kwargs):
     return res.json()['Content']


-aim_plugin = CredentialPlugin('CyberArk AIM Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
+aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
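With the new webservice_id input, a lookup against a CCP instance published under a custom web service ID builds a request URL of the form (hostname, service ID, and query are hypothetical):

    https://ccp.example.com/MyCustomService/api/Accounts?AppId=awx&Query=...

while an empty webservice_id still falls back to the previously hard-coded AIMWebService segment, so existing credentials keep working unchanged.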
@@ -1,6 +1,5 @@
 from .plugin import CredentialPlugin, CertFiles, raise_for_status

-import base64
 from urllib.parse import urljoin, quote

 from django.utils.translation import gettext_lazy as _

@@ -61,7 +60,7 @@ def conjur_backend(**kwargs):
     cacert = kwargs.get('cacert', None)

     auth_kwargs = {
-        'headers': {'Content-Type': 'text/plain'},
+        'headers': {'Content-Type': 'text/plain', 'Accept-Encoding': 'base64'},
         'data': api_key,
         'allow_redirects': False,
     }

@@ -69,9 +68,9 @@ def conjur_backend(**kwargs):
     with CertFiles(cacert) as cert:
         # https://www.conjur.org/api.html#authentication-authenticate-post
         auth_kwargs['verify'] = cert
-        resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
+        resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
     raise_for_status(resp)
-    token = base64.b64encode(resp.content).decode('utf-8')
+    token = resp.content.decode('utf-8')

     lookup_kwargs = {
         'headers': {'Authorization': 'Token token="{}"'.format(token)},

@@ -79,9 +78,10 @@ def conjur_backend(**kwargs):
     }

     # https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
-    path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
+    path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
     if version:
-        path = '?'.join([path, version])
+        ver = "version={}".format(version)
+        path = '?'.join([path, ver])

     with CertFiles(cacert) as cert:
         lookup_kwargs['verify'] = cert

@@ -90,4 +90,4 @@ def conjur_backend(**kwargs):
     return resp.text


-conjur_plugin = CredentialPlugin('CyberArk Conjur Secret Lookup', inputs=conjur_inputs, backend=conjur_backend)
+conjur_plugin = CredentialPlugin('CyberArk Conjur Secrets Manager Lookup', inputs=conjur_inputs, backend=conjur_backend)
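After this change a versioned secret lookup hits the 'api'-prefixed path with a proper query parameter, e.g. (hostname, account, and variable are hypothetical):

    https://conjur.example.com/api/secrets/myaccount/variable/prod%2Fdb%2Fpassword?version=2

where the old code appended the bare value ('...?2'), which Conjur does not interpret as a version selector. Requesting Accept-Encoding: base64 on authentication also moves the base64 step to the server, which is why the local base64 import could be dropped.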
@@ -4,6 +4,7 @@ import select
 from contextlib import contextmanager

 from django.conf import settings
+from django.db import connection as pg_connection


 NOT_READY = ([], [], [])

@@ -15,7 +16,6 @@ def get_local_queuename():

 class PubSub(object):
     def __init__(self, conn):
-        assert conn.autocommit, "Connection must be in autocommit mode."
         self.conn = conn

     def listen(self, channel):

@@ -31,6 +31,9 @@ class PubSub(object):
         cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

     def events(self, select_timeout=5, yield_timeouts=False):
+        if not self.conn.autocommit:
+            raise RuntimeError('Listening for events can only be done in autocommit mode')
+
         while True:
             if select.select([self.conn], [], [], select_timeout) == NOT_READY:
                 if yield_timeouts:

@@ -45,11 +48,32 @@ class PubSub(object):

 @contextmanager
-def pg_bus_conn():
-    conf = settings.DATABASES['default']
-    conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {}))
-    # Django connection.cursor().connection doesn't have autocommit=True on
-    conn.set_session(autocommit=True)
+def pg_bus_conn(new_connection=False):
+    '''
+    Any listeners probably want to establish a new database connection,
+    separate from the Django connection used for queries, because that will prevent
+    losing connection to the channel whenever a .close() happens.
+
+    Any publishers probably want to use the existing connection
+    so that messages follow postgres transaction rules
+    https://www.postgresql.org/docs/current/sql-notify.html
+    '''
+
+    if new_connection:
+        conf = settings.DATABASES['default']
+        conn = psycopg2.connect(
+            dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
+        )
+        # Django connection.cursor().connection doesn't have autocommit=True on by default
+        conn.set_session(autocommit=True)
+    else:
+        if pg_connection.connection is None:
+            pg_connection.connect()
+        if pg_connection.connection is None:
+            raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
+        conn = pg_connection.connection

     pubsub = PubSub(conn)
     yield pubsub
-    conn.close()
+    if new_connection:
+        conn.close()
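A sketch of how the two modes are meant to be used, based on the docstring above (channel name and payload are illustrative):

    from awx.main.dispatch import pg_bus_conn

    # listener: dedicated connection, so Django closing its own connection
    # between queries cannot drop the LISTEN subscription
    with pg_bus_conn(new_connection=True) as conn:
        conn.listen('tower_broadcast')  # illustrative channel
        for event in conn.events(yield_timeouts=True):
            if event is not None:
                print(event.payload)
            # a None event is just a select() timeout -- a chance for periodic work

    # publisher: reuse the Django connection so the NOTIFY follows the
    # surrounding transaction's commit/rollback semantics
    with pg_bus_conn() as conn:
        conn.notify('tower_broadcast', '{"msg": "hello"}')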
@@ -3,6 +3,7 @@ import uuid
 import json

 from django.conf import settings
+from django.db import connection
 import redis

 from awx.main.dispatch import get_local_queuename

@@ -37,18 +38,27 @@ class Control(object):
     def running(self, *args, **kwargs):
         return self.control_with_reply('running', *args, **kwargs)

+    def cancel(self, task_ids, *args, **kwargs):
+        return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
+
     @classmethod
     def generate_reply_queue_name(cls):
         return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"

-    def control_with_reply(self, command, timeout=5):
+    def control_with_reply(self, command, timeout=5, extra_data=None):
         logger.warning('checking {} {} for {}'.format(self.service, command, self.queuename))
         reply_queue = Control.generate_reply_queue_name()
         self.result = None

+        if not connection.get_autocommit():
+            raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')
+
         with pg_bus_conn() as conn:
             conn.listen(reply_queue)
-            conn.notify(self.queuename, json.dumps({'control': command, 'reply_to': reply_queue}))
+            send_data = {'control': command, 'reply_to': reply_queue}
+            if extra_data:
+                send_data.update(extra_data)
+            conn.notify(self.queuename, json.dumps(send_data))

             for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
                 if reply is None:
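Usage sketch for the new cancel control message (the module path is assumed from this diff's file layout, and the task uuid is purely illustrative):

    from awx.main.dispatch.control import Control

    # ask the dispatcher to SIGTERM the given tasks; the reply lists the
    # uuids it actually found running on its workers
    canceled = Control('dispatcher').cancel(['7e3b1c4d'], timeout=5)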
@@ -16,13 +16,14 @@ from queue import Full as QueueFull, Empty as QueueEmpty
 from django.conf import settings
 from django.db import connection as django_connection, connections
 from django.core.cache import cache as django_cache
+from django.utils.timezone import now as tz_now
 from django_guid import set_guid
 from jinja2 import Template
 import psutil

 from awx.main.models import UnifiedJob
 from awx.main.dispatch import reaper
-from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
+from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity, log_excess_runtime

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')

@@ -328,12 +329,16 @@ class AutoscalePool(WorkerPool):
         # Get same number as max forks based on memory, this function takes memory as bytes
         self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)

+        # add magic prime number of extra workers to ensure
+        # we have a few extra workers to run the heartbeat
+        self.max_workers += 7
+
         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)

-    def debug(self, *args, **kwargs):
-        self.cleanup()
-        return super(AutoscalePool, self).debug(*args, **kwargs)
+        # the task manager enforces settings.TASK_MANAGER_TIMEOUT on its own
+        # but if the task takes longer than the time defined here, we will force it to stop here
+        self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD

     @property
     def should_grow(self):
@@ -351,6 +356,7 @@ class AutoscalePool(WorkerPool):
     def debug_meta(self):
         return 'min={} max={}'.format(self.min_workers, self.max_workers)

+    @log_excess_runtime(logger)
     def cleanup(self):
         """
         Perform some internal account and cleanup. This is run on

@@ -359,8 +365,6 @@ class AutoscalePool(WorkerPool):
         1. Discover worker processes that exited, and recover messages they
            were handling.
         2. Clean up unnecessary, idle workers.
-        3. Check to see if the database says this node is running any tasks
-           that aren't actually running. If so, reap them.

         IMPORTANT: this function is one of the few places in the dispatcher
         (aside from setting lookups) where we talk to the database. As such,

@@ -383,6 +387,8 @@ class AutoscalePool(WorkerPool):
                         reaper.reap_job(j, 'failed')
                 except Exception:
                     logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
+                else:
+                    logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
                 orphaned.extend(w.orphaned_tasks)
                 self.workers.remove(w)
             elif w.idle and len(self.workers) > self.min_workers:
@@ -401,13 +407,15 @@ class AutoscalePool(WorkerPool):
             # the task manager to never do more work
             current_task = w.current_task
             if current_task and isinstance(current_task, dict):
-                if current_task.get('task', '').endswith('tasks.run_task_manager'):
+                endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
+                current_task_name = current_task.get('task', '')
+                if any(current_task_name.endswith(e) for e in endings):
                     if 'started' not in current_task:
                         w.managed_tasks[current_task['uuid']]['started'] = time.time()
                     age = time.time() - current_task['started']
                     w.managed_tasks[current_task['uuid']]['age'] = age
-                    if age > (60 * 5):
-                        logger.error(f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}')  # noqa
+                    if age > self.task_manager_timeout:
+                        logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
                         os.kill(w.pid, signal.SIGTERM)

         for m in orphaned:

@@ -417,13 +425,17 @@ class AutoscalePool(WorkerPool):
             idx = random.choice(range(len(self.workers)))
             self.write(idx, m)

-        # if the database says a job is running on this node, but it's *not*,
-        # then reap it
-        running_uuids = []
-        for worker in self.workers:
-            worker.calculate_managed_tasks()
-            running_uuids.extend(list(worker.managed_tasks.keys()))
-        reaper.reap(excluded_uuids=running_uuids)
+    def add_bind_kwargs(self, body):
+        bind_kwargs = body.pop('bind_kwargs', [])
+        body.setdefault('kwargs', {})
+        if 'dispatch_time' in bind_kwargs:
+            body['kwargs']['dispatch_time'] = tz_now().isoformat()
+        if 'worker_tasks' in bind_kwargs:
+            worker_tasks = {}
+            for worker in self.workers:
+                worker.calculate_managed_tasks()
+                worker_tasks[worker.pid] = list(worker.managed_tasks.keys())
+            body['kwargs']['worker_tasks'] = worker_tasks

     def up(self):
         if self.full:
@@ -438,9 +450,8 @@ class AutoscalePool(WorkerPool):
         if 'guid' in body:
             set_guid(body['guid'])
         try:
-            # when the cluster heartbeat occurs, clean up internally
-            if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
-                self.cleanup()
+            if isinstance(body, dict) and body.get('bind_kwargs'):
+                self.add_bind_kwargs(body)
             if self.should_grow:
                 self.up()
             # we don't care about "preferred queue" round robin distribution, just

@@ -452,6 +463,10 @@ class AutoscalePool(WorkerPool):
                     w.put(body)
                     break
             else:
+                task_name = 'unknown'
+                if isinstance(body, dict):
+                    task_name = body.get('task')
+                logger.warn(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
                 return super(AutoscalePool, self).write(preferred_queue, body)
         except Exception:
             for conn in connections.all():
@@ -2,6 +2,7 @@ import inspect
 import logging
 import sys
 import json
+import time
 from uuid import uuid4

 from django.conf import settings

@@ -49,13 +50,21 @@ class task:
         @task(queue='tower_broadcast')
         def announce():
             print("Run this everywhere!")
+
+    # The special parameter bind_kwargs tells the main dispatcher process to add certain kwargs
+
+    @task(bind_kwargs=['dispatch_time'])
+    def print_time(dispatch_time=None):
+        print(f"Time I was dispatched: {dispatch_time}")
     """

-    def __init__(self, queue=None):
+    def __init__(self, queue=None, bind_kwargs=None):
         self.queue = queue
+        self.bind_kwargs = bind_kwargs

     def __call__(self, fn=None):
         queue = self.queue
+        bind_kwargs = self.bind_kwargs

 class PublisherMixin(object):

@@ -75,10 +84,12 @@ class task:
             msg = f'{cls.name}: Queue value required and may not be None'
             logger.error(msg)
             raise ValueError(msg)
-        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name}
+        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
         guid = get_guid()
         if guid:
             obj['guid'] = guid
+        if bind_kwargs:
+            obj['bind_kwargs'] = bind_kwargs
         obj.update(**kw)
         if callable(queue):
             queue = queue()
@@ -2,6 +2,7 @@ from datetime import timedelta
 import logging

 from django.db.models import Q
+from django.conf import settings
 from django.utils.timezone import now as tz_now
 from django.contrib.contenttypes.models import ContentType

@@ -10,42 +11,78 @@ from awx.main.models import Instance, UnifiedJob, WorkflowJob
 logger = logging.getLogger('awx.main.dispatch')


-def reap_job(j, status):
-    if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
+def startup_reaping():
+    """
+    If this particular instance is starting, then we know that any running jobs are invalid
+    so we will reap those jobs as a special action here
+    """
+    jobs = UnifiedJob.objects.filter(status='running', controller_node=Instance.objects.my_hostname())
+    job_ids = []
+    for j in jobs:
+        job_ids.append(j.id)
+        reap_job(
+            j,
+            'failed',
+            job_explanation='Task was marked as running at system start up. The system must have not shut down properly, so it has been marked as failed.',
+        )
+    if job_ids:
+        logger.error(f'Unified jobs {job_ids} were reaped on dispatch startup')
+
+
+def reap_job(j, status, job_explanation=None):
+    j.refresh_from_db(fields=['status', 'job_explanation'])
+    status_before = j.status
+    if status_before not in ('running', 'waiting'):
         # just in case, don't reap jobs that aren't running
         return
     j.status = status
     j.start_args = ''  # blank field to remove encrypted passwords
-    j.job_explanation += ' '.join(
-        (
-            'Task was marked as running but was not present in',
-            'the job queue, so it has been marked as failed.',
-        )
-    )
+    if j.job_explanation:
+        j.job_explanation += ' '  # Separate messages for readability
+    if job_explanation is None:
+        j.job_explanation += 'Task was marked as running but was not present in the job queue, so it has been marked as failed.'
+    else:
+        j.job_explanation += job_explanation
     j.save(update_fields=['status', 'start_args', 'job_explanation'])
     if hasattr(j, 'send_notification_templates'):
         j.send_notification_templates('failed')
     j.websocket_emit_status(status)
-    logger.error('{} is no longer running; reaping'.format(j.log_format))
+    logger.error(f'{j.log_format} is no longer {status_before}; reaping')


-def reap(instance=None, status='failed', excluded_uuids=[]):
+def reap_waiting(instance=None, status='failed', job_explanation=None, grace_period=None, excluded_uuids=None, ref_time=None):
     """
-    Reap all jobs in waiting|running for this instance.
+    Reap all jobs in waiting for this instance.
     """
-    me = instance
-    if me is None:
-        try:
-            me = Instance.objects.me()
-        except RuntimeError as e:
-            logger.warning(f'Local instance is not registered, not running reaper: {e}')
-            return
-    now = tz_now()
+    if grace_period is None:
+        grace_period = settings.JOB_WAITING_GRACE_PERIOD + settings.TASK_MANAGER_TIMEOUT
+
+    if instance is None:
+        hostname = Instance.objects.my_hostname()
+    else:
+        hostname = instance.hostname
+    if ref_time is None:
+        ref_time = tz_now()
+    jobs = UnifiedJob.objects.filter(status='waiting', modified__lte=ref_time - timedelta(seconds=grace_period), controller_node=hostname)
+    if excluded_uuids:
+        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
+    for j in jobs:
+        reap_job(j, status, job_explanation=job_explanation)
+
+
+def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
+    """
+    Reap all jobs in running for this instance.
+    """
+    if instance is None:
+        hostname = Instance.objects.my_hostname()
+    else:
+        hostname = instance.hostname
     workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
     jobs = UnifiedJob.objects.filter(
-        (Q(status='running') | Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
-        & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
-        & ~Q(polymorphic_ctype_id=workflow_ctype_id)
-    ).exclude(celery_task_id__in=excluded_uuids)
+        Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
+    )
+    if excluded_uuids:
+        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
     for j in jobs:
-        reap_job(j, status)
+        reap_job(j, status, job_explanation=job_explanation)
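How the reapers divide the work after this refactor (a sketch of the calling pattern; in the real dispatcher the calls live elsewhere, and active_uuids comes from the worker pool):

    from awx.main.dispatch import reaper

    # on dispatcher startup: anything still 'running' on this controller is stale
    reaper.startup_reaping()

    # periodically: fail 'waiting' jobs stuck past the grace period, and
    # 'running' jobs whose uuid no local worker claims
    active_uuids = []  # illustrative; gathered from workers' managed tasks
    reaper.reap_waiting(excluded_uuids=active_uuids)
    reaper.reap(excluded_uuids=active_uuids)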
@@ -17,6 +17,7 @@ from django.conf import settings

 from awx.main.dispatch.pool import WorkerPool
 from awx.main.dispatch import pg_bus_conn
+from awx.main.utils.common import log_excess_runtime

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')

@@ -62,7 +63,7 @@ class AWXConsumerBase(object):
     def control(self, body):
         logger.warning(f'Received control signal:\n{body}')
         control = body.get('control')
-        if control in ('status', 'running'):
+        if control in ('status', 'running', 'cancel'):
             reply_queue = body['reply_to']
             if control == 'status':
                 msg = '\n'.join([self.listening_on, self.pool.debug()])

@@ -71,6 +72,17 @@ class AWXConsumerBase(object):
                 for worker in self.pool.workers:
                     worker.calculate_managed_tasks()
                     msg.extend(worker.managed_tasks.keys())
+            elif control == 'cancel':
+                msg = []
+                task_ids = set(body['task_ids'])
+                for worker in self.pool.workers:
+                    task = worker.current_task
+                    if task and task['uuid'] in task_ids:
+                        logger.warn(f'Sending SIGTERM to task id={task["uuid"]}, task={task.get("task")}, args={task.get("args")}')
+                        os.kill(worker.pid, signal.SIGTERM)
+                        msg.append(task['uuid'])
+                if task_ids and not msg:
+                    logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')

             with pg_bus_conn() as conn:
                 conn.notify(reply_queue, json.dumps(msg))

@@ -81,6 +93,9 @@ class AWXConsumerBase(object):
             logger.error('unrecognized control message: {}'.format(control))

     def process_task(self, body):
+        if isinstance(body, dict):
+            body['time_ack'] = time.time()
+
         if 'control' in body:
             try:
                 return self.control(body)

@@ -99,8 +114,8 @@ class AWXConsumerBase(object):
             queue = 0
         self.pool.write(queue, body)
         self.total_messages += 1
-        self.record_statistics()

+    @log_excess_runtime(logger)
     def record_statistics(self):
         if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
             try:
@@ -140,6 +155,16 @@ class AWXConsumerPG(AWXConsumerBase):
         # if no successful loops have ran since startup, then we should fail right away
         self.pg_is_down = True  # set so that we fail if we get database errors on startup
         self.pg_down_time = time.time() - self.pg_max_wait  # allow no grace period
+        self.last_cleanup = time.time()
+
+    def run_periodic_tasks(self):
+        self.record_statistics()  # maintains time buffer in method
+
+        if time.time() - self.last_cleanup > 60:  # same as cluster_node_heartbeat
+            # NOTE: if we run out of database connections, it is important to still run cleanup
+            # so that we scale down workers and free up connections
+            self.pool.cleanup()
+            self.last_cleanup = time.time()

     def run(self, *args, **kwargs):
         super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -149,14 +174,16 @@ class AWXConsumerPG(AWXConsumerBase):

         while True:
             try:
-                with pg_bus_conn() as conn:
+                with pg_bus_conn(new_connection=True) as conn:
                     for queue in self.queues:
                         conn.listen(queue)
                     if init is False:
                         self.worker.on_start()
                         init = True
-                    for e in conn.events():
-                        self.process_task(json.loads(e.payload))
+                    for e in conn.events(yield_timeouts=True):
+                        if e is not None:
+                            self.process_task(json.loads(e.payload))
+                        self.run_periodic_tasks()
                     self.pg_is_down = False
                     if self.should_stop:
                         return

@@ -169,8 +196,9 @@ class AWXConsumerPG(AWXConsumerBase):
                 logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
                 self.pg_down_time = time.time()
                 self.pg_is_down = True
-                if time.time() - self.pg_down_time > self.pg_max_wait:
-                    logger.warning(f"Postgres event consumer has not recovered in {self.pg_max_wait} s, exiting")
+                current_downtime = time.time() - self.pg_down_time
+                if current_downtime > self.pg_max_wait:
+                    logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
                     raise
                 # Wait for a second before next attempt, but still listen for any shutdown signals
                 for i in range(10):

@@ -179,6 +207,10 @@ class AWXConsumerPG(AWXConsumerBase):
                     time.sleep(0.1)
                 for conn in db.connections.all():
                     conn.close_if_unusable_or_obsolete()
+            except Exception:
+                # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
+                logger.exception('Encountered unhandled error in dispatcher main loop')
+                raise


 class BaseWorker(object):
@@ -208,6 +240,8 @@ class BaseWorker(object):
                 # so we can establish a new connection
                 conn.close_if_unusable_or_obsolete()
                 self.perform_work(body, *args)
+            except Exception:
+                logger.exception(f'Unhandled exception in perform_work in worker pid={os.getpid()}')
             finally:
                 if 'uuid' in body:
                     uuid = body['uuid']
@@ -4,10 +4,12 @@ import os
 import signal
 import time
 import traceback
+import datetime

 from django.conf import settings
+from django.utils.functional import cached_property
 from django.utils.timezone import now as tz_now
-from django.db import DatabaseError, OperationalError, connection as django_connection
+from django.db import DatabaseError, OperationalError, transaction, connection as django_connection
 from django.db.utils import InterfaceError, InternalError
 from django_guid import set_guid

@@ -16,8 +18,8 @@ import psutil
 import redis

 from awx.main.consumers import emit_channel_notification
-from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob, Job
-from awx.main.tasks.system import handle_success_and_failure_notifications
+from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob
+from awx.main.constants import ACTIVE_STATES
 from awx.main.models.events import emit_event_detail
 from awx.main.utils.profiling import AWXProfiler
 import awx.main.analytics.subsystem_metrics as s_metrics

@@ -26,6 +28,32 @@ from .base import BaseWorker
 logger = logging.getLogger('awx.main.commands.run_callback_receiver')


+def job_stats_wrapup(job_identifier, event=None):
+    """Fill in the unified job host_status_counts, fire off notifications if needed"""
+    try:
+        # empty dict (versus default of None) can still indicate that events have been processed
+        # for job types like system jobs, and jobs with no hosts matched
+        host_status_counts = {}
+        if event:
+            host_status_counts = event.get_host_status_counts()
+
+        # Update host_status_counts while holding the row lock
+        with transaction.atomic():
+            uj = UnifiedJob.objects.select_for_update().get(pk=job_identifier)
+            uj.host_status_counts = host_status_counts
+            uj.save(update_fields=['host_status_counts'])
+
+        uj.log_lifecycle("stats_wrapup_finished")
+
+        # If the status was a finished state before this update was made, send notifications
+        # If not, we will send notifications when the status changes
+        if uj.status not in ACTIVE_STATES:
+            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
+
+    except Exception:
+        logger.exception('Worker failed to save stats or emit notifications: Job {}'.format(job_identifier))
+
+
 class CallbackBrokerWorker(BaseWorker):
     """
     A worker implementation that deserializes callback event data and persists
@@ -44,7 +72,6 @@ class CallbackBrokerWorker(BaseWorker):

     def __init__(self):
         self.buff = {}
-        self.pid = os.getpid()
         self.redis = redis.Redis.from_url(settings.BROKER_URL)
         self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
         self.queue_pop = 0

@@ -53,6 +80,11 @@ class CallbackBrokerWorker(BaseWorker):
         for key in self.redis.keys('awx_callback_receiver_statistics_*'):
             self.redis.delete(key)

+    @cached_property
+    def pid(self):
+        """This needs to be obtained after forking, or else it will give the parent process"""
+        return os.getpid()
+
     def read(self, queue):
         try:
             res = self.redis.blpop(self.queue_name, timeout=1)
@@ -120,32 +152,49 @@ class CallbackBrokerWorker(BaseWorker):
             metrics_singular_events_saved = 0
             metrics_events_batch_save_errors = 0
+            metrics_events_broadcast = 0
+            metrics_events_missing_created = 0
+            metrics_total_job_event_processing_seconds = datetime.timedelta(seconds=0)
             for cls, events in self.buff.items():
                 logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
                 for e in events:
+                    e.modified = now  # this can be set before created because now is set above on line 149
                     if not e.created:
                         e.created = now
-                    e.modified = now
+                        metrics_events_missing_created += 1
+                    else:  # only calculate the seconds if the created time already has been set
+                        metrics_total_job_event_processing_seconds += e.modified - e.created
                 metrics_duration_to_save = time.perf_counter()
                 try:
                     cls.objects.bulk_create(events)
                     metrics_bulk_events_saved += len(events)
-                except Exception:
+                except Exception as exc:
+                    logger.warning(f'Error in events bulk_create, will try indiviually up to 5 errors, error {str(exc)}')
                     # if an exception occurs, we should re-attempt to save the
                     # events one-by-one, because something in the list is
                     # broken/stale
+                    consecutive_errors = 0
+                    events_saved = 0
                     metrics_events_batch_save_errors += 1
                     for e in events:
                         try:
                             e.save()
-                            metrics_singular_events_saved += 1
-                        except Exception:
-                            logger.exception('Database Error Saving Job Event')
+                            events_saved += 1
+                            consecutive_errors = 0
+                        except Exception as exc_indv:
+                            consecutive_errors += 1
+                            logger.info(f'Database Error Saving individual Job Event, error {str(exc_indv)}')
+                        if consecutive_errors >= 5:
+                            raise
+                    metrics_singular_events_saved += events_saved
+                    if events_saved == 0:
+                        raise
                 metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
                 for e in events:
                     if not getattr(e, '_skip_websocket_message', False):
+                        metrics_events_broadcast += 1
                         emit_event_detail(e)
+                    if getattr(e, '_notification_trigger_event', False):
+                        job_stats_wrapup(getattr(e, e.JOB_REFERENCE), event=e)
             self.buff = {}
             self.last_flush = time.time()
             # only update metrics if we saved events
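The shape of the new fallback above, stripped to its essentials (a generic Django ORM sketch, not the AWX code; Model and rows are hypothetical):

    def save_batch(Model, rows):
        try:
            Model.objects.bulk_create(rows)  # one INSERT for the whole batch
        except Exception:
            # a single bad row fails the entire batch, so retry row-by-row,
            # giving up only after several consecutive per-row failures
            consecutive = 0
            for row in rows:
                try:
                    row.save()
                    consecutive = 0
                except Exception:
                    consecutive += 1
                    if consecutive >= 5:
                        raise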
@@ -156,6 +205,11 @@ class CallbackBrokerWorker(BaseWorker):
                 self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', metrics_bulk_events_saved)
                 self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(metrics_bulk_events_saved + metrics_singular_events_saved))
+                self.subsystem_metrics.inc('callback_receiver_events_broadcast', metrics_events_broadcast)
+                self.subsystem_metrics.set(
+                    'callback_receiver_event_processing_avg_seconds',
+                    metrics_total_job_event_processing_seconds.total_seconds()
+                    / (metrics_bulk_events_saved + metrics_singular_events_saved - metrics_events_missing_created),
+                )
                 if self.subsystem_metrics.should_pipe_execute() is True:
                     self.subsystem_metrics.pipe_execute()
@@ -165,47 +219,32 @@ class CallbackBrokerWorker(BaseWorker):
         if flush:
             self.last_event = ''
         if not flush:
-            event_map = {
-                'job_id': JobEvent,
-                'ad_hoc_command_id': AdHocCommandEvent,
-                'project_update_id': ProjectUpdateEvent,
-                'inventory_update_id': InventoryUpdateEvent,
-                'system_job_id': SystemJobEvent,
-            }
-
             job_identifier = 'unknown job'
-            for key, cls in event_map.items():
-                if key in body:
-                    job_identifier = body[key]
+            for cls in (JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent):
+                if cls.JOB_REFERENCE in body:
+                    job_identifier = body[cls.JOB_REFERENCE]
                     break

             self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})'  # noqa

+            notification_trigger_event = bool(body.get('event') == cls.WRAPUP_EVENT)
+
             if body.get('event') == 'EOF':
                 try:
                     if 'guid' in body:
                         set_guid(body['guid'])
                     final_counter = body.get('final_counter', 0)
-                    logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
+                    logger.info('Starting EOF event processing for Job {}'.format(job_identifier))
                     # EOF events are sent when stdout for the running task is
                     # closed. don't actually persist them to the database; we
                     # just use them to report `summary` websocket events as an
                     # approximation for when a job is "done"
                     emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter))
-                    # Additionally, when we've processed all events, we should
-                    # have all the data we need to send out success/failure
-                    # notification templates
-                    uj = UnifiedJob.objects.get(pk=job_identifier)
-
-                    if isinstance(uj, Job):
-                        # *actual playbooks* send their success/failure
-                        # notifications in response to the playbook_on_stats
-                        # event handling code in main.models.events
-                        pass
-                    elif hasattr(uj, 'send_notification_templates'):
-                        handle_success_and_failure_notifications.apply_async([uj.id])
+                    if notification_trigger_event:
+                        job_stats_wrapup(job_identifier)
                 except Exception:
-                    logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
+                    logger.exception('Worker failed to perform EOF tasks: Job {}'.format(job_identifier))
                 finally:
                     self.subsystem_metrics.inc('callback_receiver_events_in_memory', -1)
                     set_guid('')

@@ -215,9 +254,12 @@ class CallbackBrokerWorker(BaseWorker):

             event = cls.create_from_data(**body)

-            if skip_websocket_message:
+            if skip_websocket_message:  # if this event sends websocket messages, fire them off on flush
                 event._skip_websocket_message = True

+            if notification_trigger_event:  # if this is an Ansible stats event, ensure notifications on flush
+                event._notification_trigger_event = True
+
             self.buff.setdefault(cls, []).append(event)

             retries = 0

@@ -225,17 +267,18 @@ class CallbackBrokerWorker(BaseWorker):
             try:
                 self.flush(force=flush)
                 break
-            except (OperationalError, InterfaceError, InternalError):
+            except (OperationalError, InterfaceError, InternalError) as exc:
                 if retries >= self.MAX_RETRIES:
                     logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                     return
                 delay = 60 * retries
-                logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(i=retries + 1, delay=delay))
+                logger.warning(f'Database Error Flushing Job Events, retry #{retries + 1} in {delay} seconds: {str(exc)}')
                 django_connection.close()
                 time.sleep(delay)
                 retries += 1
             except DatabaseError:
-                logger.exception('Database Error Saving Job Event')
+                logger.exception('Database Error Flushing Job Events')
                 django_connection.close()
                 break
             except Exception as exc:
                 tb = traceback.format_exc()
@@ -3,6 +3,7 @@ import logging
 import importlib
 import sys
 import traceback
+import time

 from kubernetes.config import kube_config

@@ -60,8 +61,19 @@ class TaskWorker(BaseWorker):
             # the callable is a class, e.g., RunJob; instantiate and
             # return its `run()` method
             _call = _call().run

+        log_extra = ''
+        logger_method = logger.debug
+        if ('time_ack' in body) and ('time_pub' in body):
+            time_publish = body['time_ack'] - body['time_pub']
+            time_waiting = time.time() - body['time_ack']
+            if time_waiting > 5.0 or time_publish > 5.0:
+                # If task too a very long time to process, add this information to the log
+                log_extra = f' took {time_publish:.4f} to ack, {time_waiting:.4f} in local dispatcher'
+                logger_method = logger.info
         # don't print kwargs, they often contain launch-time secrets
-        logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
+        logger_method(f'task {uuid} starting {task}(*{args}){log_extra}')

         return _call(*args, **kwargs)

     def perform_work(self, body):
@@ -25,7 +25,7 @@ class Command(BaseCommand):
         with connection.cursor() as cursor:
             cursor.execute(
                 f'''
-                    SELECT
+                SELECT
                     b.id, b.job_id, b.host_name, b.created - a.created delta,
                     b.task task,
                     b.event_data::json->'task_action' task_action,
Some files were not shown because too many files have changed in this diff.