mirror of
https://github.com/ansible/awx.git
synced 2026-02-05 03:24:50 -03:30
Compare commits
936 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e3872ebd58 | ||
|
|
eee716644b | ||
|
|
2758a38485 | ||
|
|
9104f485e6 | ||
|
|
ae7361f82d | ||
|
|
42562e86e4 | ||
|
|
982ed37b06 | ||
|
|
c0c666cc87 | ||
|
|
8a284889f5 | ||
|
|
b891e2c204 | ||
|
|
a8bf7366cf | ||
|
|
e517f81b8f | ||
|
|
c4c99332fc | ||
|
|
40b5ce4b2e | ||
|
|
d2cd337c1f | ||
|
|
b9913fb4f9 | ||
|
|
d1705dd0cc | ||
|
|
816cc29132 | ||
|
|
f09b8efa87 | ||
|
|
6ebc6809eb | ||
|
|
4b31367945 | ||
|
|
2a62e300a2 | ||
|
|
f6b075843e | ||
|
|
4723773354 | ||
|
|
70be95cec5 | ||
|
|
201b17012d | ||
|
|
47264b0809 | ||
|
|
c51f235fab | ||
|
|
f1b1224a27 | ||
|
|
63b0796738 | ||
|
|
5961e3ef2e | ||
|
|
246d80f177 | ||
|
|
8005b47c14 | ||
|
|
1317572979 | ||
|
|
b763c51f8a | ||
|
|
e70055a333 | ||
|
|
52f86a206a | ||
|
|
7252883094 | ||
|
|
afa7c2d69f | ||
|
|
9c44d1f526 | ||
|
|
473ce95c86 | ||
|
|
3e1e068013 | ||
|
|
746a154f2b | ||
|
|
28733800c4 | ||
|
|
cf2deefa41 | ||
|
|
e5645dd798 | ||
|
|
6205a5db83 | ||
|
|
e50dd92425 | ||
|
|
4955fc8bc4 | ||
|
|
15adb1e828 | ||
|
|
c90d81b914 | ||
|
|
8e9c28701e | ||
|
|
abc74fc9b8 | ||
|
|
21fce00102 | ||
|
|
d347a06e3d | ||
|
|
0391dbc292 | ||
|
|
349c7efa69 | ||
|
|
0f451595d7 | ||
|
|
fcb6ce2907 | ||
|
|
273d7a83f2 | ||
|
|
916c92ffc7 | ||
|
|
38bf174bda | ||
|
|
09dff99340 | ||
|
|
7f178ef28b | ||
|
|
68328109d7 | ||
|
|
d573a9a346 | ||
|
|
d6e89689ae | ||
|
|
d1d97598e2 | ||
|
|
f57fa9d1fb | ||
|
|
83760deb9d | ||
|
|
3893e29a33 | ||
|
|
feeaa0bf5c | ||
|
|
22802e7a64 | ||
|
|
1ac5bc5e2b | ||
|
|
362a3753d0 | ||
|
|
71ee9d28b9 | ||
|
|
d8d89d253d | ||
|
|
a72f3d2f2f | ||
|
|
1adeb833fb | ||
|
|
a810aaf319 | ||
|
|
d9866c35b4 | ||
|
|
4e45c3a66c | ||
|
|
87b55dc413 | ||
|
|
2e3949d612 | ||
|
|
d928ccd922 | ||
|
|
a9c51b737c | ||
|
|
51669c9765 | ||
|
|
17cc82d946 | ||
|
|
10de5b6866 | ||
|
|
55dc27f243 | ||
|
|
6fc2ba3495 | ||
|
|
7bad01e193 | ||
|
|
62a1f10c42 | ||
|
|
3975a2ecdb | ||
|
|
bfa361c87f | ||
|
|
d5f07a9652 | ||
|
|
65ec1d18ad | ||
|
|
7b4521f980 | ||
|
|
3762ba7b24 | ||
|
|
762c882cd7 | ||
|
|
343639d4b7 | ||
|
|
38dc0b8e90 | ||
|
|
ed40ba6267 | ||
|
|
54d56f2284 | ||
|
|
1477bbae30 | ||
|
|
625c6c30fc | ||
|
|
228e412478 | ||
|
|
f8f2e005ba | ||
|
|
d8bf82a8cb | ||
|
|
2eeca3cfd7 | ||
|
|
28a4bbbe8a | ||
|
|
1cfcaa72ad | ||
|
|
4c14727762 | ||
|
|
0c8dde9718 | ||
|
|
febf051748 | ||
|
|
56885a5da1 | ||
|
|
623cf54766 | ||
|
|
a804c854bf | ||
|
|
7b087d4a6c | ||
|
|
cfa098479e | ||
|
|
3c510e6344 | ||
|
|
4c9a1d6b90 | ||
|
|
d1aa52a2a6 | ||
|
|
f30f52a0a8 | ||
|
|
5b459e3c5d | ||
|
|
676c068b71 | ||
|
|
00d71cea50 | ||
|
|
72263c5c7b | ||
|
|
281345dd67 | ||
|
|
1a85fcd2d5 | ||
|
|
c1171fe4ff | ||
|
|
d6a8ad0b33 | ||
|
|
4a6a3b27fa | ||
|
|
266831e26d | ||
|
|
a6e20eeaaa | ||
|
|
6529c1bb46 | ||
|
|
ae0d0db62c | ||
|
|
b81d795c00 | ||
|
|
1b87e11d8f | ||
|
|
8bb9cfd62a | ||
|
|
a176a4b8cf | ||
|
|
3f4d14e48d | ||
|
|
0499d419c3 | ||
|
|
700860e040 | ||
|
|
3dadeb3037 | ||
|
|
16a60412cf | ||
|
|
9f3e272665 | ||
|
|
b84fc3b111 | ||
|
|
e1e8d3b372 | ||
|
|
05f4d94db2 | ||
|
|
61fb3eb390 | ||
|
|
7b95d2114d | ||
|
|
07db7a41b3 | ||
|
|
1120f8b1e1 | ||
|
|
17b3996568 | ||
|
|
584b3f4e3d | ||
|
|
f8c53f4933 | ||
|
|
6e40e9c856 | ||
|
|
2f9dc4d075 | ||
|
|
9afc38b714 | ||
|
|
dfccc9e07d | ||
|
|
7b22d1b874 | ||
|
|
29b4979736 | ||
|
|
87d6253176 | ||
|
|
1e10d4323f | ||
|
|
4111e53113 | ||
|
|
02df0c29e9 | ||
|
|
475c90fd00 | ||
|
|
2742b00a65 | ||
|
|
ea29e66a41 | ||
|
|
6ef6b649e8 | ||
|
|
9bf2a49e0f | ||
|
|
914892c3ac | ||
|
|
77661c6032 | ||
|
|
b4fc585495 | ||
|
|
ff6db37a95 | ||
|
|
1a064bdc59 | ||
|
|
ebabec0dad | ||
|
|
3506b9a7d8 | ||
|
|
cc374ca705 | ||
|
|
ad56a27cc0 | ||
|
|
779e1a34db | ||
|
|
447dfbb64d | ||
|
|
a9365a3967 | ||
|
|
f5c10f99b0 | ||
|
|
c53ccc8d4a | ||
|
|
e214dcac85 | ||
|
|
de77f6bd1f | ||
|
|
18c4771a38 | ||
|
|
042c7ffe5b | ||
|
|
f25c6effa3 | ||
|
|
46d303ceee | ||
|
|
b1bd87bcd2 | ||
|
|
50b0a5a54d | ||
|
|
4f731017ea | ||
|
|
9eb2c02e92 | ||
|
|
55e5432027 | ||
|
|
a9ae4dc5a8 | ||
|
|
0398b744a1 | ||
|
|
012511e4f0 | ||
|
|
4483d0320f | ||
|
|
32e7ddd43a | ||
|
|
0b32733dc8 | ||
|
|
d310c48988 | ||
|
|
5a6eefaf2c | ||
|
|
8c5a94fa64 | ||
|
|
05d988349c | ||
|
|
bb1473f67f | ||
|
|
79f483a66d | ||
|
|
1b09a0230d | ||
|
|
f3344e9816 | ||
|
|
554e4d45aa | ||
|
|
bfe86cbc95 | ||
|
|
29e4160d3e | ||
|
|
b4f906ceb1 | ||
|
|
e099fc58c7 | ||
|
|
e342ef5cfa | ||
|
|
8997fca457 | ||
|
|
435ab4ad67 | ||
|
|
d7a28dcea4 | ||
|
|
5f3024d395 | ||
|
|
3126480d1e | ||
|
|
ca84d312ce | ||
|
|
b790b50a1a | ||
|
|
6df26eb7a3 | ||
|
|
fccaebdc8e | ||
|
|
45728dc1bb | ||
|
|
9cd8aa1667 | ||
|
|
b74597f4dd | ||
|
|
ce3d3c3490 | ||
|
|
a9fe1ad9c1 | ||
|
|
22e7083d71 | ||
|
|
951515da2f | ||
|
|
9e2f4cff08 | ||
|
|
0c1a4439ba | ||
|
|
c2a1603a56 | ||
|
|
13e715aeb9 | ||
|
|
2bc75270e7 | ||
|
|
fabe56088d | ||
|
|
0e3bf6db09 | ||
|
|
c6a7d0859d | ||
|
|
fed00a18ad | ||
|
|
ecbdc55955 | ||
|
|
bca9bcf6dd | ||
|
|
018a8e12de | ||
|
|
e0a28e32eb | ||
|
|
c105885c7b | ||
|
|
89a0be64af | ||
|
|
c1d85f568c | ||
|
|
75566bad39 | ||
|
|
75c2d1eda1 | ||
|
|
9a4667c6c7 | ||
|
|
9917841585 | ||
|
|
fbc3cd3758 | ||
|
|
d65687f14a | ||
|
|
4ea7511ae8 | ||
|
|
a8d22b9459 | ||
|
|
f8453ffe68 | ||
|
|
38f43c147a | ||
|
|
38fbcf8ee6 | ||
|
|
2bd25b1fba | ||
|
|
7178fb83b0 | ||
|
|
2376013d49 | ||
|
|
a94042def5 | ||
|
|
2d2164a4ba | ||
|
|
3c980d373c | ||
|
|
5b3ce1e999 | ||
|
|
6d4469ebbd | ||
|
|
eb58a6cc0e | ||
|
|
a60401abb9 | ||
|
|
1203c8c0ee | ||
|
|
0c52d17951 | ||
|
|
44fa3b18a9 | ||
|
|
33328c4ad7 | ||
|
|
11adcb9800 | ||
|
|
932a1c6386 | ||
|
|
d1791fc48c | ||
|
|
5b274cfc2a | ||
|
|
1d7d2820fd | ||
|
|
605c1355a8 | ||
|
|
a6e00df041 | ||
|
|
67219e743f | ||
|
|
67273ff8c3 | ||
|
|
a3bbe308a8 | ||
|
|
f1b5bbb1f6 | ||
|
|
7330102961 | ||
|
|
61916b86b5 | ||
|
|
35d5bde690 | ||
|
|
b17c477af7 | ||
|
|
01d891cd6e | ||
|
|
e36335f68c | ||
|
|
39369c7721 | ||
|
|
4fbc39991d | ||
|
|
91075e8332 | ||
|
|
0ed50b380a | ||
|
|
53716a4c5a | ||
|
|
f30bbad07d | ||
|
|
b923efad37 | ||
|
|
3b36372880 | ||
|
|
661cc896a9 | ||
|
|
e9c3623dfd | ||
|
|
c65b362841 | ||
|
|
6a0e11a233 | ||
|
|
7417f9925f | ||
|
|
2f669685d8 | ||
|
|
86510029e1 | ||
|
|
37234ca66e | ||
|
|
4ae1fdef05 | ||
|
|
95e94a8ab5 | ||
|
|
ea35d9713a | ||
|
|
949cf53b89 | ||
|
|
a68e22b114 | ||
|
|
d70cd113e1 | ||
|
|
f737fc066f | ||
|
|
9b992c971e | ||
|
|
e0d59766e0 | ||
|
|
a9d88f728d | ||
|
|
e24d63ea9a | ||
|
|
1833a5b78b | ||
|
|
4213a00548 | ||
|
|
7345512785 | ||
|
|
d3dc126d45 | ||
|
|
758a488aee | ||
|
|
c0c358b640 | ||
|
|
49f4ed10ca | ||
|
|
5e18eccd19 | ||
|
|
c4e9daca4e | ||
|
|
5443e10697 | ||
|
|
a3f9c0b012 | ||
|
|
8517053934 | ||
|
|
0506968d4f | ||
|
|
3bb91b20d0 | ||
|
|
268b1ff436 | ||
|
|
f16a72081a | ||
|
|
cceac8d907 | ||
|
|
8d012de3e2 | ||
|
|
cee7ac9511 | ||
|
|
36faaf4720 | ||
|
|
33b8e7624b | ||
|
|
a213e01491 | ||
|
|
a00ed8e297 | ||
|
|
aeaebcd81a | ||
|
|
b5849f3712 | ||
|
|
658f87953e | ||
|
|
5562e636ea | ||
|
|
80fcdae50b | ||
|
|
2c5f209996 | ||
|
|
692b55311e | ||
|
|
10667fc855 | ||
|
|
1fc33b551d | ||
|
|
729256c3d1 | ||
|
|
23e1feba96 | ||
|
|
e3614c3012 | ||
|
|
f37391397e | ||
|
|
6a8454f748 | ||
|
|
d1328c7625 | ||
|
|
d1cce109fb | ||
|
|
32bd8b6473 | ||
|
|
001bd4ca59 | ||
|
|
541b503e06 | ||
|
|
093c29e315 | ||
|
|
eec7d7199b | ||
|
|
7dbb862673 | ||
|
|
be1422d021 | ||
|
|
459ac0e5d9 | ||
|
|
16c9d043c0 | ||
|
|
1b465c4ed9 | ||
|
|
198a0db808 | ||
|
|
91dda0a164 | ||
|
|
9341480209 | ||
|
|
21877b3378 | ||
|
|
03169a96ef | ||
|
|
ebc3dbe7b6 | ||
|
|
1bed5d4af2 | ||
|
|
0783d86c6c | ||
|
|
a3d5705cea | ||
|
|
2ae8583a86 | ||
|
|
edda4bb265 | ||
|
|
d068481aec | ||
|
|
e20d8c8e81 | ||
|
|
f6cc351f7f | ||
|
|
c2d4887043 | ||
|
|
4428dbf1ff | ||
|
|
e225489f43 | ||
|
|
5169fe3484 | ||
|
|
01d1470544 | ||
|
|
faa6ee47c5 | ||
|
|
aa1d71148c | ||
|
|
e86ded6c68 | ||
|
|
365bf4eb53 | ||
|
|
ceb9bfe486 | ||
|
|
0f85c867a0 | ||
|
|
f8a8186bd1 | ||
|
|
33dfb6bf76 | ||
|
|
28cd762dd7 | ||
|
|
217cca47f5 | ||
|
|
8faa5d8b7a | ||
|
|
2c6711e183 | ||
|
|
45328b6e6d | ||
|
|
1523feee91 | ||
|
|
856dc3645e | ||
|
|
5e4dd54112 | ||
|
|
5860689619 | ||
|
|
da7834476b | ||
|
|
d5ba981515 | ||
|
|
afe07bd874 | ||
|
|
f916bd7994 | ||
|
|
3fef7acaa8 | ||
|
|
95190c5509 | ||
|
|
76e887f46d | ||
|
|
0c2b1b7747 | ||
|
|
4c74c8c40c | ||
|
|
3a929919a3 | ||
|
|
c25af96c56 | ||
|
|
f28f1e434d | ||
|
|
b3c5df193a | ||
|
|
8645602b0a | ||
|
|
05156a5991 | ||
|
|
049d642df8 | ||
|
|
951ebf146a | ||
|
|
7a67e0f3d6 | ||
|
|
37def8cf7c | ||
|
|
e4c28fed03 | ||
|
|
f8b7259d7f | ||
|
|
6ae1e156c8 | ||
|
|
9a055dbf78 | ||
|
|
80ac44565a | ||
|
|
a338199198 | ||
|
|
47fc0a759f | ||
|
|
5eb4b35508 | ||
|
|
d93eedaedb | ||
|
|
a748a272fb | ||
|
|
a28f8c43cb | ||
|
|
fbec6a60bf | ||
|
|
6a4f3c8758 | ||
|
|
04625f566b | ||
|
|
895a567ed1 | ||
|
|
e152b30fc1 | ||
|
|
a1fe60da78 | ||
|
|
d8d710a83d | ||
|
|
92f0893764 | ||
|
|
479448ff09 | ||
|
|
62a36e3704 | ||
|
|
2d286c5f68 | ||
|
|
f435e577b2 | ||
|
|
236b332a8b | ||
|
|
a7028df828 | ||
|
|
a59017ceef | ||
|
|
affacb8ab5 | ||
|
|
37f9024940 | ||
|
|
f72fca5fcf | ||
|
|
21aeda0f45 | ||
|
|
65a0e5ed45 | ||
|
|
571e34bf79 | ||
|
|
6dc58af8e1 | ||
|
|
bbd3edba47 | ||
|
|
46d6dce738 | ||
|
|
475a701f78 | ||
|
|
dccd7f2e9d | ||
|
|
47711bc007 | ||
|
|
04eec61387 | ||
|
|
ef4a2cbebb | ||
|
|
c8d76dbe78 | ||
|
|
61a706274b | ||
|
|
20226f8984 | ||
|
|
7ff04dafd3 | ||
|
|
f9bdb1da15 | ||
|
|
dab678c5cc | ||
|
|
44ffcf86de | ||
|
|
8a18984be1 | ||
|
|
0b1776098b | ||
|
|
5da13683ce | ||
|
|
89c2038ea3 | ||
|
|
a1012b365c | ||
|
|
673068464a | ||
|
|
484ef1b6a8 | ||
|
|
7fc269b65a | ||
|
|
694e494484 | ||
|
|
ddda6b3d21 | ||
|
|
80adbf9c03 | ||
|
|
264f35d259 | ||
|
|
e513f8fe31 | ||
|
|
b9f35e5b50 | ||
|
|
002f463ffd | ||
|
|
28512e042b | ||
|
|
482395eb6a | ||
|
|
e1d44d6d14 | ||
|
|
19030b9d5f | ||
|
|
3e4738d948 | ||
|
|
94083f55c7 | ||
|
|
6ecd18b2e2 | ||
|
|
4e9c705997 | ||
|
|
1803a76a4d | ||
|
|
86ca1875f1 | ||
|
|
bf5c259d92 | ||
|
|
9bca937fad | ||
|
|
526ca3ae42 | ||
|
|
695c7ade86 | ||
|
|
c133b35162 | ||
|
|
afb3c0e31e | ||
|
|
8965f1934e | ||
|
|
556040fb8b | ||
|
|
8b3e49cb24 | ||
|
|
331e272be0 | ||
|
|
9e7808f2c9 | ||
|
|
0bb2de24f3 | ||
|
|
72ce7b194f | ||
|
|
85958c51a8 | ||
|
|
1dd44df471 | ||
|
|
b132f855a0 | ||
|
|
a361b5da6e | ||
|
|
7df63830ed | ||
|
|
b3cf93256b | ||
|
|
c695ba2e10 | ||
|
|
c44160933d | ||
|
|
9ae3e1c40f | ||
|
|
c7c5a9d2f7 | ||
|
|
a56a231869 | ||
|
|
5087ca7f62 | ||
|
|
3b7336c570 | ||
|
|
9b413afb2e | ||
|
|
eec05eac3c | ||
|
|
41671b5868 | ||
|
|
0bbf1d7014 | ||
|
|
c5ce62e11d | ||
|
|
9316c9ea3e | ||
|
|
427b8bdabb | ||
|
|
cce470a5f8 | ||
|
|
92baea2ee6 | ||
|
|
3be9113d6b | ||
|
|
785c6fe846 | ||
|
|
0d29bbfdc6 | ||
|
|
ce8117ef19 | ||
|
|
bb921af146 | ||
|
|
5e0ecc7f43 | ||
|
|
73dc58e810 | ||
|
|
89344c2eee | ||
|
|
d61cd519d7 | ||
|
|
8057438c67 | ||
|
|
110671532d | ||
|
|
92ac3054c6 | ||
|
|
c95c2a4580 | ||
|
|
2c01476eca | ||
|
|
8adbc8a026 | ||
|
|
98b8d7fb69 | ||
|
|
477551325d | ||
|
|
fdedc472d1 | ||
|
|
88819ada6b | ||
|
|
f3ee93b67f | ||
|
|
b4549e5581 | ||
|
|
3afed6adb7 | ||
|
|
1bc78fd429 | ||
|
|
6ce1b50751 | ||
|
|
6c87b88e2c | ||
|
|
10f21b8817 | ||
|
|
0d1b25131d | ||
|
|
d2118b8d25 | ||
|
|
b852caaaa3 | ||
|
|
b0dd10b538 | ||
|
|
8f4aa5511b | ||
|
|
4b26ac06ba | ||
|
|
4dc6452dea | ||
|
|
5a17acb131 | ||
|
|
6cfd9dbfe4 | ||
|
|
110c5a8e84 | ||
|
|
b185c1e0a2 | ||
|
|
f1a4a62304 | ||
|
|
9f3e3bad54 | ||
|
|
4198227116 | ||
|
|
56525bc34f | ||
|
|
3f2068e74e | ||
|
|
6117f8297e | ||
|
|
8953d06905 | ||
|
|
bf39a2a747 | ||
|
|
f27ec8cd89 | ||
|
|
aec3244f52 | ||
|
|
c8e208dea7 | ||
|
|
f2cec03900 | ||
|
|
07aaad53aa | ||
|
|
42a0192425 | ||
|
|
ac08033d3e | ||
|
|
6d0fed6d9a | ||
|
|
53ae05094e | ||
|
|
78c4d5005e | ||
|
|
0b4e0678e9 | ||
|
|
79002ae563 | ||
|
|
6c868c7552 | ||
|
|
6e4f3efc4b | ||
|
|
ce9da4edb7 | ||
|
|
6ff1fe8548 | ||
|
|
140b85688f | ||
|
|
0477581dea | ||
|
|
d5c557c639 | ||
|
|
8e60cb1270 | ||
|
|
906eb98d8e | ||
|
|
119b9475ea | ||
|
|
12c8994faf | ||
|
|
f3e73bbed8 | ||
|
|
e2a1b7902c | ||
|
|
d65a3fa037 | ||
|
|
f6600887bc | ||
|
|
96c18fa311 | ||
|
|
9645e5bcd3 | ||
|
|
0a09d98fe8 | ||
|
|
1224e2c889 | ||
|
|
c8e6fa3bb3 | ||
|
|
00cae104b3 | ||
|
|
6e3b2a5c2d | ||
|
|
3d378077d9 | ||
|
|
f27a34cd1c | ||
|
|
720a634702 | ||
|
|
c722e50595 | ||
|
|
716d440a76 | ||
|
|
9d81727d16 | ||
|
|
1cecfd9771 | ||
|
|
011c8ae822 | ||
|
|
d5626a4f3e | ||
|
|
73f54b2237 | ||
|
|
6073e8e3b6 | ||
|
|
867ff5da71 | ||
|
|
c8b2ca7fed | ||
|
|
0a964b2bf6 | ||
|
|
d4e3127fb4 | ||
|
|
fa18b94725 | ||
|
|
5ab6255c67 | ||
|
|
ac80bc874a | ||
|
|
2e98446394 | ||
|
|
c4afbbc2ca | ||
|
|
517043e209 | ||
|
|
e7c52bc5e7 | ||
|
|
c25d208465 | ||
|
|
503a47c509 | ||
|
|
921231fe3d | ||
|
|
6721ea54e9 | ||
|
|
99a42e91fe | ||
|
|
9a580ba644 | ||
|
|
74fcdabc22 | ||
|
|
9bec7cf3b0 | ||
|
|
f9e402658b | ||
|
|
9570981c7f | ||
|
|
f79debac42 | ||
|
|
a9f3eeef05 | ||
|
|
6eb1feffcd | ||
|
|
6f55cde6d3 | ||
|
|
48511b6c33 | ||
|
|
771daefcfd | ||
|
|
1167361128 | ||
|
|
3a4cc0d464 | ||
|
|
78901ab48e | ||
|
|
938bf1b531 | ||
|
|
27da141889 | ||
|
|
2bf2412759 | ||
|
|
1e3c229460 | ||
|
|
cfa93b52b7 | ||
|
|
96ad2b2b28 | ||
|
|
f53a1fedf6 | ||
|
|
8fceaf8810 | ||
|
|
c39370dbd0 | ||
|
|
bdc7efb274 | ||
|
|
10c76e2337 | ||
|
|
ff1e8cc356 | ||
|
|
da74f1d01f | ||
|
|
8ad46436df | ||
|
|
be01bed34b | ||
|
|
71577bb00d | ||
|
|
8a763d6cf8 | ||
|
|
cfb58eb145 | ||
|
|
1165dcfa07 | ||
|
|
f9928eef70 | ||
|
|
ee1d5e43b9 | ||
|
|
5994c35975 | ||
|
|
e94e79d57a | ||
|
|
f87a09c46a | ||
|
|
535e16c6cf | ||
|
|
5ae19fd9c2 | ||
|
|
7d5f6aa49d | ||
|
|
c0fc3a74ee | ||
|
|
22c831ff31 | ||
|
|
17dc6bf5a1 | ||
|
|
e7fb82ffe7 | ||
|
|
70ae546dee | ||
|
|
5d22fc2bd7 | ||
|
|
b4919f9ebd | ||
|
|
9033b3f2a5 | ||
|
|
de60165a49 | ||
|
|
b02677a8d0 | ||
|
|
b8c1724880 | ||
|
|
6baa2a109d | ||
|
|
7a5cfd05a3 | ||
|
|
b9279ebd5e | ||
|
|
1b25dd0127 | ||
|
|
49396178ca | ||
|
|
a4dfd96a8d | ||
|
|
a2f4e36e47 | ||
|
|
ad566cc651 | ||
|
|
4d9523afa4 | ||
|
|
40602875e0 | ||
|
|
d0572cf170 | ||
|
|
1edede213e | ||
|
|
e0c7a7bece | ||
|
|
640f9474fc | ||
|
|
29b90b700e | ||
|
|
f7c5289195 | ||
|
|
ee11341430 | ||
|
|
56263a5fea | ||
|
|
89e41f7524 | ||
|
|
3a8bacb8ef | ||
|
|
f328f8cad4 | ||
|
|
7752446067 | ||
|
|
96bc0f1578 | ||
|
|
9f25fdd079 | ||
|
|
7249b21214 | ||
|
|
2d642b95ae | ||
|
|
b94d5c7f20 | ||
|
|
02c23fc1c6 | ||
|
|
b75f8ceca6 | ||
|
|
bfc74497b0 | ||
|
|
c8c982428d | ||
|
|
e6dbf71252 | ||
|
|
3701567ad7 | ||
|
|
86140dec08 | ||
|
|
50fe0392ed | ||
|
|
f18c965a8a | ||
|
|
f874e55051 | ||
|
|
1dcd2b1883 | ||
|
|
7684579464 | ||
|
|
16e89ed081 | ||
|
|
3aa07baf26 | ||
|
|
f1c53fcd85 | ||
|
|
6c98e6c3a0 | ||
|
|
8aec4ed72e | ||
|
|
0a0cdc2e21 | ||
|
|
62e3b9e3b6 | ||
|
|
dc3f81920e | ||
|
|
f0776d6838 | ||
|
|
9de63832ce | ||
|
|
8a66213dbe | ||
|
|
23d4122574 | ||
|
|
5900af726b | ||
|
|
9fc4c03e5b | ||
|
|
f8d2a32756 | ||
|
|
0bb1b0ed45 | ||
|
|
3b11219fff | ||
|
|
1b4c3f56fa | ||
|
|
6c5334c7d3 | ||
|
|
1371e394de | ||
|
|
ec67feef2f | ||
|
|
89e656b2a4 | ||
|
|
5910b8c562 | ||
|
|
580004b395 | ||
|
|
70629ef7f3 | ||
|
|
1d8bb47726 | ||
|
|
5e16c72d30 | ||
|
|
02f709f8d1 | ||
|
|
90bd27f5a8 | ||
|
|
593ab90f92 | ||
|
|
27c06a7285 | ||
|
|
b2c755ba76 | ||
|
|
c88cab7d31 | ||
|
|
f82f4a9993 | ||
|
|
5a6f1a342f | ||
|
|
910663764f | ||
|
|
43aa0fc741 | ||
|
|
b9b9fc1934 | ||
|
|
093f453073 | ||
|
|
0e696d0515 | ||
|
|
c4a29ded1c | ||
|
|
f402ff0ee7 | ||
|
|
b982793a3a | ||
|
|
b3f2f7efe5 | ||
|
|
19f9a3f918 | ||
|
|
4b2e709e8d | ||
|
|
146590d0c2 | ||
|
|
247ee4ddac | ||
|
|
2f2294b65a | ||
|
|
ec873dd28c | ||
|
|
c2bd36e580 | ||
|
|
1f8736ce1d | ||
|
|
50a9f0be6b | ||
|
|
8f3c5be04e | ||
|
|
34ceaf4551 | ||
|
|
786e907e3b | ||
|
|
c5aa9ee12b | ||
|
|
24f8cb49b5 | ||
|
|
54d967af0d | ||
|
|
3c91370cab | ||
|
|
e22dc3dc7b | ||
|
|
9ae41dc3ba | ||
|
|
f175d6dfae | ||
|
|
34c659d8b6 | ||
|
|
6eb406ac39 | ||
|
|
cddceb0e06 | ||
|
|
a549bea815 | ||
|
|
860fbdad02 | ||
|
|
f639e46718 | ||
|
|
56dc08683e | ||
|
|
148daec49b | ||
|
|
2d03938451 | ||
|
|
bc7b586803 | ||
|
|
1408200927 | ||
|
|
3576e192f4 | ||
|
|
70d930e019 | ||
|
|
c69e41b261 | ||
|
|
2949efd6ec | ||
|
|
2592613bde | ||
|
|
aa717a2728 | ||
|
|
de158cb41d | ||
|
|
f7737e2f94 | ||
|
|
4e45b6ba6d | ||
|
|
5885654405 | ||
|
|
227960e3ea | ||
|
|
d6ba3e1fc2 | ||
|
|
2643a1b3d6 | ||
|
|
6afb47789a | ||
|
|
2acc488adf | ||
|
|
d0598e720d | ||
|
|
162ef08cef | ||
|
|
aa0d2cff5c | ||
|
|
d608402dc1 | ||
|
|
04dbc2fcc4 | ||
|
|
0bc9b1d431 | ||
|
|
138f8a45ae | ||
|
|
ee348b7169 | ||
|
|
38b9b47e6b | ||
|
|
2187655c68 | ||
|
|
13203af353 | ||
|
|
4781df62ec | ||
|
|
72372b3810 | ||
|
|
b742746e5d | ||
|
|
74fc0fef04 | ||
|
|
bb8025c1af | ||
|
|
d824508cfb | ||
|
|
077e541876 | ||
|
|
4561fd7270 | ||
|
|
50786f201f | ||
|
|
5561eb30f7 | ||
|
|
e2c4fd5ebb | ||
|
|
7226acb2b6 | ||
|
|
7ef8e147f4 | ||
|
|
45db305e69 | ||
|
|
52abb29091 | ||
|
|
d564a268fd | ||
|
|
8280aff612 | ||
|
|
b35d6b7425 | ||
|
|
7b692b0c31 | ||
|
|
a271837007 | ||
|
|
a3d0e10f51 | ||
|
|
5e8f7b76f1 | ||
|
|
c67e9143fb | ||
|
|
5abe045e6c | ||
|
|
4bc63cc37e | ||
|
|
5cdd947196 | ||
|
|
67d1267d98 | ||
|
|
66db615c0c | ||
|
|
598449c2ce | ||
|
|
4119c1dd0b | ||
|
|
2acf055f6a | ||
|
|
4eeb62766e | ||
|
|
d995068396 | ||
|
|
ee139b306c | ||
|
|
a36b0061fa | ||
|
|
eb0cf945cf | ||
|
|
2e7ab57645 | ||
|
|
d8f6c0aebc | ||
|
|
f8e5e38614 | ||
|
|
3f841180da | ||
|
|
9a85578925 | ||
|
|
f1e0c1e977 | ||
|
|
1b8cb45024 | ||
|
|
fb9e508b6b | ||
|
|
0868f97335 | ||
|
|
30fbeb43bb | ||
|
|
d2aea30d3d | ||
|
|
cdb347cba5 | ||
|
|
c95c7c8b18 | ||
|
|
14043f792a | ||
|
|
9632f3b69e | ||
|
|
da1da6f530 | ||
|
|
5b93007ba1 | ||
|
|
e87633f1d8 | ||
|
|
ca35eb39d2 | ||
|
|
5d84863237 | ||
|
|
f4728149d9 | ||
|
|
4c7c8b6db3 | ||
|
|
db8ee2810a | ||
|
|
5ba8bbb08b | ||
|
|
87adfe5889 | ||
|
|
07cb2aa9bb | ||
|
|
19c5564ec8 | ||
|
|
e05d071dab | ||
|
|
6ba1b170d2 | ||
|
|
63d7abc7e4 | ||
|
|
b318fa7814 | ||
|
|
5f6907ba83 | ||
|
|
cffa324762 | ||
|
|
b690e61576 | ||
|
|
ae207b5f33 | ||
|
|
5c23c63e6d | ||
|
|
3a133836dc | ||
|
|
ab7cc88caf | ||
|
|
3b997cdd3a | ||
|
|
59f246d297 | ||
|
|
14a8258835 | ||
|
|
897fb96f94 | ||
|
|
79a29ebcc8 | ||
|
|
950e4dab04 | ||
|
|
ab82cc3ba3 | ||
|
|
93a8a952f1 | ||
|
|
2b9954c373 | ||
|
|
21f0c1d1d7 | ||
|
|
379979511b | ||
|
|
2e6a7205e7 | ||
|
|
14685901aa | ||
|
|
15480a56db | ||
|
|
9f54ba069e | ||
|
|
42f01b7f05 | ||
|
|
6cf1fb3c10 | ||
|
|
03058cd1e8 | ||
|
|
a30c2fe227 | ||
|
|
d8e890b651 | ||
|
|
95735ee01a | ||
|
|
61931d0b6c | ||
|
|
516607551c | ||
|
|
5e974d84b6 | ||
|
|
91bc39be6b | ||
|
|
49222d5e72 | ||
|
|
686e5ac545 | ||
|
|
0c3d6e7c33 | ||
|
|
e1b7e7f6ce | ||
|
|
a294a6f06e | ||
|
|
97c8005d00 | ||
|
|
d2ab7bd54d | ||
|
|
e02e8994ad | ||
|
|
ada2d65547 | ||
|
|
0443bd3099 | ||
|
|
adaa164a19 |
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1,3 +1,8 @@
|
||||
# Ignore generated schema
|
||||
swagger.json
|
||||
schema.json
|
||||
reference-schema.json
|
||||
|
||||
# Tags
|
||||
.tags
|
||||
.tags1
|
||||
@@ -52,6 +57,7 @@ __pycache__
|
||||
**/node_modules/**
|
||||
/tmp
|
||||
**/npm-debug.log*
|
||||
**/package-lock.json
|
||||
|
||||
# UI build flag files
|
||||
awx/ui/.deps_built
|
||||
@@ -112,7 +118,6 @@ local/
|
||||
*.mo
|
||||
requirements/vendor
|
||||
.i18n_built
|
||||
VERSION
|
||||
.idea/*
|
||||
|
||||
# AWX python libs populated by requirements.txt
|
||||
|
||||
@@ -2,11 +2,11 @@
|
||||
|
||||
Hi there! We're excited to have you as a contributor.
|
||||
|
||||
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project) .
|
||||
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
|
||||
## Table of contents
|
||||
|
||||
* [Things to know prior to submitting code](#things-to-know-prior-to-contributing-code)
|
||||
* [Things to know prior to submitting code](#things-to-know-prior-to-submitting-code)
|
||||
* [Setting up your development environment](#setting-up-your-development-environment)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Docker](#docker)
|
||||
@@ -17,14 +17,14 @@ Have questions about this document or anything not covered here? Come chat with
|
||||
* [Create local settings](#create-local-settings)
|
||||
* [Build the base image](#build-the-base-image)
|
||||
* [Build the user interface](#build-the-user-interface)
|
||||
# [Running the environment](#running-the-environment)
|
||||
* [Running the environment](#running-the-environment)
|
||||
* [Start the containers](#start-the-containers)
|
||||
* [Start from the container shell](#start-from-the-container-shell)
|
||||
* [Post Build Steps](#post-build-steps)
|
||||
* [Start a shell](#start-the-shell)
|
||||
* [Create a superuser](#create-a-superuser)
|
||||
* [Load the data](#load-the-data)
|
||||
* [Building API Documentation](#build-documentation)
|
||||
* [Start a shell](#start-a-shell)
|
||||
* [Create a superuser](#create-a-superuser)
|
||||
* [Load the data](#load-the-data)
|
||||
* [Building API Documentation](#build-api-documentation)
|
||||
* [Accessing the AWX web interface](#accessing-the-awx-web-interface)
|
||||
* [Purging containers and images](#purging-containers-and-images)
|
||||
* [What should I work on?](#what-should-i-work-on)
|
||||
@@ -34,10 +34,11 @@ Have questions about this document or anything not covered here? Come chat with
|
||||
## Things to know prior to submitting code
|
||||
|
||||
- All code submissions are done through pull requests against the `devel` branch.
|
||||
- You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
|
||||
- You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
|
||||
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
|
||||
- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt
|
||||
- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.freenode.net, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
|
||||
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
## Setting up your development environment
|
||||
|
||||
@@ -49,7 +50,7 @@ The AWX development environment workflow and toolchain is based on Docker, and t
|
||||
|
||||
Prior to starting the development services, you'll need `docker` and `docker-compose`. On Linux, you can generally find these in your distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases.
|
||||
|
||||
For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows)
|
||||
For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows)
|
||||
respectively.
|
||||
|
||||
For Linux platforms, refer to the following from Docker:
|
||||
@@ -86,8 +87,8 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
|
||||
|
||||
The AWX UI requires the following:
|
||||
|
||||
- Node 6.x LTS version
|
||||
- NPM 3.x LTS
|
||||
- Node 8.x LTS
|
||||
- NPM 6.x LTS
|
||||
|
||||
### Build the environment
|
||||
|
||||
@@ -137,21 +138,21 @@ Run the following to build the AWX UI:
|
||||
```
|
||||
### Running the environment
|
||||
|
||||
#### Start the containers
|
||||
#### Start the containers
|
||||
|
||||
Start the development containers by running the following:
|
||||
|
||||
```bash
|
||||
(host)$ make docker-compose
|
||||
```
|
||||
|
||||
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django, celery, and the front end build process.
|
||||
|
||||
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django and the front end build process.
|
||||
|
||||
If you start a second terminal session, you can take a look at the running containers using the `docker ps` command. For example:
|
||||
|
||||
```bash
|
||||
# List running containers
|
||||
(host)$ docker ps
|
||||
(host)$ docker ps
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
@@ -174,7 +175,7 @@ The first time you start the environment, database migrations need to run in ord
|
||||
```bash
|
||||
awx_1 | Operations to perform:
|
||||
awx_1 | Synchronize unmigrated apps: solo, api, staticfiles, debug_toolbar, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
awx_1 | Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
awx_1 | Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
awx_1 | Synchronizing apps without migrations:
|
||||
awx_1 | Creating tables...
|
||||
awx_1 | Running deferred SQL...
|
||||
@@ -219,7 +220,7 @@ If you want to start and use the development environment, you'll first need to b
|
||||
```
|
||||
|
||||
The above will do all the setup tasks, including running database migrations, so it may take a couple minutes.
|
||||
|
||||
|
||||
Now you can start each service individually, or start all services in a pre-configured tmux session like so:
|
||||
|
||||
```bash
|
||||
@@ -248,9 +249,9 @@ Before you can log into AWX, you need to create an admin user. With this user yo
|
||||
(container)# awx-manage createsuperuser
|
||||
```
|
||||
You will be prompted for a username, an email address, and a password, and you will be asked to confirm the password. The email address is not important, so just enter something that looks like an email address. Remember the username and password, as you will use them to log into the web interface for the first time.
|
||||
|
||||
|
||||
##### Load demo data
|
||||
|
||||
|
||||
You can optionally load some demo data. This will create a demo project, inventory, and job template. From within the container shell, run the following to load the data:
|
||||
|
||||
```bash
|
||||
@@ -276,7 +277,7 @@ in OpenAPI format. A variety of online tools are available for translating
|
||||
this data into more consumable formats (such as HTML). http://editor.swagger.io
|
||||
is an example of one such service.
|
||||
|
||||
### Accessing the AWX web interface
|
||||
### Accessing the AWX web interface
|
||||
|
||||
You can now log into the AWX web interface at [https://localhost:8043](https://localhost:8043), and access the API directly at [https://localhost:8043/api/](https://localhost:8043/api/).
|
||||
|
||||
@@ -289,7 +290,7 @@ When necessary, remove any AWX containers and images by running the following:
|
||||
```bash
|
||||
(host)$ make docker-clean
|
||||
```
|
||||
|
||||
|
||||
## What should I work on?
|
||||
|
||||
For feature work, take a look at the current [Enhancements](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3Atype%3Aenhancement).
|
||||
@@ -329,7 +330,24 @@ We like to keep our commit history clean, and will require resubmission of pull
|
||||
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them, and the status reported in the PR.
|
||||
All submitted PRs will have the linter and unit tests run against them via Zuul, and the status reported in the PR.
|
||||
|
||||
## PR Checks ran by Zuul
|
||||
Zuul jobs for awx are defined in the [zuul-jobs](https://github.com/ansible/zuul-jobs) repo.
|
||||
|
||||
Zuul runs the following checks that must pass:
|
||||
1) `tox-awx-api-lint`
|
||||
2) `tox-awx-ui-lint`
|
||||
3) `tox-awx-api`
|
||||
4) `tox-awx-ui`
|
||||
5) `tox-awx-swagger`
|
||||
|
||||
Zuul runs the following checks that are non-voting (they may fail without blocking the PR, but serve to inform PR reviewers):
|
||||
1) `tox-awx-detect-schema-change`
|
||||
This check generates the schema and diffs it against a reference copy of the `devel` version of the schema.
|
||||
Reviewers should inspect the `job-output.txt.gz` related to the check if there is a failure (grep for `diff -u -b` to find the beginning of the diff).
|
||||
If the schema change is expected and makes sense in relation to the changes made by the PR, then you are good to go!
|
||||
If not, the schema changes should be fixed, but this decision must be enforced by reviewers.
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ Export all objects
|
||||
|
||||
Clean up remnants of the old AWX install:
|
||||
|
||||
```docker rm -f $(ps -aq)``` # remove all old awx containers
|
||||
```docker rm -f $(docker ps -aq)``` # remove all old awx containers
|
||||
|
||||
```make clean-ui``` # clean up ui artifacts
|
||||
|
||||
|
||||
18
INSTALL.md
18
INSTALL.md
@@ -62,8 +62,8 @@ Before you can run a deployment, you'll need the following installed in your loc
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Git](https://git-scm.com/) Requires Version 1.8.4+
|
||||
- [Node 6.x LTS version](https://nodejs.org/en/download/)
|
||||
- [NPM 3.x LTS](https://docs.npmjs.com/)
|
||||
- [Node 8.x LTS version](https://nodejs.org/en/download/)
|
||||
- [NPM 6.x LTS](https://docs.npmjs.com/)
|
||||
|
||||
### System Requirements
|
||||
|
||||
@@ -119,12 +119,12 @@ To complete a deployment to OpenShift, you will obviously need access to an Open
|
||||
|
||||
You will also need to have the `oc` command in your PATH. The `install.yml` playbook will call out to `oc` when logging into, and creating objects on the cluster.
|
||||
|
||||
The default resource requests per-pod requires:
|
||||
The default resource requests per-deployment requires:
|
||||
|
||||
> Memory: 6GB
|
||||
> CPU: 3 cores
|
||||
|
||||
This can be tuned by overriding the variables found in [/installer/openshift/defaults/main.yml](/installer/openshift/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
|
||||
This can be tuned by overriding the variables found in [/installer/roles/kubernetes/defaults/main.yml](/installer/roles/kubernetes/defaults/main.yml). Special care should be taken when doing this as undersized instances will experience crashes and resource exhaustion.
|
||||
|
||||
For more detail on how resource requests are formed see: [https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources](https://docs.openshift.com/container-platform/latest/dev_guide/compute_resources.html#dev-compute-resources)
|
||||
|
||||
@@ -236,7 +236,7 @@ Using /etc/ansible/ansible.cfg as config file
|
||||
}
|
||||
Operations to perform:
|
||||
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Synchronizing apps without migrations:
|
||||
Creating tables...
|
||||
Running deferred SQL...
|
||||
@@ -426,7 +426,7 @@ If you choose to use the official images then the remote host will be the one to
|
||||
|
||||
> As mentioned above, in [Prerequisites](#prerequisites-1), the prerequisites are required on the remote host.
|
||||
|
||||
> When deploying to a remote host, the playook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
|
||||
> When deploying to a remote host, the playbook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
|
||||
|
||||
|
||||
#### Inventory variables
|
||||
@@ -449,6 +449,10 @@ Before starting the build process, review the [inventory](./installer/inventory)
|
||||
|
||||
When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
|
||||
|
||||
*ca_trust_dir*
|
||||
|
||||
> If you're using a non trusted CA, provide a path where the untrusted Certs are stored on your Host.
|
||||
|
||||
#### Docker registry
|
||||
|
||||
If you wish to tag and push built images to a Docker registry, set the following variables in the inventory file:
|
||||
@@ -548,7 +552,7 @@ Using /etc/ansible/ansible.cfg as config file
|
||||
}
|
||||
Operations to perform:
|
||||
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Apply all migrations: sso, taggit, sessions, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Synchronizing apps without migrations:
|
||||
Creating tables...
|
||||
Running deferred SQL...
|
||||
|
||||
106
Makefile
106
Makefile
@@ -11,11 +11,7 @@ GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
IMAGE_REPOSITORY_AUTH ?=
|
||||
IMAGE_REPOSITORY_BASE ?= https://gcr.io
|
||||
|
||||
VERSION=$(shell git describe --long --first-parent)
|
||||
VERSION3=$(shell git describe --long --first-parent | sed 's/\-g.*//')
|
||||
VERSION3DOT=$(shell git describe --long --first-parent | sed 's/\-g.*//' | sed 's/\-/\./')
|
||||
RELEASE_VERSION=$(shell git describe --long --first-parent | sed 's@\([0-9.]\{1,\}\).*@\1@')
|
||||
VERSION := $(shell cat VERSION)
|
||||
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
@@ -30,8 +26,6 @@ DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
|
||||
CURWD = $(shell pwd)
|
||||
|
||||
# Determine appropriate shasum command
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
@@ -48,20 +42,9 @@ DATE := $(shell date -u +%Y%m%d%H%M)
|
||||
NAME ?= awx
|
||||
GIT_REMOTE_URL = $(shell git config --get remote.origin.url)
|
||||
|
||||
ifeq ($(OFFICIAL),yes)
|
||||
VERSION_TARGET ?= $(RELEASE_VERSION)
|
||||
else
|
||||
VERSION_TARGET ?= $(VERSION3DOT)
|
||||
endif
|
||||
|
||||
# TAR build parameters
|
||||
ifeq ($(OFFICIAL),yes)
|
||||
SDIST_TAR_NAME=$(NAME)-$(RELEASE_VERSION)
|
||||
WHEEL_NAME=$(NAME)-$(RELEASE_VERSION)
|
||||
else
|
||||
SDIST_TAR_NAME=$(NAME)-$(VERSION3DOT)
|
||||
WHEEL_NAME=$(NAME)-$(VERSION3DOT)
|
||||
endif
|
||||
SDIST_TAR_NAME=$(NAME)-$(VERSION)
|
||||
WHEEL_NAME=$(NAME)-$(VERSION)
|
||||
|
||||
SDIST_COMMAND ?= sdist
|
||||
WHEEL_COMMAND ?= bdist_wheel
|
||||
@@ -75,7 +58,7 @@ UI_RELEASE_FLAG_FILE = awx/ui/.release_built
|
||||
I18N_FLAG_FILE = .i18n_built
|
||||
|
||||
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
|
||||
develop refresh adduser migrate dbchange dbshell runserver celeryd \
|
||||
develop refresh adduser migrate dbchange dbshell runserver \
|
||||
receiver test test_unit test_ansible test_coverage coverage_html \
|
||||
dev_build release_build release_clean sdist \
|
||||
ui-docker-machine ui-docker ui-release ui-devel \
|
||||
@@ -101,6 +84,11 @@ clean-venv:
|
||||
clean-dist:
|
||||
rm -rf dist
|
||||
|
||||
clean-schema:
|
||||
rm -rf swagger.json
|
||||
rm -rf schema.json
|
||||
rm -rf reference-schema.json
|
||||
|
||||
# Remove temporary build files, compiled Python files.
|
||||
clean: clean-ui clean-dist
|
||||
rm -rf awx/public
|
||||
@@ -112,7 +100,6 @@ clean: clean-ui clean-dist
|
||||
rm -rf requirements/vendor
|
||||
rm -rf tmp
|
||||
rm -rf $(I18N_FLAG_FILE)
|
||||
rm -f VERSION
|
||||
mkdir tmp
|
||||
rm -rf build $(NAME)-$(VERSION) *.egg-info
|
||||
find . -type f -regex ".*\.py[co]$$" -delete
|
||||
@@ -182,7 +169,7 @@ requirements_awx: virtualenv_awx
|
||||
else \
|
||||
cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
|
||||
#$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
|
||||
|
||||
requirements_awx_dev:
|
||||
$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt
|
||||
@@ -221,7 +208,7 @@ init:
|
||||
if [ "$(AWX_GROUP_QUEUES)" == "tower,thepentagon" ]; then \
|
||||
$(MANAGEMENT_COMMAND) provision_instance --hostname=isolated; \
|
||||
$(MANAGEMENT_COMMAND) register_queue --queuename='thepentagon' --hostnames=isolated --controller=tower; \
|
||||
$(MANAGEMENT_COMMAND) generate_isolated_key | ssh -o "StrictHostKeyChecking no" root@isolated 'cat >> /root/.ssh/authorized_keys'; \
|
||||
$(MANAGEMENT_COMMAND) generate_isolated_key > /awx_devel/awx/main/expect/authorized_keys; \
|
||||
fi;
|
||||
|
||||
# Refresh development environment after pulling new code.
|
||||
@@ -250,7 +237,7 @@ server_noattach:
|
||||
tmux new-session -d -s awx 'exec make uwsgi'
|
||||
tmux rename-window 'AWX'
|
||||
tmux select-window -t awx:0
|
||||
tmux split-window -v 'exec make celeryd'
|
||||
tmux split-window -v 'exec make dispatcher'
|
||||
tmux new-window 'exec make daphne'
|
||||
tmux select-window -t awx:1
|
||||
tmux rename-window 'WebSockets'
|
||||
@@ -282,12 +269,6 @@ honcho:
|
||||
fi; \
|
||||
honcho start -f tools/docker-compose/Procfile
|
||||
|
||||
flower:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//
|
||||
|
||||
collectstatic:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
@@ -298,7 +279,7 @@ uwsgi: collectstatic
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:/bin/sh -c '[ -f /tmp/celery_pid ] && kill -1 `cat /tmp/celery_pid` || true'"
|
||||
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:awx-manage run_dispatcher --reload"
|
||||
|
||||
daphne:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -319,13 +300,13 @@ runserver:
|
||||
fi; \
|
||||
$(PYTHON) manage.py runserver
|
||||
|
||||
# Run to start the background celery worker for development.
|
||||
celeryd:
|
||||
rm -f /tmp/celery_pid
|
||||
# Run to start the background task dispatcher for development.
|
||||
dispatcher:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
celery worker -A awx -l DEBUG -B -Ofair --autoscale=100,4 --schedule=$(CELERY_SCHEDULE_FILE) -n celery@$(COMPOSE_HOST) --pidfile /tmp/celery_pid
|
||||
$(PYTHON) manage.py run_dispatcher
|
||||
|
||||
|
||||
# Run to start the zeromq callback receiver
|
||||
receiver:
|
||||
@@ -361,18 +342,21 @@ pyflakes: reports
|
||||
pylint: reports
|
||||
@(set -o pipefail && $@ | reports/$@.report)
|
||||
|
||||
genschema: reports
|
||||
$(MAKE) swagger PYTEST_ARGS="--genschema"
|
||||
|
||||
swagger: reports
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
(set -o pipefail && py.test awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
|
||||
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
|
||||
|
||||
check: flake8 pep8 # pyflakes pylint
|
||||
|
||||
awx-link:
|
||||
cp -R /tmp/awx.egg-info /awx_devel/ || true
|
||||
sed -i "s/placeholder/$(shell git describe --long | sed 's/\./\\./g')/" /awx_devel/awx.egg-info/PKG-INFO
|
||||
cp /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
|
||||
cp -f /tmp/awx.egg-link /venv/awx/lib/python2.7/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
|
||||
@@ -381,7 +365,8 @@ test:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
py.test -n auto $(TEST_DIRS)
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
|
||||
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'
|
||||
|
||||
test_combined: test_ansible test
|
||||
|
||||
@@ -482,7 +467,7 @@ $(I18N_FLAG_FILE): $(UI_DEPS_FLAG_FILE)
|
||||
ui-deps: $(UI_DEPS_FLAG_FILE)
|
||||
|
||||
$(UI_DEPS_FLAG_FILE):
|
||||
$(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui
|
||||
$(NPM_BIN) --unsafe-perm --prefix awx/ui install --no-save awx/ui
|
||||
touch $(UI_DEPS_FLAG_FILE)
|
||||
|
||||
ui-docker-machine: $(UI_DEPS_FLAG_FILE)
|
||||
@@ -563,22 +548,37 @@ docker-isolated:
|
||||
docker start tools_awx_1
|
||||
docker start tools_isolated_1
|
||||
echo "__version__ = '`git describe --long | cut -d - -f 1-1`'" | docker exec -i tools_isolated_1 /bin/bash -c "cat > /venv/awx/lib/python2.7/site-packages/awx.py"
|
||||
if [ "`docker exec -i -t tools_isolated_1 cat /root/.ssh/authorized_keys`" == "`docker exec -t tools_awx_1 cat /root/.ssh/id_rsa.pub`" ]; then \
|
||||
echo "SSH keys already copied to isolated instance"; \
|
||||
else \
|
||||
docker exec "tools_isolated_1" bash -c "mkdir -p /root/.ssh && rm -f /root/.ssh/authorized_keys && echo $$(docker exec -t tools_awx_1 cat /root/.ssh/id_rsa.pub) >> /root/.ssh/authorized_keys"; \
|
||||
fi
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up
|
||||
|
||||
# Docker Compose Development environment
|
||||
docker-compose: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
|
||||
|
||||
docker-compose-cluster: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up
|
||||
|
||||
docker-compose-test: docker-auth
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
|
||||
|
||||
docker-compose-runtest:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
|
||||
|
||||
docker-compose-build-swagger:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger
|
||||
|
||||
docker-compose-genschema:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh genschema
|
||||
mv swagger.json schema.json
|
||||
|
||||
docker-compose-detect-schema-change:
|
||||
$(MAKE) docker-compose-genschema
|
||||
curl https://s3.amazonaws.com/awx-public-ci-files/schema.json -o reference-schema.json
|
||||
# Ignore differences in whitespace with -b
|
||||
diff -u -b schema.json reference-schema.json
|
||||
|
||||
docker-compose-clean:
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm -w /awx_devel --service-ports awx make clean
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
|
||||
|
||||
docker-compose-build: awx-devel-build
|
||||
|
||||
@@ -604,14 +604,13 @@ docker-refresh: docker-clean docker-compose
|
||||
|
||||
# Docker Development Environment with Elastic Stack Connected
|
||||
docker-compose-elk: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-cluster-elk: docker-auth
|
||||
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
minishift-dev:
|
||||
ansible-playbook -i localhost, -e devtree_directory=$(CURWD) tools/clusterdevel/start_minishift_dev.yml
|
||||
|
||||
ansible-playbook -i localhost, -e devtree_directory=$(CURDIR) tools/clusterdevel/start_minishift_dev.yml
|
||||
|
||||
clean-elk:
|
||||
docker stop tools_kibana_1
|
||||
@@ -625,5 +624,4 @@ psql-container:
|
||||
docker run -it --net tools_default --rm postgres:9.6 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
|
||||
|
||||
VERSION:
|
||||
@echo $(VERSION_TARGET) > $@
|
||||
@echo "awx: $(VERSION_TARGET)"
|
||||
@echo "awx: $(VERSION)"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
[](https://app.shippable.com/github/ansible/awx)
|
||||
[](https://ansible.softwarefactory-project.io/zuul/status)
|
||||
|
||||
AWX
|
||||
===
|
||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||
|
||||
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
|
||||
|
||||
@@ -11,6 +10,8 @@ To learn more about using AWX, and Tower, view the [Tower docs site](http://docs
|
||||
|
||||
The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
|
||||
|
||||
The AWX logos and branding assets are covered by [our trademark guidelines](https://github.com/ansible/awx-logos/blob/master/TRADEMARKS.md).
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
|
||||
@@ -12,14 +12,6 @@ __version__ = get_distribution('awx').version
|
||||
__all__ = ['__version__']
|
||||
|
||||
|
||||
# Isolated nodes do not have celery installed
|
||||
try:
|
||||
from .celery import app as celery_app # noqa
|
||||
__all__.append('celery_app')
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# Check for the presence/absence of "devonly" module to determine if running
|
||||
# from a source code checkout or release packaage.
|
||||
try:
|
||||
@@ -29,6 +21,48 @@ except ImportError: # pragma: no cover
|
||||
MODE = 'production'
|
||||
|
||||
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
import django
|
||||
from django.utils.encoding import force_bytes
|
||||
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
|
||||
from django.db.backends.base import schema
|
||||
HAS_DJANGO = True
|
||||
except ImportError:
|
||||
HAS_DJANGO = False
|
||||
|
||||
|
||||
if HAS_DJANGO is True:
|
||||
# This line exists to make sure we don't regress on FIPS support if we
|
||||
# upgrade Django; if you're upgrading Django and see this error,
|
||||
# update the version check below, and confirm that FIPS still works.
|
||||
if django.__version__ != '1.11.16':
|
||||
raise RuntimeError("Django version other than 1.11.16 detected {}. \
|
||||
Subclassing BaseDatabaseSchemaEditor is known to work for Django 1.11.16 \
|
||||
and may not work in newer Django versions.".format(django.__version__))
|
||||
|
||||
|
||||
class FipsBaseDatabaseSchemaEditor(BaseDatabaseSchemaEditor):
|
||||
|
||||
@classmethod
|
||||
def _digest(cls, *args):
|
||||
"""
|
||||
Generates a 32-bit digest of a set of arguments that can be used to
|
||||
shorten identifying names.
|
||||
"""
|
||||
try:
|
||||
h = hashlib.md5()
|
||||
except ValueError:
|
||||
h = hashlib.md5(usedforsecurity=False)
|
||||
for arg in args:
|
||||
h.update(force_bytes(arg))
|
||||
return h.hexdigest()[:8]
|
||||
|
||||
|
||||
schema.BaseDatabaseSchemaEditor = FipsBaseDatabaseSchemaEditor
|
||||
|
||||
|
||||
def find_commands(management_dir):
|
||||
# Modified version of function from django/core/management/__init__.py.
|
||||
command_dir = os.path.join(management_dir, 'commands')
|
||||
|
||||
@@ -97,7 +97,7 @@ class DeprecatedCredentialField(serializers.IntegerField):
|
||||
kwargs['allow_null'] = True
|
||||
kwargs['default'] = None
|
||||
kwargs['min_value'] = 1
|
||||
kwargs['help_text'] = 'This resource has been deprecated and will be removed in a future release'
|
||||
kwargs.setdefault('help_text', 'This resource has been deprecated and will be removed in a future release')
|
||||
super(DeprecatedCredentialField, self).__init__(**kwargs)
|
||||
|
||||
def to_internal_value(self, pk):
|
||||
|
||||
@@ -25,7 +25,6 @@ from rest_framework.filters import BaseFilterBackend
|
||||
from awx.main.utils import get_type_for_model, to_python_boolean
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
from awx.main.models.credential import CredentialType
|
||||
from awx.main.models.rbac import RoleAncestorEntry
|
||||
|
||||
|
||||
class V1CredentialFilterBackend(BaseFilterBackend):
|
||||
@@ -347,12 +346,12 @@ class FieldLookupBackend(BaseFilterBackend):
|
||||
else:
|
||||
args.append(Q(**{k:v}))
|
||||
for role_name in role_filters:
|
||||
if not hasattr(queryset.model, 'accessible_pk_qs'):
|
||||
raise ParseError(_(
|
||||
'Cannot apply role_level filter to this list because its model '
|
||||
'does not use roles for access control.'))
|
||||
args.append(
|
||||
Q(pk__in=RoleAncestorEntry.objects.filter(
|
||||
ancestor__in=request.user.roles.all(),
|
||||
content_type_id=ContentType.objects.get_for_model(queryset.model).id,
|
||||
role_field=role_name
|
||||
).values_list('object_id').distinct())
|
||||
Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name))
|
||||
)
|
||||
if or_filters:
|
||||
q = Q()
|
||||
|
||||
@@ -6,7 +6,7 @@ import inspect
|
||||
import logging
|
||||
import time
|
||||
import six
|
||||
import urllib
|
||||
import urllib
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -56,7 +56,7 @@ __all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
|
||||
'ParentMixin',
|
||||
'DeleteLastUnattachLabelMixin',
|
||||
'SubListAttachDetachAPIView',
|
||||
'CopyAPIView']
|
||||
'CopyAPIView', 'BaseUsersList',]
|
||||
|
||||
logger = logging.getLogger('awx.api.generics')
|
||||
analytics_logger = logging.getLogger('awx.analytics.performance')
|
||||
@@ -92,8 +92,7 @@ class LoggedLoginView(auth_views.LoginView):
|
||||
current_user = UserSerializer(self.request.user)
|
||||
current_user = JSONRenderer().render(current_user.data)
|
||||
current_user = urllib.quote('%s' % current_user, '')
|
||||
ret.set_cookie('current_user', current_user)
|
||||
|
||||
ret.set_cookie('current_user', current_user, secure=settings.SESSION_COOKIE_SECURE or None)
|
||||
return ret
|
||||
else:
|
||||
ret.status_code = 401
|
||||
@@ -390,7 +389,6 @@ class GenericAPIView(generics.GenericAPIView, APIView):
|
||||
]:
|
||||
d[key] = self.metadata_class().get_serializer_info(serializer, method=method)
|
||||
d['settings'] = settings
|
||||
d['has_named_url'] = self.model in settings.NAMED_URL_GRAPH
|
||||
return d
|
||||
|
||||
|
||||
@@ -553,9 +551,8 @@ class SubListDestroyAPIView(DestroyAPIView, SubListAPIView):
|
||||
|
||||
def perform_list_destroy(self, instance_list):
|
||||
if self.check_sub_obj_permission:
|
||||
# Check permissions for all before deleting, avoiding half-deleted lists
|
||||
for instance in instance_list:
|
||||
if self.has_delete_permission(instance):
|
||||
if not self.has_delete_permission(instance):
|
||||
raise PermissionDenied()
|
||||
for instance in instance_list:
|
||||
self.perform_destroy(instance, check_permission=False)
|
||||
@@ -991,3 +988,22 @@ class CopyAPIView(GenericAPIView):
|
||||
serializer = self._get_copy_return_serializer(new_obj)
|
||||
headers = {'Location': new_obj.get_absolute_url(request=request)}
|
||||
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
|
||||
|
||||
|
||||
class BaseUsersList(SubListCreateAttachDetachAPIView):
|
||||
def post(self, request, *args, **kwargs):
|
||||
ret = super(BaseUsersList, self).post( request, *args, **kwargs)
|
||||
if ret.status_code != 201:
|
||||
return ret
|
||||
try:
|
||||
if ret.data is not None and request.data.get('is_system_auditor', False):
|
||||
# This is a faux-field that just maps to checking the system
|
||||
# auditor role member list.. unfortunately this means we can't
|
||||
# set it on creation, and thus needs to be set here.
|
||||
user = User.objects.get(id=ret.data['id'])
|
||||
user.is_system_auditor = request.data['is_system_auditor']
|
||||
ret.data['is_system_auditor'] = request.data['is_system_auditor']
|
||||
except AttributeError as exc:
|
||||
print(exc)
|
||||
pass
|
||||
return ret
|
||||
|
||||
@@ -63,12 +63,15 @@ class Metadata(metadata.SimpleMetadata):
|
||||
verbose_name = smart_text(opts.verbose_name)
|
||||
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
|
||||
|
||||
for model_field in serializer.Meta.model._meta.fields:
|
||||
if field.field_name == model_field.name:
|
||||
field_info['filterable'] = True
|
||||
break
|
||||
if field.field_name == 'type':
|
||||
field_info['filterable'] = True
|
||||
else:
|
||||
field_info['filterable'] = False
|
||||
for model_field in serializer.Meta.model._meta.fields:
|
||||
if field.field_name == model_field.name:
|
||||
field_info['filterable'] = True
|
||||
break
|
||||
else:
|
||||
field_info['filterable'] = False
|
||||
|
||||
# Indicate if a field has a default value.
|
||||
# FIXME: Still isn't showing all default values?
|
||||
|
||||
@@ -61,7 +61,7 @@ from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
|
||||
from awx.main.validators import vars_validate_or_raise
|
||||
|
||||
from awx.conf.license import feature_enabled
|
||||
from awx.conf.license import feature_enabled, LicenseForbids
|
||||
from awx.api.versioning import reverse, get_request_version
|
||||
from awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,
|
||||
VerbatimField, DeprecatedCredentialField)
|
||||
@@ -104,7 +104,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
|
||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job': DEFAULT_SUMMARY_FIELDS,
|
||||
@@ -1311,16 +1311,12 @@ class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
|
||||
'admin', 'update',
|
||||
{'copy': 'organization.project_admin'}
|
||||
]
|
||||
scm_delete_on_next_update = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
|
||||
class Meta:
|
||||
model = Project
|
||||
fields = ('*', 'organization', 'scm_delete_on_next_update', 'scm_update_on_launch',
|
||||
fields = ('*', 'organization', 'scm_update_on_launch',
|
||||
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
|
||||
('last_update_failed', 'last_updated') # Backwards compatibility
|
||||
read_only_fields = ('scm_delete_on_next_update',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(ProjectSerializer, self).get_related(obj)
|
||||
@@ -1503,6 +1499,12 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
'admin', 'adhoc',
|
||||
{'copy': 'organization.inventory_admin'}
|
||||
]
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
|
||||
class Meta:
|
||||
model = Inventory
|
||||
@@ -1724,6 +1726,11 @@ class AnsibleFactsSerializer(BaseSerializer):
|
||||
|
||||
class GroupSerializer(BaseSerializerWithVariables):
|
||||
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
|
||||
groups_with_active_failures = serializers.IntegerField(
|
||||
read_only=True,
|
||||
min_value=0,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release')
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Group
|
||||
@@ -1903,7 +1910,9 @@ class CustomInventoryScriptSerializer(BaseSerializer):
|
||||
|
||||
|
||||
class InventorySourceOptionsSerializer(BaseSerializer):
|
||||
credential = DeprecatedCredentialField()
|
||||
credential = DeprecatedCredentialField(
|
||||
help_text=_('Cloud credential to use for inventory updates.')
|
||||
)
|
||||
|
||||
class Meta:
|
||||
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
|
||||
@@ -2974,12 +2983,16 @@ class JobTemplateMixin(object):
|
||||
'''
|
||||
|
||||
def _recent_jobs(self, obj):
|
||||
if hasattr(obj, 'workflow_jobs'):
|
||||
job_mgr = obj.workflow_jobs
|
||||
else:
|
||||
job_mgr = obj.jobs
|
||||
return [{'id': x.id, 'status': x.status, 'finished': x.finished}
|
||||
for x in job_mgr.all().order_by('-created')[:10]]
|
||||
# Exclude "joblets", jobs that ran as part of a sliced workflow job
|
||||
uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')
|
||||
# Would like to apply an .only, but does not play well with non_polymorphic
|
||||
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
|
||||
optimized_qs = uj_qs.non_polymorphic()
|
||||
return [{
|
||||
'id': x.id, 'status': x.status, 'finished': x.finished,
|
||||
# Make type consistent with API top-level key, for instance workflow_job
|
||||
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
|
||||
} for x in optimized_qs[:10]]
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
d = super(JobTemplateMixin, self).get_summary_fields(obj)
|
||||
@@ -3009,7 +3022,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
|
||||
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
|
||||
'allow_simultaneous', 'custom_virtualenv')
|
||||
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobTemplateSerializer, self).get_related(obj)
|
||||
@@ -3026,6 +3039,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
|
||||
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
|
||||
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
|
||||
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
|
||||
))
|
||||
if self.version > 1:
|
||||
res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk})
|
||||
@@ -3047,7 +3061,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
|
||||
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
|
||||
if project is None:
|
||||
raise serializers.ValidationError({'project': _("Job types 'run' and 'check' must have assigned a project.")})
|
||||
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
|
||||
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
|
||||
raise serializers.ValidationError({'inventory': prompting_error_message})
|
||||
|
||||
@@ -3056,6 +3070,13 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
def validate_extra_vars(self, value):
|
||||
return vars_validate_or_raise(value)
|
||||
|
||||
def validate_job_slice_count(self, value):
|
||||
if value > 1 and not feature_enabled('workflows'):
|
||||
raise LicenseForbids({'job_slice_count': [_(
|
||||
"Job slicing is a workflows-based feature and your license does not allow use of workflows."
|
||||
)]})
|
||||
return value
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
|
||||
all_creds = []
|
||||
@@ -3100,19 +3121,46 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
return summary_fields
|
||||
|
||||
|
||||
class JobTemplateWithSpecSerializer(JobTemplateSerializer):
|
||||
'''
|
||||
Used for activity stream entries.
|
||||
'''
|
||||
|
||||
class Meta:
|
||||
model = JobTemplate
|
||||
fields = ('*', 'survey_spec')
|
||||
|
||||
|
||||
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
|
||||
passwords_needed_to_start = serializers.ReadOnlyField()
|
||||
ask_diff_mode_on_launch = serializers.ReadOnlyField()
|
||||
ask_variables_on_launch = serializers.ReadOnlyField()
|
||||
ask_limit_on_launch = serializers.ReadOnlyField()
|
||||
ask_skip_tags_on_launch = serializers.ReadOnlyField()
|
||||
ask_tags_on_launch = serializers.ReadOnlyField()
|
||||
ask_job_type_on_launch = serializers.ReadOnlyField()
|
||||
ask_verbosity_on_launch = serializers.ReadOnlyField()
|
||||
ask_inventory_on_launch = serializers.ReadOnlyField()
|
||||
ask_credential_on_launch = serializers.ReadOnlyField()
|
||||
ask_diff_mode_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_variables_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_limit_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_skip_tags_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_tags_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_job_type_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_verbosity_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_inventory_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
ask_credential_on_launch = serializers.BooleanField(
|
||||
read_only=True,
|
||||
help_text=_('This field has been deprecated and will be removed in a future release'))
|
||||
artifacts = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
@@ -3121,7 +3169,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch',
|
||||
'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch', 'allow_simultaneous', 'artifacts', 'scm_revision',
|
||||
'instance_group', 'diff_mode')
|
||||
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(JobSerializer, self).get_related(obj)
|
||||
@@ -3555,7 +3603,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
class Meta:
|
||||
model = WorkflowJobTemplate
|
||||
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
|
||||
'ask_variables_on_launch',)
|
||||
'ask_variables_on_launch', 'inventory', 'ask_inventory_on_launch',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
|
||||
@@ -3583,12 +3631,24 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
|
||||
return vars_validate_or_raise(value)
|
||||
|
||||
|
||||
class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
|
||||
'''
|
||||
Used for activity stream entries.
|
||||
'''
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobTemplate
|
||||
fields = ('*', 'survey_spec')
|
||||
|
||||
|
||||
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJob
|
||||
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
|
||||
'-execution_node', '-event_processing_finished', '-controller_node',)
|
||||
'job_template', 'is_sliced_job',
|
||||
'-execution_node', '-event_processing_finished', '-controller_node',
|
||||
'inventory',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobSerializer, self).get_related(obj)
|
||||
@@ -3596,6 +3656,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
|
||||
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
|
||||
kwargs={'pk': obj.workflow_job_template.pk})
|
||||
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
|
||||
if obj.job_template_id:
|
||||
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
|
||||
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
|
||||
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
|
||||
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
|
||||
@@ -3669,7 +3731,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
if obj is None:
|
||||
return ret
|
||||
if 'extra_data' in ret and obj.survey_passwords:
|
||||
ret['extra_data'] = obj.display_extra_data()
|
||||
ret['extra_data'] = obj.display_extra_vars()
|
||||
return ret
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
@@ -3810,9 +3872,6 @@ class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
ujt_obj = attrs['unified_job_template']
|
||||
elif self.instance:
|
||||
ujt_obj = self.instance.unified_job_template
|
||||
if isinstance(ujt_obj, (WorkflowJobTemplate)):
|
||||
raise serializers.ValidationError({
|
||||
"unified_job_template": _("Cannot nest a %s inside a WorkflowJobTemplate") % ujt_obj.__class__.__name__})
|
||||
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
|
||||
cred = deprecated_fields['credential']
|
||||
attrs['credential'] = cred
|
||||
@@ -3861,7 +3920,8 @@ class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
|
||||
class Meta:
|
||||
model = WorkflowJobNode
|
||||
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
|
||||
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'do_not_run',)
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
|
||||
@@ -4363,37 +4423,63 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
|
||||
can_start_without_user_input = serializers.BooleanField(read_only=True)
|
||||
defaults = serializers.SerializerMethodField()
|
||||
variables_needed_to_start = serializers.ReadOnlyField()
|
||||
survey_enabled = serializers.SerializerMethodField()
|
||||
extra_vars = VerbatimField(required=False, write_only=True)
|
||||
inventory = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Inventory.objects.all(),
|
||||
required=False, write_only=True
|
||||
)
|
||||
workflow_job_template_data = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobTemplate
|
||||
fields = ('can_start_without_user_input', 'extra_vars',
|
||||
'survey_enabled', 'variables_needed_to_start',
|
||||
fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',
|
||||
'inventory', 'survey_enabled', 'variables_needed_to_start',
|
||||
'node_templates_missing', 'node_prompts_rejected',
|
||||
'workflow_job_template_data')
|
||||
'workflow_job_template_data', 'survey_enabled')
|
||||
read_only_fields = ('ask_inventory_on_launch',)
|
||||
|
||||
def get_survey_enabled(self, obj):
|
||||
if obj:
|
||||
return obj.survey_enabled and 'spec' in obj.survey_spec
|
||||
return False
|
||||
|
||||
def get_defaults(self, obj):
|
||||
defaults_dict = {}
|
||||
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
|
||||
if field_name == 'inventory':
|
||||
defaults_dict[field_name] = dict(
|
||||
name=getattrd(obj, '%s.name' % field_name, None),
|
||||
id=getattrd(obj, '%s.pk' % field_name, None))
|
||||
else:
|
||||
defaults_dict[field_name] = getattr(obj, field_name)
|
||||
return defaults_dict
|
||||
|
||||
def get_workflow_job_template_data(self, obj):
|
||||
return dict(name=obj.name, id=obj.id, description=obj.description)
|
||||
|
||||
def validate(self, attrs):
|
||||
obj = self.instance
|
||||
template = self.instance
|
||||
|
||||
accepted, rejected, errors = obj._accept_or_ignore_job_kwargs(
|
||||
_exclude_errors=['required'],
|
||||
**attrs)
|
||||
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
|
||||
self._ignored_fields = rejected
|
||||
|
||||
WFJT_extra_vars = obj.extra_vars
|
||||
attrs = super(WorkflowJobLaunchSerializer, self).validate(attrs)
|
||||
obj.extra_vars = WFJT_extra_vars
|
||||
return attrs
|
||||
if template.inventory and template.inventory.pending_deletion is True:
|
||||
errors['inventory'] = _("The inventory associated with this Workflow is being deleted.")
|
||||
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
|
||||
errors['inventory'] = _("The provided inventory is being deleted.")
|
||||
|
||||
if errors:
|
||||
raise serializers.ValidationError(errors)
|
||||
|
||||
WFJT_extra_vars = template.extra_vars
|
||||
WFJT_inventory = template.inventory
|
||||
super(WorkflowJobLaunchSerializer, self).validate(attrs)
|
||||
template.extra_vars = WFJT_extra_vars
|
||||
template.inventory = WFJT_inventory
|
||||
return accepted
|
||||
|
||||
|
||||
class NotificationTemplateSerializer(BaseSerializer):
|
||||
@@ -4533,13 +4619,13 @@ class SchedulePreviewSerializer(BaseSerializer):
|
||||
# - COUNT > 999
|
||||
def validate_rrule(self, value):
|
||||
rrule_value = value
|
||||
multi_by_month_day = ".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
|
||||
multi_by_month = ".*?BYMONTH[\:\=][0-9]+,[0-9]+"
|
||||
by_day_with_numeric_prefix = ".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
|
||||
match_count = re.match(".*?(COUNT\=[0-9]+)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
|
||||
match_native_dtstart = re.findall(".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
|
||||
match_multiple_rrule = re.findall(".*?(RRULE\:)", rrule_value)
|
||||
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
|
||||
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
|
||||
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
|
||||
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
|
||||
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
|
||||
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
|
||||
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
|
||||
if not len(match_multiple_dtstart):
|
||||
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
|
||||
if len(match_native_dtstart):
|
||||
@@ -4612,6 +4698,23 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
|
||||
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
|
||||
return res
|
||||
|
||||
def get_summary_fields(self, obj):
|
||||
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
|
||||
if 'inventory' in summary_fields:
|
||||
return summary_fields
|
||||
|
||||
inventory = None
|
||||
if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
|
||||
inventory = obj.unified_job_template.inventory
|
||||
else:
|
||||
return summary_fields
|
||||
|
||||
summary_fields['inventory'] = dict()
|
||||
for field in SUMMARIZABLE_FK_FIELDS['inventory']:
|
||||
summary_fields['inventory'][field] = getattr(inventory, field, None)
|
||||
|
||||
return summary_fields
|
||||
|
||||
def validate_unified_job_template(self, value):
|
||||
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
|
||||
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
|
||||
@@ -4835,10 +4938,6 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
|
||||
def get_related(self, obj):
|
||||
rel = {}
|
||||
VIEW_NAME_EXCEPTIONS = {
|
||||
'custom_inventory_script': 'inventory_script_detail',
|
||||
'o_auth2_access_token': 'o_auth2_token_detail'
|
||||
}
|
||||
if obj.actor is not None:
|
||||
rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
|
||||
for fk, __ in self._local_summarizable_fk_fields:
|
||||
@@ -4852,11 +4951,12 @@ class ActivityStreamSerializer(BaseSerializer):
|
||||
if getattr(thisItem, 'id', None) in id_list:
|
||||
continue
|
||||
id_list.append(getattr(thisItem, 'id', None))
|
||||
if fk in VIEW_NAME_EXCEPTIONS:
|
||||
view_name = VIEW_NAME_EXCEPTIONS[fk]
|
||||
if hasattr(thisItem, 'get_absolute_url'):
|
||||
rel_url = thisItem.get_absolute_url(self.context.get('request'))
|
||||
else:
|
||||
view_name = fk + '_detail'
|
||||
rel[fk].append(self.reverse('api:' + view_name, kwargs={'pk': thisItem.id}))
|
||||
rel_url = self.reverse('api:' + view_name, kwargs={'pk': thisItem.id})
|
||||
rel[fk].append(rel_url)
|
||||
|
||||
if fk == 'schedule':
|
||||
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
|
||||
|
||||
@@ -54,8 +54,6 @@ within all designated text fields of a model.
|
||||
|
||||
?search=findme
|
||||
|
||||
_Added in AWX 1.4_
|
||||
|
||||
(_Added in Ansible Tower 3.1.0_) Search across related fields:
|
||||
|
||||
?related__search=findme
|
||||
@@ -84,7 +82,7 @@ To exclude results matching certain criteria, prefix the field parameter with
|
||||
|
||||
?not__field=value
|
||||
|
||||
(_Added in AWX 1.4_) By default, all query string filters are AND'ed together, so
|
||||
By default, all query string filters are AND'ed together, so
|
||||
only the results matching *all* filters will be returned. To combine results
|
||||
matching *any* one of multiple criteria, prefix each query string parameter
|
||||
with `or__`:
|
||||
|
||||
@@ -10,7 +10,7 @@ object containing groups, including the hosts, children and variables for each
|
||||
group. The response data is equivalent to that returned by passing the
|
||||
`--list` argument to an inventory script.
|
||||
|
||||
_(Added in AWX 1.3)_ Specify a query string of `?hostvars=1` to retrieve the JSON
|
||||
Specify a query string of `?hostvars=1` to retrieve the JSON
|
||||
object above including all host variables. The `['_meta']['hostvars']` object
|
||||
in the response contains an entry for each host with its variables. This
|
||||
response format can be used with Ansible 1.3 and later to avoid making a
|
||||
@@ -18,11 +18,19 @@ separate API request for each host. Refer to
|
||||
[Tuning the External Inventory Script](http://docs.ansible.com/developing_inventory.html#tuning-the-external-inventory-script)
|
||||
for more information on this feature.
|
||||
|
||||
_(Added in AWX 1.4)_ By default, the inventory script will only return hosts that
|
||||
By default, the inventory script will only return hosts that
|
||||
are enabled in the inventory. This feature allows disabled hosts to be skipped
|
||||
when running jobs without removing them from the inventory. Specify a query
|
||||
string of `?all=1` to return all hosts, including disabled ones.
|
||||
|
||||
Specify a query string of `?towervars=1` to add variables
|
||||
to the hostvars of each host that specifies its enabled state and database ID.
|
||||
|
||||
Specify a query string of `?subset=slice2of5` to produce an inventory that
|
||||
has a restricted number of hosts according to the rules of job slicing.
|
||||
|
||||
To apply multiple query strings, join them with the `&` character, like `?hostvars=1&all=1`.
|
||||
|
||||
## Host Response
|
||||
|
||||
Make a GET request to this resource with a query string similar to
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
Make GET request to this resource to retrieve a single {{ model_verbose_name }}
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
{% ifmeth GET %}
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
{% ifmeth GET %}
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
{% if has_named_url %}
|
||||
### Note: starting from api v2, this resource object can be accessed via its named URL.
|
||||
{% endif %}
|
||||
|
||||
{% ifmeth GET %}
|
||||
# Retrieve {{ model_verbose_name|title|anora }}:
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ from awx.api.views import (
|
||||
JobTemplateDetail,
|
||||
JobTemplateLaunch,
|
||||
JobTemplateJobsList,
|
||||
JobTemplateSliceWorkflowJobsList,
|
||||
JobTemplateCallback,
|
||||
JobTemplateSchedulesList,
|
||||
JobTemplateSurveySpec,
|
||||
@@ -28,6 +29,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/$', JobTemplateDetail.as_view(), name='job_template_detail'),
|
||||
url(r'^(?P<pk>[0-9]+)/launch/$', JobTemplateLaunch.as_view(), name='job_template_launch'),
|
||||
url(r'^(?P<pk>[0-9]+)/jobs/$', JobTemplateJobsList.as_view(), name='job_template_jobs_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/slice_workflow_jobs/$', JobTemplateSliceWorkflowJobsList.as_view(), name='job_template_slice_workflow_jobs_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/callback/$', JobTemplateCallback.as_view(), name='job_template_callback'),
|
||||
url(r'^(?P<pk>[0-9]+)/schedules/$', JobTemplateSchedulesList.as_view(), name='job_template_schedules_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/survey_spec/$', JobTemplateSurveySpec.as_view(), name='job_template_survey_spec'),
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
211
awx/api/views/inventory.py
Normal file
211
awx/api/views/inventory.py
Normal file
@@ -0,0 +1,211 @@
|
||||
# Copyright (c) 2018 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import logging
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db.models import Q
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
# AWX
|
||||
from awx.main.models import (
|
||||
ActivityStream,
|
||||
Inventory,
|
||||
JobTemplate,
|
||||
Role,
|
||||
User,
|
||||
InstanceGroup,
|
||||
InventoryUpdateEvent,
|
||||
InventoryUpdate,
|
||||
InventorySource,
|
||||
CustomInventoryScript,
|
||||
)
|
||||
from awx.api.generics import (
|
||||
ListCreateAPIView,
|
||||
RetrieveUpdateDestroyAPIView,
|
||||
SubListAPIView,
|
||||
SubListAttachDetachAPIView,
|
||||
ResourceAccessList,
|
||||
CopyAPIView,
|
||||
)
|
||||
|
||||
from awx.api.serializers import (
|
||||
InventorySerializer,
|
||||
ActivityStreamSerializer,
|
||||
RoleSerializer,
|
||||
InstanceGroupSerializer,
|
||||
InventoryUpdateEventSerializer,
|
||||
CustomInventoryScriptSerializer,
|
||||
InventoryDetailSerializer,
|
||||
JobTemplateSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import (
|
||||
ActivityStreamEnforcementMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
ControlledByScmMixin,
|
||||
)
|
||||
|
||||
logger = logging.getLogger('awx.api.views.organization')
|
||||
|
||||
|
||||
class InventoryUpdateEventsList(SubListAPIView):
|
||||
|
||||
model = InventoryUpdateEvent
|
||||
serializer_class = InventoryUpdateEventSerializer
|
||||
parent_model = InventoryUpdate
|
||||
relationship = 'inventory_update_events'
|
||||
view_name = _('Inventory Update Events List')
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
|
||||
return super(InventoryUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)
|
||||
|
||||
|
||||
class InventoryScriptList(ListCreateAPIView):
|
||||
|
||||
model = CustomInventoryScript
|
||||
serializer_class = CustomInventoryScriptSerializer
|
||||
|
||||
|
||||
class InventoryScriptDetail(RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = CustomInventoryScript
|
||||
serializer_class = CustomInventoryScriptSerializer
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
can_delete = request.user.can_access(self.model, 'delete', instance)
|
||||
if not can_delete:
|
||||
raise PermissionDenied(_("Cannot delete inventory script."))
|
||||
for inv_src in InventorySource.objects.filter(source_script=instance):
|
||||
inv_src.source_script = None
|
||||
inv_src.save()
|
||||
return super(InventoryScriptDetail, self).destroy(request, *args, **kwargs)
|
||||
|
||||
|
||||
class InventoryScriptObjectRolesList(SubListAPIView):
|
||||
|
||||
model = Role
|
||||
serializer_class = RoleSerializer
|
||||
parent_model = CustomInventoryScript
|
||||
search_fields = ('role_field', 'content_type__model',)
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
content_type = ContentType.objects.get_for_model(self.parent_model)
|
||||
return Role.objects.filter(content_type=content_type, object_id=po.pk)
|
||||
|
||||
|
||||
class InventoryScriptCopy(CopyAPIView):
|
||||
|
||||
model = CustomInventoryScript
|
||||
copy_return_serializer_class = CustomInventoryScriptSerializer
|
||||
|
||||
|
||||
class InventoryList(ListCreateAPIView):
|
||||
|
||||
model = Inventory
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = Inventory.accessible_objects(self.request.user, 'read_role')
|
||||
qs = qs.select_related('admin_role', 'read_role', 'update_role', 'use_role', 'adhoc_role')
|
||||
qs = qs.prefetch_related('created_by', 'modified_by', 'organization')
|
||||
return qs
|
||||
|
||||
|
||||
class InventoryDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = Inventory
|
||||
serializer_class = InventoryDetailSerializer
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
kind = self.request.data.get('kind') or kwargs.get('kind')
|
||||
|
||||
# Do not allow changes to an Inventory kind.
|
||||
if kind is not None and obj.kind != kind:
|
||||
return self.http_method_not_allowed(request, *args, **kwargs)
|
||||
return super(InventoryDetail, self).update(request, *args, **kwargs)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if not request.user.can_access(self.model, 'delete', obj):
|
||||
raise PermissionDenied()
|
||||
self.check_related_active_jobs(obj) # related jobs mixin
|
||||
try:
|
||||
obj.schedule_deletion(getattr(request.user, 'id', None))
|
||||
return Response(status=status.HTTP_202_ACCEPTED)
|
||||
except RuntimeError as e:
|
||||
return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class InventoryActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
|
||||
|
||||
model = ActivityStream
|
||||
serializer_class = ActivityStreamSerializer
|
||||
parent_model = Inventory
|
||||
relationship = 'activitystream_set'
|
||||
search_fields = ('changes',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(Q(inventory=parent) | Q(host__in=parent.hosts.all()) | Q(group__in=parent.groups.all()))
|
||||
|
||||
|
||||
class InventoryInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
|
||||
model = InstanceGroup
|
||||
serializer_class = InstanceGroupSerializer
|
||||
parent_model = Inventory
|
||||
relationship = 'instance_groups'
|
||||
|
||||
|
||||
class InventoryAccessList(ResourceAccessList):
|
||||
|
||||
model = User # needs to be User for AccessLists's
|
||||
parent_model = Inventory
|
||||
|
||||
|
||||
class InventoryObjectRolesList(SubListAPIView):
|
||||
|
||||
model = Role
|
||||
serializer_class = RoleSerializer
|
||||
parent_model = Inventory
|
||||
search_fields = ('role_field', 'content_type__model',)
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
content_type = ContentType.objects.get_for_model(self.parent_model)
|
||||
return Role.objects.filter(content_type=content_type, object_id=po.pk)
|
||||
|
||||
|
||||
class InventoryJobTemplateList(SubListAPIView):
|
||||
|
||||
model = JobTemplate
|
||||
serializer_class = JobTemplateSerializer
|
||||
parent_model = Inventory
|
||||
relationship = 'jobtemplates'
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(inventory=parent)
|
||||
|
||||
|
||||
class InventoryCopy(CopyAPIView):
|
||||
|
||||
model = Inventory
|
||||
copy_return_serializer_class = InventorySerializer
|
||||
309
awx/api/views/mixin.py
Normal file
309
awx/api/views/mixin.py
Normal file
@@ -0,0 +1,309 @@
|
||||
# Copyright (c) 2018 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import dateutil
|
||||
import logging
|
||||
|
||||
from django.db.models import (
|
||||
Count,
|
||||
F,
|
||||
)
|
||||
from django.db import transaction
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from rest_framework.permissions import SAFE_METHODS
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.utils import (
|
||||
get_object_or_400,
|
||||
parse_yaml_or_json,
|
||||
)
|
||||
from awx.main.models.ha import (
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
)
|
||||
from awx.main.models.organization import Team
|
||||
from awx.main.models.projects import Project
|
||||
from awx.main.models.inventory import Inventory
|
||||
from awx.main.models.jobs import JobTemplate
|
||||
from awx.conf.license import (
|
||||
feature_enabled,
|
||||
LicenseForbids,
|
||||
)
|
||||
from awx.api.exceptions import ActiveJobConflict
|
||||
|
||||
logger = logging.getLogger('awx.api.views.mixin')
|
||||
|
||||
|
||||
class ActivityStreamEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports activity streams.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
ret = super(ActivityStreamEnforcementMixin, self).check_permissions(request)
|
||||
if not feature_enabled('activity_streams'):
|
||||
raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
|
||||
return ret
|
||||
|
||||
|
||||
class SystemTrackingEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports system tracking.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
ret = super(SystemTrackingEnforcementMixin, self).check_permissions(request)
|
||||
if not feature_enabled('system_tracking'):
|
||||
raise LicenseForbids(_('Your license does not permit use of system tracking.'))
|
||||
return ret
|
||||
|
||||
|
||||
class WorkflowsEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports workflows.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
ret = super(WorkflowsEnforcementMixin, self).check_permissions(request)
|
||||
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
|
||||
raise LicenseForbids(_('Your license does not allow use of workflows.'))
|
||||
return ret
|
||||
|
||||
|
||||
class UnifiedJobDeletionMixin(object):
|
||||
'''
|
||||
Special handling when deleting a running unified job object.
|
||||
'''
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if not request.user.can_access(self.model, 'delete', obj):
|
||||
raise PermissionDenied()
|
||||
try:
|
||||
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
|
||||
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
|
||||
except self.model.unified_job_node.RelatedObjectDoesNotExist:
|
||||
pass
|
||||
# Still allow deletion of new status, because these can be manually created
|
||||
if obj.status in ACTIVE_STATES and obj.status != 'new':
|
||||
raise PermissionDenied(detail=_("Cannot delete running job resource."))
|
||||
elif not obj.event_processing_finished:
|
||||
# Prohibit deletion if job events are still coming in
|
||||
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
|
||||
# less than 1 minute has passed since job finished and events are not in
|
||||
return Response({"error": _("Job has not finished processing events.")},
|
||||
status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events '
|
||||
'processed.'.format(obj.log_format))
|
||||
obj.delete()
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
class InstanceGroupMembershipMixin(object):
    '''
    This mixin overloads attach/detach so that it calls InstanceGroup.save(),
    triggering a background recalculation of policy-based instance group
    membership.
    '''
    def attach(self, request, *args, **kwargs):
        # Run the normal attach first; only touch policy lists on success.
        response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
        sub_id, res = self.attach_validate(request)  # NOTE(review): `res` is unused here
        if status.is_success(response.status_code):
            # Resolve the hostname that should appear in policy_instance_list.
            if self.parent_model is Instance:
                ig_obj = get_object_or_400(self.model, pk=sub_id)
                inst_name = ig_obj.hostname
                # NOTE(review): when the parent is an Instance, the sub object is
                # looked up via `self.model` — confirm it exposes `hostname`;
                # unattach() uses the *parent* object's hostname in this case.
            else:
                inst_name = get_object_or_400(self.model, pk=sub_id).hostname
            # Row-lock the InstanceGroup while mutating its policy list so
            # concurrent attach/detach requests cannot clobber each other.
            with transaction.atomic():
                ig_qs = InstanceGroup.objects.select_for_update()
                if self.parent_model is Instance:
                    ig_obj = get_object_or_400(ig_qs, pk=sub_id)
                else:
                    # similar to get_parent_object, but selected for update
                    parent_filter = {
                        self.lookup_field: self.kwargs.get(self.lookup_field, None),
                    }
                    ig_obj = get_object_or_404(ig_qs, **parent_filter)
                if inst_name not in ig_obj.policy_instance_list:
                    ig_obj.policy_instance_list.append(inst_name)
                    # save() kicks off the background policy recalculation
                    ig_obj.save(update_fields=['policy_instance_list'])
        return response

    def is_valid_relation(self, parent, sub, created=False):
        """Return an error dict if the attach/detach is forbidden, else None.

        Isolated instances and isolated instance groups have their membership
        managed automatically, so API edits are rejected.
        """
        if sub.is_isolated():
            return {'error': _('Isolated instances may not be added or removed from instances groups via the API.')}
        if self.parent_model is InstanceGroup:
            ig_obj = self.get_parent_object()
            # controller_id set => this group fronts an isolated ramparts group
            if ig_obj.controller_id is not None:
                return {'error': _('Isolated instance group membership may not be managed via the API.')}
        return None

    def unattach_validate(self, request):
        # Extend base validation with the isolated-instance relation check.
        (sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
        if res:
            return (sub_id, res)
        sub = get_object_or_400(self.model, pk=sub_id)
        attach_errors = self.is_valid_relation(None, sub)
        if attach_errors:
            return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
        return (sub_id, res)

    def unattach(self, request, *args, **kwargs):
        # Mirror of attach(): drop the hostname from the group's
        # policy_instance_list after a successful detach.
        response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
        if status.is_success(response.status_code):
            sub_id = request.data.get('id', None)
            if self.parent_model is Instance:
                inst_name = self.get_parent_object().hostname
            else:
                inst_name = get_object_or_400(self.model, pk=sub_id).hostname
            with transaction.atomic():
                ig_qs = InstanceGroup.objects.select_for_update()
                if self.parent_model is Instance:
                    ig_obj = get_object_or_400(ig_qs, pk=sub_id)
                else:
                    # similar to get_parent_object, but selected for update
                    parent_filter = {
                        self.lookup_field: self.kwargs.get(self.lookup_field, None),
                    }
                    ig_obj = get_object_or_404(ig_qs, **parent_filter)
                if inst_name in ig_obj.policy_instance_list:
                    ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
                    ig_obj.save(update_fields=['policy_instance_list'])
        return response
|
||||
|
||||
|
||||
class RelatedJobsPreventDeleteMixin(object):
    """Refuse deletion while related jobs are active or still emitting events."""

    def perform_destroy(self, obj):
        """Run the related-job guard, then delegate to the normal destroy."""
        self.check_related_active_jobs(obj)
        return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)

    def check_related_active_jobs(self, obj):
        """Raise ActiveJobConflict / PermissionDenied if deletion is unsafe.

        A job that finished less than a minute ago may still be writing its
        event stream, so those are checked for unfinished event processing.
        """
        active_jobs = obj.get_active_jobs()
        if active_jobs:
            raise ActiveJobConflict(active_jobs)
        cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
        recently_finished = obj._get_related_jobs().filter(finished__gte=cutoff)
        for uj in recently_finished.get_real_instances():
            if not uj.event_processing_finished:
                raise PermissionDenied(
                    _('Related job {} is still processing events.').format(uj.log_format)
                )
|
||||
|
||||
|
||||
class OrganizationCountsMixin(object):
    """Serializer-context mixin that precomputes per-organization counts
    (inventories, teams, users/admins, job templates, projects) in a handful
    of aggregate queries instead of one query per organization."""

    def get_serializer_context(self, *args, **kwargs):
        full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)

        # No request (e.g. schema generation) — nothing to compute.
        if self.request is None:
            return full_context

        db_results = {}
        org_qs = self.model.accessible_objects(self.request.user, 'read_role')
        org_id_list = org_qs.values('id')
        if len(org_id_list) == 0:
            # POST responses still serialize the created object, which
            # expects the key to be present.
            if self.request.method == 'POST':
                full_context['related_field_counts'] = {}
            return full_context

        inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
        project_qs = Project.accessible_objects(self.request.user, 'read_role')

        # Produce counts of Foreign Key relationships
        db_results['inventories'] = inv_qs\
            .values('organization').annotate(Count('organization')).order_by('organization')

        db_results['teams'] = Team.accessible_objects(
            self.request.user, 'read_role').values('organization').annotate(
            Count('organization')).order_by('organization')

        # Job templates relate to an org through either their project or their
        # inventory; count both paths, excluding templates counted twice
        # (project org == inventory org) from the project-path query.
        JT_project_reference = 'project__organization'
        JT_inventory_reference = 'inventory__organization'
        db_results['job_templates_project'] = JobTemplate.accessible_objects(
            self.request.user, 'read_role').exclude(
            project__organization=F(JT_inventory_reference)).values(JT_project_reference).annotate(
            Count(JT_project_reference)).order_by(JT_project_reference)

        db_results['job_templates_inventory'] = JobTemplate.accessible_objects(
            self.request.user, 'read_role').values(JT_inventory_reference).annotate(
            Count(JT_inventory_reference)).order_by(JT_inventory_reference)

        db_results['projects'] = project_qs\
            .values('organization').annotate(Count('organization')).order_by('organization')

        # Other members and admins of organization are always viewable
        db_results['users'] = org_qs.annotate(
            users=Count('member_role__members', distinct=True),
            admins=Count('admin_role__members', distinct=True)
        ).values('id', 'users', 'admins')

        # Zero-fill so every visible org has every count key.
        count_context = {}
        for org in org_id_list:
            org_id = org['id']
            count_context[org_id] = {
                'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
                'admins': 0, 'projects': 0}

        # Fold each aggregate queryset into the per-org dicts; the grouping
        # key column differs per queryset.
        for res, count_qs in db_results.items():
            if res == 'job_templates_project':
                org_reference = JT_project_reference
            elif res == 'job_templates_inventory':
                org_reference = JT_inventory_reference
            elif res == 'users':
                org_reference = 'id'
            else:
                org_reference = 'organization'
            for entry in count_qs:
                org_id = entry[org_reference]
                if org_id in count_context:
                    if res == 'users':
                        count_context[org_id]['admins'] = entry['admins']
                        count_context[org_id]['users'] = entry['users']
                        continue
                    count_context[org_id][res] = entry['%s__count' % org_reference]

        # Combine the counts for job templates by project and inventory
        for org in org_id_list:
            org_id = org['id']
            count_context[org_id]['job_templates'] = 0
            for related_path in ['job_templates_project', 'job_templates_inventory']:
                if related_path in count_context[org_id]:
                    count_context[org_id]['job_templates'] += count_context[org_id].pop(related_path)

        full_context['related_field_counts'] = count_context

        return full_context
|
||||
|
||||
|
||||
class ControlledByScmMixin(object):
    '''
    Special method to reset SCM inventory commit hash
    if anything that it manages changes.
    '''

    def _reset_inv_src_rev(self, obj):
        """Clear scm_last_revision on project-following SCM sources of ``obj``
        so the next project update re-syncs the inventory."""
        # Read-only requests and missing objects never invalidate anything.
        if self.request.method in SAFE_METHODS or not obj:
            return
        followers = obj.inventory_sources.filter(
            update_on_project_update=True, source='scm')
        if not followers:
            return
        # Allow inventory changes unrelated to variables
        if self.model == Inventory:
            request_has_no_data = not self.request or not self.request.data
            if request_has_no_data or (
                    parse_yaml_or_json(self.request.data.get('variables', '')) ==
                    parse_yaml_or_json(obj.variables)):
                return
        followers.update(scm_last_revision='')

    def get_object(self):
        """Fetch the object and invalidate its SCM revision on mutating access."""
        target = super(ControlledByScmMixin, self).get_object()
        self._reset_inv_src_rev(target)
        return target

    def get_parent_object(self):
        """Fetch the parent object and invalidate its SCM revision on mutating access."""
        parent = super(ControlledByScmMixin, self).get_parent_object()
        self._reset_inv_src_rev(parent)
        return parent
|
||||
247
awx/api/views/organization.py
Normal file
247
awx/api/views/organization.py
Normal file
@@ -0,0 +1,247 @@
|
||||
# Copyright (c) 2018 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import logging
|
||||
|
||||
# Django
|
||||
from django.db.models import Count
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# AWX
|
||||
from awx.conf.license import (
|
||||
feature_enabled,
|
||||
LicenseForbids,
|
||||
)
|
||||
from awx.main.models import (
|
||||
ActivityStream,
|
||||
Inventory,
|
||||
Project,
|
||||
JobTemplate,
|
||||
WorkflowJobTemplate,
|
||||
Organization,
|
||||
NotificationTemplate,
|
||||
Role,
|
||||
User,
|
||||
Team,
|
||||
InstanceGroup,
|
||||
)
|
||||
from awx.api.generics import (
|
||||
ListCreateAPIView,
|
||||
RetrieveUpdateDestroyAPIView,
|
||||
SubListAPIView,
|
||||
SubListCreateAttachDetachAPIView,
|
||||
SubListAttachDetachAPIView,
|
||||
ResourceAccessList,
|
||||
BaseUsersList,
|
||||
)
|
||||
|
||||
from awx.api.serializers import (
|
||||
OrganizationSerializer,
|
||||
InventorySerializer,
|
||||
ProjectSerializer,
|
||||
UserSerializer,
|
||||
TeamSerializer,
|
||||
ActivityStreamSerializer,
|
||||
RoleSerializer,
|
||||
NotificationTemplateSerializer,
|
||||
WorkflowJobTemplateSerializer,
|
||||
InstanceGroupSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import (
|
||||
ActivityStreamEnforcementMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
OrganizationCountsMixin,
|
||||
)
|
||||
|
||||
logger = logging.getLogger('awx.api.views.organization')
|
||||
|
||||
|
||||
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):

    model = Organization
    serializer_class = OrganizationSerializer

    def get_queryset(self):
        """Return organizations readable by the requester, with the role and
        audit relations prefetched to avoid per-row queries."""
        return (
            Organization.accessible_objects(self.request.user, 'read_role')
            .select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
            .prefetch_related('created_by', 'modified_by')
        )

    def create(self, request, *args, **kwargs):
        """Create a new organization.

        If there is already an organization and the license of this
        instance does not permit multiple organizations, then raise
        LicenseForbids.
        """
        # A single-org license only permits creation while none exist yet.
        multiple_allowed = feature_enabled('multiple_organizations')
        if not multiple_allowed and self.model.objects.exists():
            raise LicenseForbids(_('Your license only permits a single '
                                   'organization to exist.'))
        return super(OrganizationList, self).create(request, *args, **kwargs)
|
||||
|
||||
|
||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve/update/destroy a single organization, augmenting the
    serializer context with related-object counts for that one org."""

    model = Organization
    serializer_class = OrganizationSerializer

    def get_serializer_context(self, *args, **kwargs):
        full_context = super(OrganizationDetail, self).get_serializer_context(*args, **kwargs)

        # No pk in the URL (e.g. schema generation) — skip count computation.
        if not hasattr(self, 'kwargs') or 'pk' not in self.kwargs:
            return full_context
        org_id = int(self.kwargs['pk'])

        org_counts = {}
        access_kwargs = {'accessor': self.request.user, 'role_field': 'read_role'}
        # users/admins are counted directly on the org's role memberships
        direct_counts = Organization.objects.filter(id=org_id).annotate(
            users=Count('member_role__members', distinct=True),
            admins=Count('admin_role__members', distinct=True)
        ).values('users', 'admins')

        # Org does not exist — nothing to count.
        if not direct_counts:
            return full_context

        # Remaining counts are restricted to what the requester can read.
        org_counts = direct_counts[0]
        org_counts['inventories'] = Inventory.accessible_objects(**access_kwargs).filter(
            organization__id=org_id).count()
        org_counts['teams'] = Team.accessible_objects(**access_kwargs).filter(
            organization__id=org_id).count()
        org_counts['projects'] = Project.accessible_objects(**access_kwargs).filter(
            organization__id=org_id).count()
        # NOTE(review): only the project->organization path is counted here,
        # unlike OrganizationCountsMixin which also counts the inventory path.
        org_counts['job_templates'] = JobTemplate.accessible_objects(**access_kwargs).filter(
            project__organization__id=org_id).count()

        full_context['related_field_counts'] = {}
        full_context['related_field_counts'][org_id] = org_counts

        return full_context
|
||||
|
||||
|
||||
class OrganizationInventoriesList(SubListAPIView):
    """List the inventories belonging to an organization."""

    model = Inventory
    serializer_class = InventorySerializer
    parent_model = Organization
    relationship = 'inventories'
|
||||
|
||||
|
||||
class OrganizationUsersList(BaseUsersList):
    """List (and add) users who are members of an organization."""

    model = User
    serializer_class = UserSerializer
    parent_model = Organization
    relationship = 'member_role.members'
|
||||
|
||||
|
||||
class OrganizationAdminsList(BaseUsersList):
    """List (and add) users who are administrators of an organization."""

    model = User
    serializer_class = UserSerializer
    parent_model = Organization
    relationship = 'admin_role.members'
|
||||
|
||||
|
||||
class OrganizationProjectsList(SubListCreateAttachDetachAPIView):
    """List/create/attach/detach projects under an organization."""

    model = Project
    serializer_class = ProjectSerializer
    parent_model = Organization
    relationship = 'projects'
    parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationWorkflowJobTemplatesList(SubListCreateAttachDetachAPIView):
    """List/create/attach/detach workflow job templates under an organization."""

    model = WorkflowJobTemplate
    serializer_class = WorkflowJobTemplateSerializer
    parent_model = Organization
    relationship = 'workflows'
    parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
    """List/create/attach/detach teams under an organization."""

    model = Team
    serializer_class = TeamSerializer
    parent_model = Organization
    relationship = 'teams'
    parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    """List activity-stream entries for an organization (license-gated)."""

    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Organization
    relationship = 'activitystream_set'
    search_fields = ('changes',)
|
||||
|
||||
|
||||
class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView):
    """List/create/attach/detach notification templates owned by an organization."""

    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates'
    parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    """Notification templates fired for any job outcome in this organization."""

    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates_any'
|
||||
|
||||
|
||||
class OrganizationNotificationTemplatesErrorList(SubListCreateAttachDetachAPIView):
    """Notification templates fired on job errors in this organization."""

    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates_error'
|
||||
|
||||
|
||||
class OrganizationNotificationTemplatesSuccessList(SubListCreateAttachDetachAPIView):
    """Notification templates fired on successful jobs in this organization."""

    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates_success'
|
||||
|
||||
|
||||
class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
    """List/attach/detach the instance groups assigned to an organization."""

    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    parent_model = Organization
    relationship = 'instance_groups'
|
||||
|
||||
|
||||
class OrganizationAccessList(ResourceAccessList):
    """List users with any access to an organization."""

    model = User  # needs to be User for AccessLists's
    parent_model = Organization
|
||||
|
||||
|
||||
class OrganizationObjectRolesList(SubListAPIView):
    """List the role objects attached directly to one organization."""

    model = Role
    serializer_class = RoleSerializer
    parent_model = Organization
    search_fields = ('role_field', 'content_type__model',)

    def get_queryset(self):
        """Return roles whose generic FK points at the parent organization."""
        parent = self.get_parent_object()
        parent_ct = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=parent_ct, object_id=parent.pk)
|
||||
|
||||
278
awx/api/views/root.py
Normal file
278
awx/api/views/root.py
Normal file
@@ -0,0 +1,278 @@
|
||||
# Copyright (c) 2018 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import logging
|
||||
import json
|
||||
from collections import OrderedDict
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.decorators import method_decorator
|
||||
from django.views.decorators.csrf import ensure_csrf_cookie
|
||||
from django.template.loader import render_to_string
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from rest_framework.permissions import AllowAny, IsAuthenticated
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
from awx.api.generics import APIView
|
||||
from awx.main.ha import is_ha_environment
|
||||
from awx.main.utils import (
|
||||
get_awx_version,
|
||||
get_ansible_version,
|
||||
get_custom_venv_choices,
|
||||
to_python_boolean,
|
||||
)
|
||||
from awx.api.versioning import reverse, get_request_version, drf_reverse
|
||||
from awx.conf.license import get_license, feature_enabled
|
||||
from awx.main.models import (
|
||||
Project,
|
||||
Organization,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
JobTemplate,
|
||||
)
|
||||
|
||||
logger = logging.getLogger('awx.api.views.root')
|
||||
|
||||
|
||||
class ApiRootView(APIView):
    """Unauthenticated API root listing the available API versions."""

    permission_classes = (AllowAny,)
    view_name = _('REST API')
    versioning_class = None
    swagger_topic = 'Versioning'

    # ensure_csrf_cookie so browser clients get a CSRF token from the root hit
    @method_decorator(ensure_csrf_cookie)
    def get(self, request, format=None):
        ''' List supported API versions '''

        v1 = reverse('api:api_v1_root_view', kwargs={'version': 'v1'})
        v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
        data = OrderedDict()
        data['description'] = _('AWX REST API')
        data['current_version'] = v2
        data['available_versions'] = dict(v1 = v1, v2 = v2)
        data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
        # Rebranding is a licensed feature; only expose custom assets when enabled.
        if feature_enabled('rebranding'):
            data['custom_logo'] = settings.CUSTOM_LOGO
            data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
        return Response(data)
|
||||
|
||||
|
||||
class ApiOAuthAuthorizationRootView(APIView):
    """Root view enumerating the OAuth 2 authorization endpoints."""

    permission_classes = (AllowAny,)
    view_name = _("API OAuth 2 Authorization Root")
    versioning_class = None
    swagger_topic = 'Authentication'

    def get(self, request, format=None):
        """Return the authorize/token/revoke endpoint URLs."""
        endpoints = OrderedDict()
        for key, view_name in (('authorize', 'api:authorize'),
                               ('token', 'api:token'),
                               ('revoke_token', 'api:revoke-token')):
            endpoints[key] = drf_reverse(view_name)
        return Response(endpoints)
|
||||
|
||||
|
||||
class ApiVersionRootView(APIView):
    """Per-version API root listing every top-level resource endpoint.

    Subclassed by ApiV1RootView/ApiV2RootView; v2-only endpoints are gated
    on the request's resolved version.
    """

    permission_classes = (AllowAny,)
    swagger_topic = 'Versioning'

    def get(self, request, format=None):
        ''' List top level resources '''
        data = OrderedDict()
        data['ping'] = reverse('api:api_v1_ping_view', request=request)
        data['instances'] = reverse('api:instance_list', request=request)
        data['instance_groups'] = reverse('api:instance_group_list', request=request)
        data['config'] = reverse('api:api_v1_config_view', request=request)
        data['settings'] = reverse('api:setting_category_list', request=request)
        data['me'] = reverse('api:user_me_list', request=request)
        data['dashboard'] = reverse('api:dashboard_view', request=request)
        data['organizations'] = reverse('api:organization_list', request=request)
        data['users'] = reverse('api:user_list', request=request)
        data['projects'] = reverse('api:project_list', request=request)
        data['project_updates'] = reverse('api:project_update_list', request=request)
        data['teams'] = reverse('api:team_list', request=request)
        data['credentials'] = reverse('api:credential_list', request=request)
        # These resources only exist in API v2 and later.
        if get_request_version(request) > 1:
            data['credential_types'] = reverse('api:credential_type_list', request=request)
            data['applications'] = reverse('api:o_auth2_application_list', request=request)
            data['tokens'] = reverse('api:o_auth2_token_list', request=request)
        data['inventory'] = reverse('api:inventory_list', request=request)
        data['inventory_scripts'] = reverse('api:inventory_script_list', request=request)
        data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
        data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
        data['groups'] = reverse('api:group_list', request=request)
        data['hosts'] = reverse('api:host_list', request=request)
        data['job_templates'] = reverse('api:job_template_list', request=request)
        data['jobs'] = reverse('api:job_list', request=request)
        data['job_events'] = reverse('api:job_event_list', request=request)
        data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
        data['system_job_templates'] = reverse('api:system_job_template_list', request=request)
        data['system_jobs'] = reverse('api:system_job_list', request=request)
        data['schedules'] = reverse('api:schedule_list', request=request)
        data['roles'] = reverse('api:role_list', request=request)
        data['notification_templates'] = reverse('api:notification_template_list', request=request)
        data['notifications'] = reverse('api:notification_list', request=request)
        data['labels'] = reverse('api:label_list', request=request)
        data['unified_job_templates'] = reverse('api:unified_job_template_list', request=request)
        data['unified_jobs'] = reverse('api:unified_job_list', request=request)
        data['activity_stream'] = reverse('api:activity_stream_list', request=request)
        data['workflow_job_templates'] = reverse('api:workflow_job_template_list', request=request)
        data['workflow_jobs'] = reverse('api:workflow_job_list', request=request)
        data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)
        data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
        return Response(data)
|
||||
|
||||
|
||||
class ApiV1RootView(ApiVersionRootView):
    """API v1 root resource listing."""
    view_name = _('Version 1')
|
||||
|
||||
|
||||
class ApiV2RootView(ApiVersionRootView):
    """API v2 root resource listing."""
    view_name = _('Version 2')
|
||||
|
||||
|
||||
class ApiV1PingView(APIView):
    """A simple view that reports very basic information about this
    instance, which is acceptable to be public information.
    """
    permission_classes = (AllowAny,)
    authentication_classes = ()
    view_name = _('Ping')
    swagger_topic = 'System Configuration'

    def get(self, request, format=None):
        """Return some basic information about this instance

        Everything returned here should be considered public / insecure, as
        this requires no auth and is intended for use by the installer process.
        """
        response = {
            'ha': is_ha_environment(),
            'version': get_awx_version(),
            'active_node': settings.CLUSTER_HOST_ID,
        }

        response['instances'] = []
        for instance in Instance.objects.all():
            response['instances'].append(dict(node=instance.hostname, heartbeat=instance.modified,
                                              capacity=instance.capacity, version=instance.version))
        # NOTE(review): sorting a list of dicts relies on Python 2 dict
        # ordering semantics; on Python 3 this raises TypeError — a sort
        # key (e.g. key=lambda i: i['node']) would be needed there.
        response['instances'].sort()
        response['instance_groups'] = []
        for instance_group in InstanceGroup.objects.all():
            response['instance_groups'].append(dict(name=instance_group.name,
                                                    capacity=instance_group.capacity,
                                                    instances=[x.hostname for x in instance_group.instances.all()]))
        return Response(response)
|
||||
|
||||
|
||||
class ApiV1ConfigView(APIView):
    """Site-wide configuration endpoint: read config (any authenticated
    user), install a license (superuser POST), or remove it (DELETE)."""

    permission_classes = (IsAuthenticated,)
    view_name = _('Configuration')
    swagger_topic = 'System Configuration'

    def check_permissions(self, request):
        # Reads are open to any authenticated user; writes require superuser.
        super(ApiV1ConfigView, self).check_permissions(request)
        if not request.user.is_superuser and request.method.lower() not in {'options', 'head', 'get'}:
            self.permission_denied(request)  # Raises PermissionDenied exception.

    def get(self, request, format=None):
        '''Return various sitewide configuration settings'''

        # Only superusers/auditors may see the raw license key.
        if request.user.is_superuser or request.user.is_system_auditor:
            license_data = get_license(show_key=True)
        else:
            license_data = get_license(show_key=False)
            if not license_data.get('valid_key', False):
                license_data = {}
        if license_data and 'features' in license_data and 'activity_streams' in license_data['features']:
            # FIXME: Make the final setting value dependent on the feature?
            license_data['features']['activity_streams'] &= settings.ACTIVITY_STREAM_ENABLED

        # Clamp the analytics state to a known value.
        pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'

        data = dict(
            time_zone=settings.TIME_ZONE,
            license_info=license_data,
            version=get_awx_version(),
            ansible_version=get_ansible_version(),
            eula=render_to_string("eula.md") if license_data.get('license_type', 'UNLICENSED') != 'open' else '',
            analytics_status=pendo_state
        )

        # If LDAP is enabled, user_ldap_fields will return a list of field
        # names that are managed by LDAP and should be read-only for users with
        # a non-empty ldap_dn attribute.
        if getattr(settings, 'AUTH_LDAP_SERVER_URI', None) and feature_enabled('ldap'):
            user_ldap_fields = ['username', 'password']
            user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
            user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
            data['user_ldap_fields'] = user_ldap_fields

        # Project paths/venvs are shown to org admins/auditors; JT admins
        # only get the venv list.
        if request.user.is_superuser \
                or request.user.is_system_auditor \
                or Organization.accessible_objects(request.user, 'admin_role').exists() \
                or Organization.accessible_objects(request.user, 'auditor_role').exists():
            data.update(dict(
                project_base_dir = settings.PROJECTS_ROOT,
                project_local_paths = Project.get_local_path_choices(),
                custom_virtualenvs = get_custom_venv_choices()
            ))
        elif JobTemplate.accessible_objects(request.user, 'admin_role').exists():
            data['custom_virtualenvs'] = get_custom_venv_choices()

        return Response(data)

    def post(self, request):
        # License install: validate the payload, then the license itself.
        if not isinstance(request.data, dict):
            return Response({"error": _("Invalid license data")}, status=status.HTTP_400_BAD_REQUEST)
        if "eula_accepted" not in request.data:
            return Response({"error": _("Missing 'eula_accepted' property")}, status=status.HTTP_400_BAD_REQUEST)
        try:
            eula_accepted = to_python_boolean(request.data["eula_accepted"])
        except ValueError:
            return Response({"error": _("'eula_accepted' value is invalid")}, status=status.HTTP_400_BAD_REQUEST)

        if not eula_accepted:
            return Response({"error": _("'eula_accepted' must be True")}, status=status.HTTP_400_BAD_REQUEST)
        request.data.pop("eula_accepted")
        # Round-trip through JSON to reject non-serializable payloads early.
        try:
            data_actual = json.dumps(request.data)
        except Exception:
            logger.info(smart_text(u"Invalid JSON submitted for license."),
                        extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid JSON")}, status=status.HTTP_400_BAD_REQUEST)
        try:
            from awx.main.utils.common import get_licenser
            license_data = json.loads(data_actual)
            license_data_validated = get_licenser(**license_data).validate()
        except Exception:
            logger.warning(smart_text(u"Invalid license submitted."),
                           extra=dict(actor=request.user.username))
            return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)

        # If the license is valid, write it to the database.
        if license_data_validated['valid_key']:
            settings.LICENSE = license_data
            settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host())
            return Response(license_data_validated)

        logger.warning(smart_text(u"Invalid license submitted."),
                       extra=dict(actor=request.user.username))
        return Response({"error": _("Invalid license")}, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request):
        # Clearing the LICENSE setting reverts the install to unlicensed.
        try:
            settings.LICENSE = {}
            return Response(status=status.HTTP_204_NO_CONTENT)
        except Exception:
            # FIX: Log
            return Response({"error": _("Failed to remove license.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
|
||||
# Copyright (c) 2017 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
import os
|
||||
from celery import Celery
|
||||
from django.conf import settings # noqa
|
||||
|
||||
|
||||
# Detect development vs. production by the presence of the dev-only package.
try:
    import awx.devonly  # noqa
    MODE = 'development'
except ImportError:  # pragma: no cover
    MODE = 'production'

# Point Django at the matching settings module before Celery reads them.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)

# Celery application: configured from Django settings, tasks auto-discovered
# from every installed app's tasks module.
app = Celery('awx')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

if __name__ == '__main__':
    app.start()
|
||||
18
awx/conf/migrations/0006_v331_ldap_group_type.py
Normal file
18
awx/conf/migrations/0006_v331_ldap_group_type.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# AWX
|
||||
from awx.conf.migrations._ldap_group_type import fill_ldap_group_type_params
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
    """Data migration that normalizes the stored
    AUTH_LDAP_GROUP_TYPE_PARAMS setting (see fill_ldap_group_type_params)."""

    dependencies = [
        ('conf', '0005_v330_rename_two_session_settings'),
    ]

    operations = [
        migrations.RunPython(fill_ldap_group_type_params),
    ]
|
||||
30
awx/conf/migrations/_ldap_group_type.py
Normal file
30
awx/conf/migrations/_ldap_group_type.py
Normal file
@@ -0,0 +1,30 @@
|
||||
|
||||
import inspect
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.timezone import now
|
||||
|
||||
|
||||
def fill_ldap_group_type_params(apps, schema_editor):
    """Ensure the AUTH_LDAP_GROUP_TYPE_PARAMS setting exists and only
    contains keyword arguments accepted by the configured group type.

    If the setting row already exists, its stored params are pruned to the
    arguments of ``settings.AUTH_LDAP_GROUP_TYPE.__init__``; otherwise a new
    row is created with the historical defaults.

    :param apps: historical app registry supplied by the migration framework
    :param schema_editor: unused, required by the RunPython signature
    """
    group_type = settings.AUTH_LDAP_GROUP_TYPE
    Setting = apps.get_model('conf', 'Setting')

    # Historical defaults used before params were configurable.
    group_type_params = {'name_attr': 'cn', 'member_attr': 'member'}
    qs = Setting.objects.filter(key='AUTH_LDAP_GROUP_TYPE_PARAMS')
    if qs.exists():
        entry = qs[0]
        group_type_params = entry.value
    else:
        entry = Setting(key='AUTH_LDAP_GROUP_TYPE_PARAMS',
                        value=group_type_params,
                        created=now(),
                        modified=now())

    # Keep only params the group type's constructor actually accepts
    # (args[1:] drops `self`).
    init_attrs = set(inspect.getargspec(group_type.__init__).args[1:])
    # Iterate over a snapshot of the keys: deleting from a dict while
    # iterating its live keys() view raises RuntimeError on Python 3.
    for k in list(group_type_params.keys()):
        if k not in init_attrs:
            del group_type_params[k]

    entry.value = group_type_params
    entry.save()
|
||||
@@ -2,11 +2,13 @@
|
||||
from collections import namedtuple
|
||||
import contextlib
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import StringIO
|
||||
import traceback
|
||||
import urllib
|
||||
|
||||
import six
|
||||
|
||||
@@ -60,6 +62,15 @@ SETTING_CACHE_DEFAULTS = True
|
||||
__all__ = ['SettingsWrapper', 'get_settings_to_cache', 'SETTING_CACHE_NOTSET']
|
||||
|
||||
|
||||
def normalize_broker_url(value):
    """Percent-encode the password portion of an AMQP broker URL.

    Users sometimes configure RabbitMQ passwords containing raw ``:`` or
    ``@`` characters (e.g. ``amqp://guest:a@ns:ibl3#@localhost:5672//``),
    which confuse URL parsing.  Split on the *last* ``@`` (host boundary),
    then quote everything after the first ``:`` in the credentials part.
    """
    # rsplit: the final '@' separates credentials from host, even when the
    # password itself contains '@'.
    parts = value.rsplit('@', 1)
    match = re.search('(amqp://[^:]+:)(.*)', parts[0])
    if match:
        prefix, password = match.group(1), match.group(2)
        # NOTE(review): urllib.quote is Python 2 only; Python 3 would need
        # urllib.parse.quote.
        parts[0] = prefix + urllib.quote(password)
    return '@'.join(parts)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _ctit_db_wrapper(trans_safe=False):
|
||||
'''
|
||||
@@ -115,7 +126,8 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
bottom_stack.close()
|
||||
# Log the combined stack
|
||||
if trans_safe:
|
||||
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
if 'check_migrations' not in sys.argv:
|
||||
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
else:
|
||||
logger.error('Error modifying something related to database settings.\n{}'.format(tb_string))
|
||||
finally:
|
||||
@@ -444,7 +456,16 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
value = self._get_local(name)
|
||||
if value is not empty:
|
||||
return value
|
||||
return self._get_default(name)
|
||||
value = self._get_default(name)
|
||||
# sometimes users specify RabbitMQ passwords that contain
|
||||
# unescaped : and @ characters that confused urlparse, e.g.,
|
||||
# amqp://guest:a@ns:ibl3#@localhost:5672//
|
||||
#
|
||||
# detect these scenarios, and automatically escape the user's
|
||||
# password so it just works
|
||||
if name == 'BROKER_URL':
|
||||
value = normalize_broker_url(value)
|
||||
return value
|
||||
|
||||
def _set_local(self, name, value):
|
||||
field = self.registry.get_setting_field(name)
|
||||
|
||||
@@ -24,7 +24,12 @@ import os
|
||||
import pwd
|
||||
|
||||
# PSUtil
|
||||
import psutil
|
||||
try:
|
||||
import psutil
|
||||
except ImportError:
|
||||
raise ImportError('psutil is missing; {}bin/pip install psutil'.format(
|
||||
os.environ['VIRTUAL_ENV']
|
||||
))
|
||||
|
||||
__all__ = []
|
||||
|
||||
|
||||
@@ -27,7 +27,13 @@ import os
|
||||
import stat
|
||||
import threading
|
||||
import uuid
|
||||
import memcache
|
||||
|
||||
try:
|
||||
import memcache
|
||||
except ImportError:
|
||||
raise ImportError('python-memcached is missing; {}bin/pip install python-memcached'.format(
|
||||
os.environ['VIRTUAL_ENV']
|
||||
))
|
||||
|
||||
from six.moves import xrange
|
||||
|
||||
|
||||
@@ -475,6 +475,15 @@ class BaseCallbackModule(CallbackBase):
|
||||
with self.capture_event_data('runner_retry', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_retry(result)
|
||||
|
||||
def v2_runner_on_start(self, host, task):
|
||||
event_data = dict(
|
||||
host=host.get_name(),
|
||||
task=task
|
||||
)
|
||||
with self.capture_event_data('runner_on_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_start(host, task)
|
||||
|
||||
|
||||
|
||||
class AWXDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -524,7 +524,7 @@ class UserAccess(BaseAccess):
|
||||
# A user can be changed if they are themselves, or by org admins or
|
||||
# superusers. Change permission implies changing only certain fields
|
||||
# that a user should be able to edit for themselves.
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH:
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
return bool(self.user == obj or self.can_admin(obj, data))
|
||||
|
||||
@@ -577,7 +577,7 @@ class UserAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH:
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
# Reverse obj and sub_obj, defer to RoleAccess if this is a role assignment.
|
||||
@@ -587,7 +587,7 @@ class UserAccess(BaseAccess):
|
||||
return super(UserAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
|
||||
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH:
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
if relationship == 'roles':
|
||||
@@ -611,7 +611,8 @@ class OAuth2ApplicationAccess(BaseAccess):
|
||||
select_related = ('user',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(organization__in=self.user.organizations)
|
||||
org_access_qs = Organization.accessible_objects(self.user, 'member_role')
|
||||
return self.model.objects.filter(organization__in=org_access_qs)
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return self.user.is_superuser or self.check_related('organization', Organization, data, obj=obj,
|
||||
@@ -1156,13 +1157,10 @@ class TeamAccess(BaseAccess):
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
"""Reverse obj and sub_obj, defer to RoleAccess if this is an assignment
|
||||
of a resource role to the team."""
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH:
|
||||
return False
|
||||
# MANAGE_ORGANIZATION_AUTH setting checked in RoleAccess
|
||||
if isinstance(sub_obj, Role):
|
||||
if sub_obj.content_object is None:
|
||||
raise PermissionDenied(_("The {} role cannot be assigned to a team").format(sub_obj.name))
|
||||
elif isinstance(sub_obj.content_object, User):
|
||||
raise PermissionDenied(_("The admin_role for a User cannot be assigned to a team"))
|
||||
|
||||
if isinstance(sub_obj.content_object, ResourceMixin):
|
||||
role_access = RoleAccess(self.user)
|
||||
@@ -1174,9 +1172,7 @@ class TeamAccess(BaseAccess):
|
||||
*args, **kwargs)
|
||||
|
||||
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH:
|
||||
return False
|
||||
|
||||
# MANAGE_ORGANIZATION_AUTH setting checked in RoleAccess
|
||||
if isinstance(sub_obj, Role):
|
||||
if isinstance(sub_obj.content_object, ResourceMixin):
|
||||
role_access = RoleAccess(self.user)
|
||||
@@ -1212,7 +1208,7 @@ class ProjectAccess(BaseAccess):
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
if not data: # So the browseable API will work
|
||||
return Organization.accessible_objects(self.user, 'admin_role').exists()
|
||||
return Organization.accessible_objects(self.user, 'project_admin_role').exists()
|
||||
return (self.check_related('organization', Organization, data, role_field='project_admin_role', mandatory=True) and
|
||||
self.check_related('credential', Credential, data, role_field='use_role'))
|
||||
|
||||
@@ -1280,6 +1276,7 @@ class JobTemplateAccess(BaseAccess):
|
||||
'instance_groups',
|
||||
'credentials__credential_type',
|
||||
Prefetch('labels', queryset=Label.objects.all().order_by('name')),
|
||||
Prefetch('last_job', queryset=UnifiedJob.objects.non_polymorphic()),
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
@@ -1393,7 +1390,7 @@ class JobTemplateAccess(BaseAccess):
|
||||
'job_tags', 'force_handlers', 'skip_tags', 'ask_variables_on_launch',
|
||||
'ask_tags_on_launch', 'ask_job_type_on_launch', 'ask_skip_tags_on_launch',
|
||||
'ask_inventory_on_launch', 'ask_credential_on_launch', 'survey_enabled',
|
||||
'custom_virtualenv', 'diff_mode',
|
||||
'custom_virtualenv', 'diff_mode', 'timeout', 'job_slice_count',
|
||||
|
||||
# These fields are ignored, but it is convenient for QA to allow clients to post them
|
||||
'last_job_run', 'created', 'modified',
|
||||
@@ -1788,7 +1785,7 @@ class WorkflowJobNodeAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
workflow_job__workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
|
||||
workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(
|
||||
self.user, 'read_role'))
|
||||
|
||||
@check_superuser
|
||||
@@ -1839,8 +1836,10 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
if 'survey_enabled' in data and data['survey_enabled']:
|
||||
self.check_license(feature='surveys')
|
||||
|
||||
return self.check_related('organization', Organization, data, role_field='workflow_admin_role',
|
||||
mandatory=True)
|
||||
return (
|
||||
self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and
|
||||
self.check_related('inventory', Inventory, data, role_field='use_role')
|
||||
)
|
||||
|
||||
def can_copy(self, obj):
|
||||
if self.save_messages:
|
||||
@@ -1894,8 +1893,11 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
return (self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) and
|
||||
self.user in obj.admin_role)
|
||||
return (
|
||||
self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) and
|
||||
self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj) and
|
||||
self.user in obj.admin_role
|
||||
)
|
||||
|
||||
def can_delete(self, obj):
|
||||
return self.user.is_superuser or self.user in obj.admin_role
|
||||
@@ -1914,7 +1916,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
return WorkflowJob.objects.filter(
|
||||
workflow_job_template__in=WorkflowJobTemplate.accessible_objects(
|
||||
unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(
|
||||
self.user, 'read_role'))
|
||||
|
||||
def can_add(self, data):
|
||||
@@ -1946,27 +1948,39 @@ class WorkflowJobAccess(BaseAccess):
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
wfjt = obj.workflow_job_template
|
||||
template = obj.workflow_job_template
|
||||
if not template and obj.job_template_id:
|
||||
template = obj.job_template
|
||||
# only superusers can relaunch orphans
|
||||
if not wfjt:
|
||||
if not template:
|
||||
return False
|
||||
|
||||
# If job was launched by another user, it could have survey passwords
|
||||
if obj.created_by_id != self.user.pk:
|
||||
# Obtain prompts used to start original job
|
||||
JobLaunchConfig = obj._meta.get_field('launch_config').related_model
|
||||
try:
|
||||
config = JobLaunchConfig.objects.get(job=obj)
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
config = None
|
||||
# Obtain prompts used to start original job
|
||||
JobLaunchConfig = obj._meta.get_field('launch_config').related_model
|
||||
try:
|
||||
config = JobLaunchConfig.objects.get(job=obj)
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Workflow Job was launched with unknown prompts.')
|
||||
return False
|
||||
|
||||
if config is None or config.prompts_dict():
|
||||
# Check if access to prompts to prevent relaunch
|
||||
if config.prompts_dict():
|
||||
if obj.created_by_id != self.user.pk:
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts provided by another user.')
|
||||
return False
|
||||
if not JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}):
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts you lack access to.')
|
||||
return False
|
||||
if config.has_unprompted(template):
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts no longer accepted.')
|
||||
return False
|
||||
|
||||
# execute permission to WFJT is mandatory for any relaunch
|
||||
return (self.user in wfjt.execute_role)
|
||||
return (self.user in template.execute_role)
|
||||
|
||||
def can_recreate(self, obj):
|
||||
node_qs = obj.workflow_job_nodes.all().prefetch_related('inventory', 'credentials', 'unified_job_template')
|
||||
@@ -2549,14 +2563,13 @@ class RoleAccess(BaseAccess):
|
||||
# Unsupported for now
|
||||
return False
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, data,
|
||||
skip_sub_obj_read_check=False):
|
||||
return self.can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check)
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
return self.can_unattach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
|
||||
@check_superuser
|
||||
def can_unattach(self, obj, sub_obj, relationship, data=None, skip_sub_obj_read_check=False):
|
||||
if isinstance(obj.content_object, Team):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH:
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
if not skip_sub_obj_read_check and relationship in ['members', 'member_role.parents', 'parents']:
|
||||
|
||||
@@ -29,3 +29,11 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||
ACTIVE_STATES = CAN_CANCEL
|
||||
CENSOR_VALUE = '************'
|
||||
ENV_BLACKLIST = frozenset((
|
||||
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
|
||||
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
|
||||
'AWX_HOST', 'PROJECT_REVISION'
|
||||
))
|
||||
|
||||
5
awx/main/dispatch/__init__.py
Normal file
5
awx/main/dispatch/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
def get_local_queuename():
|
||||
return settings.CLUSTER_HOST_ID.encode('utf-8')
|
||||
58
awx/main/dispatch/control.py
Normal file
58
awx/main/dispatch/control.py
Normal file
@@ -0,0 +1,58 @@
|
||||
import logging
|
||||
import socket
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from kombu import Connection, Queue, Exchange, Producer, Consumer
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
class Control(object):
|
||||
|
||||
services = ('dispatcher', 'callback_receiver')
|
||||
result = None
|
||||
|
||||
def __init__(self, service, host=None):
|
||||
if service not in self.services:
|
||||
raise RuntimeError('{} must be in {}'.format(service, self.services))
|
||||
self.service = service
|
||||
self.queuename = host or get_local_queuename()
|
||||
self.queue = Queue(self.queuename, Exchange(self.queuename), routing_key=self.queuename)
|
||||
|
||||
def publish(self, msg, conn, **kwargs):
|
||||
producer = Producer(
|
||||
exchange=self.queue.exchange,
|
||||
channel=conn,
|
||||
routing_key=self.queuename
|
||||
)
|
||||
producer.publish(msg, expiration=5, **kwargs)
|
||||
|
||||
def status(self, *args, **kwargs):
|
||||
return self.control_with_reply('status', *args, **kwargs)
|
||||
|
||||
def running(self, *args, **kwargs):
|
||||
return self.control_with_reply('running', *args, **kwargs)
|
||||
|
||||
def control_with_reply(self, command, timeout=5):
|
||||
logger.warn('checking {} {} for {}'.format(self.service, command, self.queuename))
|
||||
reply_queue = Queue(name="amq.rabbitmq.reply-to")
|
||||
self.result = None
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
with Consumer(conn, reply_queue, callbacks=[self.process_message], no_ack=True):
|
||||
self.publish({'control': command}, conn, reply_to='amq.rabbitmq.reply-to')
|
||||
try:
|
||||
conn.drain_events(timeout=timeout)
|
||||
except socket.timeout:
|
||||
logger.error('{} did not reply within {}s'.format(self.service, timeout))
|
||||
raise
|
||||
return self.result
|
||||
|
||||
def control(self, msg, **kwargs):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
self.publish(msg, conn)
|
||||
|
||||
def process_message(self, body, message):
|
||||
self.result = body
|
||||
message.ack()
|
||||
408
awx/main/dispatch/pool.py
Normal file
408
awx/main/dispatch/pool.py
Normal file
@@ -0,0 +1,408 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import random
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
import collections
|
||||
from multiprocessing import Process
|
||||
from multiprocessing import Queue as MPQueue
|
||||
from Queue import Full as QueueFull, Empty as QueueEmpty
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connection as django_connection, connections
|
||||
from django.core.cache import cache as django_cache
|
||||
from jinja2 import Template
|
||||
import psutil
|
||||
|
||||
from awx.main.models import UnifiedJob
|
||||
from awx.main.dispatch import reaper
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
class PoolWorker(object):
|
||||
'''
|
||||
Used to track a worker child process and its pending and finished messages.
|
||||
|
||||
This class makes use of two distinct multiprocessing.Queues to track state:
|
||||
|
||||
- self.queue: this is a queue which represents pending messages that should
|
||||
be handled by this worker process; as new AMQP messages come
|
||||
in, a pool will put() them into this queue; the child
|
||||
process that is forked will get() from this queue and handle
|
||||
received messages in an endless loop
|
||||
- self.finished: this is a queue which the worker process uses to signal
|
||||
that it has finished processing a message
|
||||
|
||||
When a message is put() onto this worker, it is tracked in
|
||||
self.managed_tasks.
|
||||
|
||||
Periodically, the worker will call .calculate_managed_tasks(), which will
|
||||
cause messages in self.finished to be removed from self.managed_tasks.
|
||||
|
||||
In this way, self.managed_tasks represents a view of the messages assigned
|
||||
to a specific process. The message at [0] is the least-recently inserted
|
||||
message, and it represents what the worker is running _right now_
|
||||
(self.current_task).
|
||||
|
||||
A worker is "busy" when it has at least one message in self.managed_tasks.
|
||||
It is "idle" when self.managed_tasks is empty.
|
||||
'''
|
||||
|
||||
def __init__(self, queue_size, target, args):
|
||||
self.messages_sent = 0
|
||||
self.messages_finished = 0
|
||||
self.managed_tasks = collections.OrderedDict()
|
||||
self.finished = MPQueue(queue_size)
|
||||
self.queue = MPQueue(queue_size)
|
||||
self.process = Process(target=target, args=(self.queue, self.finished) + args)
|
||||
self.process.daemon = True
|
||||
|
||||
def start(self):
|
||||
self.process.start()
|
||||
|
||||
def put(self, body):
|
||||
uuid = '?'
|
||||
if isinstance(body, dict):
|
||||
if not body.get('uuid'):
|
||||
body['uuid'] = str(uuid4())
|
||||
uuid = body['uuid']
|
||||
logger.debug('delivered {} to worker[{}] qsize {}'.format(
|
||||
uuid, self.pid, self.qsize
|
||||
))
|
||||
self.managed_tasks[uuid] = body
|
||||
self.queue.put(body, block=True, timeout=5)
|
||||
self.messages_sent += 1
|
||||
self.calculate_managed_tasks()
|
||||
|
||||
def quit(self):
|
||||
'''
|
||||
Send a special control message to the worker that tells it to exit
|
||||
gracefully.
|
||||
'''
|
||||
self.queue.put('QUIT')
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self.process.pid
|
||||
|
||||
@property
|
||||
def qsize(self):
|
||||
return self.queue.qsize()
|
||||
|
||||
@property
|
||||
def alive(self):
|
||||
return self.process.is_alive()
|
||||
|
||||
@property
|
||||
def mb(self):
|
||||
if self.alive:
|
||||
return '{:0.3f}'.format(
|
||||
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
|
||||
)
|
||||
return '0'
|
||||
|
||||
@property
|
||||
def exitcode(self):
|
||||
return str(self.process.exitcode)
|
||||
|
||||
def calculate_managed_tasks(self):
|
||||
# look to see if any tasks were finished
|
||||
finished = []
|
||||
for _ in range(self.finished.qsize()):
|
||||
try:
|
||||
finished.append(self.finished.get(block=False))
|
||||
except QueueEmpty:
|
||||
break # qsize is not always _totally_ up to date
|
||||
|
||||
# if any tasks were finished, removed them from the managed tasks for
|
||||
# this worker
|
||||
for uuid in finished:
|
||||
self.messages_finished += 1
|
||||
del self.managed_tasks[uuid]
|
||||
|
||||
@property
|
||||
def current_task(self):
|
||||
self.calculate_managed_tasks()
|
||||
# the task at [0] is the one that's running right now (or is about to
|
||||
# be running)
|
||||
if len(self.managed_tasks):
|
||||
return self.managed_tasks[self.managed_tasks.keys()[0]]
|
||||
|
||||
return None
|
||||
|
||||
@property
|
||||
def orphaned_tasks(self):
|
||||
orphaned = []
|
||||
if not self.alive:
|
||||
# if this process had a running task that never finished,
|
||||
# requeue its error callbacks
|
||||
current_task = self.current_task
|
||||
if isinstance(current_task, dict):
|
||||
orphaned.extend(current_task.get('errbacks', []))
|
||||
|
||||
# if this process has any pending messages requeue them
|
||||
for _ in range(self.qsize):
|
||||
try:
|
||||
message = self.queue.get(block=False)
|
||||
if message != 'QUIT':
|
||||
orphaned.append(message)
|
||||
except QueueEmpty:
|
||||
break # qsize is not always _totally_ up to date
|
||||
if len(orphaned):
|
||||
logger.error(
|
||||
'requeuing {} messages from gone worker pid:{}'.format(
|
||||
len(orphaned), self.pid
|
||||
)
|
||||
)
|
||||
return orphaned
|
||||
|
||||
@property
|
||||
def busy(self):
|
||||
self.calculate_managed_tasks()
|
||||
return len(self.managed_tasks) > 0
|
||||
|
||||
@property
|
||||
def idle(self):
|
||||
return not self.busy
|
||||
|
||||
|
||||
class WorkerPool(object):
|
||||
'''
|
||||
Creates a pool of forked PoolWorkers.
|
||||
|
||||
As WorkerPool.write(...) is called (generally, by a kombu consumer
|
||||
implementation when it receives an AMQP message), messages are passed to
|
||||
one of the multiprocessing Queues where some work can be done on them.
|
||||
|
||||
class MessagePrinter(awx.main.dispatch.worker.BaseWorker):
|
||||
|
||||
def perform_work(self, body):
|
||||
print body
|
||||
|
||||
pool = WorkerPool(min_workers=4) # spawn four worker processes
|
||||
pool.init_workers(MessagePrint().work_loop)
|
||||
pool.write(
|
||||
0, # preferred worker 0
|
||||
'Hello, World!'
|
||||
)
|
||||
'''
|
||||
|
||||
debug_meta = ''
|
||||
|
||||
def __init__(self, min_workers=None, queue_size=None):
|
||||
self.name = settings.CLUSTER_HOST_ID
|
||||
self.pid = os.getpid()
|
||||
self.min_workers = min_workers or settings.JOB_EVENT_WORKERS
|
||||
self.queue_size = queue_size or settings.JOB_EVENT_MAX_QUEUE_SIZE
|
||||
self.workers = []
|
||||
|
||||
def __len__(self):
|
||||
return len(self.workers)
|
||||
|
||||
def init_workers(self, target, *target_args):
|
||||
self.target = target
|
||||
self.target_args = target_args
|
||||
for idx in range(self.min_workers):
|
||||
self.up()
|
||||
|
||||
def up(self):
|
||||
idx = len(self.workers)
|
||||
# It's important to close these because we're _about_ to fork, and we
|
||||
# don't want the forked processes to inherit the open sockets
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
worker = PoolWorker(self.queue_size, self.target, (idx,) + self.target_args)
|
||||
self.workers.append(worker)
|
||||
try:
|
||||
worker.start()
|
||||
except Exception:
|
||||
logger.exception('could not fork')
|
||||
else:
|
||||
logger.warn('scaling up worker pid:{}'.format(worker.pid))
|
||||
return idx, worker
|
||||
|
||||
def debug(self, *args, **kwargs):
|
||||
self.cleanup()
|
||||
tmpl = Template(
|
||||
'{{ pool.name }}[pid:{{ pool.pid }}] workers total={{ workers|length }} {{ meta }} \n'
|
||||
'{% for w in workers %}'
|
||||
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
|
||||
' sent={{ w.messages_sent }}'
|
||||
' finished={{ w.messages_finished }}'
|
||||
' qsize={{ w.managed_tasks|length }}'
|
||||
' rss={{ w.mb }}MB'
|
||||
'{% for task in w.managed_tasks.values() %}'
|
||||
'\n - {% if loop.index0 == 0 %}running {% else %}queued {% endif %}'
|
||||
'{{ task["uuid"] }} '
|
||||
'{% if "task" in task %}'
|
||||
'{{ task["task"].rsplit(".", 1)[-1] }}'
|
||||
# don't print kwargs, they often contain launch-time secrets
|
||||
'(*{{ task.get("args", []) }})'
|
||||
'{% endif %}'
|
||||
'{% endfor %}'
|
||||
'{% if not w.managed_tasks|length %}'
|
||||
' [IDLE]'
|
||||
'{% endif %}'
|
||||
'\n'
|
||||
'{% endfor %}'
|
||||
)
|
||||
return tmpl.render(pool=self, workers=self.workers, meta=self.debug_meta)
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
queue_order = sorted(range(len(self.workers)), cmp=lambda x, y: -1 if x==preferred_queue else 0)
|
||||
write_attempt_order = []
|
||||
for queue_actual in queue_order:
|
||||
try:
|
||||
self.workers[queue_actual].put(body)
|
||||
return queue_actual
|
||||
except QueueFull:
|
||||
pass
|
||||
except Exception:
|
||||
tb = traceback.format_exc()
|
||||
logger.warn("could not write to queue %s" % preferred_queue)
|
||||
logger.warn("detail: {}".format(tb))
|
||||
write_attempt_order.append(preferred_queue)
|
||||
logger.warn("could not write payload to any queue, attempted order: {}".format(write_attempt_order))
|
||||
return None
|
||||
|
||||
def stop(self, signum):
|
||||
try:
|
||||
for worker in self.workers:
|
||||
os.kill(worker.pid, signum)
|
||||
except Exception:
|
||||
logger.exception('could not kill {}'.format(worker.pid))
|
||||
|
||||
|
||||
class AutoscalePool(WorkerPool):
|
||||
'''
|
||||
An extended pool implementation that automatically scales workers up and
|
||||
down based on demand
|
||||
'''
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.max_workers = kwargs.pop('max_workers', None)
|
||||
super(AutoscalePool, self).__init__(*args, **kwargs)
|
||||
|
||||
if self.max_workers is None:
|
||||
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
|
||||
if settings_absmem is not None:
|
||||
total_memory_gb = int(settings_absmem)
|
||||
else:
|
||||
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
|
||||
# 5 workers per GB of total memory
|
||||
self.max_workers = (total_memory_gb * 5)
|
||||
|
||||
# max workers can't be less than min_workers
|
||||
self.max_workers = max(self.min_workers, self.max_workers)
|
||||
|
||||
@property
|
||||
def should_grow(self):
|
||||
if len(self.workers) < self.min_workers:
|
||||
# If we don't have at least min_workers, add more
|
||||
return True
|
||||
# If every worker is busy doing something, add more
|
||||
return all([w.busy for w in self.workers])
|
||||
|
||||
@property
|
||||
def full(self):
|
||||
return len(self.workers) == self.max_workers
|
||||
|
||||
@property
|
||||
def debug_meta(self):
|
||||
return 'min={} max={}'.format(self.min_workers, self.max_workers)
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
Perform some internal account and cleanup. This is run on
|
||||
every cluster node heartbeat:
|
||||
|
||||
1. Discover worker processes that exited, and recover messages they
|
||||
were handling.
|
||||
2. Clean up unnecessary, idle workers.
|
||||
3. Check to see if the database says this node is running any tasks
|
||||
that aren't actually running. If so, reap them.
|
||||
"""
|
||||
orphaned = []
|
||||
for w in self.workers[::]:
|
||||
if not w.alive:
|
||||
# the worker process has exited
|
||||
# 1. take the task it was running and enqueue the error
|
||||
# callbacks
|
||||
# 2. take any pending tasks delivered to its queue and
|
||||
# send them to another worker
|
||||
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
|
||||
if w.current_task:
|
||||
if w.current_task != 'QUIT':
|
||||
try:
|
||||
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
|
||||
reaper.reap_job(j, 'failed')
|
||||
except Exception:
|
||||
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
|
||||
orphaned.extend(w.orphaned_tasks)
|
||||
self.workers.remove(w)
|
||||
elif w.idle and len(self.workers) > self.min_workers:
|
||||
# the process has an empty queue (it's idle) and we have
|
||||
# more processes in the pool than we need (> min)
|
||||
# send this process a message so it will exit gracefully
|
||||
# at the next opportunity
|
||||
logger.warn('scaling down worker pid:{}'.format(w.pid))
|
||||
w.quit()
|
||||
self.workers.remove(w)
|
||||
|
||||
for m in orphaned:
|
||||
# if all the workers are dead, spawn at least one
|
||||
if not len(self.workers):
|
||||
self.up()
|
||||
idx = random.choice(range(len(self.workers)))
|
||||
self.write(idx, m)
|
||||
|
||||
# if the database says a job is running on this node, but it's *not*,
|
||||
# then reap it
|
||||
running_uuids = []
|
||||
for worker in self.workers:
|
||||
worker.calculate_managed_tasks()
|
||||
running_uuids.extend(worker.managed_tasks.keys())
|
||||
try:
|
||||
reaper.reap(excluded_uuids=running_uuids)
|
||||
except Exception:
|
||||
# we _probably_ failed here due to DB connectivity issues, so
|
||||
# don't use our logger (it accesses the database for configuration)
|
||||
_, _, tb = sys.exc_info()
|
||||
traceback.print_tb(tb)
|
||||
|
||||
def up(self):
|
||||
if self.full:
|
||||
# if we can't spawn more workers, just toss this message into a
|
||||
# random worker's backlog
|
||||
idx = random.choice(range(len(self.workers)))
|
||||
return idx, self.workers[idx]
|
||||
else:
|
||||
return super(AutoscalePool, self).up()
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
try:
|
||||
# when the cluster heartbeat occurs, clean up internally
|
||||
if isinstance(body, dict) and 'cluster_node_heartbeat' in body['task']:
|
||||
self.cleanup()
|
||||
if self.should_grow:
|
||||
self.up()
|
||||
# we don't care about "preferred queue" round robin distribution, just
|
||||
# find the first non-busy worker and claim it
|
||||
workers = self.workers[:]
|
||||
random.shuffle(workers)
|
||||
for w in workers:
|
||||
if not w.busy:
|
||||
w.put(body)
|
||||
break
|
||||
else:
|
||||
return super(AutoscalePool, self).write(preferred_queue, body)
|
||||
except Exception:
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
logger.exception('failed to write inbound message')
|
||||
128
awx/main/dispatch/publish.py
Normal file
128
awx/main/dispatch/publish.py
Normal file
@@ -0,0 +1,128 @@
|
||||
import inspect
|
||||
import logging
|
||||
import sys
|
||||
from uuid import uuid4
|
||||
|
||||
from django.conf import settings
|
||||
from kombu import Connection, Exchange, Producer
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def serialize_task(f):
|
||||
return '.'.join([f.__module__, f.__name__])
|
||||
|
||||
|
||||
class task:
|
||||
"""
|
||||
Used to decorate a function or class so that it can be run asynchronously
|
||||
via the task dispatcher. Tasks can be simple functions:
|
||||
|
||||
@task()
|
||||
def add(a, b):
|
||||
return a + b
|
||||
|
||||
...or classes that define a `run` method:
|
||||
|
||||
@task()
|
||||
class Adder:
|
||||
def run(self, a, b):
|
||||
return a + b
|
||||
|
||||
# Tasks can be run synchronously...
|
||||
assert add(1, 1) == 2
|
||||
assert Adder().run(1, 1) == 2
|
||||
|
||||
# ...or published to a queue:
|
||||
add.apply_async([1, 1])
|
||||
Adder.apply_async([1, 1])
|
||||
|
||||
# Tasks can also define a specific target queue or exchange type:
|
||||
|
||||
@task(queue='slow-tasks')
|
||||
def snooze():
|
||||
time.sleep(10)
|
||||
|
||||
@task(queue='tower_broadcast', exchange_type='fanout')
|
||||
def announce():
|
||||
print "Run this everywhere!"
|
||||
"""
|
||||
|
||||
def __init__(self, queue=None, exchange_type=None):
|
||||
self.queue = queue
|
||||
self.exchange_type = exchange_type
|
||||
|
||||
def __call__(self, fn=None):
|
||||
queue = self.queue
|
||||
exchange_type = self.exchange_type
|
||||
|
||||
class PublisherMixin(object):
|
||||
|
||||
queue = None
|
||||
|
||||
@classmethod
|
||||
def delay(cls, *args, **kwargs):
|
||||
return cls.apply_async(args, kwargs)
|
||||
|
||||
@classmethod
|
||||
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
|
||||
task_id = uuid or str(uuid4())
|
||||
args = args or []
|
||||
kwargs = kwargs or {}
|
||||
queue = (
|
||||
queue or
|
||||
getattr(cls.queue, 'im_func', cls.queue) or
|
||||
settings.CELERY_DEFAULT_QUEUE
|
||||
)
|
||||
obj = {
|
||||
'uuid': task_id,
|
||||
'args': args,
|
||||
'kwargs': kwargs,
|
||||
'task': cls.name
|
||||
}
|
||||
obj.update(**kw)
|
||||
if callable(queue):
|
||||
queue = queue()
|
||||
if not settings.IS_TESTING(sys.argv):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
exchange = Exchange(queue, type=exchange_type or 'direct')
|
||||
producer = Producer(conn)
|
||||
logger.debug('publish {}({}, queue={})'.format(
|
||||
cls.name,
|
||||
task_id,
|
||||
queue
|
||||
))
|
||||
producer.publish(obj,
|
||||
serializer='json',
|
||||
compression='bzip2',
|
||||
exchange=exchange,
|
||||
declare=[exchange],
|
||||
delivery_mode="persistent",
|
||||
routing_key=queue)
|
||||
return (obj, queue)
|
||||
|
||||
# If the object we're wrapping *is* a class (e.g., RunJob), return
|
||||
# a *new* class that inherits from the wrapped class *and* BaseTask
|
||||
# In this way, the new class returned by our decorator is the class
|
||||
# being decorated *plus* PublisherMixin so cls.apply_async() and
|
||||
# cls.delay() work
|
||||
bases = []
|
||||
ns = {'name': serialize_task(fn), 'queue': queue}
|
||||
if inspect.isclass(fn):
|
||||
bases = list(fn.__bases__)
|
||||
ns.update(fn.__dict__)
|
||||
cls = type(
|
||||
fn.__name__,
|
||||
tuple(bases + [PublisherMixin]),
|
||||
ns
|
||||
)
|
||||
if inspect.isclass(fn):
|
||||
return cls
|
||||
|
||||
# if the object being decorated is *not* a class (it's a Python
|
||||
# function), make fn.apply_async and fn.delay proxy through to the
|
||||
# PublisherMixin we dynamically created above
|
||||
setattr(fn, 'name', cls.name)
|
||||
setattr(fn, 'apply_async', cls.apply_async)
|
||||
setattr(fn, 'delay', cls.delay)
|
||||
return fn
|
||||
49
awx/main/dispatch/reaper.py
Normal file
49
awx/main/dispatch/reaper.py
Normal file
@@ -0,0 +1,49 @@
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
|
||||
from django.db.models import Q
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
from awx.main.models import Instance, UnifiedJob, WorkflowJob
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def reap_job(j, status):
|
||||
if UnifiedJob.objects.get(id=j.id).status not in ('running', 'waiting'):
|
||||
# just in case, don't reap jobs that aren't running
|
||||
return
|
||||
j.status = status
|
||||
j.start_args = '' # blank field to remove encrypted passwords
|
||||
j.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but was not present in',
|
||||
'the job queue, so it has been marked as failed.',
|
||||
))
|
||||
j.save(update_fields=['status', 'start_args', 'job_explanation'])
|
||||
if hasattr(j, 'send_notification_templates'):
|
||||
j.send_notification_templates('failed')
|
||||
j.websocket_emit_status(status)
|
||||
logger.error(
|
||||
'{} is no longer running; reaping'.format(j.log_format)
|
||||
)
|
||||
|
||||
|
||||
def reap(instance=None, status='failed', excluded_uuids=[]):
|
||||
'''
|
||||
Reap all jobs in waiting|running for this instance.
|
||||
'''
|
||||
me = instance or Instance.objects.me()
|
||||
now = tz_now()
|
||||
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
|
||||
jobs = UnifiedJob.objects.filter(
|
||||
(
|
||||
Q(status='running') |
|
||||
Q(status='waiting', modified__lte=now - timedelta(seconds=60))
|
||||
) & (
|
||||
Q(execution_node=me.hostname) |
|
||||
Q(controller_node=me.hostname)
|
||||
) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
|
||||
).exclude(celery_task_id__in=excluded_uuids)
|
||||
for j in jobs:
|
||||
reap_job(j, status)
|
||||
3
awx/main/dispatch/worker/__init__.py
Normal file
3
awx/main/dispatch/worker/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .base import AWXConsumer, BaseWorker # noqa
|
||||
from .callback import CallbackBrokerWorker # noqa
|
||||
from .task import TaskWorker # noqa
|
||||
151
awx/main/dispatch/worker/base.py
Normal file
151
awx/main/dispatch/worker/base.py
Normal file
@@ -0,0 +1,151 @@
|
||||
# Copyright (c) 2018 Ansible by Red Hat
|
||||
# All Rights Reserved.
|
||||
|
||||
import os
|
||||
import logging
|
||||
import signal
|
||||
from uuid import UUID
|
||||
from Queue import Empty as QueueEmpty
|
||||
|
||||
from django import db
|
||||
from kombu import Producer
|
||||
from kombu.mixins import ConsumerMixin
|
||||
|
||||
from awx.main.dispatch.pool import WorkerPool
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def signame(sig):
|
||||
return dict(
|
||||
(k, v) for v, k in signal.__dict__.items()
|
||||
if v.startswith('SIG') and not v.startswith('SIG_')
|
||||
)[sig]
|
||||
|
||||
|
||||
class WorkerSignalHandler:
|
||||
|
||||
def __init__(self):
|
||||
self.kill_now = False
|
||||
signal.signal(signal.SIGINT, self.exit_gracefully)
|
||||
|
||||
def exit_gracefully(self, *args, **kwargs):
|
||||
self.kill_now = True
|
||||
|
||||
|
||||
class AWXConsumer(ConsumerMixin):
|
||||
|
||||
def __init__(self, name, connection, worker, queues=[], pool=None):
|
||||
self.connection = connection
|
||||
self.total_messages = 0
|
||||
self.queues = queues
|
||||
self.worker = worker
|
||||
self.pool = pool
|
||||
if pool is None:
|
||||
self.pool = WorkerPool()
|
||||
self.pool.init_workers(self.worker.work_loop)
|
||||
|
||||
def get_consumers(self, Consumer, channel):
|
||||
logger.debug(self.listening_on)
|
||||
return [Consumer(queues=self.queues, accept=['json'],
|
||||
callbacks=[self.process_task])]
|
||||
|
||||
@property
|
||||
def listening_on(self):
|
||||
return 'listening on {}'.format([
|
||||
'{} [{}]'.format(q.name, q.exchange.type) for q in self.queues
|
||||
])
|
||||
|
||||
def control(self, body, message):
|
||||
logger.warn(body)
|
||||
control = body.get('control')
|
||||
if control in ('status', 'running'):
|
||||
producer = Producer(
|
||||
channel=self.connection,
|
||||
routing_key=message.properties['reply_to']
|
||||
)
|
||||
if control == 'status':
|
||||
msg = '\n'.join([self.listening_on, self.pool.debug()])
|
||||
elif control == 'running':
|
||||
msg = []
|
||||
for worker in self.pool.workers:
|
||||
worker.calculate_managed_tasks()
|
||||
msg.extend(worker.managed_tasks.keys())
|
||||
producer.publish(msg)
|
||||
elif control == 'reload':
|
||||
for worker in self.pool.workers:
|
||||
worker.quit()
|
||||
else:
|
||||
logger.error('unrecognized control message: {}'.format(control))
|
||||
message.ack()
|
||||
|
||||
def process_task(self, body, message):
|
||||
if 'control' in body:
|
||||
return self.control(body, message)
|
||||
if len(self.pool):
|
||||
if "uuid" in body and body['uuid']:
|
||||
try:
|
||||
queue = UUID(body['uuid']).int % len(self.pool)
|
||||
except Exception:
|
||||
queue = self.total_messages % len(self.pool)
|
||||
else:
|
||||
queue = self.total_messages % len(self.pool)
|
||||
else:
|
||||
queue = 0
|
||||
self.pool.write(queue, body)
|
||||
self.total_messages += 1
|
||||
message.ack()
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
signal.signal(signal.SIGINT, self.stop)
|
||||
signal.signal(signal.SIGTERM, self.stop)
|
||||
self.worker.on_start()
|
||||
super(AWXConsumer, self).run(*args, **kwargs)
|
||||
|
||||
def stop(self, signum, frame):
|
||||
self.should_stop = True # this makes the kombu mixin stop consuming
|
||||
logger.debug('received {}, stopping'.format(signame(signum)))
|
||||
self.worker.on_stop()
|
||||
raise SystemExit()
|
||||
|
||||
|
||||
class BaseWorker(object):
|
||||
|
||||
def work_loop(self, queue, finished, idx, *args):
|
||||
ppid = os.getppid()
|
||||
signal_handler = WorkerSignalHandler()
|
||||
while not signal_handler.kill_now:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
if os.getppid() != ppid:
|
||||
break
|
||||
try:
|
||||
body = queue.get(block=True, timeout=1)
|
||||
if body == 'QUIT':
|
||||
break
|
||||
except QueueEmpty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
|
||||
continue
|
||||
try:
|
||||
for conn in db.connections.all():
|
||||
# If the database connection has a hiccup during the prior message, close it
|
||||
# so we can establish a new connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
self.perform_work(body, *args)
|
||||
finally:
|
||||
if 'uuid' in body:
|
||||
uuid = body['uuid']
|
||||
logger.debug('task {} is finished'.format(uuid))
|
||||
finished.put(uuid)
|
||||
logger.warn('worker exiting gracefully pid:{}'.format(os.getpid()))
|
||||
|
||||
def perform_work(self, body):
|
||||
raise NotImplementedError()
|
||||
|
||||
def on_start(self):
|
||||
pass
|
||||
|
||||
def on_stop(self):
|
||||
pass
|
||||
127
awx/main/dispatch/worker/callback.py
Normal file
127
awx/main/dispatch/worker/callback.py
Normal file
@@ -0,0 +1,127 @@
|
||||
import logging
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import DatabaseError, OperationalError, connection as django_connection
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
|
||||
InventoryUpdateEvent, SystemJobEvent, UnifiedJob)
|
||||
|
||||
from .base import BaseWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
|
||||
class CallbackBrokerWorker(BaseWorker):
|
||||
'''
|
||||
A worker implementation that deserializes callback event data and persists
|
||||
it into the database.
|
||||
|
||||
The code that *builds* these types of messages is found in the AWX display
|
||||
callback (`awx.lib.awx_display_callback`).
|
||||
'''
|
||||
|
||||
MAX_RETRIES = 2
|
||||
|
||||
def perform_work(self, body):
|
||||
try:
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
|
||||
if not any([key in body for key in event_map]):
|
||||
raise Exception('Payload does not have a job identifier')
|
||||
|
||||
def _save_event_data():
|
||||
for key, cls in event_map.items():
|
||||
if key in body:
|
||||
cls.create_from_data(**body)
|
||||
|
||||
job_identifier = 'unknown job'
|
||||
job_key = 'unknown'
|
||||
for key in event_map.keys():
|
||||
if key in body:
|
||||
job_identifier = body[key]
|
||||
job_key = key
|
||||
break
|
||||
|
||||
if settings.DEBUG:
|
||||
from pygments import highlight
|
||||
from pygments.lexers import PythonLexer
|
||||
from pygments.formatters import Terminal256Formatter
|
||||
from pprint import pformat
|
||||
if body.get('event') == 'EOF':
|
||||
event_thing = 'EOF event'
|
||||
else:
|
||||
event_thing = 'event {}'.format(body.get('counter', 'unknown'))
|
||||
logger.info('Callback worker received {} for {} {}'.format(
|
||||
event_thing, job_key[:-len('_id')], job_identifier
|
||||
))
|
||||
logger.debug('Body: {}'.format(
|
||||
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
|
||||
)[:1024 * 4])
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
try:
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification(
|
||||
'jobs-summary',
|
||||
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
|
||||
)
|
||||
# Additionally, when we've processed all events, we should
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
if hasattr(uj, 'send_notification_templates'):
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
break
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
return
|
||||
|
||||
retries = 0
|
||||
while retries <= self.MAX_RETRIES:
|
||||
try:
|
||||
_save_event_data()
|
||||
break
|
||||
except (OperationalError, InterfaceError, InternalError):
|
||||
if retries >= self.MAX_RETRIES:
|
||||
logger.exception('Worker could not re-establish database connectivity, giving up on event for Job {}'.format(job_identifier))
|
||||
return
|
||||
delay = 60 * retries
|
||||
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
|
||||
i=retries + 1,
|
||||
delay=delay
|
||||
))
|
||||
django_connection.close()
|
||||
time.sleep(delay)
|
||||
retries += 1
|
||||
except DatabaseError:
|
||||
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
|
||||
break
|
||||
except Exception as exc:
|
||||
tb = traceback.format_exc()
|
||||
logger.error('Callback Task Processor Raised Exception: %r', exc)
|
||||
logger.error('Detail: {}'.format(tb))
|
||||
113
awx/main/dispatch/worker/task.py
Normal file
113
awx/main/dispatch/worker/task.py
Normal file
@@ -0,0 +1,113 @@
|
||||
import inspect
|
||||
import logging
|
||||
import importlib
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import six
|
||||
|
||||
from awx.main.tasks import dispatch_startup, inform_cluster_of_shutdown
|
||||
|
||||
from .base import BaseWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
class TaskWorker(BaseWorker):
|
||||
'''
|
||||
A worker implementation that deserializes task messages and runs native
|
||||
Python code.
|
||||
|
||||
The code that *builds* these types of messages is found in
|
||||
`awx.main.dispatch.publish`.
|
||||
'''
|
||||
|
||||
@classmethod
|
||||
def resolve_callable(cls, task):
|
||||
'''
|
||||
Transform a dotted notation task into an imported, callable function, e.g.,
|
||||
|
||||
awx.main.tasks.delete_inventory
|
||||
awx.main.tasks.RunProjectUpdate
|
||||
'''
|
||||
module, target = task.rsplit('.', 1)
|
||||
module = importlib.import_module(module)
|
||||
_call = None
|
||||
if hasattr(module, target):
|
||||
_call = getattr(module, target, None)
|
||||
return _call
|
||||
|
||||
def run_callable(self, body):
|
||||
'''
|
||||
Given some AMQP message, import the correct Python code and run it.
|
||||
'''
|
||||
task = body['task']
|
||||
uuid = body.get('uuid', '<unknown>')
|
||||
args = body.get('args', [])
|
||||
kwargs = body.get('kwargs', {})
|
||||
_call = TaskWorker.resolve_callable(task)
|
||||
if inspect.isclass(_call):
|
||||
# the callable is a class, e.g., RunJob; instantiate and
|
||||
# return its `run()` method
|
||||
_call = _call().run
|
||||
# don't print kwargs, they often contain launch-time secrets
|
||||
logger.debug('task {} starting {}(*{})'.format(uuid, task, args))
|
||||
return _call(*args, **kwargs)
|
||||
|
||||
def perform_work(self, body):
|
||||
'''
|
||||
Import and run code for a task e.g.,
|
||||
|
||||
body = {
|
||||
'args': [8],
|
||||
'callbacks': [{
|
||||
'args': [],
|
||||
'kwargs': {}
|
||||
'task': u'awx.main.tasks.handle_work_success'
|
||||
}],
|
||||
'errbacks': [{
|
||||
'args': [],
|
||||
'kwargs': {},
|
||||
'task': 'awx.main.tasks.handle_work_error'
|
||||
}],
|
||||
'kwargs': {},
|
||||
'task': u'awx.main.tasks.RunProjectUpdate'
|
||||
}
|
||||
'''
|
||||
result = None
|
||||
try:
|
||||
result = self.run_callable(body)
|
||||
except Exception as exc:
|
||||
|
||||
try:
|
||||
if getattr(exc, 'is_awx_task_error', False):
|
||||
# Error caused by user / tracked in job output
|
||||
logger.warning(six.text_type("{}").format(exc))
|
||||
else:
|
||||
task = body['task']
|
||||
args = body.get('args', [])
|
||||
kwargs = body.get('kwargs', {})
|
||||
logger.exception('Worker failed to run task {}(*{}, **{}'.format(
|
||||
task, args, kwargs
|
||||
))
|
||||
except Exception:
|
||||
# It's fairly critical that this code _not_ raise exceptions on logging
|
||||
# If you configure external logging in a way that _it_ fails, there's
|
||||
# not a lot we can do here; sys.stderr.write is a final hail mary
|
||||
_, _, tb = sys.exc_info()
|
||||
traceback.print_tb(tb)
|
||||
|
||||
for callback in body.get('errbacks', []) or []:
|
||||
callback['uuid'] = body['uuid']
|
||||
self.perform_work(callback)
|
||||
|
||||
for callback in body.get('callbacks', []) or []:
|
||||
callback['uuid'] = body['uuid']
|
||||
self.perform_work(callback)
|
||||
return result
|
||||
|
||||
def on_start(self):
|
||||
dispatch_startup()
|
||||
|
||||
def on_stop(self):
|
||||
inform_cluster_of_shutdown()
|
||||
@@ -4,11 +4,6 @@
|
||||
import six
|
||||
|
||||
|
||||
# Celery does not respect exception type when using a serializer different than pickle;
|
||||
# and awx uses the json serializer
|
||||
# https://github.com/celery/celery/issues/3586
|
||||
|
||||
|
||||
class _AwxTaskError():
|
||||
def build_exception(self, task, message=None):
|
||||
if message is None:
|
||||
@@ -36,5 +31,3 @@ class _AwxTaskError():
|
||||
|
||||
|
||||
AwxTaskError = _AwxTaskError()
|
||||
|
||||
|
||||
|
||||
1
awx/main/expect/.gitignore
vendored
Normal file
1
awx/main/expect/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
authorized_keys
|
||||
0
awx/main/expect/authorized_keys
Normal file
0
awx/main/expect/authorized_keys
Normal file
@@ -38,7 +38,7 @@ class IsolatedManager(object):
|
||||
:param stdout_handle: a file-like object for capturing stdout
|
||||
:param ssh_key_path: a filepath where SSH key data can be read
|
||||
:param expect_passwords: a dict of regular expression password prompts
|
||||
to input values, i.e., {r'Password:\s*?$':
|
||||
to input values, i.e., {r'Password:*?$':
|
||||
'some_password'}
|
||||
:param cancelled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
@@ -117,10 +117,10 @@ class IsolatedManager(object):
|
||||
|
||||
@classmethod
|
||||
def awx_playbook_path(cls):
|
||||
return os.path.join(
|
||||
return os.path.abspath(os.path.join(
|
||||
os.path.dirname(awx.__file__),
|
||||
'playbooks'
|
||||
)
|
||||
))
|
||||
|
||||
def path_to(self, *args):
|
||||
return os.path.join(self.private_data_dir, *args)
|
||||
|
||||
@@ -71,7 +71,7 @@ def run_pexpect(args, cwd, env, logfile,
|
||||
- signifying if the job has been prematurely
|
||||
cancelled
|
||||
:param expect_passwords: a dict of regular expression password prompts
|
||||
to input values, i.e., {r'Password:\s*?$':
|
||||
to input values, i.e., {r'Password:*?$':
|
||||
'some_password'}
|
||||
:param extra_update_fields: a dict used to specify DB fields which should
|
||||
be updated on the underlying model
|
||||
@@ -208,6 +208,12 @@ def run_isolated_job(private_data_dir, secrets, logfile=sys.stdout):
|
||||
env['AWX_ISOLATED_DATA_DIR'] = private_data_dir
|
||||
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + callback_dir + ':'
|
||||
|
||||
venv_path = env.get('VIRTUAL_ENV')
|
||||
if venv_path and not os.path.exists(venv_path):
|
||||
raise RuntimeError(
|
||||
'a valid Python virtualenv does not exist at {}'.format(venv_path)
|
||||
)
|
||||
|
||||
return run_pexpect(args, cwd, env, logfile,
|
||||
expect_passwords=expect_passwords,
|
||||
idle_timeout=idle_timeout,
|
||||
|
||||
@@ -46,7 +46,7 @@ from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.encryption import encrypt_value, decrypt_value, get_encryption_key
|
||||
from awx.main.validators import validate_ssh_private_key
|
||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
|
||||
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS, ENV_BLACKLIST
|
||||
from awx.main import utils
|
||||
|
||||
|
||||
@@ -573,10 +573,10 @@ class CredentialInputField(JSONSchemaField):
|
||||
# string)
|
||||
match = re.search(
|
||||
# 'foo' is a dependency of 'bar'
|
||||
"'" # apostrophe
|
||||
"([^']+)" # one or more non-apostrophes (first group)
|
||||
"'[\w ]+'" # one or more words/spaces
|
||||
"([^']+)", # second group
|
||||
r"'" # apostrophe
|
||||
r"([^']+)" # one or more non-apostrophes (first group)
|
||||
r"'[\w ]+'" # one or more words/spaces
|
||||
r"([^']+)", # second group
|
||||
error.message,
|
||||
)
|
||||
if match:
|
||||
@@ -755,7 +755,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
'file': {
|
||||
'type': 'object',
|
||||
'patternProperties': {
|
||||
'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
|
||||
r'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
|
||||
},
|
||||
'additionalProperties': False,
|
||||
},
|
||||
@@ -767,7 +767,12 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
# of underscores, digits, and alphabetics from the portable
|
||||
# character set. The first character of a name is not
|
||||
# a digit.
|
||||
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {'type': 'string'},
|
||||
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {
|
||||
'type': 'string',
|
||||
# The environment variable _value_ can be any ascii,
|
||||
# but pexpect will choke on any unicode
|
||||
'pattern': '^[\x00-\x7F]*$'
|
||||
},
|
||||
},
|
||||
'additionalProperties': False,
|
||||
},
|
||||
@@ -783,6 +788,19 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
'additionalProperties': False
|
||||
}
|
||||
|
||||
def validate_env_var_allowed(self, env_var):
|
||||
if env_var.startswith('ANSIBLE_'):
|
||||
raise django_exceptions.ValidationError(
|
||||
_('Environment variable {} may affect Ansible configuration so its '
|
||||
'use is not allowed in credentials.').format(env_var),
|
||||
code='invalid', params={'value': env_var},
|
||||
)
|
||||
if env_var in ENV_BLACKLIST:
|
||||
raise django_exceptions.ValidationError(
|
||||
_('Environment variable {} is blacklisted from use in credentials.').format(env_var),
|
||||
code='invalid', params={'value': env_var},
|
||||
)
|
||||
|
||||
def validate(self, value, model_instance):
|
||||
super(CredentialTypeInjectorField, self).validate(
|
||||
value, model_instance
|
||||
@@ -834,6 +852,9 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
setattr(valid_namespace['tower'].filename, template_name, 'EXAMPLE_FILENAME')
|
||||
|
||||
for type_, injector in value.items():
|
||||
if type_ == 'env':
|
||||
for key in injector.keys():
|
||||
self.validate_env_var_allowed(key)
|
||||
for key, tmpl in injector.items():
|
||||
try:
|
||||
Environment(
|
||||
|
||||
12
awx/main/management/commands/check_migrations.py
Normal file
12
awx/main/management/commands/check_migrations.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from django.db import connections
|
||||
from django.db.backends.sqlite3.base import DatabaseWrapper
|
||||
from django.core.management.commands.makemigrations import Command as MakeMigrations
|
||||
|
||||
|
||||
class Command(MakeMigrations):
|
||||
|
||||
def execute(self, *args, **options):
|
||||
settings = connections['default'].settings_dict.copy()
|
||||
settings['ENGINE'] = 'sqlite3'
|
||||
connections['default'] = DatabaseWrapper(settings)
|
||||
return MakeMigrations().execute(*args, **options)
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
# Python
|
||||
import re
|
||||
import sys
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
||||
# Django
|
||||
@@ -129,6 +130,7 @@ class Command(BaseCommand):
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
sys.stderr.write("This command has been deprecated and will be removed in a future release.\n")
|
||||
if not feature_enabled('system_tracking'):
|
||||
raise CommandError("The System Tracking feature is not enabled for your instance")
|
||||
cleanup_facts = CleanupFacts()
|
||||
|
||||
@@ -15,6 +15,8 @@ class Command(BaseCommand):
|
||||
def handle(self, *args, **kwargs):
|
||||
# Sanity check: Is there already an organization in the system?
|
||||
if Organization.objects.count():
|
||||
print('An organization is already in the system, exiting.')
|
||||
print('(changed: False)')
|
||||
return
|
||||
|
||||
# Create a default organization as the first superuser found.
|
||||
@@ -54,3 +56,4 @@ class Command(BaseCommand):
|
||||
jt.credentials.add(c)
|
||||
print('Default organization added.')
|
||||
print('Demo Credential, Inventory, and Job Template added.')
|
||||
print('(changed: True)')
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
# All Rights Reserved
|
||||
|
||||
import subprocess
|
||||
import warnings
|
||||
|
||||
from django.db import transaction
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
@@ -24,31 +23,28 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--hostname', dest='hostname', type=str,
|
||||
help='Hostname used during provisioning')
|
||||
parser.add_argument('--name', dest='name', type=str,
|
||||
help='(PENDING DEPRECIATION) Hostname used during provisioning')
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
# TODO: remove in 3.3
|
||||
if options.get('name'):
|
||||
warnings.warn("`--name` is depreciated in favor of `--hostname`, and will be removed in release 3.3.")
|
||||
if options.get('hostname'):
|
||||
raise CommandError("Cannot accept both --name and --hostname.")
|
||||
options['hostname'] = options['name']
|
||||
hostname = options.get('hostname')
|
||||
if not hostname:
|
||||
raise CommandError("--hostname is a required argument")
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
instance = Instance.objects.filter(hostname=hostname)
|
||||
if instance.exists():
|
||||
isolated = instance.first().is_isolated()
|
||||
instance.delete()
|
||||
print("Instance Removed")
|
||||
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
|
||||
if result != 0:
|
||||
print("Node deprovisioning may have failed when attempting to "
|
||||
"remove the RabbitMQ instance {} from the cluster".format(hostname))
|
||||
else:
|
||||
if isolated:
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
else:
|
||||
result = subprocess.Popen("rabbitmqctl forget_cluster_node rabbitmq@{}".format(hostname), shell=True).wait()
|
||||
if result != 0:
|
||||
print("Node deprovisioning may have failed when attempting to "
|
||||
"remove the RabbitMQ instance {} from the cluster".format(hostname))
|
||||
else:
|
||||
print('Successfully deprovisioned {}'.format(hostname))
|
||||
print('(changed: True)')
|
||||
else:
|
||||
print('No instance found matching name {}'.format(hostname))
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
# Copyright (c) 2017 Ansible by Red Hat
|
||||
# All Rights Reserved
|
||||
|
||||
# Borrow from another AWX command
|
||||
from awx.main.management.commands.deprovision_instance import Command as OtherCommand
|
||||
|
||||
# Python
|
||||
import warnings
|
||||
|
||||
|
||||
class Command(OtherCommand):
|
||||
|
||||
def handle(self, *args, **options):
|
||||
# TODO: delete this entire file in 3.3
|
||||
warnings.warn('This command is replaced with `deprovision_instance` and will '
|
||||
'be removed in release 3.3.')
|
||||
return super(Command, self).handle(*args, **options)
|
||||
@@ -491,7 +491,7 @@ class Command(BaseCommand):
|
||||
for host in hosts_qs.filter(pk__in=del_pks):
|
||||
host_name = host.name
|
||||
host.delete()
|
||||
logger.info('Deleted host "%s"', host_name)
|
||||
logger.debug('Deleted host "%s"', host_name)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('host deletions took %d queries for %d hosts',
|
||||
len(connection.queries) - queries_before,
|
||||
@@ -528,7 +528,7 @@ class Command(BaseCommand):
|
||||
group_name = group.name
|
||||
with ignore_inventory_computed_fields():
|
||||
group.delete()
|
||||
logger.info('Group "%s" deleted', group_name)
|
||||
logger.debug('Group "%s" deleted', group_name)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('group deletions took %d queries for %d groups',
|
||||
len(connection.queries) - queries_before,
|
||||
@@ -549,7 +549,7 @@ class Command(BaseCommand):
|
||||
db_groups = self.inventory_source.groups
|
||||
for db_group in db_groups.all():
|
||||
if self.inventory_source.deprecated_group_id == db_group.id: # TODO: remove in 3.3
|
||||
logger.info(
|
||||
logger.debug(
|
||||
'Group "%s" from v1 API child group/host connections preserved',
|
||||
db_group.name
|
||||
)
|
||||
@@ -566,8 +566,8 @@ class Command(BaseCommand):
|
||||
for db_child in db_children.filter(pk__in=child_group_pks):
|
||||
group_group_count += 1
|
||||
db_group.children.remove(db_child)
|
||||
logger.info('Group "%s" removed from group "%s"',
|
||||
db_child.name, db_group.name)
|
||||
logger.debug('Group "%s" removed from group "%s"',
|
||||
db_child.name, db_group.name)
|
||||
# FIXME: Inventory source group relationships
|
||||
# Delete group/host relationships not present in imported data.
|
||||
db_hosts = db_group.hosts
|
||||
@@ -594,8 +594,8 @@ class Command(BaseCommand):
|
||||
if db_host not in db_group.hosts.all():
|
||||
continue
|
||||
db_group.hosts.remove(db_host)
|
||||
logger.info('Host "%s" removed from group "%s"',
|
||||
db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" removed from group "%s"',
|
||||
db_host.name, db_group.name)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('group-group and group-host deletions took %d queries for %d relationships',
|
||||
len(connection.queries) - queries_before,
|
||||
@@ -614,9 +614,9 @@ class Command(BaseCommand):
|
||||
if db_variables != all_obj.variables_dict:
|
||||
all_obj.variables = json.dumps(db_variables)
|
||||
all_obj.save(update_fields=['variables'])
|
||||
logger.info('Inventory variables updated from "all" group')
|
||||
logger.debug('Inventory variables updated from "all" group')
|
||||
else:
|
||||
logger.info('Inventory variables unmodified')
|
||||
logger.debug('Inventory variables unmodified')
|
||||
|
||||
def _create_update_groups(self):
|
||||
'''
|
||||
@@ -648,11 +648,11 @@ class Command(BaseCommand):
|
||||
group.variables = json.dumps(db_variables)
|
||||
group.save(update_fields=['variables'])
|
||||
if self.overwrite_vars:
|
||||
logger.info('Group "%s" variables replaced', group.name)
|
||||
logger.debug('Group "%s" variables replaced', group.name)
|
||||
else:
|
||||
logger.info('Group "%s" variables updated', group.name)
|
||||
logger.debug('Group "%s" variables updated', group.name)
|
||||
else:
|
||||
logger.info('Group "%s" variables unmodified', group.name)
|
||||
logger.debug('Group "%s" variables unmodified', group.name)
|
||||
existing_group_names.add(group.name)
|
||||
self._batch_add_m2m(self.inventory_source.groups, group)
|
||||
for group_name in all_group_names:
|
||||
@@ -666,7 +666,7 @@ class Command(BaseCommand):
|
||||
'description':'imported'
|
||||
}
|
||||
)[0]
|
||||
logger.info('Group "%s" added', group.name)
|
||||
logger.debug('Group "%s" added', group.name)
|
||||
self._batch_add_m2m(self.inventory_source.groups, group)
|
||||
self._batch_add_m2m(self.inventory_source.groups, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
@@ -705,24 +705,24 @@ class Command(BaseCommand):
|
||||
if update_fields:
|
||||
db_host.save(update_fields=update_fields)
|
||||
if 'name' in update_fields:
|
||||
logger.info('Host renamed from "%s" to "%s"', old_name, mem_host.name)
|
||||
logger.debug('Host renamed from "%s" to "%s"', old_name, mem_host.name)
|
||||
if 'instance_id' in update_fields:
|
||||
if old_instance_id:
|
||||
logger.info('Host "%s" instance_id updated', mem_host.name)
|
||||
logger.debug('Host "%s" instance_id updated', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" instance_id added', mem_host.name)
|
||||
logger.debug('Host "%s" instance_id added', mem_host.name)
|
||||
if 'variables' in update_fields:
|
||||
if self.overwrite_vars:
|
||||
logger.info('Host "%s" variables replaced', mem_host.name)
|
||||
logger.debug('Host "%s" variables replaced', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" variables updated', mem_host.name)
|
||||
logger.debug('Host "%s" variables updated', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" variables unmodified', mem_host.name)
|
||||
logger.debug('Host "%s" variables unmodified', mem_host.name)
|
||||
if 'enabled' in update_fields:
|
||||
if enabled:
|
||||
logger.info('Host "%s" is now enabled', mem_host.name)
|
||||
logger.debug('Host "%s" is now enabled', mem_host.name)
|
||||
else:
|
||||
logger.info('Host "%s" is now disabled', mem_host.name)
|
||||
logger.debug('Host "%s" is now disabled', mem_host.name)
|
||||
self._batch_add_m2m(self.inventory_source.hosts, db_host)
|
||||
|
||||
def _create_update_hosts(self):
|
||||
@@ -796,9 +796,9 @@ class Command(BaseCommand):
|
||||
host_attrs['instance_id'] = instance_id
|
||||
db_host = self.inventory.hosts.update_or_create(name=mem_host_name, defaults=host_attrs)[0]
|
||||
if enabled is False:
|
||||
logger.info('Host "%s" added (disabled)', mem_host_name)
|
||||
logger.debug('Host "%s" added (disabled)', mem_host_name)
|
||||
else:
|
||||
logger.info('Host "%s" added', mem_host_name)
|
||||
logger.debug('Host "%s" added', mem_host_name)
|
||||
self._batch_add_m2m(self.inventory_source.hosts, db_host)
|
||||
|
||||
self._batch_add_m2m(self.inventory_source.hosts, flush=True)
|
||||
@@ -827,10 +827,10 @@ class Command(BaseCommand):
|
||||
child_names = all_child_names[offset2:(offset2 + self._batch_size)]
|
||||
db_children_qs = self.inventory.groups.filter(name__in=child_names)
|
||||
for db_child in db_children_qs.filter(children__id=db_group.id):
|
||||
logger.info('Group "%s" already child of group "%s"', db_child.name, db_group.name)
|
||||
logger.debug('Group "%s" already child of group "%s"', db_child.name, db_group.name)
|
||||
for db_child in db_children_qs.exclude(children__id=db_group.id):
|
||||
self._batch_add_m2m(db_group.children, db_child)
|
||||
logger.info('Group "%s" added as child of "%s"', db_child.name, db_group.name)
|
||||
logger.debug('Group "%s" added as child of "%s"', db_child.name, db_group.name)
|
||||
self._batch_add_m2m(db_group.children, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('Group-group updates took %d queries for %d group-group relationships',
|
||||
@@ -854,19 +854,19 @@ class Command(BaseCommand):
|
||||
host_names = all_host_names[offset2:(offset2 + self._batch_size)]
|
||||
db_hosts_qs = self.inventory.hosts.filter(name__in=host_names)
|
||||
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
|
||||
logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
|
||||
self._batch_add_m2m(db_group.hosts, db_host)
|
||||
logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
all_instance_ids = sorted([h.instance_id for h in mem_group.hosts if h.instance_id])
|
||||
for offset2 in xrange(0, len(all_instance_ids), self._batch_size):
|
||||
instance_ids = all_instance_ids[offset2:(offset2 + self._batch_size)]
|
||||
db_hosts_qs = self.inventory.hosts.filter(instance_id__in=instance_ids)
|
||||
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
|
||||
logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" already in group "%s"', db_host.name, db_group.name)
|
||||
for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
|
||||
self._batch_add_m2m(db_group.hosts, db_host)
|
||||
logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
logger.debug('Host "%s" added to group "%s"', db_host.name, db_group.name)
|
||||
self._batch_add_m2m(db_group.hosts, flush=True)
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('Group-host updates took %d queries for %d group-host relationships',
|
||||
@@ -938,7 +938,7 @@ class Command(BaseCommand):
|
||||
self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
|
||||
self.instance_id_var = options.get('instance_id_var', None)
|
||||
|
||||
self.celery_invoked = False if os.getenv('INVENTORY_SOURCE_ID', None) is None else True
|
||||
self.invoked_from_dispatcher = False if os.getenv('INVENTORY_SOURCE_ID', None) is None else True
|
||||
|
||||
# Load inventory and related objects from database.
|
||||
if self.inventory_name and self.inventory_id:
|
||||
@@ -1062,7 +1062,7 @@ class Command(BaseCommand):
|
||||
exc = e
|
||||
transaction.rollback()
|
||||
|
||||
if self.celery_invoked is False:
|
||||
if self.invoked_from_dispatcher is False:
|
||||
with ignore_inventory_computed_fields():
|
||||
self.inventory_update = InventoryUpdate.objects.get(pk=self.inventory_update.pk)
|
||||
self.inventory_update.result_traceback = tb
|
||||
|
||||
@@ -6,6 +6,22 @@ from django.core.management.base import BaseCommand
|
||||
import six
|
||||
|
||||
|
||||
class Ungrouped(object):
|
||||
|
||||
name = 'ungrouped'
|
||||
policy_instance_percentage = None
|
||||
policy_instance_minimum = None
|
||||
controller = None
|
||||
|
||||
@property
|
||||
def instances(self):
|
||||
return Instance.objects.filter(rampart_groups__isnull=True)
|
||||
|
||||
@property
|
||||
def capacity(self):
|
||||
return sum([x.capacity for x in self.instances])
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""List instances from the Tower database
|
||||
"""
|
||||
@@ -13,12 +29,28 @@ class Command(BaseCommand):
|
||||
def handle(self, *args, **options):
|
||||
super(Command, self).__init__()
|
||||
|
||||
for instance in Instance.objects.all():
|
||||
print(six.text_type(
|
||||
"hostname: {0.hostname}; created: {0.created}; "
|
||||
"heartbeat: {0.modified}; capacity: {0.capacity}").format(instance))
|
||||
for instance_group in InstanceGroup.objects.all():
|
||||
print(six.text_type(
|
||||
"Instance Group: {0.name}; created: {0.created}; "
|
||||
"capacity: {0.capacity}; members: {1}").format(instance_group,
|
||||
[x.hostname for x in instance_group.instances.all()]))
|
||||
groups = list(InstanceGroup.objects.all())
|
||||
ungrouped = Ungrouped()
|
||||
if len(ungrouped.instances):
|
||||
groups.append(ungrouped)
|
||||
|
||||
for instance_group in groups:
|
||||
fmt = '[{0.name} capacity={0.capacity}'
|
||||
if instance_group.policy_instance_percentage:
|
||||
fmt += ' policy={0.policy_instance_percentage}%'
|
||||
if instance_group.policy_instance_minimum:
|
||||
fmt += ' policy>={0.policy_instance_minimum}'
|
||||
if instance_group.controller:
|
||||
fmt += ' controller={0.controller.name}'
|
||||
print(six.text_type(fmt + ']').format(instance_group))
|
||||
for x in instance_group.instances.all():
|
||||
color = '\033[92m'
|
||||
if x.capacity == 0 or x.enabled is False:
|
||||
color = '\033[91m'
|
||||
fmt = '\t' + color + '{0.hostname} capacity={0.capacity} version={1}'
|
||||
if x.last_isolated_check:
|
||||
fmt += ' last_isolated_check="{0.last_isolated_check:%Y-%m-%d %H:%M:%S}"'
|
||||
if x.capacity:
|
||||
fmt += ' heartbeat="{0.modified:%Y-%m-%d %H:%M:%S}"'
|
||||
print(six.text_type(fmt + '\033[0m').format(x, x.version or '?'))
|
||||
print('')
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
# Copyright (c) 2017 Ansible by Red Hat
|
||||
# All Rights Reserved
|
||||
|
||||
# Borrow from another AWX command
|
||||
from awx.main.management.commands.provision_instance import Command as OtherCommand
|
||||
|
||||
# Python
|
||||
import warnings
|
||||
|
||||
|
||||
class Command(OtherCommand):
|
||||
|
||||
def handle(self, *args, **options):
|
||||
# TODO: delete this entire file in 3.3
|
||||
warnings.warn('This command is replaced with `provision_instance` and will '
|
||||
'be removed in release 3.3.')
|
||||
return super(Command, self).handle(*args, **options)
|
||||
@@ -4,6 +4,7 @@
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
import random
|
||||
|
||||
from django.utils import timezone
|
||||
from django.core.management.base import BaseCommand
|
||||
@@ -26,7 +27,21 @@ from awx.api.serializers import (
|
||||
)
|
||||
|
||||
|
||||
class ReplayJobEvents():
|
||||
class JobStatusLifeCycle():
|
||||
def emit_job_status(self, job, status):
|
||||
# {"status": "successful", "project_id": 13, "unified_job_id": 659, "group_name": "jobs"}
|
||||
job.websocket_emit_status(status)
|
||||
|
||||
def determine_job_event_finish_status_index(self, job_event_count, random_seed):
|
||||
if random_seed == 0:
|
||||
return job_event_count - 1
|
||||
|
||||
random.seed(random_seed)
|
||||
job_event_index = random.randint(0, job_event_count - 1)
|
||||
return job_event_index
|
||||
|
||||
|
||||
class ReplayJobEvents(JobStatusLifeCycle):
|
||||
|
||||
recording_start = None
|
||||
replay_start = None
|
||||
@@ -76,9 +91,10 @@ class ReplayJobEvents():
|
||||
job_events = job.inventory_update_events.order_by('created')
|
||||
elif type(job) is SystemJob:
|
||||
job_events = job.system_job_events.order_by('created')
|
||||
if job_events.count() == 0:
|
||||
count = job_events.count()
|
||||
if count == 0:
|
||||
raise RuntimeError("No events for job id {}".format(job.id))
|
||||
return job_events
|
||||
return job_events, count
|
||||
|
||||
def get_serializer(self, job):
|
||||
if type(job) is Job:
|
||||
@@ -95,7 +111,7 @@ class ReplayJobEvents():
|
||||
raise RuntimeError("Job is of type {} and replay is not yet supported.".format(type(job)))
|
||||
sys.exit(1)
|
||||
|
||||
def run(self, job_id, speed=1.0, verbosity=0, skip=0):
|
||||
def run(self, job_id, speed=1.0, verbosity=0, skip_range=[], random_seed=0, final_status_delay=0, debug=False):
|
||||
stats = {
|
||||
'events_ontime': {
|
||||
'total': 0,
|
||||
@@ -119,17 +135,27 @@ class ReplayJobEvents():
|
||||
}
|
||||
try:
|
||||
job = self.get_job(job_id)
|
||||
job_events = self.get_job_events(job)
|
||||
job_events, job_event_count = self.get_job_events(job)
|
||||
serializer = self.get_serializer(job)
|
||||
except RuntimeError as e:
|
||||
print("{}".format(e.message))
|
||||
sys.exit(1)
|
||||
|
||||
je_previous = None
|
||||
|
||||
self.emit_job_status(job, 'pending')
|
||||
self.emit_job_status(job, 'waiting')
|
||||
self.emit_job_status(job, 'running')
|
||||
|
||||
finish_status_index = self.determine_job_event_finish_status_index(job_event_count, random_seed)
|
||||
|
||||
for n, je_current in enumerate(job_events):
|
||||
if n < skip:
|
||||
if je_current.counter in skip_range:
|
||||
continue
|
||||
|
||||
if debug:
|
||||
raw_input("{} of {}:".format(n, job_event_count))
|
||||
|
||||
if not je_previous:
|
||||
stats['recording_start'] = je_current.created
|
||||
self.start(je_current.created)
|
||||
@@ -146,7 +172,7 @@ class ReplayJobEvents():
|
||||
print("recording: next job in {} seconds".format(recording_diff))
|
||||
if replay_offset >= 0:
|
||||
replay_diff = recording_diff - replay_offset
|
||||
|
||||
|
||||
if replay_diff > 0:
|
||||
stats['events_ontime']['total'] += 1
|
||||
if verbosity >= 3:
|
||||
@@ -167,6 +193,11 @@ class ReplayJobEvents():
|
||||
stats['events_total'] += 1
|
||||
je_previous = je_current
|
||||
|
||||
if n == finish_status_index:
|
||||
if final_status_delay != 0:
|
||||
self.sleep(final_status_delay)
|
||||
self.emit_job_status(job, job.status)
|
||||
|
||||
if stats['events_total'] > 2:
|
||||
stats['replay_end'] = self.now()
|
||||
stats['replay_duration'] = (stats['replay_end'] - stats['replay_start']).total_seconds()
|
||||
@@ -193,19 +224,39 @@ class Command(BaseCommand):
|
||||
|
||||
help = 'Replay job events over websockets ordered by created on date.'
|
||||
|
||||
def _parse_slice_range(self, slice_arg):
|
||||
slice_arg = tuple([int(n) for n in slice_arg.split(':')])
|
||||
slice_obj = slice(*slice_arg)
|
||||
|
||||
start = slice_obj.start or 0
|
||||
stop = slice_obj.stop or -1
|
||||
step = slice_obj.step or 1
|
||||
|
||||
return range(start, stop, step)
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--job_id', dest='job_id', type=int, metavar='j',
|
||||
help='Id of the job to replay (job or adhoc)')
|
||||
parser.add_argument('--speed', dest='speed', type=int, metavar='s',
|
||||
parser.add_argument('--speed', dest='speed', type=float, metavar='s',
|
||||
help='Speedup factor.')
|
||||
parser.add_argument('--skip', dest='skip', type=int, metavar='k',
|
||||
help='Number of events to skip.')
|
||||
parser.add_argument('--skip-range', dest='skip_range', type=str, metavar='k',
|
||||
default='0:-1:1', help='Range of events to skip')
|
||||
parser.add_argument('--random-seed', dest='random_seed', type=int, metavar='r',
|
||||
default=0, help='Random number generator seed to use when determining job_event index to emit final job status')
|
||||
parser.add_argument('--final-status-delay', dest='final_status_delay', type=float, metavar='f',
|
||||
default=0, help='Delay between event and final status emit')
|
||||
parser.add_argument('--debug', dest='debug', type=bool, metavar='d',
|
||||
default=False, help='Enable step mode to control emission of job events one at a time.')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
job_id = options.get('job_id')
|
||||
speed = options.get('speed') or 1
|
||||
verbosity = options.get('verbosity') or 0
|
||||
skip = options.get('skip') or 0
|
||||
random_seed = options.get('random_seed')
|
||||
final_status_delay = options.get('final_status_delay')
|
||||
debug = options.get('debug')
|
||||
skip = self._parse_slice_range(options.get('skip_range'))
|
||||
|
||||
replayer = ReplayJobEvents()
|
||||
replayer.run(job_id, speed, verbosity, skip)
|
||||
replayer.run(job_id, speed=speed, verbosity=verbosity, skip_range=skip, random_seed=random_seed,
|
||||
final_status_delay=final_status_delay, debug=debug)
|
||||
|
||||
37
awx/main/management/commands/revoke_oauth2_tokens.py
Normal file
37
awx/main/management/commands/revoke_oauth2_tokens.py
Normal file
@@ -0,0 +1,37 @@
|
||||
# Django
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.contrib.auth.models import User
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
|
||||
# AWX
|
||||
from awx.main.models.oauth import OAuth2AccessToken
|
||||
from oauth2_provider.models import RefreshToken
|
||||
|
||||
|
||||
def revoke_tokens(token_list):
|
||||
for token in token_list:
|
||||
token.revoke()
|
||||
print('revoked {} {}'.format(token.__class__.__name__, token.token))
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Command that revokes OAuth2 access tokens."""
|
||||
help='Revokes OAuth2 access tokens. Use --all to revoke access and refresh tokens.'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--user', dest='user', type=str, help='revoke OAuth2 tokens for a specific username')
|
||||
parser.add_argument('--all', dest='all', action='store_true', help='revoke OAuth2 access tokens and refresh tokens')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
if not options['user']:
|
||||
if options['all']:
|
||||
revoke_tokens(RefreshToken.objects.filter(revoked=None))
|
||||
revoke_tokens(OAuth2AccessToken.objects.all())
|
||||
else:
|
||||
try:
|
||||
user = User.objects.get(username=options['user'])
|
||||
except ObjectDoesNotExist:
|
||||
raise CommandError('A user with that username does not exist.')
|
||||
if options['all']:
|
||||
revoke_tokens(RefreshToken.objects.filter(revoked=None).filter(user=user))
|
||||
revoke_tokens(user.main_oauth2accesstoken.filter(user=user))
|
||||
@@ -1,224 +1,11 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import time
|
||||
from uuid import UUID
|
||||
from multiprocessing import Process
|
||||
from multiprocessing import Queue as MPQueue
|
||||
from Queue import Empty as QueueEmpty
|
||||
from Queue import Full as QueueFull
|
||||
|
||||
from kombu import Connection, Exchange, Queue
|
||||
from kombu.mixins import ConsumerMixin
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection
|
||||
from django.db import DatabaseError, OperationalError
|
||||
from django.db.utils import InterfaceError, InternalError
|
||||
from django.core.cache import cache as django_cache
|
||||
from kombu import Connection, Exchange, Queue
|
||||
|
||||
# AWX
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
|
||||
|
||||
class WorkerSignalHandler:
|
||||
|
||||
def __init__(self):
|
||||
self.kill_now = False
|
||||
signal.signal(signal.SIGINT, self.exit_gracefully)
|
||||
signal.signal(signal.SIGTERM, self.exit_gracefully)
|
||||
|
||||
def exit_gracefully(self, *args, **kwargs):
|
||||
self.kill_now = True
|
||||
|
||||
|
||||
class CallbackBrokerWorker(ConsumerMixin):
|
||||
|
||||
MAX_RETRIES = 2
|
||||
|
||||
def __init__(self, connection, use_workers=True):
|
||||
self.connection = connection
|
||||
self.worker_queues = []
|
||||
self.total_messages = 0
|
||||
self.init_workers(use_workers)
|
||||
|
||||
def init_workers(self, use_workers=True):
|
||||
def shutdown_handler(active_workers):
|
||||
def _handler(signum, frame):
|
||||
try:
|
||||
for active_worker in active_workers:
|
||||
active_worker.terminate()
|
||||
signal.signal(signum, signal.SIG_DFL)
|
||||
os.kill(os.getpid(), signum) # Rethrow signal, this time without catching it
|
||||
except Exception:
|
||||
logger.exception('Error in shutdown_handler')
|
||||
return _handler
|
||||
|
||||
if use_workers:
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
for idx in range(settings.JOB_EVENT_WORKERS):
|
||||
queue_actual = MPQueue(settings.JOB_EVENT_MAX_QUEUE_SIZE)
|
||||
w = Process(target=self.callback_worker, args=(queue_actual, idx,))
|
||||
w.start()
|
||||
if settings.DEBUG:
|
||||
logger.info('Started worker %s' % str(idx))
|
||||
self.worker_queues.append([0, queue_actual, w])
|
||||
elif settings.DEBUG:
|
||||
logger.warn('Started callback receiver (no workers)')
|
||||
|
||||
signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues]))
|
||||
signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues]))
|
||||
|
||||
def get_consumers(self, Consumer, channel):
|
||||
return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
|
||||
Exchange(settings.CALLBACK_QUEUE, type='direct'),
|
||||
routing_key=settings.CALLBACK_QUEUE)],
|
||||
accept=['json'],
|
||||
callbacks=[self.process_task])]
|
||||
|
||||
def process_task(self, body, message):
|
||||
if "uuid" in body and body['uuid']:
|
||||
try:
|
||||
queue = UUID(body['uuid']).int % settings.JOB_EVENT_WORKERS
|
||||
except Exception:
|
||||
queue = self.total_messages % settings.JOB_EVENT_WORKERS
|
||||
else:
|
||||
queue = self.total_messages % settings.JOB_EVENT_WORKERS
|
||||
self.write_queue_worker(queue, body)
|
||||
self.total_messages += 1
|
||||
message.ack()
|
||||
|
||||
def write_queue_worker(self, preferred_queue, body):
|
||||
queue_order = sorted(range(settings.JOB_EVENT_WORKERS), cmp=lambda x, y: -1 if x==preferred_queue else 0)
|
||||
write_attempt_order = []
|
||||
for queue_actual in queue_order:
|
||||
try:
|
||||
worker_actual = self.worker_queues[queue_actual]
|
||||
worker_actual[1].put(body, block=True, timeout=5)
|
||||
worker_actual[0] += 1
|
||||
return queue_actual
|
||||
except QueueFull:
|
||||
pass
|
||||
except Exception:
|
||||
import traceback
|
||||
tb = traceback.format_exc()
|
||||
logger.warn("Could not write to queue %s" % preferred_queue)
|
||||
logger.warn("Detail: {}".format(tb))
|
||||
write_attempt_order.append(preferred_queue)
|
||||
logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order))
|
||||
return None
|
||||
|
||||
def callback_worker(self, queue_actual, idx):
|
||||
signal_handler = WorkerSignalHandler()
|
||||
while not signal_handler.kill_now:
|
||||
try:
|
||||
body = queue_actual.get(block=True, timeout=1)
|
||||
except QueueEmpty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error("Exception on worker thread, restarting: " + str(e))
|
||||
continue
|
||||
try:
|
||||
|
||||
event_map = {
|
||||
'job_id': JobEvent,
|
||||
'ad_hoc_command_id': AdHocCommandEvent,
|
||||
'project_update_id': ProjectUpdateEvent,
|
||||
'inventory_update_id': InventoryUpdateEvent,
|
||||
'system_job_id': SystemJobEvent,
|
||||
}
|
||||
|
||||
if not any([key in body for key in event_map]):
|
||||
raise Exception('Payload does not have a job identifier')
|
||||
if settings.DEBUG:
|
||||
from pygments import highlight
|
||||
from pygments.lexers import PythonLexer
|
||||
from pygments.formatters import Terminal256Formatter
|
||||
from pprint import pformat
|
||||
logger.info('Body: {}'.format(
|
||||
highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
|
||||
)[:1024 * 4])
|
||||
|
||||
def _save_event_data():
|
||||
for key, cls in event_map.items():
|
||||
if key in body:
|
||||
cls.create_from_data(**body)
|
||||
|
||||
job_identifier = 'unknown job'
|
||||
for key in event_map.keys():
|
||||
if key in body:
|
||||
job_identifier = body[key]
|
||||
break
|
||||
|
||||
if body.get('event') == 'EOF':
|
||||
try:
|
||||
final_counter = body.get('final_counter', 0)
|
||||
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
|
||||
# EOF events are sent when stdout for the running task is
|
||||
# closed. don't actually persist them to the database; we
|
||||
# just use them to report `summary` websocket events as an
|
||||
# approximation for when a job is "done"
|
||||
emit_channel_notification(
|
||||
'jobs-summary',
|
||||
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
|
||||
)
|
||||
# Additionally, when we've processed all events, we should
|
||||
# have all the data we need to send out success/failure
|
||||
# notification templates
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
if hasattr(uj, 'send_notification_templates'):
|
||||
retries = 0
|
||||
while retries < 5:
|
||||
if uj.finished:
|
||||
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
|
||||
break
|
||||
else:
|
||||
# wait a few seconds to avoid a race where the
|
||||
# events are persisted _before_ the UJ.status
|
||||
# changes from running -> successful
|
||||
retries += 1
|
||||
time.sleep(1)
|
||||
uj = UnifiedJob.objects.get(pk=job_identifier)
|
||||
except Exception:
|
||||
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
|
||||
continue
|
||||
|
||||
retries = 0
|
||||
while retries <= self.MAX_RETRIES:
|
||||
try:
|
||||
_save_event_data()
|
||||
break
|
||||
except (OperationalError, InterfaceError, InternalError) as e:
|
||||
if retries >= self.MAX_RETRIES:
|
||||
logger.exception('Worker could not re-establish database connectivity, shutting down gracefully: Job {}'.format(job_identifier))
|
||||
os.kill(os.getppid(), signal.SIGINT)
|
||||
return
|
||||
delay = 60 * retries
|
||||
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
|
||||
i=retries + 1,
|
||||
delay=delay
|
||||
))
|
||||
django_connection.close()
|
||||
time.sleep(delay)
|
||||
retries += 1
|
||||
except DatabaseError as e:
|
||||
logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
|
||||
break
|
||||
except Exception as exc:
|
||||
import traceback
|
||||
tb = traceback.format_exc()
|
||||
logger.error('Callback Task Processor Raised Exception: %r', exc)
|
||||
logger.error('Detail: {}'.format(tb))
|
||||
from awx.main.dispatch.worker import AWXConsumer, CallbackBrokerWorker
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -231,8 +18,22 @@ class Command(BaseCommand):
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
consumer = None
|
||||
try:
|
||||
worker = CallbackBrokerWorker(conn)
|
||||
worker.run()
|
||||
consumer = AWXConsumer(
|
||||
'callback_receiver',
|
||||
conn,
|
||||
CallbackBrokerWorker(),
|
||||
[
|
||||
Queue(
|
||||
settings.CALLBACK_QUEUE,
|
||||
Exchange(settings.CALLBACK_QUEUE, type='direct'),
|
||||
routing_key=settings.CALLBACK_QUEUE
|
||||
)
|
||||
]
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
print('Terminating Callback Receiver')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
|
||||
128
awx/main/management/commands/run_dispatcher.py
Normal file
128
awx/main/management/commands/run_dispatcher.py
Normal file
@@ -0,0 +1,128 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
import os
|
||||
import logging
|
||||
from multiprocessing import Process
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection, connections
|
||||
from kombu import Connection, Exchange, Queue
|
||||
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def construct_bcast_queue_name(common_name):
|
||||
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Launch the task dispatcher'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--status', dest='status', action='store_true',
|
||||
help='print the internal state of any running dispatchers')
|
||||
parser.add_argument('--running', dest='running', action='store_true',
|
||||
help='print the UUIDs of any tasked managed by this dispatcher')
|
||||
parser.add_argument('--reload', dest='reload', action='store_true',
|
||||
help=('cause the dispatcher to recycle all of its worker processes;'
|
||||
'running jobs will run to completion first'))
|
||||
|
||||
def beat(self):
|
||||
from celery import Celery
|
||||
from celery.beat import PersistentScheduler
|
||||
from celery.apps import beat
|
||||
|
||||
class AWXScheduler(PersistentScheduler):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.ppid = os.getppid()
|
||||
super(AWXScheduler, self).__init__(*args, **kwargs)
|
||||
|
||||
def setup_schedule(self):
|
||||
super(AWXScheduler, self).setup_schedule()
|
||||
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
|
||||
|
||||
def tick(self, *args, **kwargs):
|
||||
if os.getppid() != self.ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
raise SystemExit()
|
||||
return super(AWXScheduler, self).tick(*args, **kwargs)
|
||||
|
||||
def apply_async(self, entry, producer=None, advance=True, **kwargs):
|
||||
for conn in connections.all():
|
||||
# If the database connection has a hiccup, re-establish a new
|
||||
# connection
|
||||
conn.close_if_unusable_or_obsolete()
|
||||
task = TaskWorker.resolve_callable(entry.task)
|
||||
result, queue = task.apply_async()
|
||||
|
||||
class TaskResult(object):
|
||||
id = result['uuid']
|
||||
|
||||
return TaskResult()
|
||||
|
||||
app = Celery()
|
||||
app.conf.BROKER_URL = settings.BROKER_URL
|
||||
app.conf.CELERY_TASK_RESULT_EXPIRES = False
|
||||
beat.Beat(
|
||||
30,
|
||||
app,
|
||||
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
|
||||
).run()
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
print Control('dispatcher').status()
|
||||
return
|
||||
if options.get('running'):
|
||||
print Control('dispatcher').running()
|
||||
return
|
||||
if options.get('reload'):
|
||||
return Control('dispatcher').control({'control': 'reload'})
|
||||
|
||||
# It's important to close these because we're _about_ to fork, and we
|
||||
# don't want the forked processes to inherit the open sockets
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
beat = Process(target=self.beat)
|
||||
beat.daemon = True
|
||||
beat.start()
|
||||
|
||||
reaper.reap()
|
||||
consumer = None
|
||||
with Connection(settings.BROKER_URL) as conn:
|
||||
try:
|
||||
bcast = 'tower_broadcast_all'
|
||||
queues = [
|
||||
Queue(q, Exchange(q), routing_key=q)
|
||||
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
|
||||
]
|
||||
queues.append(
|
||||
Queue(
|
||||
construct_bcast_queue_name(bcast),
|
||||
exchange=Exchange(bcast, type='fanout'),
|
||||
routing_key=bcast,
|
||||
reply=True
|
||||
)
|
||||
)
|
||||
consumer = AWXConsumer(
|
||||
'dispatcher',
|
||||
conn,
|
||||
TaskWorker(),
|
||||
queues,
|
||||
AutoscalePool(min_workers=4)
|
||||
)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
@@ -1,66 +0,0 @@
|
||||
import datetime
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
from celery import Celery
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""Watch local celery workers"""
|
||||
help=("Sends a periodic ping to the local celery process over AMQP to ensure "
|
||||
"it's responsive; this command is only intended to run in an environment "
|
||||
"where celeryd is running")
|
||||
|
||||
#
|
||||
# Just because celery is _running_ doesn't mean it's _working_; it's
|
||||
# imperative that celery workers are _actually_ handling AMQP messages on
|
||||
# their appropriate queues for awx to function. Unfortunately, we've been
|
||||
# plagued by a variety of bugs in celery that cause it to hang and become
|
||||
# an unresponsive zombie, such as:
|
||||
#
|
||||
# https://github.com/celery/celery/issues/4185
|
||||
# https://github.com/celery/celery/issues/4457
|
||||
#
|
||||
# The goal of this code is periodically send a broadcast AMQP message to
|
||||
# the celery process on the local host via celery.app.control.ping;
|
||||
# If that _fails_, we attempt to determine the pid of the celery process
|
||||
# and send SIGHUP (which tends to resolve these sorts of issues for us).
|
||||
#
|
||||
|
||||
INTERVAL = 60
|
||||
|
||||
def _log(self, msg):
|
||||
sys.stderr.write(datetime.datetime.utcnow().isoformat())
|
||||
sys.stderr.write(' ')
|
||||
sys.stderr.write(msg)
|
||||
sys.stderr.write('\n')
|
||||
|
||||
def handle(self, **options):
|
||||
app = Celery('awx')
|
||||
app.config_from_object('django.conf:settings')
|
||||
while True:
|
||||
try:
|
||||
pongs = app.control.ping(['celery@{}'.format(settings.CLUSTER_HOST_ID)], timeout=30)
|
||||
except Exception:
|
||||
pongs = []
|
||||
if not pongs:
|
||||
self._log('celery is not responsive to ping over local AMQP')
|
||||
pid = self.getpid()
|
||||
if pid:
|
||||
self._log('sending SIGHUP to {}'.format(pid))
|
||||
os.kill(pid, signal.SIGHUP)
|
||||
time.sleep(self.INTERVAL)
|
||||
|
||||
def getpid(self):
|
||||
cmd = 'supervisorctl pid tower-processes:awx-celeryd'
|
||||
if os.path.exists('/supervisor_task.conf'):
|
||||
cmd = 'supervisorctl -c /supervisor_task.conf pid tower-processes:celery'
|
||||
try:
|
||||
return int(subprocess.check_output(cmd, shell=True))
|
||||
except Exception:
|
||||
self._log('could not detect celery pid')
|
||||
@@ -1,9 +1,9 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import uuid
|
||||
import logging
|
||||
import threading
|
||||
import uuid
|
||||
import six
|
||||
import time
|
||||
import cProfile
|
||||
@@ -128,8 +128,9 @@ class SessionTimeoutMiddleware(object):
|
||||
def process_response(self, request, response):
|
||||
req_session = getattr(request, 'session', None)
|
||||
if req_session and not req_session.is_empty():
|
||||
request.session.set_expiry(request.session.get_expiry_age())
|
||||
response['Session-Timeout'] = int(settings.SESSION_COOKIE_AGE)
|
||||
expiry = int(settings.SESSION_COOKIE_AGE)
|
||||
request.session.set_expiry(expiry)
|
||||
response['Session-Timeout'] = expiry
|
||||
return response
|
||||
|
||||
|
||||
|
||||
@@ -157,7 +157,7 @@ class Migration(migrations.Migration):
|
||||
('status', models.CharField(default=b'pending', max_length=20, editable=False, choices=[(b'pending', 'Pending'), (b'successful', 'Successful'), (b'failed', 'Failed')])),
|
||||
('error', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('notifications_sent', models.IntegerField(default=0, editable=False)),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'irc', 'IRC')])),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'rocketchat', 'Rocket.Chat'), (b'irc', 'IRC')])),
|
||||
('recipients', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('subject', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('body', jsonfield.fields.JSONField(default=dict, blank=True)),
|
||||
@@ -174,7 +174,7 @@ class Migration(migrations.Migration):
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('description', models.TextField(default=b'', blank=True)),
|
||||
('name', models.CharField(unique=True, max_length=512)),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'irc', 'IRC')])),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'rocketchat', 'Rocket.Chat'), (b'irc', 'IRC')])),
|
||||
('notification_configuration', jsonfield.fields.JSONField(default=dict)),
|
||||
('created_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
|
||||
('modified_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
|
||||
|
||||
@@ -27,7 +27,7 @@ class Migration(migrations.Migration):
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('inventory_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.InventoryUpdate')),
|
||||
('inventory_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_update_events', to='main.InventoryUpdate')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('-pk',),
|
||||
@@ -53,7 +53,7 @@ class Migration(migrations.Migration):
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('project_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.ProjectUpdate')),
|
||||
('project_update', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='project_update_events', to='main.ProjectUpdate')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('pk',),
|
||||
@@ -72,12 +72,24 @@ class Migration(migrations.Migration):
|
||||
('verbosity', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('start_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('end_line', models.PositiveIntegerField(default=0, editable=False)),
|
||||
('system_job', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='generic_command_events', to='main.SystemJob')),
|
||||
('system_job', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='system_job_events', to='main.SystemJob')),
|
||||
],
|
||||
options={
|
||||
'ordering': ('-pk',),
|
||||
},
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='inventoryupdateevent',
|
||||
index_together=set([('inventory_update', 'start_line'), ('inventory_update', 'uuid'), ('inventory_update', 'end_line')]),
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='projectupdateevent',
|
||||
index_together=set([('project_update', 'event'), ('project_update', 'end_line'), ('project_update', 'start_line'), ('project_update', 'uuid')]),
|
||||
),
|
||||
migrations.AlterIndexTogether(
|
||||
name='systemjobevent',
|
||||
index_together=set([('system_job', 'end_line'), ('system_job', 'uuid'), ('system_job', 'start_line')]),
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='unifiedjob',
|
||||
name='result_stdout_file',
|
||||
|
||||
@@ -64,12 +64,12 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_access_token',
|
||||
field=models.ManyToManyField(to='main.OAuth2AccessToken', blank=True, related_name='main_o_auth2_accesstoken'),
|
||||
field=models.ManyToManyField(to='main.OAuth2AccessToken', blank=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_application',
|
||||
field=models.ManyToManyField(to='main.OAuth2Application', blank=True, related_name='main_o_auth2_application'),
|
||||
field=models.ManyToManyField(to='main.OAuth2Application', blank=True),
|
||||
),
|
||||
|
||||
]
|
||||
|
||||
@@ -16,6 +16,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AlterField(
|
||||
model_name='oauth2accesstoken',
|
||||
name='scope',
|
||||
field=models.TextField(blank=True, help_text="Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']."),
|
||||
field=models.TextField(blank=True, default=b'write', help_text="Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write']."),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -15,6 +15,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='oauth2accesstoken',
|
||||
name='modified',
|
||||
field=models.DateTimeField(editable=False),
|
||||
field=models.DateTimeField(editable=False, auto_now=True),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -0,0 +1,147 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-08-16 16:46
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0047_v330_activitystream_instance'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credential', u'model_name': 'credential'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credential',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credential', u'model_name': 'credential'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credentialtype',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credentialtype', u'model_name': 'credentialtype'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='credentialtype',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'credentialtype', u'model_name': 'credentialtype'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='custominventoryscript',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'custominventoryscript', u'model_name': 'custominventoryscript'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='custominventoryscript',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'custominventoryscript', u'model_name': 'custominventoryscript'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='group',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'group', u'model_name': 'group'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='group',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'group', u'model_name': 'group'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='host',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'host', u'model_name': 'host'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='host',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'host', u'model_name': 'host'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventory',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'inventory', u'model_name': 'inventory'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventory',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'inventory', u'model_name': 'inventory'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='label',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'label', u'model_name': 'label'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='label',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'label', u'model_name': 'label'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='notificationtemplate',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'notificationtemplate', u'model_name': 'notificationtemplate'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='notificationtemplate',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'notificationtemplate', u'model_name': 'notificationtemplate'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'organization', u'model_name': 'organization'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='organization',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'organization', u'model_name': 'organization'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='schedule',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'schedule', u'model_name': 'schedule'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='schedule',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'schedule', u'model_name': 'schedule'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='team',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'team', u'model_name': 'team'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='team',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'team', u'model_name': 'team'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjob', u'model_name': 'unifiedjob'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjob', u'model_name': 'unifiedjob'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='created_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjobtemplate', u'model_name': 'unifiedjobtemplate'}(class)s_created+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='modified_by',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{u'app_label': 'main', u'class': 'unifiedjobtemplate', u'model_name': 'unifiedjobtemplate'}(class)s_modified+", to=settings.AUTH_USER_MODEL),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-08-17 16:13
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from decimal import Decimal
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0048_v330_django_created_modified_by_model_name'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='instance',
|
||||
name='capacity_adjustment',
|
||||
field=models.DecimalField(decimal_places=2, default=Decimal('1'), max_digits=3, validators=[django.core.validators.MinValueValidator(0)]),
|
||||
),
|
||||
]
|
||||
21
awx/main/migrations/0050_v340_drop_celery_tables.py
Normal file
21
awx/main/migrations/0050_v340_drop_celery_tables.py
Normal file
@@ -0,0 +1,21 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0049_v330_validate_instance_capacity_adjustment'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunSQL([
|
||||
("DROP TABLE IF EXISTS {} CASCADE;".format(table))
|
||||
])
|
||||
for table in ('celery_taskmeta', 'celery_tasksetmeta', 'djcelery_crontabschedule',
|
||||
'djcelery_intervalschedule', 'djcelery_periodictask',
|
||||
'djcelery_periodictasks', 'djcelery_taskstate', 'djcelery_workerstate',
|
||||
'djkombu_message', 'djkombu_queue')
|
||||
]
|
||||
47
awx/main/migrations/0051_v340_job_slicing.py
Normal file
47
awx/main/migrations/0051_v340_job_slicing.py
Normal file
@@ -0,0 +1,47 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-10-15 16:21
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import awx.main.utils.polymorphic
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0050_v340_drop_celery_tables'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='job',
|
||||
name='job_slice_count',
|
||||
field=models.PositiveIntegerField(blank=True, default=1, help_text='If ran as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='job',
|
||||
name='job_slice_number',
|
||||
field=models.PositiveIntegerField(blank=True, default=0, help_text='If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='jobtemplate',
|
||||
name='job_slice_count',
|
||||
field=models.PositiveIntegerField(blank=True, default=1, help_text='The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='is_sliced_job',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='job_template',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='If automatically created for a sliced job run, the job template the workflow job was created from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='slice_workflow_jobs', to='main.JobTemplate'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='unified_job_template',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjob_unified_jobs', to='main.UnifiedJobTemplate'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,19 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-05-18 17:49
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0051_v340_job_slicing'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='project',
|
||||
name='scm_delete_on_next_update',
|
||||
),
|
||||
]
|
||||
37
awx/main/migrations/0053_v340_workflow_inventory.py
Normal file
37
awx/main/migrations/0053_v340_workflow_inventory.py
Normal file
@@ -0,0 +1,37 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-09-27 19:50
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0052_v340_remove_project_scm_delete_on_next_update'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='char_prompts',
|
||||
field=awx.main.fields.JSONField(blank=True, default={}),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='inventory',
|
||||
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='workflowjobs', to='main.Inventory'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjobtemplate',
|
||||
name='ask_inventory_on_launch',
|
||||
field=awx.main.fields.AskForField(default=False),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='workflowjobtemplate',
|
||||
name='inventory',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='Inventory applied to all job templates in workflow that prompt for inventory.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='workflowjobtemplates', to='main.Inventory'),
|
||||
),
|
||||
]
|
||||
20
awx/main/migrations/0054_v340_workflow_convergence.py
Normal file
20
awx/main/migrations/0054_v340_workflow_convergence.py
Normal file
@@ -0,0 +1,20 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by Django 1.11.11 on 2018-09-28 14:23
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0053_v340_workflow_inventory'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='workflowjobnode',
|
||||
name='do_not_run',
|
||||
field=models.BooleanField(default=False, help_text='Indidcates that a job will not be created when True. Workflow runtime semantics will mark this True if the node is in a path that will decidedly not be ran. A value of False means the node may not run.'),
|
||||
),
|
||||
]
|
||||
File diff suppressed because it is too large
Load Diff
@@ -134,6 +134,9 @@ User.add_to_class('is_in_enterprise_category', user_is_in_enterprise_category)
|
||||
|
||||
|
||||
def o_auth2_application_get_absolute_url(self, request=None):
|
||||
# this page does not exist in v1
|
||||
if request.version == 'v1':
|
||||
return reverse('api:o_auth2_application_detail', kwargs={'pk': self.pk}) # use default version
|
||||
return reverse('api:o_auth2_application_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
|
||||
@@ -141,6 +144,9 @@ OAuth2Application.add_to_class('get_absolute_url', o_auth2_application_get_absol
|
||||
|
||||
|
||||
def o_auth2_token_get_absolute_url(self, request=None):
|
||||
# this page does not exist in v1
|
||||
if request.version == 'v1':
|
||||
return reverse('api:o_auth2_token_detail', kwargs={'pk': self.pk}) # use default version
|
||||
return reverse('api:o_auth2_token_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
|
||||
|
||||
@@ -136,8 +136,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin):
|
||||
else:
|
||||
return []
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return ''
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -383,6 +383,8 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
|
||||
super(Credential, self).save(*args, **kwargs)
|
||||
|
||||
def encrypt_field(self, field, ask):
|
||||
if not hasattr(self, field):
|
||||
return None
|
||||
encrypted = encrypt_field(self, field, ask=ask)
|
||||
if encrypted:
|
||||
self.inputs[field] = encrypted
|
||||
@@ -439,15 +441,6 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
|
||||
defaults = OrderedDict()
|
||||
|
||||
ENV_BLACKLIST = set((
|
||||
'VIRTUAL_ENV', 'PATH', 'PYTHONPATH', 'PROOT_TMP_DIR', 'JOB_ID',
|
||||
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
|
||||
'AWX_HOST', 'PROJECT_REVISION'
|
||||
))
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('kind', 'name')
|
||||
@@ -486,6 +479,9 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
# Page does not exist in API v1
|
||||
if request.version == 'v1':
|
||||
return reverse('api:credential_type_detail', kwargs={'pk': self.pk})
|
||||
return reverse('api:credential_type_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@property
|
||||
@@ -589,7 +585,7 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
if not self.injectors:
|
||||
if self.managed_by_tower and credential.kind in dir(builtin_injectors):
|
||||
injected_env = {}
|
||||
getattr(builtin_injectors, credential.kind)(credential, injected_env)
|
||||
getattr(builtin_injectors, credential.kind)(credential, injected_env, private_data_dir)
|
||||
env.update(injected_env)
|
||||
safe_env.update(build_safe_env(injected_env))
|
||||
return
|
||||
@@ -648,8 +644,14 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
file_label = file_label.split('.')[1]
|
||||
setattr(tower_namespace.filename, file_label, path)
|
||||
|
||||
injector_field = self._meta.get_field('injectors')
|
||||
for env_var, tmpl in self.injectors.get('env', {}).items():
|
||||
if env_var.startswith('ANSIBLE_') or env_var in self.ENV_BLACKLIST:
|
||||
try:
|
||||
injector_field.validate_env_var_allowed(env_var)
|
||||
except ValidationError as e:
|
||||
logger.error(six.text_type(
|
||||
'Ignoring prohibited env var {}, reason: {}'
|
||||
).format(env_var, e))
|
||||
continue
|
||||
env[env_var] = Template(tmpl).render(**namespace)
|
||||
safe_env[env_var] = Template(tmpl).render(**safe_namespace)
|
||||
|
||||
@@ -1,20 +1,37 @@
|
||||
import json
|
||||
import os
|
||||
import stat
|
||||
import tempfile
|
||||
|
||||
from awx.main.utils import decrypt_field
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
def aws(cred, env):
|
||||
def aws(cred, env, private_data_dir):
|
||||
env['AWS_ACCESS_KEY_ID'] = cred.username
|
||||
env['AWS_SECRET_ACCESS_KEY'] = decrypt_field(cred, 'password')
|
||||
if len(cred.security_token) > 0:
|
||||
env['AWS_SECURITY_TOKEN'] = decrypt_field(cred, 'security_token')
|
||||
|
||||
|
||||
def gce(cred, env):
|
||||
def gce(cred, env, private_data_dir):
|
||||
env['GCE_EMAIL'] = cred.username
|
||||
env['GCE_PROJECT'] = cred.project
|
||||
json_cred = {
|
||||
'type': 'service_account',
|
||||
'private_key': decrypt_field(cred, 'ssh_key_data'),
|
||||
'client_email': cred.username,
|
||||
'project_id': cred.project
|
||||
}
|
||||
handle, path = tempfile.mkstemp(dir=private_data_dir)
|
||||
f = os.fdopen(handle, 'w')
|
||||
json.dump(json_cred, f)
|
||||
f.close()
|
||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
env['GCE_CREDENTIALS_FILE_PATH'] = path
|
||||
|
||||
|
||||
def azure_rm(cred, env):
|
||||
def azure_rm(cred, env, private_data_dir):
|
||||
if len(cred.client) and len(cred.tenant):
|
||||
env['AZURE_CLIENT_ID'] = cred.client
|
||||
env['AZURE_SECRET'] = decrypt_field(cred, 'secret')
|
||||
@@ -28,7 +45,7 @@ def azure_rm(cred, env):
|
||||
env['AZURE_CLOUD_ENVIRONMENT'] = cred.inputs['cloud_environment']
|
||||
|
||||
|
||||
def vmware(cred, env):
|
||||
def vmware(cred, env, private_data_dir):
|
||||
env['VMWARE_USER'] = cred.username
|
||||
env['VMWARE_PASSWORD'] = decrypt_field(cred, 'password')
|
||||
env['VMWARE_HOST'] = cred.host
|
||||
|
||||
@@ -35,9 +35,9 @@ def sanitize_event_keys(kwargs, valid_keys):
|
||||
for key in [
|
||||
'play', 'role', 'task', 'playbook'
|
||||
]:
|
||||
if isinstance(kwargs.get(key), six.string_types):
|
||||
if len(kwargs[key]) > 1024:
|
||||
kwargs[key] = Truncator(kwargs[key]).chars(1024)
|
||||
if isinstance(kwargs.get('event_data', {}).get(key), six.string_types):
|
||||
if len(kwargs['event_data'][key]) > 1024:
|
||||
kwargs['event_data'][key] = Truncator(kwargs['event_data'][key]).chars(1024)
|
||||
|
||||
|
||||
def create_host_status_counts(event_data):
|
||||
|
||||
@@ -6,6 +6,7 @@ import random
|
||||
from decimal import Decimal
|
||||
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.core.validators import MinValueValidator
|
||||
from django.db import models, connection
|
||||
from django.db.models.signals import post_save, post_delete
|
||||
from django.dispatch import receiver
|
||||
@@ -31,7 +32,7 @@ __all__ = ('Instance', 'InstanceGroup', 'JobOrigin', 'TowerScheduleState',)
|
||||
|
||||
|
||||
def validate_queuename(v):
|
||||
# celery and kombu don't play nice with unicode in queue names
|
||||
# kombu doesn't play nice with unicode in queue names
|
||||
if v:
|
||||
try:
|
||||
'{}'.format(v.decode('utf-8'))
|
||||
@@ -81,6 +82,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
default=Decimal(1.0),
|
||||
max_digits=3,
|
||||
decimal_places=2,
|
||||
validators=[MinValueValidator(0)]
|
||||
)
|
||||
enabled = models.BooleanField(
|
||||
default=True
|
||||
|
||||
@@ -9,7 +9,6 @@ import copy
|
||||
from urlparse import urljoin
|
||||
import os.path
|
||||
import six
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -20,6 +19,9 @@ from django.core.exceptions import ValidationError
|
||||
from django.utils.timezone import now
|
||||
from django.db.models import Q
|
||||
|
||||
# REST Framework
|
||||
from rest_framework.exceptions import ParseError
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
@@ -42,7 +44,7 @@ from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import _inventory_updates, get_ansible_version, region_sorting
|
||||
from awx.main.utils import _inventory_updates, region_sorting
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate',
|
||||
@@ -218,67 +220,89 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
group_children.add(from_group_id)
|
||||
return group_children_map
|
||||
|
||||
def get_script_data(self, hostvars=False, towervars=False, show_all=False):
|
||||
if show_all:
|
||||
hosts_q = dict()
|
||||
else:
|
||||
hosts_q = dict(enabled=True)
|
||||
@staticmethod
|
||||
def parse_slice_params(slice_str):
|
||||
m = re.match(r"slice(?P<number>\d+)of(?P<step>\d+)", slice_str)
|
||||
if not m:
|
||||
raise ParseError(_('Could not parse subset as slice specification.'))
|
||||
number = int(m.group('number'))
|
||||
step = int(m.group('step'))
|
||||
if number > step:
|
||||
raise ParseError(_('Slice number must be less than total number of slices.'))
|
||||
elif number < 1:
|
||||
raise ParseError(_('Slice number must be 1 or higher.'))
|
||||
return (number, step)
|
||||
|
||||
def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1):
|
||||
hosts_kw = dict()
|
||||
if not show_all:
|
||||
hosts_kw['enabled'] = True
|
||||
fetch_fields = ['name', 'id', 'variables']
|
||||
if towervars:
|
||||
fetch_fields.append('enabled')
|
||||
hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields)
|
||||
if slice_count > 1:
|
||||
offset = slice_number - 1
|
||||
hosts = hosts[offset::slice_count]
|
||||
|
||||
data = dict()
|
||||
all_group = data.setdefault('all', dict())
|
||||
all_hostnames = set(host.name for host in hosts)
|
||||
|
||||
if self.variables_dict:
|
||||
all_group = data.setdefault('all', dict())
|
||||
all_group['vars'] = self.variables_dict
|
||||
|
||||
if self.kind == 'smart':
|
||||
if len(self.hosts.all()) == 0:
|
||||
return {}
|
||||
else:
|
||||
all_group = data.setdefault('all', dict())
|
||||
smart_hosts_qs = self.hosts.filter(**hosts_q).all()
|
||||
smart_hosts = list(smart_hosts_qs.values_list('name', flat=True))
|
||||
all_group['hosts'] = smart_hosts
|
||||
all_group['hosts'] = [host.name for host in hosts]
|
||||
else:
|
||||
# Add hosts without a group to the all group.
|
||||
groupless_hosts_qs = self.hosts.filter(groups__isnull=True, **hosts_q)
|
||||
groupless_hosts = list(groupless_hosts_qs.values_list('name', flat=True))
|
||||
if groupless_hosts:
|
||||
all_group = data.setdefault('all', dict())
|
||||
all_group['hosts'] = groupless_hosts
|
||||
# Keep track of hosts that are members of a group
|
||||
grouped_hosts = set([])
|
||||
|
||||
# Build in-memory mapping of groups and their hosts.
|
||||
group_hosts_kw = dict(group__inventory_id=self.id, host__inventory_id=self.id)
|
||||
if 'enabled' in hosts_q:
|
||||
group_hosts_kw['host__enabled'] = hosts_q['enabled']
|
||||
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
|
||||
group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id', 'host__name')
|
||||
group_hosts_qs = Group.hosts.through.objects.filter(
|
||||
group__inventory_id=self.id,
|
||||
host__inventory_id=self.id
|
||||
).values_list('group_id', 'host_id', 'host__name')
|
||||
group_hosts_map = {}
|
||||
for group_id, host_id, host_name in group_hosts_qs:
|
||||
if host_name not in all_hostnames:
|
||||
continue # host might not be in current shard
|
||||
group_hostnames = group_hosts_map.setdefault(group_id, [])
|
||||
group_hostnames.append(host_name)
|
||||
grouped_hosts.add(host_name)
|
||||
|
||||
# Build in-memory mapping of groups and their children.
|
||||
group_parents_qs = Group.parents.through.objects.filter(
|
||||
from_group__inventory_id=self.id,
|
||||
to_group__inventory_id=self.id,
|
||||
)
|
||||
group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name',
|
||||
'to_group_id')
|
||||
).values_list('from_group_id', 'from_group__name', 'to_group_id')
|
||||
group_children_map = {}
|
||||
for from_group_id, from_group_name, to_group_id in group_parents_qs:
|
||||
group_children = group_children_map.setdefault(to_group_id, [])
|
||||
group_children.append(from_group_name)
|
||||
|
||||
# Now use in-memory maps to build up group info.
|
||||
for group in self.groups.all():
|
||||
for group in self.groups.only('name', 'id', 'variables'):
|
||||
group_info = dict()
|
||||
group_info['hosts'] = group_hosts_map.get(group.id, [])
|
||||
group_info['children'] = group_children_map.get(group.id, [])
|
||||
group_info['vars'] = group.variables_dict
|
||||
data[group.name] = group_info
|
||||
|
||||
# Add ungrouped hosts to all group
|
||||
all_group['hosts'] = [host.name for host in hosts if host.name not in grouped_hosts]
|
||||
|
||||
# Remove any empty groups
|
||||
for group_name in list(data.keys()):
|
||||
if group_name == 'all':
|
||||
continue
|
||||
if not (data.get(group_name, {}).get('hosts', []) or data.get(group_name, {}).get('children', [])):
|
||||
data.pop(group_name)
|
||||
|
||||
if hostvars:
|
||||
data.setdefault('_meta', dict())
|
||||
data['_meta'].setdefault('hostvars', dict())
|
||||
for host in self.hosts.filter(**hosts_q):
|
||||
for host in hosts:
|
||||
data['_meta']['hostvars'][host.name] = host.variables_dict
|
||||
if towervars:
|
||||
tower_dict = dict(remote_tower_enabled=str(host.enabled).lower(),
|
||||
@@ -1262,6 +1286,11 @@ class InventorySourceOptions(BaseModel):
|
||||
'Credentials of type machine, source control, insights and vault are '
|
||||
'disallowed for custom inventory sources.'
|
||||
)
|
||||
elif source == 'scm' and cred and cred.credential_type.kind in ('insights', 'vault'):
|
||||
return _(
|
||||
'Credentials of type insights and vault are '
|
||||
'disallowed for scm inventory sources.'
|
||||
)
|
||||
return None
|
||||
|
||||
def get_inventory_plugin_name(self):
|
||||
@@ -1573,12 +1602,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMix
|
||||
"Instead, configure the corresponding source project to update on launch."))
|
||||
return self.update_on_launch
|
||||
|
||||
def clean_overwrite_vars(self): # TODO: remove when Ansible 2.4 becomes unsupported, obviously
|
||||
if self.source == 'scm' and not self.overwrite_vars:
|
||||
if get_ansible_version() < LooseVersion('2.5'):
|
||||
raise ValidationError(_("SCM type sources must set `overwrite_vars` to `true` until Ansible 2.5."))
|
||||
return self.overwrite_vars
|
||||
|
||||
def clean_source_path(self):
|
||||
if self.source != 'scm' and self.source_path:
|
||||
raise ValidationError(_("Cannot set source_path if not SCM type."))
|
||||
@@ -1626,8 +1649,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
null=True
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'inventory_source'
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -34,7 +34,7 @@ from awx.main.models.notifications import (
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.fields import ImplicitRoleField, JSONField, AskForField
|
||||
from awx.main.models.mixins import (
|
||||
ResourceMixin,
|
||||
SurveyJobTemplateMixin,
|
||||
@@ -43,7 +43,6 @@ from awx.main.models.mixins import (
|
||||
CustomVirtualEnvMixin,
|
||||
RelatedJobsMixin,
|
||||
)
|
||||
from awx.main.fields import JSONField, AskForField
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.models.jobs')
|
||||
@@ -238,11 +237,11 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
app_label = 'main'
|
||||
ordering = ('name',)
|
||||
|
||||
host_config_key = models.CharField(
|
||||
host_config_key = prevent_search(models.CharField(
|
||||
max_length=1024,
|
||||
blank=True,
|
||||
default='',
|
||||
)
|
||||
))
|
||||
ask_diff_mode_on_launch = AskForField(
|
||||
blank=True,
|
||||
default=False,
|
||||
@@ -277,6 +276,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
default=False,
|
||||
allows_field='credentials'
|
||||
)
|
||||
job_slice_count = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=1,
|
||||
help_text=_("The number of jobs to slice into at runtime. "
|
||||
"Will cause the Job Template to launch a workflow if value is greater than 1."),
|
||||
)
|
||||
|
||||
admin_role = ImplicitRoleField(
|
||||
parent_role=['project.organization.job_template_admin_role', 'inventory.organization.job_template_admin_role']
|
||||
)
|
||||
@@ -295,7 +301,8 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in JobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials']
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials',
|
||||
'job_slice_number', 'job_slice_count']
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -307,7 +314,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
if self.inventory is None and not self.ask_inventory_on_launch:
|
||||
validation_errors['inventory'] = [_("Job Template must provide 'inventory' or allow prompting for it."),]
|
||||
if self.project is None:
|
||||
validation_errors['project'] = [_("Job types 'run' and 'check' must have assigned a project."),]
|
||||
validation_errors['project'] = [_("Job Templates must have a project assigned."),]
|
||||
return validation_errors
|
||||
|
||||
@property
|
||||
@@ -320,6 +327,34 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
'''
|
||||
return self.create_unified_job(**kwargs)
|
||||
|
||||
def create_unified_job(self, **kwargs):
|
||||
prevent_slicing = kwargs.pop('_prevent_slicing', False)
|
||||
slice_event = bool(self.job_slice_count > 1 and (not prevent_slicing))
|
||||
if slice_event:
|
||||
# A Slice Job Template will generate a WorkflowJob rather than a Job
|
||||
from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobNode
|
||||
kwargs['_unified_job_class'] = WorkflowJobTemplate._get_unified_job_class()
|
||||
kwargs['_parent_field_name'] = "job_template"
|
||||
kwargs.setdefault('_eager_fields', {})
|
||||
kwargs['_eager_fields']['is_sliced_job'] = True
|
||||
elif prevent_slicing:
|
||||
kwargs.setdefault('_eager_fields', {})
|
||||
kwargs['_eager_fields'].setdefault('job_slice_count', 1)
|
||||
job = super(JobTemplate, self).create_unified_job(**kwargs)
|
||||
if slice_event:
|
||||
try:
|
||||
wj_config = job.launch_config
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
wj_config = JobLaunchConfig()
|
||||
actual_inventory = wj_config.inventory if wj_config.inventory else self.inventory
|
||||
for idx in xrange(min(self.job_slice_count,
|
||||
actual_inventory.hosts.count())):
|
||||
create_kwargs = dict(workflow_job=job,
|
||||
unified_job_template=self,
|
||||
ancestor_artifacts=dict(job_slice=idx + 1))
|
||||
WorkflowJobNode.objects.create(**create_kwargs)
|
||||
return job
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
@@ -452,7 +487,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
RelatedJobsMixin
|
||||
'''
|
||||
def _get_related_jobs(self):
|
||||
return Job.objects.filter(job_template=self)
|
||||
return UnifiedJob.objects.filter(unified_job_template=self)
|
||||
|
||||
|
||||
class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskManagerJobMixin):
|
||||
@@ -501,10 +536,21 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
on_delete=models.SET_NULL,
|
||||
help_text=_('The SCM Refresh task used to make sure the playbooks were available for the job run'),
|
||||
)
|
||||
job_slice_number = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=0,
|
||||
help_text=_("If part of a sliced job, the ID of the inventory slice operated on. "
|
||||
"If not part of sliced job, parameter is not used."),
|
||||
)
|
||||
job_slice_count = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=1,
|
||||
help_text=_("If ran as part of sliced jobs, the total number of slices. "
|
||||
"If 1, job is not part of a sliced job."),
|
||||
)
|
||||
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'job_template'
|
||||
|
||||
@classmethod
|
||||
@@ -545,6 +591,15 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
def event_class(self):
|
||||
return JobEvent
|
||||
|
||||
def copy_unified_job(self, **new_prompts):
|
||||
# Needed for job slice relaunch consistency, do no re-spawn workflow job
|
||||
# target same slice as original job
|
||||
new_prompts['_prevent_slicing'] = True
|
||||
new_prompts.setdefault('_eager_fields', {})
|
||||
new_prompts['_eager_fields']['job_slice_number'] = self.job_slice_number
|
||||
new_prompts['_eager_fields']['job_slice_count'] = self.job_slice_count
|
||||
return super(Job, self).copy_unified_job(**new_prompts)
|
||||
|
||||
@property
|
||||
def ask_diff_mode_on_launch(self):
|
||||
if self.job_template is not None:
|
||||
@@ -638,6 +693,9 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
count_hosts = 2
|
||||
else:
|
||||
count_hosts = Host.objects.filter(inventory__jobs__pk=self.pk).count()
|
||||
if self.job_slice_count > 1:
|
||||
# Integer division intentional
|
||||
count_hosts = (count_hosts + self.job_slice_count - self.job_slice_number) / self.job_slice_count
|
||||
return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
|
||||
|
||||
@property
|
||||
@@ -836,19 +894,19 @@ class NullablePromptPsuedoField(object):
|
||||
instance.char_prompts[self.field_name] = value
|
||||
|
||||
|
||||
class LaunchTimeConfig(BaseModel):
|
||||
class LaunchTimeConfigBase(BaseModel):
|
||||
'''
|
||||
Common model for all objects that save details of a saved launch config
|
||||
WFJT / WJ nodes, schedules, and job launch configs (not all implemented yet)
|
||||
Needed as separate class from LaunchTimeConfig because some models
|
||||
use `extra_data` and some use `extra_vars`. We cannot change the API,
|
||||
so we force fake it in the model definitions
|
||||
- model defines extra_vars - use this class
|
||||
- model needs to use extra data - use LaunchTimeConfig
|
||||
Use this for models which are SurveyMixins and UnifiedJobs or Templates
|
||||
'''
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
# Prompting-related fields that have to be handled as special cases
|
||||
credentials = models.ManyToManyField(
|
||||
'Credential',
|
||||
related_name='%(class)ss'
|
||||
)
|
||||
inventory = models.ForeignKey(
|
||||
'Inventory',
|
||||
related_name='%(class)ss',
|
||||
@@ -857,15 +915,6 @@ class LaunchTimeConfig(BaseModel):
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
extra_data = JSONField(
|
||||
blank=True,
|
||||
default={}
|
||||
)
|
||||
survey_passwords = prevent_search(JSONField(
|
||||
blank=True,
|
||||
default={},
|
||||
editable=False,
|
||||
))
|
||||
# All standard fields are stored in this dictionary field
|
||||
# This is a solution to the nullable CharField problem, specific to prompting
|
||||
char_prompts = JSONField(
|
||||
@@ -875,6 +924,7 @@ class LaunchTimeConfig(BaseModel):
|
||||
|
||||
def prompts_dict(self, display=False):
|
||||
data = {}
|
||||
# Some types may have different prompts, but always subset of JT prompts
|
||||
for prompt_name in JobTemplate.get_ask_mapping().keys():
|
||||
try:
|
||||
field = self._meta.get_field(prompt_name)
|
||||
@@ -887,11 +937,11 @@ class LaunchTimeConfig(BaseModel):
|
||||
if len(prompt_val) > 0:
|
||||
data[prompt_name] = prompt_val
|
||||
elif prompt_name == 'extra_vars':
|
||||
if self.extra_data:
|
||||
if self.extra_vars:
|
||||
if display:
|
||||
data[prompt_name] = self.display_extra_data()
|
||||
data[prompt_name] = self.display_extra_vars()
|
||||
else:
|
||||
data[prompt_name] = self.extra_data
|
||||
data[prompt_name] = self.extra_vars
|
||||
if self.survey_passwords and not display:
|
||||
data['survey_passwords'] = self.survey_passwords
|
||||
else:
|
||||
@@ -900,18 +950,21 @@ class LaunchTimeConfig(BaseModel):
|
||||
data[prompt_name] = prompt_val
|
||||
return data
|
||||
|
||||
def display_extra_data(self):
|
||||
def display_extra_vars(self):
|
||||
'''
|
||||
Hides fields marked as passwords in survey.
|
||||
'''
|
||||
if self.survey_passwords:
|
||||
extra_data = parse_yaml_or_json(self.extra_data).copy()
|
||||
extra_vars = parse_yaml_or_json(self.extra_vars).copy()
|
||||
for key, value in self.survey_passwords.items():
|
||||
if key in extra_data:
|
||||
extra_data[key] = value
|
||||
return extra_data
|
||||
if key in extra_vars:
|
||||
extra_vars[key] = value
|
||||
return extra_vars
|
||||
else:
|
||||
return self.extra_data
|
||||
return self.extra_vars
|
||||
|
||||
def display_extra_data(self):
|
||||
return self.display_extra_vars()
|
||||
|
||||
@property
|
||||
def _credential(self):
|
||||
@@ -935,7 +988,42 @@ class LaunchTimeConfig(BaseModel):
|
||||
return None
|
||||
|
||||
|
||||
class LaunchTimeConfig(LaunchTimeConfigBase):
|
||||
'''
|
||||
Common model for all objects that save details of a saved launch config
|
||||
WFJT / WJ nodes, schedules, and job launch configs (not all implemented yet)
|
||||
'''
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
# Special case prompting fields, even more special than the other ones
|
||||
extra_data = JSONField(
|
||||
blank=True,
|
||||
default={}
|
||||
)
|
||||
survey_passwords = prevent_search(JSONField(
|
||||
blank=True,
|
||||
default={},
|
||||
editable=False,
|
||||
))
|
||||
# Credentials needed for non-unified job / unified JT models
|
||||
credentials = models.ManyToManyField(
|
||||
'Credential',
|
||||
related_name='%(class)ss'
|
||||
)
|
||||
|
||||
@property
|
||||
def extra_vars(self):
|
||||
return self.extra_data
|
||||
|
||||
@extra_vars.setter
|
||||
def extra_vars(self, extra_vars):
|
||||
self.extra_data = extra_vars
|
||||
|
||||
|
||||
for field_name in JobTemplate.get_ask_mapping().keys():
|
||||
if field_name == 'extra_vars':
|
||||
continue
|
||||
try:
|
||||
LaunchTimeConfig._meta.get_field(field_name)
|
||||
except FieldDoesNotExist:
|
||||
|
||||
@@ -301,14 +301,22 @@ class SurveyJobTemplateMixin(models.Model):
|
||||
accepted.update(extra_vars)
|
||||
extra_vars = {}
|
||||
|
||||
if extra_vars:
|
||||
# Prune the prompted variables for those identical to template
|
||||
tmp_extra_vars = self.extra_vars_dict
|
||||
for key in (set(tmp_extra_vars.keys()) & set(extra_vars.keys())):
|
||||
if tmp_extra_vars[key] == extra_vars[key]:
|
||||
extra_vars.pop(key)
|
||||
|
||||
if extra_vars:
|
||||
# Leftover extra_vars, keys provided that are not allowed
|
||||
rejected.update(extra_vars)
|
||||
# ignored variables does not block manual launch
|
||||
if 'prompts' not in _exclude_errors:
|
||||
errors['extra_vars'] = [_('Variables {list_of_keys} are not allowed on launch. Check the Prompt on Launch setting '+
|
||||
'on the Job Template to include Extra Variables.').format(
|
||||
list_of_keys=', '.join(extra_vars.keys()))]
|
||||
'on the {model_name} to include Extra Variables.').format(
|
||||
list_of_keys=six.text_type(', ').join([six.text_type(key) for key in extra_vars.keys()]),
|
||||
model_name=self._meta.verbose_name.title())]
|
||||
|
||||
return (accepted, rejected, errors)
|
||||
|
||||
|
||||
@@ -254,10 +254,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
on_delete=models.CASCADE,
|
||||
related_name='projects',
|
||||
)
|
||||
scm_delete_on_next_update = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
)
|
||||
scm_update_on_launch = models.BooleanField(
|
||||
default=False,
|
||||
help_text=_('Update the project when a job is launched that uses the project.'),
|
||||
@@ -331,13 +327,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
skip_update = bool(kwargs.pop('skip_update', False))
|
||||
# Check if scm_type or scm_url changes.
|
||||
if self.pk:
|
||||
project_before = self.__class__.objects.get(pk=self.pk)
|
||||
if project_before.scm_type != self.scm_type or project_before.scm_url != self.scm_url:
|
||||
self.scm_delete_on_next_update = True
|
||||
if 'scm_delete_on_next_update' not in update_fields:
|
||||
update_fields.append('scm_delete_on_next_update')
|
||||
# Create auto-generated local path if project uses SCM.
|
||||
if self.pk and self.scm_type and not self.local_path.startswith('_'):
|
||||
slug_name = slugify(six.text_type(self.name)).replace(u'-', u'_')
|
||||
@@ -397,19 +386,6 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
def _can_update(self):
|
||||
return bool(self.scm_type)
|
||||
|
||||
def _update_unified_job_kwargs(self, create_kwargs, kwargs):
|
||||
'''
|
||||
:param create_kwargs: key-worded arguments to be updated and later used for creating unified job.
|
||||
:type create_kwargs: dict
|
||||
:param kwargs: request parameters used to override unified job template fields with runtime values.
|
||||
:type kwargs: dict
|
||||
:return: modified create_kwargs.
|
||||
:rtype: dict
|
||||
'''
|
||||
if self.scm_delete_on_next_update:
|
||||
create_kwargs['scm_delete_on_update'] = True
|
||||
return create_kwargs
|
||||
|
||||
def create_project_update(self, **kwargs):
|
||||
return self.create_unified_job(**kwargs)
|
||||
|
||||
@@ -466,6 +442,14 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
|
||||
models.Q(ProjectUpdate___project=self)
|
||||
)
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
path_to_delete = self.get_project_path(check_if_exists=False)
|
||||
r = super(Project, self).delete(*args, **kwargs)
|
||||
if self.scm_type and path_to_delete: # non-manual, concrete path
|
||||
from awx.main.tasks import delete_project_files
|
||||
delete_project_files.delay(path_to_delete)
|
||||
return r
|
||||
|
||||
|
||||
class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManagerProjectUpdateMixin):
|
||||
'''
|
||||
@@ -488,10 +472,24 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
default='check',
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'project'
|
||||
|
||||
def _update_parent_instance(self):
|
||||
if not self.project:
|
||||
return # no parent instance to update
|
||||
if self.job_type == PERM_INVENTORY_DEPLOY:
|
||||
# Do not update project status if this is sync job
|
||||
# unless no other updates have happened or started
|
||||
first_update = False
|
||||
if self.project.status == 'never updated' and self.status == 'running':
|
||||
first_update = True
|
||||
elif self.project.current_job == self:
|
||||
first_update = True
|
||||
if not first_update:
|
||||
return
|
||||
return super(ProjectUpdate, self)._update_parent_instance()
|
||||
|
||||
@classmethod
|
||||
def _get_task_class(cls):
|
||||
from awx.main.tasks import RunProjectUpdate
|
||||
@@ -542,17 +540,6 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin, TaskManage
|
||||
def get_ui_url(self):
|
||||
return urlparse.urljoin(settings.TOWER_URL_BASE, "/#/jobs/project/{}".format(self.pk))
|
||||
|
||||
def _update_parent_instance(self):
|
||||
parent_instance = self._get_parent_instance()
|
||||
if parent_instance and self.job_type == 'check':
|
||||
update_fields = self._update_parent_instance_no_save(parent_instance)
|
||||
if self.status in ('successful', 'failed', 'error', 'canceled'):
|
||||
if not self.failed and parent_instance.scm_delete_on_next_update:
|
||||
parent_instance.scm_delete_on_next_update = False
|
||||
if 'scm_delete_on_next_update' not in update_fields:
|
||||
update_fields.append('scm_delete_on_next_update')
|
||||
parent_instance.save(update_fields=update_fields)
|
||||
|
||||
def cancel(self, job_explanation=None, is_chain=False):
|
||||
res = super(ProjectUpdate, self).cancel(job_explanation=job_explanation, is_chain=is_chain)
|
||||
if res and self.launch_type != 'sync':
|
||||
|
||||
@@ -153,7 +153,7 @@ class Schedule(CommonModel, LaunchTimeConfig):
|
||||
if 'until=' in rrule.lower():
|
||||
# if DTSTART;TZID= is used, coerce "naive" UNTIL values
|
||||
# to the proper UTC date
|
||||
match_until = re.match(".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
|
||||
match_until = re.match(r".*?(?P<until>UNTIL\=[0-9]+T[0-9]+)(?P<utcflag>Z?)", rrule)
|
||||
if not len(match_until.group('utcflag')):
|
||||
# rrule = DTSTART;TZID=America/New_York:20200601T120000 RRULE:...;UNTIL=20200601T170000
|
||||
|
||||
|
||||
@@ -7,9 +7,11 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import subprocess
|
||||
import tempfile
|
||||
from collections import OrderedDict
|
||||
import six
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -27,18 +29,22 @@ from rest_framework.exceptions import ParseError
|
||||
# Django-Polymorphic
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
# Django-Celery
|
||||
from djcelery.models import TaskMeta
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import * # noqa
|
||||
from awx.main.models.base import (
|
||||
CommonModelNameNotUnique,
|
||||
PasswordFieldsModel,
|
||||
NotificationFieldsModel,
|
||||
prevent_search
|
||||
)
|
||||
from awx.main.dispatch.control import Control as ControlDispatcher
|
||||
from awx.main.registrar import activity_stream_registrar
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin
|
||||
from awx.main.utils import (
|
||||
encrypt_dict, decrypt_field, _inventory_updates,
|
||||
copy_model_by_class, copy_m2m_relationships,
|
||||
get_type_for_model, parse_yaml_or_json, getattr_dne
|
||||
)
|
||||
from awx.main.utils import polymorphic
|
||||
from awx.main.utils import polymorphic, schedule_task_manager
|
||||
from awx.main.constants import ACTIVE_STATES, CAN_CANCEL
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
@@ -309,19 +315,13 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
'''
|
||||
raise NotImplementedError # Implement in subclass.
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
'''
|
||||
Return field names that should be copied from template to new job.
|
||||
'''
|
||||
raise NotImplementedError # Implement in subclass.
|
||||
|
||||
@property
|
||||
def notification_templates(self):
|
||||
'''
|
||||
Return notification_templates relevant to this Unified Job Template
|
||||
'''
|
||||
# NOTE: Derived classes should implement
|
||||
from awx.main.models.notifications import NotificationTemplate
|
||||
return NotificationTemplate.objects.none()
|
||||
|
||||
def create_unified_job(self, **kwargs):
|
||||
@@ -338,19 +338,33 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
|
||||
unified_job_class = self._get_unified_job_class()
|
||||
fields = self._get_unified_job_field_names()
|
||||
unallowed_fields = set(kwargs.keys()) - set(fields)
|
||||
if unallowed_fields:
|
||||
logger.warn('Fields {} are not allowed as overrides.'.format(unallowed_fields))
|
||||
map(kwargs.pop, unallowed_fields)
|
||||
parent_field_name = None
|
||||
if "_unified_job_class" in kwargs:
|
||||
# Special case where spawned job is different type than usual
|
||||
# Only used for slice jobs
|
||||
unified_job_class = kwargs.pop("_unified_job_class")
|
||||
fields = unified_job_class._get_unified_job_field_names() & fields
|
||||
parent_field_name = kwargs.pop('_parent_field_name')
|
||||
|
||||
unified_job = copy_model_by_class(self, unified_job_class, fields, kwargs)
|
||||
unallowed_fields = set(kwargs.keys()) - set(fields)
|
||||
validated_kwargs = kwargs.copy()
|
||||
if unallowed_fields:
|
||||
if parent_field_name is None:
|
||||
logger.warn(six.text_type('Fields {} are not allowed as overrides to spawn from {}.').format(
|
||||
six.text_type(', ').join(unallowed_fields), self
|
||||
))
|
||||
map(validated_kwargs.pop, unallowed_fields)
|
||||
|
||||
unified_job = copy_model_by_class(self, unified_job_class, fields, validated_kwargs)
|
||||
|
||||
if eager_fields:
|
||||
for fd, val in eager_fields.items():
|
||||
setattr(unified_job, fd, val)
|
||||
|
||||
# Set the unified job template back-link on the job
|
||||
parent_field_name = unified_job_class._get_parent_field_name()
|
||||
# NOTE: slice workflow jobs _get_parent_field_name method
|
||||
# is not correct until this is set
|
||||
if not parent_field_name:
|
||||
parent_field_name = unified_job._get_parent_field_name()
|
||||
setattr(unified_job, parent_field_name, self)
|
||||
|
||||
# For JobTemplate-based jobs with surveys, add passwords to list for perma-redaction
|
||||
@@ -361,27 +375,37 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
unified_job.survey_passwords = new_job_passwords
|
||||
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
|
||||
|
||||
unified_job.save()
|
||||
from awx.main.signals import disable_activity_stream, activity_stream_create
|
||||
with disable_activity_stream():
|
||||
# Don't emit the activity stream record here for creation,
|
||||
# because we haven't attached important M2M relations yet, like
|
||||
# credentials and labels
|
||||
unified_job.save()
|
||||
|
||||
# Labels and credentials copied here
|
||||
if kwargs.get('credentials'):
|
||||
if validated_kwargs.get('credentials'):
|
||||
Credential = UnifiedJob._meta.get_field('credentials').related_model
|
||||
cred_dict = Credential.unique_dict(self.credentials.all())
|
||||
prompted_dict = Credential.unique_dict(kwargs['credentials'])
|
||||
prompted_dict = Credential.unique_dict(validated_kwargs['credentials'])
|
||||
# combine prompted credentials with JT
|
||||
cred_dict.update(prompted_dict)
|
||||
kwargs['credentials'] = [cred for cred in cred_dict.values()]
|
||||
validated_kwargs['credentials'] = [cred for cred in cred_dict.values()]
|
||||
kwargs['credentials'] = validated_kwargs['credentials']
|
||||
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
copy_m2m_relationships(self, unified_job, fields, kwargs=kwargs)
|
||||
copy_m2m_relationships(self, unified_job, fields, kwargs=validated_kwargs)
|
||||
|
||||
if 'extra_vars' in kwargs:
|
||||
unified_job.handle_extra_data(kwargs['extra_vars'])
|
||||
if 'extra_vars' in validated_kwargs:
|
||||
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
|
||||
|
||||
if not getattr(self, '_deprecated_credential_launch', False):
|
||||
# Create record of provided prompts for relaunch and rescheduling
|
||||
unified_job.create_config_from_prompts(kwargs)
|
||||
unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||
|
||||
# manually issue the create activity stream entry _after_ M2M relations
|
||||
# have been associated to the UJ
|
||||
if unified_job.__class__ in activity_stream_registrar.models:
|
||||
activity_stream_create(None, unified_job, True)
|
||||
|
||||
return unified_job
|
||||
|
||||
@@ -546,7 +570,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
default=None,
|
||||
editable=False,
|
||||
related_name='%(class)s_unified_jobs',
|
||||
on_delete=models.SET_NULL,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
)
|
||||
launch_type = models.CharField(
|
||||
max_length=20,
|
||||
@@ -673,9 +697,9 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
real_instance = self.get_real_instance()
|
||||
if real_instance != self:
|
||||
return real_instance.get_absolute_url(request=request)
|
||||
RealClass = self.get_real_instance_class()
|
||||
if RealClass != UnifiedJob:
|
||||
return RealClass.get_absolute_url(RealClass(pk=self.pk), request=request)
|
||||
else:
|
||||
return ''
|
||||
|
||||
@@ -694,8 +718,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
def supports_isolation(cls):
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
return 'unified_job_template' # Override in subclasses.
|
||||
|
||||
@classmethod
|
||||
@@ -828,7 +851,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
'''
|
||||
unified_job_class = self.__class__
|
||||
unified_jt_class = self._get_unified_job_template_class()
|
||||
parent_field_name = unified_job_class._get_parent_field_name()
|
||||
parent_field_name = self._get_parent_field_name()
|
||||
fields = unified_jt_class._get_unified_job_field_names() | set([parent_field_name])
|
||||
|
||||
create_data = {}
|
||||
@@ -847,10 +870,10 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
setattr(unified_job, fd, val)
|
||||
unified_job.save()
|
||||
|
||||
# Labels copied here
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
copy_m2m_relationships(self, unified_job, fields)
|
||||
# Labels copied here
|
||||
from awx.main.signals import disable_activity_stream
|
||||
with disable_activity_stream():
|
||||
copy_m2m_relationships(self, unified_job, fields)
|
||||
|
||||
return unified_job
|
||||
|
||||
@@ -866,16 +889,18 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
except JobLaunchConfig.DoesNotExist:
|
||||
return None
|
||||
|
||||
def create_config_from_prompts(self, kwargs):
|
||||
def create_config_from_prompts(self, kwargs, parent=None):
|
||||
'''
|
||||
Create a launch configuration entry for this job, given prompts
|
||||
returns None if it can not be created
|
||||
'''
|
||||
if self.unified_job_template is None:
|
||||
return None
|
||||
JobLaunchConfig = self._meta.get_field('launch_config').related_model
|
||||
config = JobLaunchConfig(job=self)
|
||||
valid_fields = self.unified_job_template.get_ask_mapping().keys()
|
||||
if parent is None:
|
||||
parent = getattr(self, self._get_parent_field_name())
|
||||
if parent is None:
|
||||
return
|
||||
valid_fields = parent.get_ask_mapping().keys()
|
||||
# Special cases allowed for workflows
|
||||
if hasattr(self, 'extra_vars'):
|
||||
valid_fields.extend(['survey_passwords', 'extra_vars'])
|
||||
@@ -892,8 +917,9 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
setattr(config, key, value)
|
||||
config.save()
|
||||
|
||||
job_creds = (set(kwargs.get('credentials', [])) -
|
||||
set(self.unified_job_template.credentials.all()))
|
||||
job_creds = set(kwargs.get('credentials', []))
|
||||
if 'credentials' in [field.name for field in parent._meta.get_fields()]:
|
||||
job_creds = job_creds - set(parent.credentials.all())
|
||||
if job_creds:
|
||||
config.credentials.add(*job_creds)
|
||||
return config
|
||||
@@ -1112,14 +1138,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
pass
|
||||
return None
|
||||
|
||||
@property
|
||||
def celery_task(self):
|
||||
try:
|
||||
if self.celery_task_id:
|
||||
return TaskMeta.objects.get(task_id=self.celery_task_id)
|
||||
except TaskMeta.DoesNotExist:
|
||||
pass
|
||||
|
||||
def get_passwords_needed_to_start(self):
|
||||
return []
|
||||
|
||||
@@ -1224,29 +1242,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
|
||||
return (True, opts)
|
||||
|
||||
def start_celery_task(self, opts, error_callback, success_callback, queue):
|
||||
kwargs = {
|
||||
'link_error': error_callback,
|
||||
'link': success_callback,
|
||||
'queue': None,
|
||||
'task_id': None,
|
||||
}
|
||||
if not self.celery_task_id:
|
||||
raise RuntimeError("Expected celery_task_id to be set on model.")
|
||||
kwargs['task_id'] = self.celery_task_id
|
||||
task_class = self._get_task_class()
|
||||
kwargs['queue'] = queue
|
||||
task_class().apply_async([self.pk], opts, **kwargs)
|
||||
|
||||
def start(self, error_callback, success_callback, **kwargs):
|
||||
'''
|
||||
Start the task running via Celery.
|
||||
'''
|
||||
(res, opts) = self.pre_start(**kwargs)
|
||||
if res:
|
||||
self.start_celery_task(opts, error_callback, success_callback)
|
||||
return res
|
||||
|
||||
def signal_start(self, **kwargs):
|
||||
"""Notify the task runner system to begin work on this task."""
|
||||
|
||||
@@ -1266,8 +1261,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
self.update_fields(start_args=json.dumps(kwargs), status='pending')
|
||||
self.websocket_emit_status("pending")
|
||||
|
||||
from awx.main.scheduler.tasks import run_job_launch
|
||||
connection.on_commit(lambda: run_job_launch.delay(self.id))
|
||||
schedule_task_manager()
|
||||
|
||||
# Each type of unified job has a different Task class; get the
|
||||
# appropirate one.
|
||||
@@ -1282,46 +1276,35 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
# Done!
|
||||
return True
|
||||
|
||||
|
||||
@property
|
||||
def actually_running(self):
|
||||
# returns True if the job is running in the appropriate dispatcher process
|
||||
running = False
|
||||
if all([
|
||||
self.status == 'running',
|
||||
self.celery_task_id,
|
||||
self.execution_node
|
||||
]):
|
||||
# If the job is marked as running, but the dispatcher
|
||||
# doesn't know about it (or the dispatcher doesn't reply),
|
||||
# then cancel the job
|
||||
timeout = 5
|
||||
try:
|
||||
running = self.celery_task_id in ControlDispatcher(
|
||||
'dispatcher', self.execution_node
|
||||
).running(timeout=timeout)
|
||||
except socket.timeout:
|
||||
logger.error(six.text_type(
|
||||
'could not reach dispatcher on {} within {}s'
|
||||
).format(self.execution_node, timeout))
|
||||
running = False
|
||||
return running
|
||||
|
||||
@property
|
||||
def can_cancel(self):
|
||||
return bool(self.status in CAN_CANCEL)
|
||||
|
||||
def _force_cancel(self):
|
||||
# Update the status to 'canceled' if we can detect that the job
|
||||
# really isn't running (i.e. celery has crashed or forcefully
|
||||
# killed the worker).
|
||||
task_statuses = ('STARTED', 'SUCCESS', 'FAILED', 'RETRY', 'REVOKED')
|
||||
try:
|
||||
taskmeta = self.celery_task
|
||||
if not taskmeta or taskmeta.status not in task_statuses:
|
||||
return
|
||||
from celery import current_app
|
||||
i = current_app.control.inspect()
|
||||
for v in (i.active() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
for v in (i.reserved() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
for v in (i.revoked() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
for v in (i.scheduled() or {}).values():
|
||||
if taskmeta.task_id in [x['id'] for x in v]:
|
||||
return
|
||||
instance = self.__class__.objects.get(pk=self.pk)
|
||||
if instance.can_cancel:
|
||||
instance.status = 'canceled'
|
||||
update_fields = ['status']
|
||||
if not instance.job_explanation:
|
||||
instance.job_explanation = 'Forced cancel'
|
||||
update_fields.append('job_explanation')
|
||||
instance.save(update_fields=update_fields)
|
||||
self.websocket_emit_status("canceled")
|
||||
except Exception: # FIXME: Log this exception!
|
||||
if settings.DEBUG:
|
||||
raise
|
||||
|
||||
def _build_job_explanation(self):
|
||||
if not self.job_explanation:
|
||||
return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
|
||||
@@ -1340,13 +1323,14 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
if self.status in ('pending', 'waiting', 'new'):
|
||||
self.status = 'canceled'
|
||||
cancel_fields.append('status')
|
||||
if self.status == 'running' and not self.actually_running:
|
||||
self.status = 'canceled'
|
||||
cancel_fields.append('status')
|
||||
if job_explanation is not None:
|
||||
self.job_explanation = job_explanation
|
||||
cancel_fields.append('job_explanation')
|
||||
self.save(update_fields=cancel_fields)
|
||||
self.websocket_emit_status("canceled")
|
||||
if settings.BROKER_URL.startswith('amqp://'):
|
||||
self._force_cancel()
|
||||
return self.cancel_flag
|
||||
|
||||
@property
|
||||
@@ -1402,7 +1386,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
r['{}_user_last_name'.format(name)] = created_by.last_name
|
||||
return r
|
||||
|
||||
def get_celery_queue_name(self):
|
||||
def get_queue_name(self):
|
||||
return self.controller_node or self.execution_node or settings.CELERY_DEFAULT_QUEUE
|
||||
|
||||
def is_isolated(self):
|
||||
|
||||
@@ -9,6 +9,7 @@ import logging
|
||||
from django.db import models
|
||||
from django.conf import settings
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
#from django import settings as tower_settings
|
||||
|
||||
# AWX
|
||||
@@ -23,14 +24,14 @@ from awx.main.models.rbac import (
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR
|
||||
)
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.fields import ImplicitRoleField, AskForField
|
||||
from awx.main.models.mixins import (
|
||||
ResourceMixin,
|
||||
SurveyJobTemplateMixin,
|
||||
SurveyJobMixin,
|
||||
RelatedJobsMixin,
|
||||
)
|
||||
from awx.main.models.jobs import LaunchTimeConfig
|
||||
from awx.main.models.jobs import LaunchTimeConfigBase, LaunchTimeConfig, JobTemplate
|
||||
from awx.main.models.credential import Credential
|
||||
from awx.main.redact import REPLACE_STR
|
||||
from awx.main.fields import JSONField
|
||||
@@ -81,7 +82,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
|
||||
success_parents = getattr(self, '%ss_success' % self.__class__.__name__.lower()).all()
|
||||
failure_parents = getattr(self, '%ss_failure' % self.__class__.__name__.lower()).all()
|
||||
always_parents = getattr(self, '%ss_always' % self.__class__.__name__.lower()).all()
|
||||
return success_parents | failure_parents | always_parents
|
||||
return (success_parents | failure_parents | always_parents).order_by('id')
|
||||
|
||||
@classmethod
|
||||
def _get_workflow_job_field_names(cls):
|
||||
@@ -183,10 +184,26 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
default={},
|
||||
editable=False,
|
||||
)
|
||||
do_not_run = models.BooleanField(
|
||||
default=False,
|
||||
help_text=_("Indidcates that a job will not be created when True. Workflow runtime "
|
||||
"semantics will mark this True if the node is in a path that will "
|
||||
"decidedly not be ran. A value of False means the node may not run."),
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def prompts_dict(self, *args, **kwargs):
|
||||
r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs)
|
||||
# Explanation - WFJT extra_vars still break pattern, so they are not
|
||||
# put through prompts processing, but inventory is only accepted
|
||||
# if JT prompts for it, so it goes through this mechanism
|
||||
if self.workflow_job and self.workflow_job.inventory_id:
|
||||
# workflow job inventory takes precedence
|
||||
r['inventory'] = self.workflow_job.inventory
|
||||
return r
|
||||
|
||||
def get_job_kwargs(self):
|
||||
'''
|
||||
In advance of creating a new unified job as part of a workflow,
|
||||
@@ -198,7 +215,14 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
data = {}
|
||||
ujt_obj = self.unified_job_template
|
||||
if ujt_obj is not None:
|
||||
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**self.prompts_dict())
|
||||
# MERGE note: move this to prompts_dict method on node when merging
|
||||
# with the workflow inventory branch
|
||||
prompts_data = self.prompts_dict()
|
||||
if isinstance(ujt_obj, WorkflowJobTemplate):
|
||||
if self.workflow_job.extra_vars:
|
||||
prompts_data.setdefault('extra_vars', {})
|
||||
prompts_data['extra_vars'].update(self.workflow_job.extra_vars_dict)
|
||||
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_data)
|
||||
if errors:
|
||||
logger.info(_('Bad launch configuration starting template {template_pk} as part of '
|
||||
'workflow {workflow_pk}. Errors:\n{error_text}').format(
|
||||
@@ -206,13 +230,24 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
workflow_pk=self.pk,
|
||||
error_text=errors))
|
||||
data.update(accepted_fields) # missing fields are handled in the scheduler
|
||||
try:
|
||||
# config saved on the workflow job itself
|
||||
wj_config = self.workflow_job.launch_config
|
||||
except ObjectDoesNotExist:
|
||||
wj_config = None
|
||||
if wj_config:
|
||||
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
|
||||
accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later
|
||||
data.update(accepted_fields)
|
||||
# build ancestor artifacts, save them to node model for later
|
||||
aa_dict = {}
|
||||
is_root_node = True
|
||||
for parent_node in self.get_parent_nodes():
|
||||
is_root_node = False
|
||||
aa_dict.update(parent_node.ancestor_artifacts)
|
||||
if parent_node.job and hasattr(parent_node.job, 'artifacts'):
|
||||
aa_dict.update(parent_node.job.artifacts)
|
||||
if aa_dict:
|
||||
if aa_dict and not is_root_node:
|
||||
self.ancestor_artifacts = aa_dict
|
||||
self.save(update_fields=['ancestor_artifacts'])
|
||||
# process password list
|
||||
@@ -229,17 +264,25 @@ class WorkflowJobNode(WorkflowNodeBase):
|
||||
data['survey_passwords'] = password_dict
|
||||
# process extra_vars
|
||||
extra_vars = data.get('extra_vars', {})
|
||||
if aa_dict:
|
||||
functional_aa_dict = copy(aa_dict)
|
||||
functional_aa_dict.pop('_ansible_no_log', None)
|
||||
extra_vars.update(functional_aa_dict)
|
||||
# Workflow Job extra_vars higher precedence than ancestor artifacts
|
||||
if self.workflow_job and self.workflow_job.extra_vars:
|
||||
extra_vars.update(self.workflow_job.extra_vars_dict)
|
||||
if ujt_obj and isinstance(ujt_obj, (JobTemplate, WorkflowJobTemplate)):
|
||||
if aa_dict:
|
||||
functional_aa_dict = copy(aa_dict)
|
||||
functional_aa_dict.pop('_ansible_no_log', None)
|
||||
extra_vars.update(functional_aa_dict)
|
||||
if ujt_obj and isinstance(ujt_obj, JobTemplate):
|
||||
# Workflow Job extra_vars higher precedence than ancestor artifacts
|
||||
if self.workflow_job and self.workflow_job.extra_vars:
|
||||
extra_vars.update(self.workflow_job.extra_vars_dict)
|
||||
if extra_vars:
|
||||
data['extra_vars'] = extra_vars
|
||||
# ensure that unified jobs created by WorkflowJobs are marked
|
||||
data['_eager_fields'] = {'launch_type': 'workflow'}
|
||||
# Extra processing in the case that this is a slice job
|
||||
if 'job_slice' in self.ancestor_artifacts and is_root_node:
|
||||
data['_eager_fields']['allow_simultaneous'] = True
|
||||
data['_eager_fields']['job_slice_number'] = self.ancestor_artifacts['job_slice']
|
||||
data['_eager_fields']['job_slice_count'] = self.workflow_job.workflow_job_nodes.count()
|
||||
data['_prevent_slicing'] = True
|
||||
return data
|
||||
|
||||
|
||||
@@ -261,6 +304,13 @@ class WorkflowJobOptions(BaseModel):
|
||||
def workflow_nodes(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
|
||||
# NOTE: if other prompts are added to WFJT, put fields in WJOptions, remove inventory
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels', 'inventory']
|
||||
)
|
||||
|
||||
def _create_workflow_nodes(self, old_node_list, user=None):
|
||||
node_links = {}
|
||||
for old_node in old_node_list:
|
||||
@@ -288,7 +338,7 @@ class WorkflowJobOptions(BaseModel):
|
||||
|
||||
def create_relaunch_workflow_job(self):
|
||||
new_workflow_job = self.copy_unified_job()
|
||||
if self.workflow_job_template is None:
|
||||
if self.unified_job_template_id is None:
|
||||
new_workflow_job.copy_nodes_from_original(original=self)
|
||||
return new_workflow_job
|
||||
|
||||
@@ -310,6 +360,19 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
on_delete=models.SET_NULL,
|
||||
related_name='workflows',
|
||||
)
|
||||
inventory = models.ForeignKey(
|
||||
'Inventory',
|
||||
related_name='%(class)ss',
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
help_text=_('Inventory applied to all job templates in workflow that prompt for inventory.'),
|
||||
)
|
||||
ask_inventory_on_launch = AskForField(
|
||||
blank=True,
|
||||
default=False,
|
||||
)
|
||||
admin_role = ImplicitRoleField(parent_role=[
|
||||
'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
'organization.workflow_admin_role'
|
||||
@@ -331,12 +394,6 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
def _get_unified_job_class(cls):
|
||||
return WorkflowJob
|
||||
|
||||
@classmethod
|
||||
def _get_unified_job_field_names(cls):
|
||||
return set(f.name for f in WorkflowJobOptions._meta.fields) | set(
|
||||
['name', 'description', 'schedule', 'survey_passwords', 'labels']
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_unified_jt_copy_names(cls):
|
||||
base_list = super(WorkflowJobTemplate, cls)._get_unified_jt_copy_names()
|
||||
@@ -370,27 +427,45 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
workflow_job.copy_nodes_from_original(original=self)
|
||||
return workflow_job
|
||||
|
||||
def _accept_or_ignore_job_kwargs(self, _exclude_errors=(), **kwargs):
|
||||
def _accept_or_ignore_job_kwargs(self, **kwargs):
|
||||
exclude_errors = kwargs.pop('_exclude_errors', [])
|
||||
prompted_data = {}
|
||||
rejected_data = {}
|
||||
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(
|
||||
kwargs.get('extra_vars', {}),
|
||||
_exclude_errors=exclude_errors,
|
||||
extra_passwords=kwargs.get('survey_passwords', {}))
|
||||
if accepted_vars:
|
||||
prompted_data['extra_vars'] = accepted_vars
|
||||
if rejected_vars:
|
||||
rejected_data['extra_vars'] = rejected_vars
|
||||
errors_dict = {}
|
||||
|
||||
# WFJTs do not behave like JTs, it can not accept inventory, credential, etc.
|
||||
bad_kwargs = kwargs.copy()
|
||||
bad_kwargs.pop('extra_vars', None)
|
||||
bad_kwargs.pop('survey_passwords', None)
|
||||
if bad_kwargs:
|
||||
rejected_data.update(bad_kwargs)
|
||||
for field in bad_kwargs:
|
||||
errors_dict[field] = _('Field is not allowed for use in workflows.')
|
||||
# Handle all the fields that have prompting rules
|
||||
# NOTE: If WFJTs prompt for other things, this logic can be combined with jobs
|
||||
for field_name, ask_field_name in self.get_ask_mapping().items():
|
||||
|
||||
if field_name == 'extra_vars':
|
||||
accepted_vars, rejected_vars, vars_errors = self.accept_or_ignore_variables(
|
||||
kwargs.get('extra_vars', {}),
|
||||
_exclude_errors=exclude_errors,
|
||||
extra_passwords=kwargs.get('survey_passwords', {}))
|
||||
if accepted_vars:
|
||||
prompted_data['extra_vars'] = accepted_vars
|
||||
if rejected_vars:
|
||||
rejected_data['extra_vars'] = rejected_vars
|
||||
errors_dict.update(vars_errors)
|
||||
continue
|
||||
|
||||
if field_name not in kwargs:
|
||||
continue
|
||||
new_value = kwargs[field_name]
|
||||
old_value = getattr(self, field_name)
|
||||
|
||||
if new_value == old_value:
|
||||
continue # no-op case: Counted as neither accepted or ignored
|
||||
elif getattr(self, ask_field_name):
|
||||
# accepted prompt
|
||||
prompted_data[field_name] = new_value
|
||||
else:
|
||||
# unprompted - template is not configured to accept field on launch
|
||||
rejected_data[field_name] = new_value
|
||||
# Not considered an error for manual launch, to support old
|
||||
# behavior of putting them in ignored_fields and launching anyway
|
||||
if 'prompts' not in exclude_errors:
|
||||
errors_dict[field_name] = _('Field is not configured to prompt on launch.').format(field_name=field_name)
|
||||
|
||||
return prompted_data, rejected_data, errors_dict
|
||||
|
||||
@@ -420,7 +495,7 @@ class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTempl
|
||||
return WorkflowJob.objects.filter(workflow_job_template=self)
|
||||
|
||||
|
||||
class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin):
|
||||
class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificationMixin, LaunchTimeConfigBase):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('id',)
|
||||
@@ -433,13 +508,28 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
job_template = models.ForeignKey(
|
||||
'JobTemplate',
|
||||
related_name='slice_workflow_jobs',
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
help_text=_("If automatically created for a sliced job run, the job template "
|
||||
"the workflow job was created from."),
|
||||
)
|
||||
is_sliced_job = models.BooleanField(
|
||||
default=False
|
||||
)
|
||||
|
||||
@property
|
||||
def workflow_nodes(self):
|
||||
return self.workflow_job_nodes
|
||||
|
||||
@classmethod
|
||||
def _get_parent_field_name(cls):
|
||||
def _get_parent_field_name(self):
|
||||
if self.job_template_id:
|
||||
# This is a workflow job which is a container for slice jobs
|
||||
return 'job_template'
|
||||
return 'workflow_job_template'
|
||||
|
||||
@classmethod
|
||||
@@ -472,6 +562,24 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
def task_impact(self):
|
||||
return 0
|
||||
|
||||
def get_ancestor_workflows(self):
|
||||
"""Returns a list of WFJTs that are indirect parents of this workflow job
|
||||
say WFJTs are set up to spawn in order of A->B->C, and this workflow job
|
||||
came from C, then C is the parent and [B, A] will be returned from this.
|
||||
"""
|
||||
ancestors = []
|
||||
wj_ids = set([self.pk])
|
||||
wj = self.get_workflow_job()
|
||||
while wj and wj.workflow_job_template_id:
|
||||
if wj.pk in wj_ids:
|
||||
logger.critical('Cycles detected in the workflow jobs graph, '
|
||||
'this is not normal and suggests task manager degeneracy.')
|
||||
break
|
||||
wj_ids.add(wj.pk)
|
||||
ancestors.append(wj.workflow_job_template)
|
||||
wj = wj.get_workflow_job()
|
||||
return ancestors
|
||||
|
||||
def get_notification_templates(self):
|
||||
return self.workflow_job_template.notification_templates
|
||||
|
||||
@@ -482,8 +590,8 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
|
||||
def preferred_instance_groups(self):
|
||||
return []
|
||||
|
||||
'''
|
||||
A WorkflowJob is a virtual job. It doesn't result in a celery task.
|
||||
'''
|
||||
def start_celery_task(self, opts, error_callback, success_callback, queue):
|
||||
return None
|
||||
@property
|
||||
def actually_running(self):
|
||||
# WorkflowJobs don't _actually_ run anything in the dispatcher, so
|
||||
# there's no point in asking the dispatcher if it knows about this task
|
||||
return self.status == 'running'
|
||||
|
||||
@@ -37,6 +37,7 @@ class SlackBackend(AWXBaseEmailBackend):
|
||||
if self.color:
|
||||
ret = connection.api_call("chat.postMessage",
|
||||
channel=r,
|
||||
as_user=True,
|
||||
attachments=[{
|
||||
"color": self.color,
|
||||
"text": m.subject
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
@@ -12,10 +13,31 @@ from django.conf import settings
|
||||
|
||||
# Kombu
|
||||
from kombu import Connection, Exchange, Producer
|
||||
from kombu.serialization import registry
|
||||
|
||||
__all__ = ['CallbackQueueDispatcher']
|
||||
|
||||
|
||||
# use a custom JSON serializer so we can properly handle !unsafe and !vault
|
||||
# objects that may exist in events emitted by the callback plugin
|
||||
# see: https://github.com/ansible/ansible/pull/38759
|
||||
class AnsibleJSONEncoder(json.JSONEncoder):
|
||||
|
||||
def default(self, o):
|
||||
if getattr(o, 'yaml_tag', None) == '!vault':
|
||||
return o.data
|
||||
return super(AnsibleJSONEncoder, self).default(o)
|
||||
|
||||
|
||||
registry.register(
|
||||
'json-ansible',
|
||||
lambda obj: json.dumps(obj, cls=AnsibleJSONEncoder),
|
||||
lambda obj: json.loads(obj),
|
||||
content_type='application/json',
|
||||
content_encoding='utf-8'
|
||||
)
|
||||
|
||||
|
||||
class CallbackQueueDispatcher(object):
|
||||
|
||||
def __init__(self):
|
||||
@@ -41,7 +63,7 @@ class CallbackQueueDispatcher(object):
|
||||
|
||||
producer = Producer(self.connection)
|
||||
producer.publish(obj,
|
||||
serializer='json',
|
||||
serializer='json-ansible',
|
||||
compression='bzip2',
|
||||
exchange=self.exchange,
|
||||
declare=[self.exchange],
|
||||
|
||||
@@ -1,11 +1,4 @@
|
||||
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
AdHocCommand,
|
||||
InventoryUpdate,
|
||||
ProjectUpdate,
|
||||
WorkflowJob,
|
||||
)
|
||||
from collections import deque
|
||||
|
||||
|
||||
class SimpleDAG(object):
|
||||
@@ -13,12 +6,51 @@ class SimpleDAG(object):
|
||||
|
||||
def __init__(self):
|
||||
self.nodes = []
|
||||
self.edges = []
|
||||
self.root_nodes = set([])
|
||||
|
||||
r'''
|
||||
Track node_obj->node index
|
||||
dict where key is a full workflow node object or whatever we are
|
||||
storing in ['node_object'] and value is an index to be used into
|
||||
self.nodes
|
||||
'''
|
||||
self.node_obj_to_node_index = dict()
|
||||
|
||||
r'''
|
||||
Track per-node from->to edges
|
||||
|
||||
i.e.
|
||||
{
|
||||
'success': {
|
||||
1: [2, 3],
|
||||
4: [2, 3],
|
||||
},
|
||||
'failed': {
|
||||
1: [5],
|
||||
}
|
||||
}
|
||||
'''
|
||||
self.node_from_edges_by_label = dict()
|
||||
|
||||
r'''
|
||||
Track per-node reverse relationship (child to parent)
|
||||
|
||||
i.e.
|
||||
{
|
||||
'success': {
|
||||
2: [1, 4],
|
||||
3: [1, 4],
|
||||
},
|
||||
'failed': {
|
||||
5: [1],
|
||||
}
|
||||
}
|
||||
'''
|
||||
self.node_to_edges_by_label = dict()
|
||||
|
||||
def __contains__(self, obj):
|
||||
for node in self.nodes:
|
||||
if node['node_object'] == obj:
|
||||
return True
|
||||
if self.node['node_object'] in self.node_obj_to_node_index:
|
||||
return True
|
||||
return False
|
||||
|
||||
def __len__(self):
|
||||
@@ -27,98 +59,169 @@ class SimpleDAG(object):
|
||||
def __iter__(self):
|
||||
return self.nodes.__iter__()
|
||||
|
||||
def generate_graphviz_plot(self):
|
||||
def short_string_obj(obj):
|
||||
if type(obj) == Job:
|
||||
type_str = "Job"
|
||||
if type(obj) == AdHocCommand:
|
||||
type_str = "AdHocCommand"
|
||||
elif type(obj) == InventoryUpdate:
|
||||
type_str = "Inventory"
|
||||
elif type(obj) == ProjectUpdate:
|
||||
type_str = "Project"
|
||||
elif type(obj) == WorkflowJob:
|
||||
type_str = "Workflow"
|
||||
else:
|
||||
type_str = "Unknown"
|
||||
type_str += "%s" % str(obj.id)
|
||||
return type_str
|
||||
def generate_graphviz_plot(self, file_name="/awx_devel/graph.gv"):
|
||||
def run_status(obj):
|
||||
dnr = "RUN"
|
||||
status = "NA"
|
||||
if hasattr(obj, 'job') and obj.job and hasattr(obj.job, 'status'):
|
||||
status = obj.job.status
|
||||
if hasattr(obj, 'do_not_run') and obj.do_not_run is True:
|
||||
dnr = "DNR"
|
||||
return "{}_{}_{}".format(dnr, status, obj.id)
|
||||
|
||||
doc = """
|
||||
digraph g {
|
||||
rankdir = LR
|
||||
"""
|
||||
for n in self.nodes:
|
||||
obj = n['node_object']
|
||||
status = "NA"
|
||||
if hasattr(obj, 'job') and obj.job:
|
||||
status = obj.job.status
|
||||
color = 'black'
|
||||
if status == 'successful':
|
||||
color = 'green'
|
||||
elif status == 'failed':
|
||||
color = 'red'
|
||||
elif obj.do_not_run is True:
|
||||
color = 'gray'
|
||||
doc += "%s [color = %s]\n" % (
|
||||
short_string_obj(n['node_object']),
|
||||
"red" if n['node_object'].status == 'running' else "black",
|
||||
)
|
||||
for from_node, to_node, label in self.edges:
|
||||
doc += "%s -> %s [ label=\"%s\" ];\n" % (
|
||||
short_string_obj(self.nodes[from_node]['node_object']),
|
||||
short_string_obj(self.nodes[to_node]['node_object']),
|
||||
label,
|
||||
run_status(n['node_object']),
|
||||
color
|
||||
)
|
||||
for label, edges in self.node_from_edges_by_label.iteritems():
|
||||
for from_node, to_nodes in edges.iteritems():
|
||||
for to_node in to_nodes:
|
||||
doc += "%s -> %s [ label=\"%s\" ];\n" % (
|
||||
run_status(self.nodes[from_node]['node_object']),
|
||||
run_status(self.nodes[to_node]['node_object']),
|
||||
label,
|
||||
)
|
||||
doc += "}\n"
|
||||
gv_file = open('/tmp/graph.gv', 'w')
|
||||
gv_file = open(file_name, 'w')
|
||||
gv_file.write(doc)
|
||||
gv_file.close()
|
||||
|
||||
def add_node(self, obj, metadata=None):
|
||||
if self.find_ord(obj) is None:
|
||||
self.nodes.append(dict(node_object=obj, metadata=metadata))
|
||||
'''
|
||||
Assume node is a root node until a child is added
|
||||
'''
|
||||
node_index = len(self.nodes)
|
||||
self.root_nodes.add(node_index)
|
||||
self.node_obj_to_node_index[obj] = node_index
|
||||
entry = dict(node_object=obj, metadata=metadata)
|
||||
self.nodes.append(entry)
|
||||
|
||||
def add_edge(self, from_obj, to_obj, label=None):
|
||||
def add_edge(self, from_obj, to_obj, label):
|
||||
from_obj_ord = self.find_ord(from_obj)
|
||||
to_obj_ord = self.find_ord(to_obj)
|
||||
if from_obj_ord is None or to_obj_ord is None:
|
||||
raise LookupError("Object not found")
|
||||
self.edges.append((from_obj_ord, to_obj_ord, label))
|
||||
|
||||
def add_edges(self, edgelist):
|
||||
for edge_pair in edgelist:
|
||||
self.add_edge(edge_pair[0], edge_pair[1], edge_pair[2])
|
||||
'''
|
||||
To node is no longer a root node
|
||||
'''
|
||||
self.root_nodes.discard(to_obj_ord)
|
||||
|
||||
if from_obj_ord is None and to_obj_ord is None:
|
||||
raise LookupError("From object {} and to object not found".format(from_obj, to_obj))
|
||||
elif from_obj_ord is None:
|
||||
raise LookupError("From object not found {}".format(from_obj))
|
||||
elif to_obj_ord is None:
|
||||
raise LookupError("To object not found {}".format(to_obj))
|
||||
|
||||
self.node_from_edges_by_label.setdefault(label, dict()) \
|
||||
.setdefault(from_obj_ord, [])
|
||||
self.node_to_edges_by_label.setdefault(label, dict()) \
|
||||
.setdefault(to_obj_ord, [])
|
||||
|
||||
self.node_from_edges_by_label[label][from_obj_ord].append(to_obj_ord)
|
||||
self.node_to_edges_by_label[label][to_obj_ord].append(from_obj_ord)
|
||||
|
||||
def find_ord(self, obj):
|
||||
for idx in range(len(self.nodes)):
|
||||
if obj == self.nodes[idx]['node_object']:
|
||||
return idx
|
||||
return None
|
||||
return self.node_obj_to_node_index.get(obj, None)
|
||||
|
||||
def _get_dependencies_by_label(self, node_index, label):
|
||||
return [self.nodes[index] for index in
|
||||
self.node_from_edges_by_label.get(label, {})
|
||||
.get(node_index, [])]
|
||||
|
||||
def get_dependencies(self, obj, label=None):
|
||||
antecedents = []
|
||||
this_ord = self.find_ord(obj)
|
||||
for node, dep, lbl in self.edges:
|
||||
if label:
|
||||
if node == this_ord and lbl == label:
|
||||
antecedents.append(self.nodes[dep])
|
||||
else:
|
||||
if node == this_ord:
|
||||
antecedents.append(self.nodes[dep])
|
||||
return antecedents
|
||||
nodes = []
|
||||
if label:
|
||||
return self._get_dependencies_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
map(lambda l: nodes.extend(self._get_dependencies_by_label(this_ord, l)),
|
||||
self.node_from_edges_by_label.keys())
|
||||
return nodes
|
||||
|
||||
def _get_dependents_by_label(self, node_index, label):
|
||||
return [self.nodes[index] for index in
|
||||
self.node_to_edges_by_label.get(label, {})
|
||||
.get(node_index, [])]
|
||||
|
||||
def get_dependents(self, obj, label=None):
|
||||
decendents = []
|
||||
this_ord = self.find_ord(obj)
|
||||
for node, dep, lbl in self.edges:
|
||||
if label:
|
||||
if dep == this_ord and lbl == label:
|
||||
decendents.append(self.nodes[node])
|
||||
else:
|
||||
if dep == this_ord:
|
||||
decendents.append(self.nodes[node])
|
||||
return decendents
|
||||
|
||||
def get_leaf_nodes(self):
|
||||
leafs = []
|
||||
for n in self.nodes:
|
||||
if len(self.get_dependencies(n['node_object'])) < 1:
|
||||
leafs.append(n)
|
||||
return leafs
|
||||
nodes = []
|
||||
if label:
|
||||
return self._get_dependents_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
map(lambda l: nodes.extend(self._get_dependents_by_label(this_ord, l)),
|
||||
self.node_to_edges_by_label.keys())
|
||||
return nodes
|
||||
|
||||
def get_root_nodes(self):
|
||||
roots = []
|
||||
for n in self.nodes:
|
||||
if len(self.get_dependents(n['node_object'])) < 1:
|
||||
roots.append(n)
|
||||
return roots
|
||||
return [self.nodes[index] for index in self.root_nodes]
|
||||
|
||||
def has_cycle(self):
|
||||
node_objs = [node['node_object'] for node in self.get_root_nodes()]
|
||||
node_objs_visited = set([])
|
||||
path = set([])
|
||||
stack = node_objs
|
||||
res = False
|
||||
|
||||
if len(self.nodes) != 0 and len(node_objs) == 0:
|
||||
return True
|
||||
|
||||
while stack:
|
||||
node_obj = stack.pop()
|
||||
|
||||
children = [node['node_object'] for node in self.get_dependencies(node_obj)]
|
||||
children_to_add = filter(lambda node_obj: node_obj not in node_objs_visited, children)
|
||||
|
||||
if children_to_add:
|
||||
if node_obj in path:
|
||||
res = True
|
||||
break
|
||||
path.add(node_obj)
|
||||
stack.append(node_obj)
|
||||
stack.extend(children_to_add)
|
||||
else:
|
||||
node_objs_visited.add(node_obj)
|
||||
path.discard(node_obj)
|
||||
return res
|
||||
|
||||
def sort_nodes_topological(self):
|
||||
nodes_sorted = deque()
|
||||
obj_ids_processed = set([])
|
||||
|
||||
def visit(node):
|
||||
obj = node['node_object']
|
||||
if obj.id in obj_ids_processed:
|
||||
return
|
||||
|
||||
for child in self.get_dependencies(obj):
|
||||
visit(child)
|
||||
obj_ids_processed.add(obj.id)
|
||||
nodes_sorted.appendleft(node)
|
||||
|
||||
for node in self.nodes:
|
||||
obj = node['node_object']
|
||||
if obj.id in obj_ids_processed:
|
||||
continue
|
||||
|
||||
visit(node)
|
||||
|
||||
return nodes_sorted
|
||||
|
||||
@@ -1,4 +1,13 @@
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.encoding import smart_text
|
||||
|
||||
# Python
|
||||
from awx.main.models import (
|
||||
WorkflowJobTemplateNode,
|
||||
WorkflowJobNode,
|
||||
)
|
||||
|
||||
# AWX
|
||||
from awx.main.scheduler.dag_simple import SimpleDAG
|
||||
|
||||
@@ -9,47 +18,88 @@ class WorkflowDAG(SimpleDAG):
|
||||
if workflow_job:
|
||||
self._init_graph(workflow_job)
|
||||
|
||||
def _init_graph(self, workflow_job):
|
||||
node_qs = workflow_job.workflow_job_nodes
|
||||
workflow_nodes = node_qs.prefetch_related('success_nodes', 'failure_nodes', 'always_nodes').all()
|
||||
for workflow_node in workflow_nodes:
|
||||
def _init_graph(self, workflow_job_or_jt):
|
||||
if hasattr(workflow_job_or_jt, 'workflow_job_template_nodes'):
|
||||
vals = ['from_workflowjobtemplatenode_id', 'to_workflowjobtemplatenode_id']
|
||||
filters = {
|
||||
'from_workflowjobtemplatenode__workflow_job_template_id': workflow_job_or_jt.id
|
||||
}
|
||||
workflow_nodes = workflow_job_or_jt.workflow_job_template_nodes
|
||||
success_nodes = WorkflowJobTemplateNode.success_nodes.through.objects.filter(**filters).values_list(*vals)
|
||||
failure_nodes = WorkflowJobTemplateNode.failure_nodes.through.objects.filter(**filters).values_list(*vals)
|
||||
always_nodes = WorkflowJobTemplateNode.always_nodes.through.objects.filter(**filters).values_list(*vals)
|
||||
elif hasattr(workflow_job_or_jt, 'workflow_job_nodes'):
|
||||
vals = ['from_workflowjobnode_id', 'to_workflowjobnode_id']
|
||||
filters = {
|
||||
'from_workflowjobnode__workflow_job_id': workflow_job_or_jt.id
|
||||
}
|
||||
workflow_nodes = workflow_job_or_jt.workflow_job_nodes
|
||||
success_nodes = WorkflowJobNode.success_nodes.through.objects.filter(**filters).values_list(*vals)
|
||||
failure_nodes = WorkflowJobNode.failure_nodes.through.objects.filter(**filters).values_list(*vals)
|
||||
always_nodes = WorkflowJobNode.always_nodes.through.objects.filter(**filters).values_list(*vals)
|
||||
else:
|
||||
raise RuntimeError("Unexpected object {} {}".format(type(workflow_job_or_jt), workflow_job_or_jt))
|
||||
|
||||
wfn_by_id = dict()
|
||||
|
||||
for workflow_node in workflow_nodes.all():
|
||||
wfn_by_id[workflow_node.id] = workflow_node
|
||||
self.add_node(workflow_node)
|
||||
|
||||
for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']:
|
||||
for workflow_node in workflow_nodes:
|
||||
related_nodes = getattr(workflow_node, node_type).all()
|
||||
for related_node in related_nodes:
|
||||
self.add_edge(workflow_node, related_node, node_type)
|
||||
for edge in success_nodes:
|
||||
self.add_edge(wfn_by_id[edge[0]], wfn_by_id[edge[1]], 'success_nodes')
|
||||
for edge in failure_nodes:
|
||||
self.add_edge(wfn_by_id[edge[0]], wfn_by_id[edge[1]], 'failure_nodes')
|
||||
for edge in always_nodes:
|
||||
self.add_edge(wfn_by_id[edge[0]], wfn_by_id[edge[1]], 'always_nodes')
|
||||
|
||||
def _are_relevant_parents_finished(self, node):
|
||||
obj = node['node_object']
|
||||
parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
|
||||
for p in parent_nodes:
|
||||
if p.do_not_run is True:
|
||||
continue
|
||||
elif p.unified_job_template is None:
|
||||
continue
|
||||
# do_not_run is False, node might still run a job and thus blocks children
|
||||
elif not p.job:
|
||||
return False
|
||||
# Node decidedly got a job; check if job is done
|
||||
elif p.job and p.job.status not in ['successful', 'failed', 'error', 'canceled']:
|
||||
return False
|
||||
return True
|
||||
|
||||
def bfs_nodes_to_run(self):
|
||||
root_nodes = self.get_root_nodes()
|
||||
nodes = root_nodes
|
||||
nodes = self.get_root_nodes()
|
||||
nodes_found = []
|
||||
node_ids_visited = set()
|
||||
|
||||
for index, n in enumerate(nodes):
|
||||
obj = n['node_object']
|
||||
job = obj.job
|
||||
|
||||
if not job:
|
||||
nodes_found.append(n)
|
||||
# Job is about to run or is running. Hold our horses and wait for
|
||||
# the job to finish. We can't proceed down the graph path until we
|
||||
# have the job result.
|
||||
elif job.status not in ['failed', 'successful']:
|
||||
if obj.id in node_ids_visited:
|
||||
continue
|
||||
elif job.status == 'failed':
|
||||
children_failed = self.get_dependencies(obj, 'failure_nodes')
|
||||
children_always = self.get_dependencies(obj, 'always_nodes')
|
||||
children_all = children_failed + children_always
|
||||
nodes.extend(children_all)
|
||||
elif job.status == 'successful':
|
||||
children_success = self.get_dependencies(obj, 'success_nodes')
|
||||
children_always = self.get_dependencies(obj, 'always_nodes')
|
||||
children_all = children_success + children_always
|
||||
nodes.extend(children_all)
|
||||
node_ids_visited.add(obj.id)
|
||||
|
||||
if obj.do_not_run is True:
|
||||
continue
|
||||
|
||||
if obj.job:
|
||||
if obj.job.status in ['failed', 'error', 'canceled']:
|
||||
nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
elif obj.job.status == 'successful':
|
||||
nodes.extend(self.get_dependencies(obj, 'success_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
elif obj.unified_job_template is None:
|
||||
nodes.extend(self.get_dependencies(obj, 'failure_nodes') +
|
||||
self.get_dependencies(obj, 'always_nodes'))
|
||||
else:
|
||||
if self._are_relevant_parents_finished(n):
|
||||
nodes_found.append(n)
|
||||
return [n['node_object'] for n in nodes_found]
|
||||
|
||||
def cancel_node_jobs(self):
|
||||
cancel_finished = True
|
||||
for n in self.nodes:
|
||||
obj = n['node_object']
|
||||
job = obj.job
|
||||
@@ -57,43 +107,118 @@ class WorkflowDAG(SimpleDAG):
|
||||
if not job:
|
||||
continue
|
||||
elif job.can_cancel:
|
||||
cancel_finished = False
|
||||
job.cancel()
|
||||
return cancel_finished
|
||||
|
||||
def is_workflow_done(self):
|
||||
root_nodes = self.get_root_nodes()
|
||||
nodes = root_nodes
|
||||
is_failed = False
|
||||
for node in self.nodes:
|
||||
obj = node['node_object']
|
||||
if obj.do_not_run is False and not obj.job and obj.unified_job_template:
|
||||
return False
|
||||
elif obj.job and obj.job.status not in ['successful', 'failed', 'canceled', 'error']:
|
||||
return False
|
||||
return True
|
||||
|
||||
for index, n in enumerate(nodes):
|
||||
obj = n['node_object']
|
||||
job = obj.job
|
||||
def has_workflow_failed(self):
|
||||
failed_nodes = []
|
||||
res = False
|
||||
failed_path_nodes_id_status = []
|
||||
failed_unified_job_template_node_ids = []
|
||||
|
||||
if obj.unified_job_template is None:
|
||||
is_failed = True
|
||||
continue
|
||||
elif not job:
|
||||
return False, False
|
||||
for node in self.nodes:
|
||||
obj = node['node_object']
|
||||
if obj.do_not_run is False and obj.unified_job_template is None:
|
||||
failed_nodes.append(node)
|
||||
elif obj.job and obj.job.status in ['failed', 'canceled', 'error']:
|
||||
failed_nodes.append(node)
|
||||
|
||||
children_success = self.get_dependencies(obj, 'success_nodes')
|
||||
children_failed = self.get_dependencies(obj, 'failure_nodes')
|
||||
children_always = self.get_dependencies(obj, 'always_nodes')
|
||||
if not is_failed and job.status != 'successful':
|
||||
children_all = children_success + children_failed + children_always
|
||||
for child in children_all:
|
||||
if child['node_object'].job:
|
||||
break
|
||||
for node in failed_nodes:
|
||||
obj = node['node_object']
|
||||
if (len(self.get_dependencies(obj, 'failure_nodes')) +
|
||||
len(self.get_dependencies(obj, 'always_nodes'))) == 0:
|
||||
if obj.unified_job_template is None:
|
||||
res = True
|
||||
failed_unified_job_template_node_ids.append(str(obj.id))
|
||||
else:
|
||||
is_failed = True if children_all else job.status in ['failed', 'canceled', 'error']
|
||||
res = True
|
||||
failed_path_nodes_id_status.append((str(obj.id), obj.job.status))
|
||||
|
||||
if job.status in ['canceled', 'error']:
|
||||
continue
|
||||
elif job.status == 'failed':
|
||||
nodes.extend(children_failed + children_always)
|
||||
elif job.status == 'successful':
|
||||
nodes.extend(children_success + children_always)
|
||||
if res is True:
|
||||
s = _("No error handle path for workflow job node(s) [{node_status}] workflow job "
|
||||
"node(s) missing unified job template and error handle path [{no_ufjt}].")
|
||||
parms = {
|
||||
'node_status': '',
|
||||
'no_ufjt': '',
|
||||
}
|
||||
if len(failed_path_nodes_id_status) > 0:
|
||||
parms['node_status'] = ",".join(["({},{})".format(id, status) for id, status in failed_path_nodes_id_status])
|
||||
if len(failed_unified_job_template_node_ids) > 0:
|
||||
parms['no_ufjt'] = ",".join(failed_unified_job_template_node_ids)
|
||||
return True, smart_text(s.format(**parms))
|
||||
return False, None
|
||||
|
||||
r'''
|
||||
Determine if all nodes have been decided on being marked do_not_run.
|
||||
Nodes that are do_not_run False may become do_not_run True in the future.
|
||||
We know a do_not_run False node will NOT be marked do_not_run True if there
|
||||
is a job run for that node.
|
||||
|
||||
:param workflow_nodes: list of workflow_nodes
|
||||
|
||||
Return a boolean
|
||||
'''
|
||||
def _are_all_nodes_dnr_decided(self, workflow_nodes):
|
||||
for n in workflow_nodes:
|
||||
if n.do_not_run is False and not n.job and n.unified_job_template:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
r'''
|
||||
Determine if a node (1) is ready to be marked do_not_run and (2) should
|
||||
be marked do_not_run.
|
||||
|
||||
:param node: SimpleDAG internal node
|
||||
:param parent_nodes: list of workflow_nodes
|
||||
|
||||
Return a boolean
|
||||
'''
|
||||
def _should_mark_node_dnr(self, node, parent_nodes):
|
||||
for p in parent_nodes:
|
||||
if p.do_not_run is True:
|
||||
pass
|
||||
elif p.job:
|
||||
if p.job.status == 'successful':
|
||||
if node in (self.get_dependencies(p, 'success_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
return False
|
||||
elif p.job.status in ['failed', 'error', 'canceled']:
|
||||
if node in (self.get_dependencies(p, 'failure_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
elif p.do_not_run is False and p.unified_job_template is None:
|
||||
if node in (self.get_dependencies(p, 'failure_nodes') +
|
||||
self.get_dependencies(p, 'always_nodes')):
|
||||
return False
|
||||
else:
|
||||
# Job is about to run or is running. Hold our horses and wait for
|
||||
# the job to finish. We can't proceed down the graph path until we
|
||||
# have the job result.
|
||||
return False, False
|
||||
return True, is_failed
|
||||
return False
|
||||
return True
|
||||
|
||||
def mark_dnr_nodes(self):
|
||||
root_nodes = self.get_root_nodes()
|
||||
nodes_marked_do_not_run = []
|
||||
|
||||
for node in self.sort_nodes_topological():
|
||||
obj = node['node_object']
|
||||
|
||||
if obj.do_not_run is False and not obj.job and node not in root_nodes:
|
||||
parent_nodes = [p['node_object'] for p in self.get_dependents(obj)]
|
||||
if self._are_all_nodes_dnr_decided(parent_nodes):
|
||||
if self._should_mark_node_dnr(node, parent_nodes):
|
||||
obj.do_not_run = True
|
||||
nodes_marked_do_not_run.append(node)
|
||||
|
||||
return [n['node_object'] for n in nodes_marked_do_not_run]
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# All Rights Reserved
|
||||
|
||||
# Python
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
import uuid
|
||||
import json
|
||||
@@ -11,18 +11,13 @@ import random
|
||||
from sets import Set
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.db import transaction, connection, DatabaseError
|
||||
from django.db import transaction, connection
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.timezone import now as tz_now, utc
|
||||
from django.db.models import Q
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
# AWX
|
||||
from awx.main.models import (
|
||||
AdHocCommand,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
@@ -30,21 +25,16 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
SystemJob,
|
||||
UnifiedJob,
|
||||
WorkflowJob,
|
||||
WorkflowJobTemplate
|
||||
)
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model
|
||||
from awx.main.utils import get_type_for_model, task_manager_bulk_reschedule, schedule_task_manager
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.utils import decrypt_field
|
||||
|
||||
# Celery
|
||||
from celery import Celery
|
||||
from celery.app.control import Inspect
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
@@ -76,7 +66,8 @@ class TaskManager():
|
||||
inventory_updates_qs = InventoryUpdate.objects.filter(
|
||||
status__in=status_list).exclude(source='file').prefetch_related('inventory_source', 'instance_group')
|
||||
inventory_updates = [i for i in inventory_updates_qs]
|
||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
# Notice the job_type='check': we want to prevent implicit project updates from blocking our jobs.
|
||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list, job_type='check').prefetch_related('instance_group')]
|
||||
system_jobs = [s for s in SystemJob.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
ad_hoc_commands = [a for a in AdHocCommand.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
workflow_jobs = [w for w in WorkflowJob.objects.filter(status__in=status_list)]
|
||||
@@ -84,79 +75,6 @@ class TaskManager():
|
||||
key=lambda task: task.created)
|
||||
return all_tasks
|
||||
|
||||
'''
|
||||
Tasks that are running and SHOULD have a celery task.
|
||||
{
|
||||
'execution_node': [j1, j2,...],
|
||||
'execution_node': [j3],
|
||||
...
|
||||
}
|
||||
'''
|
||||
def get_running_tasks(self):
|
||||
execution_nodes = {}
|
||||
waiting_jobs = []
|
||||
now = tz_now()
|
||||
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
|
||||
jobs = UnifiedJob.objects.filter((Q(status='running') |
|
||||
Q(status='waiting', modified__lte=now - timedelta(seconds=60))) &
|
||||
~Q(polymorphic_ctype_id=workflow_ctype_id))
|
||||
for j in jobs:
|
||||
if j.execution_node:
|
||||
execution_nodes.setdefault(j.execution_node, []).append(j)
|
||||
else:
|
||||
waiting_jobs.append(j)
|
||||
return (execution_nodes, waiting_jobs)
|
||||
|
||||
'''
|
||||
Tasks that are currently running in celery
|
||||
|
||||
Transform:
|
||||
{
|
||||
"celery@ec2-54-204-222-62.compute-1.amazonaws.com": [],
|
||||
"celery@ec2-54-163-144-168.compute-1.amazonaws.com": [{
|
||||
...
|
||||
"id": "5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
...
|
||||
}, {
|
||||
...,
|
||||
}, ...]
|
||||
}
|
||||
|
||||
to:
|
||||
{
|
||||
"ec2-54-204-222-62.compute-1.amazonaws.com": [
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8306",
|
||||
...
|
||||
]
|
||||
}
|
||||
'''
|
||||
def get_active_tasks(self):
|
||||
if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):
|
||||
app = Celery('awx')
|
||||
app.config_from_object('django.conf:settings')
|
||||
inspector = Inspect(app=app)
|
||||
active_task_queues = inspector.active()
|
||||
else:
|
||||
logger.warn("Ignoring celery task inspector")
|
||||
active_task_queues = None
|
||||
|
||||
queues = None
|
||||
|
||||
if active_task_queues is not None:
|
||||
queues = {}
|
||||
for queue in active_task_queues:
|
||||
active_tasks = set()
|
||||
map(lambda at: active_tasks.add(at['id']), active_task_queues[queue])
|
||||
|
||||
# celery worker name is of the form celery@myhost.com
|
||||
queue_name = queue.split('@')
|
||||
queue_name = queue_name[1 if len(queue_name) > 1 else 0]
|
||||
queues[queue_name] = active_tasks
|
||||
else:
|
||||
return (None, None)
|
||||
|
||||
return (active_task_queues, queues)
|
||||
|
||||
def get_latest_project_update_tasks(self, all_sorted_tasks):
|
||||
project_ids = Set()
|
||||
@@ -186,8 +104,15 @@ class TaskManager():
|
||||
|
||||
def spawn_workflow_graph_jobs(self, workflow_jobs):
|
||||
for workflow_job in workflow_jobs:
|
||||
if workflow_job.cancel_flag:
|
||||
logger.debug('Not spawning jobs for %s because it is pending cancelation.', workflow_job.log_format)
|
||||
continue
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
spawn_nodes = dag.bfs_nodes_to_run()
|
||||
if spawn_nodes:
|
||||
logger.info('Spawning jobs for %s', workflow_job.log_format)
|
||||
else:
|
||||
logger.debug('No nodes to spawn for %s', workflow_job.log_format)
|
||||
for spawn_node in spawn_nodes:
|
||||
if spawn_node.unified_job_template is None:
|
||||
continue
|
||||
@@ -195,41 +120,82 @@ class TaskManager():
|
||||
job = spawn_node.unified_job_template.create_unified_job(**kv)
|
||||
spawn_node.job = job
|
||||
spawn_node.save()
|
||||
if job._resources_sufficient_for_launch():
|
||||
can_start = job.signal_start()
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
else:
|
||||
logger.info('Spawned %s in %s for node %s', job.log_format, workflow_job.log_format, spawn_node.pk)
|
||||
can_start = True
|
||||
if isinstance(spawn_node.unified_job_template, WorkflowJobTemplate):
|
||||
workflow_ancestors = job.get_ancestor_workflows()
|
||||
if spawn_node.unified_job_template in set(workflow_ancestors):
|
||||
can_start = False
|
||||
logger.info('Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
|
||||
job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))
|
||||
display_list = [spawn_node.unified_job_template] + workflow_ancestors
|
||||
job.job_explanation = _(
|
||||
"Workflow Job spawned from workflow could not start because it "
|
||||
"would result in recursion (spawn order, most recent first: {})"
|
||||
).format(six.text_type(', ').join([six.text_type('<{}>').format(tmp) for tmp in display_list]))
|
||||
else:
|
||||
logger.debug('Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
|
||||
job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))
|
||||
if not job._resources_sufficient_for_launch():
|
||||
can_start = False
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
if can_start:
|
||||
if workflow_job.start_args:
|
||||
start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
|
||||
else:
|
||||
start_args = {}
|
||||
can_start = job.signal_start(**start_args)
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
if not can_start:
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
connection.on_commit(lambda: job.websocket_emit_status('failed'))
|
||||
job.websocket_emit_status('failed')
|
||||
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
|
||||
|
||||
# See comment in tasks.py::RunWorkflowJob::run()
|
||||
def process_finished_workflow_jobs(self, workflow_jobs):
|
||||
result = []
|
||||
for workflow_job in workflow_jobs:
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
status_changed = False
|
||||
if workflow_job.cancel_flag:
|
||||
workflow_job.status = 'canceled'
|
||||
workflow_job.save()
|
||||
dag.cancel_node_jobs()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
workflow_job.workflow_nodes.filter(do_not_run=False, job__isnull=True).update(do_not_run=True)
|
||||
logger.debug('Canceling spawned jobs of %s due to cancel flag.', workflow_job.log_format)
|
||||
cancel_finished = dag.cancel_node_jobs()
|
||||
if cancel_finished:
|
||||
logger.info('Marking %s as canceled, all spawned jobs have concluded.', workflow_job.log_format)
|
||||
workflow_job.status = 'canceled'
|
||||
workflow_job.start_args = '' # blank field to remove encrypted passwords
|
||||
workflow_job.save(update_fields=['status', 'start_args'])
|
||||
status_changed = True
|
||||
else:
|
||||
is_done, has_failed = dag.is_workflow_done()
|
||||
workflow_nodes = dag.mark_dnr_nodes()
|
||||
map(lambda n: n.save(update_fields=['do_not_run']), workflow_nodes)
|
||||
is_done = dag.is_workflow_done()
|
||||
if not is_done:
|
||||
continue
|
||||
has_failed, reason = dag.has_workflow_failed()
|
||||
logger.info('Marking %s as %s.', workflow_job.log_format, 'failed' if has_failed else 'successful')
|
||||
result.append(workflow_job.id)
|
||||
workflow_job.status = 'failed' if has_failed else 'successful'
|
||||
workflow_job.save()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
new_status = 'failed' if has_failed else 'successful'
|
||||
logger.debug(six.text_type("Transitioning {} to {} status.").format(workflow_job.log_format, new_status))
|
||||
update_fields = ['status', 'start_args']
|
||||
workflow_job.status = new_status
|
||||
if reason:
|
||||
logger.info(reason)
|
||||
workflow_job.job_explanation = "No error handling paths found, marking workflow as failed"
|
||||
update_fields.append('job_explanation')
|
||||
workflow_job.start_args = '' # blank field to remove encrypted passwords
|
||||
workflow_job.save(update_fields=update_fields)
|
||||
status_changed = True
|
||||
if status_changed:
|
||||
workflow_job.websocket_emit_status(workflow_job.status)
|
||||
if workflow_job.spawned_by_workflow:
|
||||
schedule_task_manager()
|
||||
return result
|
||||
|
||||
def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
|
||||
@@ -255,9 +221,6 @@ class TaskManager():
|
||||
rampart_group.name, task.log_format))
|
||||
return
|
||||
|
||||
error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
|
||||
success_handler = handle_work_success.s(task_actual=task_actual)
|
||||
|
||||
task.status = 'waiting'
|
||||
|
||||
(start_status, opts) = task.pre_start()
|
||||
@@ -272,6 +235,7 @@ class TaskManager():
|
||||
if type(task) is WorkflowJob:
|
||||
task.status = 'running'
|
||||
logger.info('Transitioning %s to running status.', task.log_format)
|
||||
schedule_task_manager()
|
||||
elif not task.supports_isolation() and rampart_group.controller_id:
|
||||
# non-Ansible jobs on isolated instances run on controller
|
||||
task.instance_group = rampart_group.controller
|
||||
@@ -298,13 +262,25 @@ class TaskManager():
|
||||
self.consume_capacity(task, rampart_group.name)
|
||||
|
||||
def post_commit():
|
||||
task.websocket_emit_status(task.status)
|
||||
if task.status != 'failed':
|
||||
task.start_celery_task(opts,
|
||||
error_callback=error_handler,
|
||||
success_callback=success_handler,
|
||||
queue=task.get_celery_queue_name())
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
opts,
|
||||
queue=task.get_queue_name(),
|
||||
uuid=task.celery_task_id,
|
||||
callbacks=[{
|
||||
'task': handle_work_success.name,
|
||||
'kwargs': {'task_actual': task_actual}
|
||||
}],
|
||||
errbacks=[{
|
||||
'task': handle_work_error.name,
|
||||
'args': [task.celery_task_id],
|
||||
'kwargs': {'subtasks': [task_actual] + dependencies}
|
||||
}],
|
||||
)
|
||||
|
||||
task.websocket_emit_status(task.status) # adds to on_commit
|
||||
connection.on_commit(post_commit)
|
||||
|
||||
def process_running_tasks(self, running_tasks):
|
||||
@@ -318,6 +294,11 @@ class TaskManager():
|
||||
project_task.created = task.created - timedelta(seconds=1)
|
||||
project_task.status = 'pending'
|
||||
project_task.save()
|
||||
logger.info(
|
||||
'Spawned {} as dependency of {}'.format(
|
||||
project_task.log_format, task.log_format
|
||||
)
|
||||
)
|
||||
return project_task
|
||||
|
||||
def create_inventory_update(self, task, inventory_source_task):
|
||||
@@ -327,6 +308,11 @@ class TaskManager():
|
||||
inventory_task.created = task.created - timedelta(seconds=2)
|
||||
inventory_task.status = 'pending'
|
||||
inventory_task.save()
|
||||
logger.info(
|
||||
'Spawned {} as dependency of {}'.format(
|
||||
inventory_task.log_format, task.log_format
|
||||
)
|
||||
)
|
||||
# inventory_sources = self.get_inventory_source_tasks([task])
|
||||
# self.process_inventory_sources(inventory_sources)
|
||||
return inventory_task
|
||||
@@ -482,7 +468,7 @@ class TaskManager():
|
||||
logger.debug(six.text_type("Dependent {} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.workflow_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
@@ -492,12 +478,12 @@ class TaskManager():
|
||||
found_acceptable_queue = False
|
||||
idle_instance_that_fits = None
|
||||
if isinstance(task, WorkflowJob):
|
||||
if task.workflow_job_template_id in running_workflow_templates:
|
||||
if task.unified_job_template_id in running_workflow_templates:
|
||||
if not task.allow_simultaneous:
|
||||
logger.debug(six.text_type("{} is blocked from running, workflow already running").format(task.log_format))
|
||||
continue
|
||||
else:
|
||||
running_workflow_templates.add(task.workflow_job_template_id)
|
||||
running_workflow_templates.add(task.unified_job_template_id)
|
||||
self.start_task(task, None, task.get_jobs_fail_chain(), None)
|
||||
continue
|
||||
for rampart_group in preferred_instance_groups:
|
||||
@@ -528,105 +514,6 @@ class TaskManager():
|
||||
if not found_acceptable_queue:
|
||||
logger.debug(six.text_type("{} couldn't be scheduled on graph, waiting for next cycle").format(task.log_format))
|
||||
|
||||
def fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time,
|
||||
isolated=False):
|
||||
for task in node_jobs:
|
||||
if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
|
||||
if isinstance(task, WorkflowJob):
|
||||
continue
|
||||
if task.modified > celery_task_start_time:
|
||||
continue
|
||||
new_status = 'failed'
|
||||
if isolated:
|
||||
new_status = 'error'
|
||||
task.status = new_status
|
||||
task.start_args = '' # blank field to remove encrypted passwords
|
||||
if isolated:
|
||||
# TODO: cancel and reap artifacts of lost jobs from heartbeat
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but its ',
|
||||
'controller management daemon was not present in',
|
||||
'the job queue, so it has been marked as failed.',
|
||||
'Task may still be running, but contactability is unknown.'
|
||||
))
|
||||
else:
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but was not present in',
|
||||
'the job queue, so it has been marked as failed.',
|
||||
))
|
||||
try:
|
||||
task.save(update_fields=['status', 'start_args', 'job_explanation'])
|
||||
except DatabaseError:
|
||||
logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
|
||||
continue
|
||||
if hasattr(task, 'send_notification_templates'):
|
||||
task.send_notification_templates('failed')
|
||||
task.websocket_emit_status(new_status)
|
||||
logger.error("{}Task {} has no record in celery. Marking as failed".format(
|
||||
'Isolated ' if isolated else '', task.log_format))
|
||||
|
||||
def cleanup_inconsistent_celery_tasks(self):
|
||||
'''
|
||||
Rectify tower db <-> celery inconsistent view of jobs state
|
||||
'''
|
||||
last_cleanup = cache.get('last_celery_task_cleanup') or datetime.min.replace(tzinfo=utc)
|
||||
if (tz_now() - last_cleanup).seconds < settings.AWX_INCONSISTENT_TASK_INTERVAL:
|
||||
return
|
||||
|
||||
logger.debug("Failing inconsistent running jobs.")
|
||||
celery_task_start_time = tz_now()
|
||||
active_task_queues, active_queues = self.get_active_tasks()
|
||||
cache.set('last_celery_task_cleanup', tz_now())
|
||||
|
||||
if active_queues is None:
|
||||
logger.error('Failed to retrieve active tasks from celery')
|
||||
return None
|
||||
|
||||
'''
|
||||
Only consider failing tasks on instances for which we obtained a task
|
||||
list from celery for.
|
||||
'''
|
||||
running_tasks, waiting_tasks = self.get_running_tasks()
|
||||
all_celery_task_ids = []
|
||||
for node, node_jobs in active_queues.iteritems():
|
||||
all_celery_task_ids.extend(node_jobs)
|
||||
|
||||
self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids, celery_task_start_time)
|
||||
|
||||
for node, node_jobs in running_tasks.iteritems():
|
||||
isolated = False
|
||||
if node in active_queues:
|
||||
active_tasks = active_queues[node]
|
||||
else:
|
||||
'''
|
||||
Node task list not found in celery. We may branch into cases:
|
||||
- instance is unknown to tower, system is improperly configured
|
||||
- instance is reported as down, then fail all jobs on the node
|
||||
- instance is an isolated node, then check running tasks
|
||||
among all allowed controller nodes for management process
|
||||
- valid healthy instance not included in celery task list
|
||||
probably a netsplit case, leave it alone
|
||||
'''
|
||||
instance = Instance.objects.filter(hostname=node).first()
|
||||
|
||||
if instance is None:
|
||||
logger.error("Execution node Instance {} not found in database. "
|
||||
"The node is currently executing jobs {}".format(
|
||||
node, [j.log_format for j in node_jobs]))
|
||||
active_tasks = []
|
||||
elif instance.capacity == 0:
|
||||
active_tasks = []
|
||||
elif instance.rampart_groups.filter(controller__isnull=False).exists():
|
||||
active_tasks = all_celery_task_ids
|
||||
isolated = True
|
||||
else:
|
||||
continue
|
||||
|
||||
self.fail_jobs_if_not_in_celery(
|
||||
node_jobs, active_tasks, celery_task_start_time,
|
||||
isolated=isolated
|
||||
)
|
||||
|
||||
def calculate_capacity_consumed(self, tasks):
|
||||
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
|
||||
|
||||
@@ -672,22 +559,30 @@ class TaskManager():
|
||||
running_workflow_tasks = self.get_running_workflow_jobs()
|
||||
finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
|
||||
|
||||
previously_running_workflow_tasks = running_workflow_tasks
|
||||
running_workflow_tasks = []
|
||||
for workflow_job in previously_running_workflow_tasks:
|
||||
if workflow_job.status == 'running':
|
||||
running_workflow_tasks.append(workflow_job)
|
||||
else:
|
||||
logger.debug('Removed %s from job spawning consideration.', workflow_job.log_format)
|
||||
|
||||
self.spawn_workflow_graph_jobs(running_workflow_tasks)
|
||||
|
||||
self.process_tasks(all_sorted_tasks)
|
||||
return finished_wfjs
|
||||
|
||||
def schedule(self):
|
||||
with transaction.atomic():
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
with transaction.atomic():
|
||||
if acquired is False:
|
||||
logger.debug("Not running scheduler, another task holds lock")
|
||||
return
|
||||
logger.debug("Starting Scheduler")
|
||||
|
||||
self.cleanup_inconsistent_celery_tasks()
|
||||
finished_wfjs = self._schedule()
|
||||
with task_manager_bulk_reschedule():
|
||||
finished_wfjs = self._schedule()
|
||||
|
||||
# Operations whose queries rely on modifications made during the atomic scheduling session
|
||||
for wfj in WorkflowJob.objects.filter(id__in=finished_wfjs):
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user