Mirror of https://github.com/ansible/awx.git (synced 2026-02-07 12:34:43 -03:30)
Compare commits
699 Commits
16
.github/BOTMETA.yml
vendored
Normal file
@@ -0,0 +1,16 @@
files:
  awx/ui/:
    labels: component:ui
    maintainers: $team_ui
  awx/api/:
    labels: component:api
    maintainers: $team_api
  awx/main/:
    labels: component:api
    maintainers: $team_api
  installer/:
    labels: component:installer

macros:
  team_api: wwitzel3 matburt chrismeyersfsu cchurch AlanCoding ryanpetrello jangstur
  team_ui: jlmitch5 jaredevantabor mabashian gconsidine marshmalien benthomasson
25
ISSUE_TEMPLATE.md → .github/ISSUE_TEMPLATE.md
vendored
@@ -1,31 +1,42 @@
|
||||
### Summary
|
||||
##### ISSUE TYPE
|
||||
<!--- Pick one below and delete the rest: -->
|
||||
- Bug Report
|
||||
- Feature Idea
|
||||
- Documentation
|
||||
|
||||
##### COMPONENT NAME
|
||||
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
|
||||
- API
|
||||
- UI
|
||||
- Installer
|
||||
|
||||
##### SUMMARY
|
||||
<!-- Briefly describe the problem. -->
|
||||
|
||||
### Environment
|
||||
|
||||
##### ENVIRONMENT
|
||||
<!--
|
||||
* AWX version: X.Y.Z
|
||||
* AWX install method: openshift, minishift, docker on linux, docker for mac, boot2docker
|
||||
* Ansible version: X.Y.Z
|
||||
* Operating System:
|
||||
* Web Browser:
|
||||
-->
|
||||
|
||||
### Steps To Reproduce:
|
||||
##### STEPS TO REPRODUCE
|
||||
|
||||
<!-- For bugs, please show exactly how to reproduce the problem. For new
|
||||
features, show how the feature would be used. -->
|
||||
|
||||
### Expected Results:
|
||||
##### EXPECTED RESULTS
|
||||
|
||||
<!-- For bug reports, what did you expect to happen when running the steps
|
||||
above? -->
|
||||
|
||||
### Actual Results:
|
||||
##### ACTUAL RESULTS
|
||||
|
||||
<!-- For bug reports, what actually happened? -->
|
||||
|
||||
### Additional Information:
|
||||
##### ADDITIONAL INFORMATION
|
||||
|
||||
<!-- Include any links to sosreport, database dumps, screenshots or other
|
||||
information. -->
|
||||
9
.github/LABEL_MAP.md
vendored
Normal file
@@ -0,0 +1,9 @@
Bug Report: type:bug
Bugfix Pull Request: type:bug
Feature Request: type:enhancement
Feature Pull Request: type:enhancement
UI: component:ui
API: component:api
Installer: component:installer
Docs Pull Request: component:docs
Documentation: component:docs
39
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
##### SUMMARY
|
||||
<!--- Describe the change, including rationale and design decisions -->
|
||||
|
||||
<!---
|
||||
If you are fixing an existing issue, please include "related #nnn" in your
|
||||
commit message and your description; but you should still explain what
|
||||
the change does.
|
||||
-->
|
||||
|
||||
##### ISSUE TYPE
|
||||
<!--- Pick one below and delete the rest: -->
|
||||
- Feature Pull Request
|
||||
- Bugfix Pull Request
|
||||
- Docs Pull Request
|
||||
|
||||
##### COMPONENT NAME
|
||||
<!--- Name of the module/plugin/task -->
|
||||
- API
|
||||
- UI
|
||||
- Installer
|
||||
|
||||
##### AWX VERSION
|
||||
<!--- Paste verbatim output from `make VERSION` between quotes below -->
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
|
||||
##### ADDITIONAL INFORMATION
|
||||
<!---
|
||||
Include additional information to help people understand the change here.
|
||||
For bugs that don't have a linked bug report, a step-by-step reproduction
|
||||
of the problem is helpful.
|
||||
-->
|
||||
|
||||
<!--- Paste verbatim command output below, e.g. before and after your change -->
|
||||
```
|
||||
|
||||
```
|
||||
5
.gitignore
vendored
@@ -20,6 +20,8 @@ celerybeat-schedule
awx/ui/static
awx/ui/build_test
awx/ui/client/languages
awx/ui/templates/ui/index.html
awx/ui/templates/ui/installing.html

# Tower setup playbook testing
setup/test/roles/postgresql
@@ -108,9 +110,12 @@ local/
requirements/vendor
.i18n_built
VERSION
.idea/*

# AWX python libs populated by requirements.txt
awx/lib/.deps_built
awx/lib/site-packages
venv/*
use_dev_supervisor.txt

.idea/*

421
CONTRIBUTING.md
@@ -1,233 +1,301 @@
|
||||
|
||||
AWX
|
||||
===========
|
||||
# AWX
|
||||
|
||||
Hi there! We're excited to have you as a contributor.
|
||||
|
||||
Have questions about this document or anything not covered here? Come chat with us on IRC (#ansible-awx on freenode) or the mailing list.
|
||||
Have questions about this document or anything not covered here? Come chat with us at `#ansible-awx` on irc.freenode.net, or submit your question to the [mailing list](https://groups.google.com/forum/#!forum/awx-project) .
|
||||
|
||||
Table of contents
|
||||
-----------------
|
||||
## Table of contents
|
||||
|
||||
* [Contributing Agreement](#dco)
|
||||
* [Code of Conduct](#code-of-conduct)
|
||||
* [Setting up the development environment](#setting-up-the-development-environment)
|
||||
* [Things to know prior to submitting code](#things-to-know-before-contributing-code)
|
||||
* [Setting up your development environment](#setting-up-your-development-environment)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Local Settings](#local-settings)
|
||||
* [Building the base image](#building-the-base-image)
|
||||
* [Building the user interface](#building-the-user-interface)
|
||||
* [Starting up the development environment](#starting-up-the-development-environment)
|
||||
* [Starting the development environment at the container shell](#starting-the-container-environment-at-the-container-shell)
|
||||
* [Using the development environment](#using-the-development-environment)
|
||||
* [Docker](#docker)
|
||||
* [Docker compose](#docker-compose)
|
||||
* [Node and npm](#node-and-npm)
|
||||
* [Building the environment](#building-the-environment)
|
||||
* [Clone the AWX repo](#clone-the-awx-repo)
|
||||
* [Create local settings](#create-local-settings)
|
||||
* [Build the base image](#build-the-base-image)
|
||||
* [Build the user interface](#build-the-user-interface)
|
||||
* [Running the environment](#running-the-environment)
|
||||
* [Start the containers](#start-the-containers)
|
||||
* [Start from the container shell](#start-from-the-container-shell)
|
||||
* [Post Build Steps](#post-build-steps)
|
||||
* [Start a shell](#start-the-shell)
|
||||
* [Create a superuser](#create-a-superuser)
|
||||
* [Load the data](#load-the-data)
|
||||
* [Accessing the AWX web interface](#accessing-the-awx-web-interface)
|
||||
* [Purging containers and images](#purging-containers-and-images)
|
||||
* [What should I work on?](#what-should-i-work-on)
|
||||
* [Submitting Pull Requests](#submitting-pull-requests)
|
||||
* [Reporting Issues](#reporting-issues)
|
||||
* [How issues are resolved](#how-issues-are-resolved)
|
||||
* [Ansible Issue Bot](#ansible-issue-bot)
|
||||
|
||||
DCO
|
||||
===
|
||||
## Things to know prior to submitting code
|
||||
|
||||
All contributors must use "git commit --signoff" for any
|
||||
commit to be merged, and agree that usage of --signoff constitutes
|
||||
agreement with the terms of DCO 1.1. Any contribution that does not
|
||||
have such a signoff will not be merged.
|
||||
- All code submissions are done through pull requests against the `devel` branch.
|
||||
- You must use `git commit --signoff` for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md).
|
||||
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
|
||||
- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.freenode.net, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
|
||||
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
## Setting up your development environment
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
The AWX development environment workflow and toolchain is based on Docker, and the docker-compose tool, to provide dependencies, services, and databases necessary to run all of the components. It also binds the local source tree into the development container, making it possible to observe and test changes in real time.
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
### Prerequisites
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
#### Docker
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
Prior to starting the development services, you'll need `docker` and `docker-compose`. On Linux, you can generally find these in your distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases.
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
For macOS and Windows, we recommend [Docker for Mac](https://www.docker.com/docker-mac) and [Docker for Windows](https://www.docker.com/docker-windows)
|
||||
respectively.
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
For Linux platforms, refer to the following from Docker:
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
**Fedora**
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/fedora/
|
||||
|
||||
**Centos**
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/centos/
|
||||
|
||||
**Ubuntu**
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
|
||||
|
||||
**Debian**
|
||||
|
||||
> https://docs.docker.com/engine/installation/linux/docker-ce/debian/
|
||||
|
||||
**Arch**
|
||||
|
||||
> https://wiki.archlinux.org/index.php/Docker
|
||||
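As a concrete example of what those linked instructions typically boil down to, here is a minimal Fedora install sketch (based on the Docker CE documentation linked above; package and repository names may drift between releases):

```bash
# Add the Docker CE repository and install the engine (Fedora example)
(host)$ sudo dnf -y install dnf-plugins-core
(host)$ sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
(host)$ sudo dnf -y install docker-ce
# Start the daemon and enable it at boot
(host)$ sudo systemctl enable --now docker
```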
|
||||
#### Docker compose
|
||||
|
||||
If you're not using Docker for Mac, or Docker for Windows, you may need, or choose to, install the Docker compose Python module separately, in which case you'll need to run the following:
|
||||
|
||||
```bash
|
||||
(host)$ pip install docker-compose
|
||||
```
|
||||
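Either way, it's worth confirming that both tools are on your `PATH` before continuing (a quick sanity check, not part of the original steps):

```bash
(host)$ docker --version
(host)$ docker-compose --version
```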
|
||||
Code of Conduct
|
||||
===============
|
||||
#### Node and npm
|
||||
|
||||
All contributors are expected to adhere to the Ansible Community Code of Conduct: http://docs.ansible.com/ansible/latest/community/code_of_conduct.html
|
||||
The AWX UI requires the following:
|
||||
|
||||
Setting up the development environment
|
||||
======================================
|
||||
- Node 6.x LTS version
|
||||
- NPM 3.x LTS
|
||||
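A quick way to check whether your local toolchain matches those versions (a simple sanity check; adjust if you manage Node through a version manager such as nvm):

```bash
(host)$ node --version   # expect a v6.x LTS release
(host)$ npm --version    # expect a 3.x release
```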
|
||||
The AWX development environment workflow and toolchain is based on Docker and the docker-compose tool to contain
|
||||
the dependencies, services, and databases necessary to run everything. It will bind the local source tree into the container
|
||||
making it possible to observe changes while developing.
|
||||
### Build the environment
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
`docker` and `docker-compose` are required for starting the development services, on Linux you can generally find these in your
|
||||
distro's packaging, but you may find that Docker themselves maintain a separate repo that tracks more closely to the latest releases.
|
||||
For macOS and Windows, we recommend Docker for Mac (https://www.docker.com/docker-mac) and Docker for Windows (https://www.docker.com/docker-windows)
|
||||
respectively. Docker for Mac/Windows automatically comes with `docker-compose`.
|
||||
#### Fork and clone the AWX repo
|
||||
|
||||
> Fedora
|
||||
If you have not done so already, you'll need to fork the AWX repo on GitHub. For more on how to do this, see [Fork a Repo](https://help.github.com/articles/fork-a-repo/).
|
||||
|
||||
https://docs.docker.com/engine/installation/linux/docker-ce/fedora/
|
||||
#### Create local settings
|
||||
|
||||
> Centos
|
||||
AWX will import the file `awx/settings/local_settings.py` and combine it with defaults in `awx/settings/defaults.py`. This file is required for starting the development environment and startup will fail if it's not provided.
|
||||
|
||||
https://docs.docker.com/engine/installation/linux/docker-ce/centos/
|
||||
An example is provided. Make a copy of it, and edit as needed (the defaults are usually fine):
|
||||
|
||||
> Ubuntu
|
||||
```bash
|
||||
(host)$ cp awx/settings/local_settings.py.docker_compose awx/settings/local_settings.py
|
||||
```
|
||||
|
||||
https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
|
||||
#### Build the base image
|
||||
|
||||
> Debian
|
||||
The AWX base container image (defined in `tools/docker-compose/Dockerfile`) contains basic OS dependencies and symbolic links into the development environment that make running the services easy.
|
||||
|
||||
https://docs.docker.com/engine/installation/linux/docker-ce/debian/
|
||||
Run the following to build the image:
|
||||
|
||||
> Arch
|
||||
```bash
|
||||
(host)$ make docker-compose-build
|
||||
```
|
||||
|
||||
https://wiki.archlinux.org/index.php/Docker
|
||||
**NOTE**
|
||||
|
||||
For `docker-compose` you may need/choose to install it separately:
|
||||
> The image will need to be rebuilt, if the Python requirements or OS dependencies change.
|
||||
|
||||
pip install docker-compose
|
||||
Once the build completes, you will have an `ansible/awx_devel` image in your local image cache. Use the `docker images` command to view it, as follows:
|
||||
|
||||
```bash
|
||||
(host)$ docker images
|
||||
|
||||
Local Settings
|
||||
--------------
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
ansible/awx_devel latest ba9ec3e8df74 26 minutes ago 1.42GB
|
||||
```
|
||||
|
||||
In development mode (i.e. when running from a source checkout), AWX
|
||||
will import the file `awx/settings/local_settings.py` and combine it with defaults in `awx/settings/defaults.py`. This file
|
||||
is required for starting the development environment and startup will fail if it's not provided
|
||||
#### Build the user interface
|
||||
|
||||
An example file that works for the `docker-compose` tool is provided. Make a copy of it and edit as needed (the defaults are usually fine):
|
||||
Run the following to build the AWX UI:
|
||||
|
||||
(host)$ cp awx/settings/local_settings.py.docker_compose awx/settings/local_settings.py
|
||||
```bash
|
||||
(host) $ make ui-devel
|
||||
```
|
||||
### Running the environment
|
||||
|
||||
Building the base image
|
||||
-----------------------
|
||||
#### Start the containers
|
||||
|
||||
The AWX base container image (found in `tools/docker-compose/Dockerfile`) contains basic OS dependencies and
|
||||
symbolic links into the development environment that make running the services easy. You'll first need to build the image:
|
||||
Start the development containers by running the following:
|
||||
|
||||
(host)$ make docker-compose-build
|
||||
|
||||
The image will only need to be rebuilt if the requirements or OS dependencies change. A core concept about this image is that it relies
|
||||
on having your local development environment mapped in.
|
||||
|
||||
Building the user interface
|
||||
---------------------------
|
||||
|
||||
> AWX requires the 6.x LTS version of Node and 3.x LTS NPM
|
||||
|
||||
In order for the AWX user interface to load from the development environment it must be built:
|
||||
|
||||
(host)$ make ui-devel
|
||||
```bash
|
||||
(host)$ make docker-compose
|
||||
```
|
||||
|
||||
When developing features and fixes for the user interface you can find more detail here: [UI Developer README](awx/ui/README.md)
|
||||
The above utilizes the image built in the previous step, and will automatically start all required services and dependent containers. Once the containers launch, your session will be attached to the *awx* container, and you'll be able to watch log messages and events in real time. You will see messages from Django, celery, and the front end build process.
|
||||
|
||||
Starting up the development environment
|
||||
----------------------------------------------
|
||||
If you start a second terminal session, you can take a look at the running containers using the `docker ps` command. For example:
|
||||
|
||||
There are several ways of starting the development environment depending on your desired workflow. The easiest and most common way is with:
|
||||
```bash
|
||||
# List running containers
|
||||
(host)$ docker ps
|
||||
|
||||
(host)$ make docker-compose
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
aa4a75d6d77b gcr.io/ansible-tower-engineering/awx_devel:devel "/tini -- /bin/sh ..." 23 seconds ago Up 15 seconds 0.0.0.0:5555->5555/tcp, 0.0.0.0:6899-6999->6899-6999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 22/tcp, 0.0.0.0:8080->8080/tcp tools_awx_1
|
||||
e4c0afeb548c postgres:9.6 "docker-entrypoint..." 26 seconds ago Up 23 seconds 5432/tcp tools_postgres_1
|
||||
0089699d5afd tools_logstash "/docker-entrypoin..." 26 seconds ago Up 25 seconds tools_logstash_1
|
||||
4d4ff0ced266 memcached:alpine "docker-entrypoint..." 26 seconds ago Up 25 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
|
||||
92842acd64cd rabbitmq:3-management "docker-entrypoint..." 26 seconds ago Up 24 seconds 4369/tcp, 5671-5672/tcp, 15671/tcp, 25672/tcp, 0.0.0.0:15672->15672/tcp tools_rabbitmq_1
|
||||
```
|
||||
**NOTE**
|
||||
|
||||
> The Makefile assumes that the image you built is tagged with your current branch. This allows you to build images for different contexts or branches. When starting the containers, you can choose a specific branch by setting `COMPOSE_TAG=<branch name>` in your environment.
|
||||
|
||||
> For example, you might be working in a feature branch, but you want to run the containers using the `devel` image you built previously. To do that, start the containers using the following command: `$ COMPOSE_TAG=devel make docker-compose`
|
||||
|
||||
##### Wait for migrations to complete
|
||||
|
||||
The first time you start the environment, database migrations need to run in order to build the PostgreSQL database. It will take a few moments, but eventually you will see output in your terminal session that looks like the following:
|
||||
|
||||
```bash
|
||||
awx_1 | Operations to perform:
|
||||
awx_1 | Synchronize unmigrated apps: solo, api, staticfiles, debug_toolbar, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
awx_1 | Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
awx_1 | Synchronizing apps without migrations:
|
||||
awx_1 | Creating tables...
|
||||
awx_1 | Running deferred SQL...
|
||||
awx_1 | Installing custom SQL...
|
||||
awx_1 | Running migrations:
|
||||
awx_1 | Rendering model states... DONE
|
||||
awx_1 | Applying contenttypes.0001_initial... OK
|
||||
awx_1 | Applying contenttypes.0002_remove_content_type_name... OK
|
||||
awx_1 | Applying auth.0001_initial... OK
|
||||
awx_1 | Applying auth.0002_alter_permission_name_max_length... OK
|
||||
awx_1 | Applying auth.0003_alter_user_email_max_length... OK
|
||||
awx_1 | Applying auth.0004_alter_user_username_opts... OK
|
||||
awx_1 | Applying auth.0005_alter_user_last_login_null... OK
|
||||
awx_1 | Applying auth.0006_require_contenttypes_0002... OK
|
||||
awx_1 | Applying taggit.0001_initial... OK
|
||||
awx_1 | Applying taggit.0002_auto_20150616_2121... OK
|
||||
awx_1 | Applying main.0001_initial... OK
|
||||
awx_1 | Applying main.0002_squashed_v300_release... OK
|
||||
awx_1 | Applying main.0003_squashed_v300_v303_updates... OK
|
||||
awx_1 | Applying main.0004_squashed_v310_release... OK
|
||||
awx_1 | Applying conf.0001_initial... OK
|
||||
awx_1 | Applying conf.0002_v310_copy_tower_settings... OK
|
||||
...
|
||||
```
|
||||
|
||||
Once migrations are completed, you can begin using AWX.
|
||||
|
||||
#### Start from the container shell
|
||||
|
||||
Oftentimes you'll want to start the development environment without immediately starting all of the services in the *awx* container, and instead be taken directly to a shell. You can do this with the following:
|
||||
|
||||
```bash
|
||||
(host)$ make docker-compose-test
|
||||
```
|
||||
|
||||
Using `docker exec`, this will create a session in the running *awx* container, and place you at a command prompt, where you can run shell commands inside the container.
|
||||
|
||||
If you want to start and use the development environment, you'll first need to bootstrap it by running the following command:
|
||||
|
||||
```bash
|
||||
(container)# /bootstrap_development.sh
|
||||
```
|
||||
|
||||
The above will do all the setup tasks, including running database migrations, so it may take a couple of minutes.
|
||||
|
||||
This utilizes the image you built in the previous step and will automatically start all required services and dependent containers. You'll
|
||||
be able to watch log messages and events as they come through.
|
||||
Now you can start each service individually, or start all services in a pre-configured tmux session like so:
|
||||
|
||||
The Makefile assumes that the image you built is tagged with your current branch. This allows you to pre-build images for different contexts
|
||||
but you may want to use a particular branch's image (for instance if you are developing a PR from a branch based on the integration branch):
|
||||
```bash
|
||||
(container)# cd /awx_devel
|
||||
(container)# make server
|
||||
```
|
||||
|
||||
(host)$ COMPOSE_TAG=devel make docker-compose
|
||||
### Post Build Steps
|
||||
|
||||
Starting the development environment at the container shell
|
||||
-----------------------------------------------------------
|
||||
Before you can log in and use the system, you will need to create an admin user. Optionally, you may also want to load some demo data.
|
||||
|
||||
Often times you'll want to start the development environment without immediately starting all services and instead be taken directly to a shell:
|
||||
##### Start a shell
|
||||
|
||||
(host)$ make docker-compose-test
|
||||
To create the admin user, and load demo data, you first need to start a shell session on the *awx* container. In a new terminal session, use the `docker exec` command as follows to start the shell session:
|
||||
|
||||
From here you'll need to bootstrap the development environment before it will be usable for you. The `docker-compose` make target will
|
||||
automatically do this:
|
||||
```bash
|
||||
(host)$ docker exec -it tools_awx_1 bash
|
||||
```
|
||||
This creates a session in the *awx* container, just as if you were using `ssh`, and allows you to execute commands within the running container.
|
||||
|
||||
(container)$ /bootstrap_development.sh
|
||||
##### Create an admin user
|
||||
|
||||
Before you can log into AWX, you need to create an admin user. With this user you will be able to create more users, and begin configuring the server. From within the container shell, run the following command:
|
||||
|
||||
```bash
|
||||
(container)# awx-manage createsuperuser
|
||||
```
|
||||
You will be prompted for a username, an email address, and a password, and you will be asked to confirm the password. The email address is not important, so just enter something that looks like an email address. Remember the username and password, as you will use them to log into the web interface for the first time.
|
||||
|
||||
From here you can start each service individually, or choose to start all services in a pre-configured tmux session:
|
||||
##### Load demo data
|
||||
|
||||
You can optionally load some demo data. This will create a demo project, inventory, and job template. From within the container shell, run the following to load the data:
|
||||
|
||||
(container)# cd /awx_devel
|
||||
(container)# make server
|
||||
```bash
|
||||
(container)# awx-manage create_preload_data
|
||||
```
|
||||
|
||||
Using the development environment
|
||||
---------------------------------
|
||||
**NOTE**
|
||||
|
||||
With the development environment running there are a few optional steps to pre-populate the environment with data. If you are using the `docker-compose`
|
||||
method above you'll first need a shell in the container:
|
||||
> This information will persist in the database running in the `tools_postgres_1` container, until the container is removed. You may periodically need to recreate
|
||||
this container, and thus the database, if the database schema changes in an upstream commit.
|
||||
|
||||
(host)$ docker exec -it tools_awx_1 bash
|
||||
### Accessing the AWX web interface
|
||||
|
||||
Create a superuser account:
|
||||
You can now log into the AWX web interface at [https://localhost:8043](https://localhost:8043), and access the API directly at [https://localhost:8043/api/](https://localhost:8043/api/).
|
||||
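If you'd rather confirm from a terminal first, a quick smoke test of the API endpoint (a sketch; `-k` skips verification of the development environment's self-signed certificate):

```bash
(host)$ curl -k https://localhost:8043/api/
```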
|
||||
(container)# awx-manage createsuperuser
|
||||
|
||||
Preload AWX with demo data:
|
||||
To log in use the admin user and password you created above in [Create an admin user](#create-an-admin-user).
|
||||
|
||||
(container)# awx-manage create_preload_data
|
||||
|
||||
This information will persist in the database running in the `tools_postgres_1` container, until it is removed. You may periodically need to recreate
|
||||
this container and database if the database schema changes in an upstream commit.
|
||||
### Purging containers and images
|
||||
|
||||
You should now be able to visit and login to the AWX user interface at https://localhost:8043 or http://localhost:8013 if you have built the UI.
|
||||
If not you can visit the API directly in your browser at: https://localhost:8043/api/ or http://localhost:8013/api/
|
||||
When necessary, remove any AWX containers and images by running the following:
|
||||
|
||||
When working on the source code for AWX the code will auto-reload for you when changes are made, with the exception of any background tasks that run in
|
||||
celery.
|
||||
```bash
|
||||
(host)$ make docker-clean
|
||||
```
|
||||
|
||||
## What should I work on?
|
||||
|
||||
Occasionally it may be necessary to purge any containers and images that may have collected:
|
||||
For feature work, take a look at the current [Enhancements](https://github.com/ansible/awx/issues?q=is%3Aissue+is%3Aopen+label%3Atype%3Aenhancement).
|
||||
|
||||
(host)$ make docker-clean
|
||||
|
||||
There are a host of other shortcuts, tools, and container configurations in the Makefile designed for various purposes. Feel free to explore.
|
||||
|
||||
What should I work on?
|
||||
======================
|
||||
If an enhancement already has someone assigned to it, that person is responsible for working on it. If you feel you could contribute, reach out to that person.
|
||||
|
||||
We list our specs in `/docs`. `/docs/current` are things that we are actively working on. `/docs/future` are ideas for future work and the direction we
|
||||
want that work to take. Fixing bugs, translations, and updates to documentation are also appreciated.
|
||||
Fixing bugs, adding translations, and updating the documentation are always appreciated, so reviewing the backlog of issues is always a good place to start.
|
||||
|
||||
Be aware that if you are working in a part of the codebase that is going through active development your changes may be rejected or you may be asked to
|
||||
rebase them. A good idea before starting work is to have a discussion with us on IRC or the mailing list.
|
||||
**NOTE**
|
||||
|
||||
Submitting Pull Requests
|
||||
========================
|
||||
> If you work in a part of the codebase that is going through active development, your changes may be rejected, or you may be asked to `rebase`. A good idea before starting work is to have a discussion with us in the `#ansible-awx` channel on irc.freenode.net, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
|
||||
Fixes and Features for AWX will go through the Github PR interface. There are a few things that can be done to help the visibility of your change
|
||||
and increase the likelihood that it will be accepted
|
||||
**NOTE**
|
||||
|
||||
> Add UI detail to these
|
||||
> If you're planning to develop features or fixes for the UI, please review the [UI Developer doc](./awx/ui/README.md).
|
||||
|
||||
## Submitting Pull Requests
|
||||
|
||||
Fixes and Features for AWX will go through the GitHub pull request process. Submit your pull request (PR) against the `devel` branch.
|
||||
|
||||
Here are a few things you can do to help the visibility of your change, and increase the likelihood that it will be accepted:
|
||||
|
||||
* No issues when running linters/code checkers
|
||||
* Python: flake8: `(container)/awx_devel$ make flake8`
|
||||
@@ -237,44 +305,17 @@ and increase the likelihood that it will be accepted
|
||||
* JavaScript: Jasmine: `(container)/awx_devel$ make ui-test-ci`
|
||||
* Write tests for new functionality, update/add tests for bug fixes
|
||||
* Make the smallest change possible
|
||||
* Write good commit messages: https://chris.beams.io/posts/git-commit/
|
||||
* Write good commit messages. See [How to write a Git commit message](https://chris.beams.io/posts/git-commit/).
|
||||
|
||||
It's generally a good idea to discuss features with us first by engaging us in IRC or on the mailing list, especially if you are unsure if it's a good
|
||||
fit.
|
||||
It's generally a good idea to discuss features with us first by engaging us in the `#ansible-awx` channel on irc.freenode.net, or on the [mailing list](https://groups.google.com/forum/#!forum/awx-project).
|
||||
|
||||
We like to keep our commit history clean and will require resubmission of pull requests that contain merge commits. Use `git pull --rebase` rather than
|
||||
`git pull` and `git rebase` rather than `git merge`.
|
||||
We like to keep our commit history clean, and will require resubmission of pull requests that contain merge commits. Use `git pull --rebase`, rather than
|
||||
`git pull`, and `git rebase`, rather than `git merge`.
|
||||
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in pretty good working order so we review requests carefully.
|
||||
Please be patient.
|
||||
Sometimes it might take us a while to fully review your PR. We try to keep the `devel` branch in good working order, and so we review requests carefully. Please be patient.
|
||||
|
||||
All submitted PRs will have the linter and unit tests run against them and the status reported in the PR.
|
||||
All submitted PRs will have the linter and unit tests run against them, and the status reported in the PR.
|
||||
|
||||
Reporting Issues
|
||||
================
|
||||
## Reporting Issues
|
||||
|
||||
Use the GitHub issue tracker for filing bugs. In order to save time and help us respond to issues quickly, make sure to fill out as much of the issue template
|
||||
as possible. Version information and an accurate reproducing scenario are critical to helping us identify the problem.
|
||||
|
||||
When reporting issues for the UI we also appreciate having screenshots and any error messages from the web browser's console. It's not unusual for browser extensions
|
||||
and plugins to cause problems. Reporting those will also help speed up analyzing and resolving UI bugs.
|
||||
|
||||
For the API and backend services, please capture all of the logs that you can from the time the problem was occurring.
|
||||
|
||||
Don't use the issue tracker to get help on how to do something - please use the mailing list and IRC for that.
|
||||
|
||||
How issues are resolved
|
||||
-----------------------
|
||||
|
||||
We triage our issues into high, medium, and low and will tag them with the relevant component (api, ui, installer, etc). We will typically focus on high priority
|
||||
issues. There aren't hard and fast rules for determining the severity of an issue, but generally high priority issues have an increased likelihood of breaking
|
||||
existing functionality and/or negatively impacting a large number of users.
|
||||
|
||||
If your issue isn't considered `high` priority then please be patient as it may take some time to get to your report.
|
||||
|
||||
Before opening a new issue, please use the issue search feature to see if it's already been reported. If you have any extra detail to provide then please comment.
|
||||
Rather than posting a "me too" comment you might consider giving it a "thumbs up" on github.
|
||||
|
||||
Ansible Issue Bot
|
||||
-----------------
|
||||
> Fill in
|
||||
We welcome your feedback, and encourage you to file an issue when you run into a problem. But before opening a new issue, we ask that you please view our [Issues guide](./ISSUES.md).
|
||||
|
||||
19
COPYING
@@ -1,19 +0,0 @@
|
||||
ANSIBLE TOWER BY RED HAT END USER LICENSE AGREEMENT
|
||||
|
||||
This end user license agreement (“EULA”) governs the use of the Ansible Tower software and any related updates, upgrades, versions, appearance, structure and organization (the “Ansible Tower Software”), regardless of the delivery mechanism.
|
||||
|
||||
1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. and its affiliates (“Red Hat”) grant to you (“You”) a non-transferable, non-exclusive, worldwide, non-sublicensable, limited, revocable license to use the Ansible Tower Software for the term of the associated Red Hat Software Subscription(s) and in a quantity equal to the number of Red Hat Software Subscriptions purchased from Red Hat for the Ansible Tower Software (“License”), each as set forth on the applicable Red Hat ordering document. You acquire only the right to use the Ansible Tower Software and do not acquire any rights of ownership. Red Hat reserves all rights to the Ansible Tower Software not expressly granted to You. This License grant pertains solely to Your use of the Ansible Tower Software and is not intended to limit Your rights under, or grant You rights that supersede, the license terms of any software packages which may be made available with the Ansible Tower Software that are subject to an open source software license.
|
||||
|
||||
2. Intellectual Property Rights. Title to the Ansible Tower Software and each component, copy and modification, including all derivative works whether made by Red Hat, You or on Red Hat's behalf, including those made at Your suggestion and all associated intellectual property rights, are and shall remain the sole and exclusive property of Red Hat and/or it licensors. The License does not authorize You (nor may You allow any third party, specifically non-employees of Yours) to: (a) copy, distribute, reproduce, use or allow third party access to the Ansible Tower Software except as expressly authorized hereunder; (b) decompile, disassemble, reverse engineer, translate, modify, convert or apply any procedure or process to the Ansible Tower Software in order to ascertain, derive, and/or appropriate for any reason or purpose, including the Ansible Tower Software source code or source listings or any trade secret information or process contained in the Ansible Tower Software (except as permitted under applicable law); (c) execute or incorporate other software (except for approved software as appears in the Ansible Tower Software documentation or specifically approved by Red Hat in writing) into Ansible Tower Software, or create a derivative work of any part of the Ansible Tower Software; (d) remove any trademarks, trade names or titles, copyrights legends or any other proprietary marking on the Ansible Tower Software; (e) disclose the results of any benchmarking of the Ansible Tower Software (whether or not obtained with Red Hat’s assistance) to any third party; (f) attempt to circumvent any user limits or other license, timing or use restrictions that are built into, defined or agreed upon, regarding the Ansible Tower Software. You are hereby notified that the Ansible Tower Software may contain time-out devices, counter devices, and/or other devices intended to ensure the limits of the License will not be exceeded (“Limiting Devices”). If the Ansible Tower Software contains Limiting Devices, Red Hat will provide You materials necessary to use the Ansible Tower Software to the extent permitted. You may not tamper with or otherwise take any action to defeat or circumvent a Limiting Device or other control measure, including but not limited to, resetting the unit amount or using false host identification number for the purpose of extending any term of the License.
|
||||
|
||||
3. Evaluation Licenses. Unless You have purchased Ansible Tower Software Subscriptions from Red Hat or an authorized reseller under the terms of a commercial agreement with Red Hat, all use of the Ansible Tower Software shall be limited to testing purposes and not for production use (“Evaluation”). Unless otherwise agreed by Red Hat, Evaluation of the Ansible Tower Software shall be limited to an evaluation environment and the Ansible Tower Software shall not be used to manage any systems or virtual machines on networks being used in the operation of Your business or any other non-evaluation purpose. Unless otherwise agreed by Red Hat, You shall limit all Evaluation use to a single 30 day evaluation period and shall not download or otherwise obtain additional copies of the Ansible Tower Software or license keys for Evaluation.
|
||||
|
||||
4. Limited Warranty. Except as specifically stated in this Section 4, to the maximum extent permitted under applicable law, the Ansible Tower Software and the components are provided and licensed “as is” without warranty of any kind, expressed or implied, including the implied warranties of merchantability, non-infringement or fitness for a particular purpose. Red Hat warrants solely to You that the media on which the Ansible Tower Software may be furnished will be free from defects in materials and manufacture under normal use for a period of thirty (30) days from the date of delivery to You. Red Hat does not warrant that the functions contained in the Ansible Tower Software will meet Your requirements or that the operation of the Ansible Tower Software will be entirely error free, appear precisely as described in the accompanying documentation, or comply with regulatory requirements.
|
||||
|
||||
5. Limitation of Remedies and Liability. To the maximum extent permitted by applicable law, Your exclusive remedy under this EULA is to return any defective media within thirty (30) days of delivery along with a copy of Your payment receipt and Red Hat, at its option, will replace it or refund the money paid by You for the media. To the maximum extent permitted under applicable law, neither Red Hat nor any Red Hat authorized distributor will be liable to You for any incidental or consequential damages, including lost profits or lost savings arising out of the use or inability to use the Ansible Tower Software or any component, even if Red Hat or the authorized distributor has been advised of the possibility of such damages. In no event shall Red Hat's liability or an authorized distributor’s liability exceed the amount that You paid to Red Hat for the Ansible Tower Software during the twelve months preceding the first event giving rise to liability.
|
||||
|
||||
6. Export Control. In accordance with the laws of the United States and other countries, You represent and warrant that You: (a) understand that the Ansible Tower Software and its components may be subject to export controls under the U.S. Commerce Department’s Export Administration Regulations (“EAR”); (b) are not located in any country listed in Country Group E:1 in Supplement No. 1 to part 740 of the EAR; (c) will not export, re-export, or transfer the Ansible Tower Software to any prohibited destination or to any end user who has been prohibited from participating in US export transactions by any federal agency of the US government; (d) will not use or transfer the Ansible Tower Software for use in connection with the design, development or production of nuclear, chemical or biological weapons, or rocket systems, space launch vehicles, or sounding rockets or unmanned air vehicle systems; (e) understand and agree that if you are in the United States and you export or transfer the Ansible Tower Software to eligible end users, you will, to the extent required by EAR Section 740.17 obtain a license for such export or transfer and will submit semi-annual reports to the Commerce Department’s Bureau of Industry and Security, which include the name and address (including country) of each transferee; and (f) understand that countries including the United States may restrict the import, use, or export of encryption products (which may include the Ansible Tower Software) and agree that you shall be solely responsible for compliance with any such import, use, or export restrictions.
|
||||
|
||||
7. General. If any provision of this EULA is held to be unenforceable, that shall not affect the enforceability of the remaining provisions. This agreement shall be governed by the laws of the State of New York and of the United States, without regard to any conflict of laws provisions. The rights and obligations of the parties to this EULA shall not be governed by the United Nations Convention on the International Sale of Goods.
|
||||
|
||||
Copyright © 2015 Red Hat, Inc. All rights reserved. "Red Hat" and “Ansible Tower” are registered trademarks of Red Hat, Inc. All other trademarks are the property of their respective owners.
|
||||
45
DCO_1_1.md
Normal file
@@ -0,0 +1,45 @@
|
||||
DCO
|
||||
===
|
||||
|
||||
All contributors must use `git commit --signoff` for any
|
||||
commit to be merged, and agree that usage of --signoff constitutes
|
||||
agreement with the terms of DCO 1.1, which appears below:
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
1 Letterman Drive
|
||||
Suite D4700
|
||||
San Francisco, CA, 94129
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
456
INSTALL.md
@@ -1,2 +1,454 @@
|
||||
Installing AWX
|
||||
==============
|
||||
# Installing AWX
|
||||
|
||||
This document provides a guide for installing AWX.
|
||||
|
||||
## Table of contents
|
||||
|
||||
- [Getting started](#getting-started)
|
||||
- [Clone the repo](#clone-the-repo)
|
||||
- [AWX branding](#awx-branding)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [AWX Tunables](#awx-tunables)
|
||||
- [Choose a deployment platform](#choose-a-deployment-platform)
|
||||
- [Official vs Building Images](#official-vs-building-images)
|
||||
- [OpenShift](#openshift)
|
||||
  - [Prerequisites](#prerequisites-1)
|
||||
- [Deploying to Minishift](#deploying-to-minishift)
|
||||
- [Pre-build steps](#pre-build-steps)
|
||||
- [PostgreSQL](#postgresql)
|
||||
- [Start the build](#start-the-build)
|
||||
- [Post build](#post-build)
|
||||
- [Accessing AWX](#accessing-awx)
|
||||
- [Docker](#docker)
|
||||
- [Prerequisites](#prerequisites-2)
|
||||
- [Pre-build steps](#pre-build-steps-1)
|
||||
- [Deploying to a remote host](#deploying-to-a-remote-host)
|
||||
- [Inventory variables](#inventory-variables)
|
||||
- [Docker registry](#docker-registry)
|
||||
- [PostgreSQL](#postgresql-1)
|
||||
- [Proxy settings](#proxy-settings)
|
||||
- [Start the build](#start-the-build-1)
|
||||
- [Post build](#post-build-1)
|
||||
- [Accessing AWX](#accessing-awx-1)
|
||||
|
||||
## Getting started
|
||||
|
||||
### Clone the repo
|
||||
|
||||
If you have not already done so, you will need to clone, or create a local copy of, the [AWX repo](https://github.com/ansible/awx). For more on how to clone the repo, view [git clone help](https://git-scm.com/docs/git-clone).
|
||||
|
||||
Once you have a local copy, run commands within the root of the project tree.
|
||||
|
||||
### AWX branding
|
||||
|
||||
You can optionally install the AWX branding assets from the [awx-logos repo](https://github.com/ansible/awx-logos). Prior to installing, please review and agree to the [trademark guidelines](https://github.com/ansible/awx-logos/blob/master/TRADEMARKS.md).
|
||||
|
||||
To install the assets, clone the `awx-logos` repo so that it is next to your `awx` clone. As you progress through the installation steps, you'll be setting variables in the [inventory](./installer/inventory) file. To include the assets in the build, set `awx_official=true`.
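
As a rough sketch, the layout and setting described above could be prepared like this (directory names are whatever your clones produce by default; `awx_official=true` is the inventory setting mentioned above):

```bash
# Clone awx-logos next to your awx clone
$ git clone https://github.com/ansible/awx.git
$ git clone https://github.com/ansible/awx-logos.git

# Then, in awx/installer/inventory, uncomment or add:
# awx_official=true
```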
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before you can run a deployment, you'll need the following installed in your local environment:
|
||||
|
||||
- [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) version 2.3 or later
|
||||
- [Docker](https://docs.docker.com/engine/installation/)
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Git](https://git-scm.com/)
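
A quick way to sanity-check these prerequisites before starting is to query each tool (a minimal sketch; adjust for your environment):

```bash
$ ansible --version            # should report 2.3 or later
$ docker --version
$ python -c "import docker"    # no output means the docker-py module is importable
$ make --version
$ git --version
```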
|
||||
|
||||
### AWX Tunables
|
||||
|
||||
**TODO** add tunable bits
|
||||
|
||||
### Choose a deployment platform
|
||||
|
||||
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
|
||||
|
||||
The [installer](./installer) directory contains an [inventory](./installer/inventory) file, and a playbook, [install.yml](./installer/install.yml). You'll begin by setting variables in the inventory file according to the platform you wish to use, and then you'll start the image build and deployment process by running the playbook.
|
||||
|
||||
In the sections below, you'll find deployment details and instructions for each platform. To deploy to Docker, view the [Docker section](#docker), and for OpenShift, view the [OpenShift section](#openshift).
|
||||
|
||||
### Official vs Building Images
|
||||
|
||||
When installing AWX you have the option of building your own images or using the images provided on DockerHub (see [awx_web](https://hub.docker.com/r/ansible/awx_web/) and [awx_task](https://hub.docker.com/r/ansible/awx_task/))
|
||||
|
||||
This is controlled by the following variables in the `inventory` file
|
||||
|
||||
```
|
||||
dockerhub_base=ansible
|
||||
dockerhub_version=latest
|
||||
```
|
||||
|
||||
If these variables are present then all deployments will use these hosted images. If the variables are not present then the images will be built during the install.
|
||||
|
||||
*dockerhub_base*
|
||||
|
||||
> The base location on DockerHub where the images are hosted (by default this pulls container images named `ansible/awx_web:tag` and `ansible/awx_task:tag`)
|
||||
|
||||
*dockerhub_version*
|
||||
|
||||
> Multiple versions are provided. `latest` always pulls the most recent. You may also select version numbers at different granularities: 1, 1.0, 1.0.1, 1.0.0.123
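
For example, to pin the deployment to a specific published version rather than `latest`, the inventory lines might look like this (the tag shown is illustrative):

```bash
dockerhub_base=ansible
dockerhub_version=1.0.1
```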
|
||||
|
||||
## OpenShift
|
||||
|
||||
### Prerequisites
|
||||
|
||||
To complete a deployment to OpenShift, you will obviously need access to an OpenShift cluster. For demo and testing purposes, you can use [Minishift](https://github.com/minishift/minishift) to create a single node cluster running inside a virtual machine.
|
||||
|
||||
You will also need to have the `oc` command in your PATH. The `install.yml` playbook will call out to `oc` when logging into, and creating objects on, the cluster.
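
For instance, before running the playbook you might confirm the client is available and authenticate (the server URL and credentials shown are examples; substitute your own):

```bash
$ oc version
$ oc login https://$(minishift ip):8443 -u developer -p developer
```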
|
||||
|
||||
#### Deploying to Minishift
|
||||
|
||||
Install Minishift by following the [installation guide](https://docs.openshift.org/latest/minishift/getting-started/installing.html).
|
||||
|
||||
The Minishift VM contains a Docker daemon, which you can use to build the AWX images. This is the recommended approach. To use this instance, run the following command to set up your environment:
|
||||
|
||||
```bash
|
||||
# Point the Docker client at the Docker daemon inside the Minishift VM
|
||||
$ eval $(minishift docker-env)
|
||||
```
|
||||
|
||||
**Note**
|
||||
|
||||
> If you choose to not use the Docker instance running inside the VM, and build the images externally, you will have to enable the OpenShift cluster to access the images. This involves pushing the images to an external Docker registry, and granting the cluster access to it, or exposing the internal registry, and pushing the images into it.
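
If you do go the external-image route, one possible (illustrative) flow is to tag the locally built images and push them to a registry the cluster can reach (registry address and namespace are placeholders):

```bash
$ docker tag awx_web:latest registry.example.com:5000/awx/awx_web:latest
$ docker tag awx_task:latest registry.example.com:5000/awx/awx_task:latest
$ docker push registry.example.com:5000/awx/awx_web:latest
$ docker push registry.example.com:5000/awx/awx_task:latest
```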
|
||||
|
||||
### Pre-build steps
|
||||
|
||||
Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:
|
||||
|
||||
*openshift_host*
|
||||
|
||||
> IP address or hostname of the OpenShift cluster. If you're using Minishift, this will be the value returned by `minishift ip`.
|
||||
|
||||
*awx_openshift_project*
|
||||
|
||||
> Name of the OpenShift project that will be created, and used as the namespace for the AWX app. Defaults to *awx*.
|
||||
|
||||
*awx_node_port*
|
||||
|
||||
> The web server port running inside the AWX pod. Defaults to *30083*.
|
||||
|
||||
*openshift_user*
|
||||
|
||||
> Username of the OpenShift user that will create the project, and deploy the application. Defaults to *developer*.
|
||||
|
||||
*docker_registry*
|
||||
|
||||
> IP address and port, or URL, for accessing a registry that the OpenShift cluster can access. Defaults to *172.30.1.1:5000*, the internal registry delivered with Minishift. This is not needed if you are using official hosted images.
|
||||
|
||||
*docker_registry_repository*
|
||||
|
||||
> Namespace to use when pushing and pulling images to and from the registry. Generally this will match the project name. It defaults to *awx*. This is not needed if you are using official hosted images.
|
||||
|
||||
*docker_registry_username*
|
||||
|
||||
> Username of the user that will push images to the registry. Will generally match the *openshift_user* value. Defaults to *developer*. This is not needed if you are using official hosted images.
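
Pulled together, an `[all:vars]` excerpt for a Minishift-style deployment might look like the following (every value is illustrative; use the output of `minishift ip` and your own credentials):

```bash
openshift_host=192.168.64.2
awx_openshift_project=awx
awx_node_port=30083
openshift_user=developer
docker_registry=172.30.1.1:5000
docker_registry_repository=awx
docker_registry_username=developer
```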
|
||||
|
||||
#### PostgreSQL
|
||||
|
||||
AWX requires access to a PostgreSQL database, and by default, one will be created and deployed in a pod. The database is configured for persistence and will create a persistent volume claim named `postgresql`. By default it will claim 5GB from the available persistent volume pool. This can be tuned by setting a variable in the inventory file or on the command line during the `ansible-playbook` run.
|
||||
|
||||
    ansible-playbook ... -e pg_volume_capacity=n
|
||||
|
||||
If you wish to use an external database, set the value of `pg_hostname` in the inventory file, and update `pg_username`, `pg_password`, `pg_database`, and `pg_port` with the connection information. When `pg_hostname` is set, the installer assumes the database is already configured at that location and will not launch the PostgreSQL pod.
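
A hedged example of what those external-database settings might look like in the inventory (hostname and credentials are placeholders):

```bash
pg_hostname=postgres.example.com
pg_username=awx
pg_password=awxpass
pg_database=awx
pg_port=5432
```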
|
||||
|
||||
### Start the build
|
||||
|
||||
To start the build, you will pass two *extra* variables on the command line. The first is *openshift_password*, which is the password for the *openshift_user*, and the second is *docker_registry_password*, which is the password associated with *docker_registry_username*.
|
||||
|
||||
If you're using the OpenShift internal registry, then you'll pass an access token for the *docker_registry_password* value, rather than a password. The `oc whoami -t` command will generate the required token, as long as you're logged into the cluster via `oc login`.
|
||||
|
||||
To start the build and deployment, run the following (docker_registry_password is optional if using official images):
|
||||
|
||||
```bash
|
||||
# Start the build and deployment
|
||||
$ ansible-playbook -i inventory install.yml -e openshift_password=developer -e docker_registry_password=$(oc whoami -t)
|
||||
```
|
||||
|
||||
### Post build
|
||||
|
||||
After the playbook run completes, check the status of the deployment by running `oc get pods`:
|
||||
|
||||
```bash
|
||||
# View the running pods
|
||||
$ oc get pods
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
awx-3886581826-5mv0l 4/4 Running 0 8s
|
||||
postgresql-1-l85fh 1/1 Running 0 20m
|
||||
|
||||
```
|
||||
|
||||
In the above example, the name of the AWX pod is `awx-3886581826-5mv0l`. Before accessing the AWX web interface, setup tasks and database migrations need to complete. These tasks are running in the `awx_task` container inside the AWX pod. To monitor their status, tail the container's STDOUT by running the following command, replacing the AWX pod name with the pod name from your environment:
|
||||
|
||||
```bash
|
||||
# Follow the awx_task log output
|
||||
$ oc logs -f awx-3886581826-5mv0l -c awx-celery
|
||||
```
|
||||
|
||||
You will see the following indicating that database migrations are running:
|
||||
|
||||
```bash
|
||||
Using /etc/ansible/ansible.cfg as config file
|
||||
127.0.0.1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"db": "awx"
|
||||
}
|
||||
Operations to perform:
|
||||
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Synchronizing apps without migrations:
|
||||
Creating tables...
|
||||
Running deferred SQL...
|
||||
Installing custom SQL...
|
||||
Running migrations:
|
||||
Rendering model states... DONE
|
||||
Applying contenttypes.0001_initial... OK
|
||||
Applying contenttypes.0002_remove_content_type_name... OK
|
||||
Applying auth.0001_initial... OK
|
||||
Applying auth.0002_alter_permission_name_max_length... OK
|
||||
Applying auth.0003_alter_user_email_max_length... OK
|
||||
Applying auth.0004_alter_user_username_opts... OK
|
||||
Applying auth.0005_alter_user_last_login_null... OK
|
||||
Applying auth.0006_require_contenttypes_0002... OK
|
||||
Applying taggit.0001_initial... OK
|
||||
Applying taggit.0002_auto_20150616_2121... OK
|
||||
...
|
||||
```
|
||||
|
||||
When you see output similar to the following, you'll know that database migrations have completed, and you can access the web interface:
|
||||
|
||||
```bash
|
||||
Python 2.7.5 (default, Nov 6 2016, 00:28:07)
|
||||
[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux2
|
||||
Type "help", "copyright", "credits" or "license" for more information.
|
||||
(InteractiveConsole)
|
||||
|
||||
>>> <User: admin>
|
||||
>>> Default organization added.
|
||||
Demo Credential, Inventory, and Job Template added.
|
||||
Successfully registered instance awx-3886581826-5mv0l
|
||||
(changed: True)
|
||||
Creating instance group tower
|
||||
Added instance awx-3886581826-5mv0l to tower
|
||||
```
|
||||
|
||||
Once database migrations complete, the web interface will be accessible.
|
||||
|
||||
### Accessing AWX
|
||||
|
||||
The AWX web interface is running in the AWX pod, behind the `awx-web-svc` service. To view the service, and its port value, run the following command:
|
||||
|
||||
```bash
|
||||
# View available services
|
||||
$ oc get services
|
||||
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
awx-web-svc 172.30.111.74 <nodes> 8052:30083/TCP 37m
|
||||
postgresql 172.30.102.9 <none> 5432/TCP 38m
|
||||
```
|
||||
|
||||
The deployment process creates a route, `awx-web-svc`, to expose the service. How the ingress is actually created will vary depending on your environment, and how the cluster is configured. You can view the route, and the external IP address and hostname assigned to it, by running the following command:
|
||||
|
||||
```bash
|
||||
# View available routes
|
||||
$ oc get routes
|
||||
|
||||
NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
|
||||
awx-web-svc awx-web-svc-awx.192.168.64.2.nip.io awx-web-svc http edge/Allow None
|
||||
```
|
||||
|
||||
The above example is taken from a Minishift instance. From a web browser, use `https` to access the `HOST/PORT` value from your environment. Using the above example, the URL to access the server would be [https://awx-web-svc-awx.192.168.64.2.nip.io](https://awx-web-svc-awx.192.168.64.2.nip.io).
|
||||
|
||||
Once you access the AWX server, you will be prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
|
||||
|
||||
## Docker
|
||||
|
||||
### Prerequisites
|
||||
|
||||
You will need the following installed on the host where AWX will be deployed:
|
||||
|
||||
- [Docker](https://docs.docker.com/engine/installation/)
|
||||
- [docker-py](https://github.com/docker/docker-py) Python module
|
||||
|
||||
Note: After installing Docker, the Docker service must be started.
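
On a systemd-based host, for example, starting and enabling the service typically looks like this (assumes systemd; adjust for your distribution):

```bash
$ sudo systemctl start docker
$ sudo systemctl enable docker
```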
|
||||
|
||||
### Pre-build steps
|
||||
|
||||
#### Deploying to a remote host
|
||||
|
||||
By default, the delivered [installer/inventory](./installer/inventory) file will deploy AWX to the local host. It is possible, however, to deploy to a remote host. The [installer/install.yml](./installer/install.yml) playbook can be used to build images on the local host, ship the built images to a remote host, and run the deployment tasks there. To do this, modify the [installer/inventory](./installer/inventory) file by commenting out `localhost` and adding the remote host.
|
||||
|
||||
For example, suppose you wish to build images locally on your CI/CD host, and deploy them to a remote host named *awx-server*. To do this, add *awx-server* to the [installer/inventory](./installer/inventory) file, and comment out or remove `localhost`, as demonstrated by the following:
|
||||
|
||||
```yaml
|
||||
# localhost ansible_connection=local
|
||||
awx-server
|
||||
|
||||
[all:vars]
|
||||
...
|
||||
```
|
||||
|
||||
In the above example, image build tasks will be delegated to `localhost`, which is typically where the clone of the AWX project exists. Built images will be archived, copied to the remote host, and imported into the remote Docker image cache. Tasks to start the AWX containers will then execute on the remote host.
|
||||
|
||||
If you choose to use the official images, the remote host will pull them directly.
|
||||
|
||||
**Note**
|
||||
|
||||
> You may also want to set additional variables to control how Ansible connects to the host. For more information about this, view [Behavioral Inventory Parameters](http://docs.ansible.com/ansible/latest/intro_inventory.html#id12).
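
For example, an inventory host entry with a few behavioral parameters might look like this (address, user, and key path are illustrative):

```bash
# installer/inventory -- remote host with connection parameters
awx-server ansible_host=203.0.113.10 ansible_user=centos ansible_ssh_private_key_file=~/.ssh/id_rsa
```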
|
||||
|
||||
> The prerequisites listed in [Prerequisites](#prerequisites-2) above must also be installed on the remote host.
|
||||
|
||||
> When deploying to a remote host, the playbook does not execute tasks with the `become` option. For this reason, make sure the user that connects to the remote host has privileges to run the `docker` command. This typically means that non-privileged users need to be part of the `docker` group.
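
A common way to grant that access is to add the connecting user to the `docker` group on the remote host (the user name is illustrative; log out and back in for the change to take effect):

```bash
$ sudo usermod -aG docker centos
```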
|
||||
|
||||
|
||||
#### Inventory variables
|
||||
|
||||
Before starting the build process, review the [inventory](./installer/inventory) file, and uncomment and provide values for the following variables found in the `[all:vars]` section:
|
||||
|
||||
*postgres_data_dir*
|
||||
|
||||
> If you're using the default PostgreSQL container (see [PostgreSQL](#postgresql-1) below), provide a path that can be mounted to the container, and where the database can be persisted.
|
||||
|
||||
*host_port*
|
||||
|
||||
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.
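
For instance, an inventory excerpt for a Docker deployment might set these as follows (the data path is a placeholder):

```bash
postgres_data_dir=/var/lib/awx/pgdocker
host_port=80
```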
|
||||
|
||||
|
||||
#### Docker registry
|
||||
|
||||
If you wish to tag and push built images to a Docker registry, set the following variables in the inventory file:
|
||||
|
||||
*docker_registry*
|
||||
|
||||
> IP address and port, or URL, for accessing a registry.
|
||||
|
||||
*docker_registry_repository*
|
||||
|
||||
> Namespace to use when pushing and pulling images to and from the registry. Defaults to *awx*.
|
||||
|
||||
*docker_registry_username*
|
||||
|
||||
> Username of the user that will push images to the registry. Defaults to *developer*.
|
||||
|
||||
**Note**
|
||||
|
||||
> These settings are ignored if you are using the official images.
|
||||
|
||||
|
||||
#### Proxy settings
|
||||
|
||||
*http_proxy*
|
||||
|
||||
> IP address and port, or URL, for using an http_proxy.
|
||||
|
||||
*https_proxy*
|
||||
|
||||
> IP address and port, or URL, for using an https_proxy.
|
||||
|
||||
*no_proxy*
|
||||
|
||||
> Exclude IP address or URL from the proxy.
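
If your deployment host sits behind a proxy, the relevant inventory lines might look like this (proxy addresses are placeholders):

```bash
http_proxy=http://proxy.example.com:3128
https_proxy=http://proxy.example.com:3128
no_proxy=mycloud.example.com,localhost,127.0.0.1
```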
|
||||
|
||||
#### PostgreSQL
|
||||
|
||||
AWX requires access to a PostgreSQL database, and by default, one will be created and deployed in a container, and data will be persisted to a host volume. In this scenario, you must set the value of `postgres_data_dir` to a path that can be mounted to the container. When the container is stopped, the database files will still exist in the specified path.
|
||||
|
||||
If you wish to use an external database, in the inventory file, set the value of `pg_hostname`, and update `pg_username`, `pg_password`, `pg_database`, and `pg_port` with the connection information.
|
||||
|
||||
### Start the build
|
||||
|
||||
If you are not pushing images to a Docker registry, start the build by running the following:
|
||||
|
||||
```bash
|
||||
# Set the working directory to installer
|
||||
$ cd installer
|
||||
|
||||
# Run the Ansible playbook
|
||||
$ ansible-playbook -i inventory install.yml
|
||||
```
|
||||
|
||||
If you're pushing built images to a repository, then use the `-e` option to pass the registry password as follows, replacing *password* with the password of the username assigned to `docker_registry_username` (note that you will also need to remove `dockerhub_base` and `dockerhub_version` from the inventory file):
|
||||
|
||||
```bash
|
||||
# Set the working directory to installer
|
||||
$ cd installer
|
||||
|
||||
# Run the Ansible playbook
|
||||
$ ansible-playbook -i inventory -e docker_registry_password=password install.yml
|
||||
```
|
||||
|
||||
### Post build
|
||||
|
||||
After the playbook run completes, Docker will report up to 5 running containers. If you chose to use an existing PostgreSQL database, it will report 4. You can view the running containers using the `docker ps` command, as follows:
|
||||
|
||||
```bash
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
e240ed8209cd awx_task:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 8052/tcp awx_task
|
||||
1cfd02601690 awx_web:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 0.0.0.0:80->8052/tcp awx_web
|
||||
55a552142bcd memcached:alpine "docker-entrypoint..." 2 minutes ago Up 2 minutes 11211/tcp memcached
|
||||
84011c072aad rabbitmq:3 "docker-entrypoint..." 2 minutes ago Up 2 minutes 4369/tcp, 5671-5672/tcp, 25672/tcp rabbitmq
|
||||
97e196120ab3 postgres:9.6 "docker-entrypoint..." 2 minutes ago Up 2 minutes 5432/tcp postgres
|
||||
```
|
||||
|
||||
Immediately after the containers start, the *awx_task* container will perform required setup tasks, including database migrations. These tasks need to complete before the web interface can be accessed. To monitor the progress, you can follow the container's STDOUT by running the following:
|
||||
|
||||
```bash
|
||||
# Tail the awx_task log
|
||||
$ docker logs -f awx_task
|
||||
```
|
||||
|
||||
You will see output similar to the following:
|
||||
|
||||
```bash
|
||||
Using /etc/ansible/ansible.cfg as config file
|
||||
127.0.0.1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"db": "awx"
|
||||
}
|
||||
Operations to perform:
|
||||
Synchronize unmigrated apps: solo, api, staticfiles, messages, channels, django_extensions, ui, rest_framework, polymorphic
|
||||
Apply all migrations: sso, taggit, sessions, djcelery, sites, kombu_transport_django, social_auth, contenttypes, auth, conf, main
|
||||
Synchronizing apps without migrations:
|
||||
Creating tables...
|
||||
Running deferred SQL...
|
||||
Installing custom SQL...
|
||||
Running migrations:
|
||||
Rendering model states... DONE
|
||||
Applying contenttypes.0001_initial... OK
|
||||
Applying contenttypes.0002_remove_content_type_name... OK
|
||||
Applying auth.0001_initial... OK
|
||||
Applying auth.0002_alter_permission_name_max_length... OK
|
||||
Applying auth.0003_alter_user_email_max_length... OK
|
||||
Applying auth.0004_alter_user_username_opts... OK
|
||||
Applying auth.0005_alter_user_last_login_null... OK
|
||||
Applying auth.0006_require_contenttypes_0002... OK
|
||||
Applying taggit.0001_initial... OK
|
||||
Applying taggit.0002_auto_20150616_2121... OK
|
||||
Applying main.0001_initial... OK
|
||||
...
|
||||
```
|
||||
|
||||
Once migrations complete, you will see log output similar to the following:
|
||||
|
||||
```bash
|
||||
Python 2.7.5 (default, Nov 6 2016, 00:28:07)
|
||||
[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux2
|
||||
Type "help", "copyright", "credits" or "license" for more information.
|
||||
(InteractiveConsole)
|
||||
|
||||
>>> <User: admin>
|
||||
>>> Default organization added.
|
||||
Demo Credential, Inventory, and Job Template added.
|
||||
Successfully registered instance awx
|
||||
(changed: True)
|
||||
Creating instance group tower
|
||||
Added instance awx to tower
|
||||
(changed: True)
|
||||
...
|
||||
```
|
||||
|
||||
### Accessing AWX
|
||||
|
||||
The AWX web server is accessible on the deployment host, using the *host_port* value set in the *inventory* file. The default URL is [http://localhost](http://localhost).
|
||||
|
||||
You will be prompted with a login dialog. The default administrator username is `admin`, and the password is `password`.
|
||||
|
||||
87
ISSUES.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# Issues
|
||||
|
||||
## Reporting
|
||||
|
||||
Use the GitHub [issue tracker](https://github.com/ansible/awx/issues) for filing bugs. In order to save time and help us respond to issues quickly, make sure to fill out as much of the issue template
|
||||
as possible. Version information and an accurate reproduction scenario are critical to helping us identify the problem.
|
||||
|
||||
Please don't use the issue tracker as a way to ask how to do something. Instead, use the [mailing list](https://groups.google.com/forum/#!forum/awx-project) or the `#ansible-awx` channel on irc.freenode.net to get help.
|
||||
|
||||
Before opening a new issue, please use the issue search feature to see if what you're experiencing has already been reported. If you have any extra detail to provide, please comment. Otherwise, rather than posting a "me too" comment, please consider giving it a ["thumbs up"](https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comment) to give us an indication of the severity of the problem.
|
||||
|
||||
### UI Issues
|
||||
|
||||
When reporting issues for the UI, we also appreciate having screenshots and any error messages from the web browser's console. It's not unusual for browser extensions
|
||||
and plugins to cause problems. Reporting those will also help speed up analyzing and resolving UI bugs.
|
||||
|
||||
### API and backend issues
|
||||
|
||||
For the API and backend services, please capture all of the logs that you can from the time the problem occurred.
|
||||
|
||||
## How issues are resolved
|
||||
|
||||
We triage our issues into high, medium, and low priority, and tag them with the relevant component (e.g. api, ui, installer). We typically focus on higher priority issues first. There aren't hard and fast rules for determining the severity of an issue, but generally high priority issues have an increased likelihood of breaking existing functionality, and negatively impacting a large number of users.
|
||||
|
||||
If your issue isn't considered high priority, then please be patient as it may take some time to get to it.
|
||||
|
||||
|
||||
### Issue states
|
||||
|
||||
`state:needs_triage` This issue has not been looked at by a person yet and still needs to be triaged. This is the initial state for all new issues/pull requests.
|
||||
|
||||
`state:needs_info` The issue needs more information. This could be more debug output, more specifics about the system such as version information, or any other detail that is currently preventing the issue from moving forward. This should be considered a blocked state.
|
||||
|
||||
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is an open question for another maintainer, or when a person is less familiar with the area of the code base the issue concerns.
|
||||
|
||||
`state:needs_revision` More commonly used on pull requests, this state represents that there are changes that are being waited on.
|
||||
|
||||
`state:in_progress` The issue is actively being worked on, and you should be in contact with whoever is assigned if you are also working on, or plan to work on, a similar issue.
|
||||
|
||||
`state:in_testing` The issue or pull request is currently being tested.
|
||||
|
||||
|
||||
### AWX Issue Bot (awxbot)
|
||||
We use an issue bot to help us label and organize incoming issues. This bot, awxbot, is a version of [ansible/ansibullbot](https://github.com/ansible/ansibullbot).
|
||||
|
||||
#### Overview
|
||||
|
||||
awxbot performs several functions:
|
||||
|
||||
* Respond quickly to issues and pull requests.
|
||||
* Identify the maintainers responsible for reviewing pull requests.
|
||||
* Identify issues and pull request types and components (e.g. type:bug, component: api)
|
||||
|
||||
#### For issue submitters
|
||||
|
||||
The bot requires a minimal subset of information from the issue template:
|
||||
|
||||
* issue type
|
||||
* component
|
||||
* summary
|
||||
|
||||
If any of those items are missing, your issue will still get the `needs_triage` label, but it may be responded to more slowly than issues that include the complete set of information.
|
||||
So please use the template whenever possible.
|
||||
|
||||
Currently you can expect the bot to add common labels such as `state:needs_triage`, `type:bug`, `type:enhancement`, `component:ui`, etc...
|
||||
These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
|
||||
|
||||
The `state:needs_triage` label will remain on your issue until a person has looked at it.
|
||||
|
||||
#### For pull request submitters
|
||||
|
||||
The bot requires a minimal subset of information from the pull request template:
|
||||
|
||||
* issue type
|
||||
* component
|
||||
* summary
|
||||
|
||||
If any of those items are missing, your pull request will still get the `needs_triage` label, but it may be responded to more slowly than pull requests that include the complete set of information.
|
||||
|
||||
Currently you can expect awxbot to add common labels such as `state:needs_triage`, `type:bug`, `component:docs`, etc...
|
||||
These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
|
||||
|
||||
The `state:needs_triage` label will remain on your pull request until a person has looked at it.
|
||||
|
||||
You can also expect the bot to CC maintainers of specific areas of the code by placing a comment on the pull request, which notifies them that it needs their attention.
|
||||
The comment will look something like `CC @matburt @wwitzel3 ...`.
|
||||
|
||||
168
LICENSE.md
Normal file
@@ -0,0 +1,168 @@
|
||||
Apache License
|
||||
==============
|
||||
|
||||
_Version 2.0, January 2004_
|
||||
_<<http://www.apache.org/licenses/>>_
|
||||
|
||||
### Terms and Conditions for use, reproduction, and distribution
|
||||
|
||||
#### 1. Definitions
|
||||
|
||||
“License” shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
“Licensor” shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
“Legal Entity” shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, “control” means **(i)** the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or **(iii)** beneficial ownership of such entity.
|
||||
|
||||
“You” (or “Your”) shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
“Source” form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
“Object” form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
“Work” shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
“Derivative Works” shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
“Contribution” shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
“submitted” means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as “Not a Contribution.”
|
||||
|
||||
“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
#### 2. Grant of Copyright License
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
#### 3. Grant of Patent License
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
#### 4. Redistribution
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
* **(b)** You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
#### 5. Submission of Contributions
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
#### 6. Trademarks
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
#### 7. Disclaimer of Warranty
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
#### 8. Limitation of Liability
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
#### 9. Accepting Warranty or Additional Liability
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
17
Makefile
@@ -77,7 +77,7 @@ I18N_FLAG_FILE = .i18n_built
|
||||
receiver test test_unit test_ansible test_coverage coverage_html \
|
||||
dev_build release_build release_clean sdist \
|
||||
ui-docker-machine ui-docker ui-release ui-devel \
|
||||
ui-test ui-deps ui-test-ci ui-test-saucelabs VERSION
|
||||
ui-test ui-deps ui-test-ci VERSION
|
||||
|
||||
# remove ui build artifacts
|
||||
clean-ui:
|
||||
@@ -362,7 +362,7 @@ pylint: reports
|
||||
|
||||
check: flake8 pep8 # pyflakes pylint
|
||||
|
||||
TEST_DIRS ?= awx/main/tests awx/conf/tests awx/sso/tests
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
# Run all API unit tests.
|
||||
test: test_ansible
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -502,9 +502,6 @@ testjs_ci:
|
||||
jshint: $(UI_DEPS_FLAG_FILE)
|
||||
$(NPM_BIN) run --prefix awx/ui jshint
|
||||
|
||||
ui-test-saucelabs: $(UI_DEPS_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui run test:saucelabs
|
||||
|
||||
# END UI TASKS
|
||||
# --------------------------------------
|
||||
|
||||
@@ -606,11 +603,5 @@ psql-container:
|
||||
docker run -it --net tools_default --rm postgres:9.4.1 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
|
||||
|
||||
VERSION:
|
||||
echo $(VERSION_TARGET) > $@
|
||||
|
||||
production-openshift-image: sdist
|
||||
cat installer/openshift/Dockerfile | sed "s/{{ version }}/$(VERSION_TARGET)/g" | sed "s/{{ tar }}/$(SDIST_TAR_FILE)/g" > ./Dockerfile.production
|
||||
cp installer/openshift/Dockerfile.celery ./Dockerfile.celery.production
|
||||
docker build -t awx_web -f ./Dockerfile.production .
|
||||
docker build -t awx_task -f ./Dockerfile.celery.production .
|
||||
|
||||
@echo $(VERSION_TARGET) > $@
|
||||
@echo "awx: $(VERSION_TARGET)"
|
||||
|
||||
48
README.md
@@ -1,16 +1,46 @@
|
||||
[](https://requires.io/github/ansible/awx/requirements/?branch=devel)
|
||||
[](https://app.shippable.com/github/ansible/awx)
|
||||
|
||||
AWX
|
||||
=============
|
||||
===
|
||||
|
||||
AWX provides a web-based user interface, REST API and task engine built on top of
|
||||
Ansible.
|
||||
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is the upstream project for [Tower](https://www.ansible.com/tower), a commercial derivative of AWX.
|
||||
|
||||
Resources
|
||||
---------
|
||||
To install AWX, please view the [Install guide](./INSTALL.md).
|
||||
|
||||
Refer to `CONTRIBUTING.md` to get started developing, testing and building AWX.
|
||||
To learn more about using AWX, and Tower, view the [Tower docs site](http://docs.ansible.com/ansible-tower/index.html).
|
||||
|
||||
Refer to `INSTALL.md` to get started deploying AWX.
|
||||
The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
- Refer to the [Contributing guide](./CONTRIBUTING.md) to get started developing, testing, and building AWX.
|
||||
- All code submissions are done through pull requests against the `devel` branch.
|
||||
- All contributors must use git commit --signoff for any commit to be merged, and agree that usage of --signoff constitutes agreement with the terms of [DCO 1.1](./DCO_1_1.md)
|
||||
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
|
||||
- If submitting a large code change, it's a good idea to join the `#ansible-awx` channel on irc.freenode.net, and talk about what you would like to do or add first. This not only helps everyone know what's going on, it also helps save time and effort, if the community decides some changes are needed.
|
||||
|
||||
Reporting Issues
|
||||
----------------
|
||||
|
||||
If you're experiencing a problem, we encourage you to open an issue, and share your feedback. But before opening a new issue, we ask that you please take a look at our [Issues guide](./ISSUES.md).
|
||||
|
||||
Code of Conduct
|
||||
---------------
|
||||
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
Get Involved
|
||||
------------
|
||||
|
||||
We welcome your feedback and ideas. Here's how to reach us:
|
||||
|
||||
- Join the `#ansible-awx` channel on irc.freenode.net
|
||||
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
||||
- [Open an Issue](https://github.com/ansible/awx/issues)
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
[Apache v2](./LICENSE.md)
|
||||
|
||||
Refer to `LOCALIZATION.md` for translation and localization help.
|
||||
|
||||
@@ -89,7 +89,7 @@ class Metadata(metadata.SimpleMetadata):
|
||||
# Special handling of inventory source_region choices that vary based on
|
||||
# selected inventory source.
|
||||
if field.field_name == 'source_regions':
|
||||
for cp in ('azure', 'ec2', 'gce'):
|
||||
for cp in ('azure_rm', 'ec2', 'gce'):
|
||||
get_regions = getattr(InventorySource, 'get_%s_region_choices' % cp)
|
||||
field_info['%s_region_choices' % cp] = get_regions()
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ class ModelAccessPermission(permissions.BasePermission):
|
||||
|
||||
def check_get_permissions(self, request, view, obj=None):
|
||||
if hasattr(view, 'parent_model'):
|
||||
parent_obj = get_object_or_400(view.parent_model, pk=view.kwargs['pk'])
|
||||
parent_obj = view.get_parent_object()
|
||||
if not check_user_access(request.user, view.parent_model, 'read',
|
||||
parent_obj):
|
||||
return False
|
||||
@@ -44,12 +44,12 @@ class ModelAccessPermission(permissions.BasePermission):
|
||||
|
||||
def check_post_permissions(self, request, view, obj=None):
|
||||
if hasattr(view, 'parent_model'):
|
||||
parent_obj = get_object_or_400(view.parent_model, pk=view.kwargs['pk'])
|
||||
parent_obj = view.get_parent_object()
|
||||
if not check_user_access(request.user, view.parent_model, 'read',
|
||||
parent_obj):
|
||||
return False
|
||||
if hasattr(view, 'parent_key'):
|
||||
if not check_user_access(request.user, view.model, 'add', {view.parent_key: parent_obj.pk}):
|
||||
if not check_user_access(request.user, view.model, 'add', {view.parent_key: parent_obj}):
|
||||
return False
|
||||
return True
|
||||
elif getattr(view, 'is_job_start', False):
|
||||
|
||||
@@ -48,7 +48,8 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
|
||||
obj = getattr(view, 'object', None)
|
||||
if obj is None and hasattr(view, 'get_object') and hasattr(view, 'retrieve'):
|
||||
try:
|
||||
obj = view.get_object()
|
||||
view.object = view.get_object()
|
||||
obj = view.object
|
||||
except Exception:
|
||||
obj = None
|
||||
with override_method(view, request, method) as request:
|
||||
|
||||
@@ -38,7 +38,7 @@ from rest_framework.utils.serializer_helpers import ReturnList
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
# AWX
|
||||
from awx.main.constants import SCHEDULEABLE_PROVIDERS
|
||||
from awx.main.constants import SCHEDULEABLE_PROVIDERS, ANSI_SGR_PATTERN
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.access import get_user_capabilities
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
@@ -89,8 +89,8 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'),
|
||||
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud'),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
|
||||
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed'),
|
||||
'job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
|
||||
@@ -343,6 +343,8 @@ class BaseSerializer(serializers.ModelSerializer):
|
||||
continue
|
||||
summary_fields[fk] = OrderedDict()
|
||||
for field in related_fields:
|
||||
if field == 'credential_type_id' and fk == 'credential' and self.version < 2: # TODO: remove version check in 3.3
|
||||
continue
|
||||
|
||||
fval = getattr(fkval, field, None)
|
||||
|
||||
@@ -779,10 +781,10 @@ class UserSerializer(BaseSerializer):
|
||||
'username', 'first_name', 'last_name',
|
||||
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'external_account')
|
||||
|
||||
def to_representation(self, obj):
|
||||
def to_representation(self, obj): # TODO: Remove in 3.3
|
||||
ret = super(UserSerializer, self).to_representation(obj)
|
||||
ret.pop('password', None)
|
||||
if obj:
|
||||
if obj and type(self) is UserSerializer or self.version == 1:
|
||||
ret['auth'] = obj.social_auth.values('provider', 'uid')
|
||||
return ret
|
||||
|
||||
@@ -1171,24 +1173,41 @@ class InventorySerializer(BaseSerializerWithVariables):
|
||||
ret['organization'] = None
|
||||
return ret
|
||||
|
||||
def validate_host_filter(self, host_filter):
|
||||
if host_filter:
|
||||
try:
|
||||
SmartFilter().query_from_string(host_filter)
|
||||
except RuntimeError, e:
|
||||
raise models.base.ValidationError(e)
|
||||
return host_filter
|
||||
|
||||
def validate(self, attrs):
|
||||
kind = attrs.get('kind', 'standard')
|
||||
if kind == 'smart':
|
||||
host_filter = attrs.get('host_filter')
|
||||
if host_filter is not None:
|
||||
try:
|
||||
SmartFilter().query_from_string(host_filter)
|
||||
except RuntimeError, e:
|
||||
raise models.base.ValidationError(e)
|
||||
kind = None
|
||||
if 'kind' in attrs:
|
||||
kind = attrs['kind']
|
||||
elif self.instance:
|
||||
kind = self.instance.kind
|
||||
|
||||
host_filter = None
|
||||
if 'host_filter' in attrs:
|
||||
host_filter = attrs['host_filter']
|
||||
elif self.instance:
|
||||
host_filter = self.instance.host_filter
|
||||
|
||||
if kind == 'smart' and not host_filter:
|
||||
raise serializers.ValidationError({'host_filter': _(
|
||||
'Smart inventories must specify host_filter')})
|
||||
return super(InventorySerializer, self).validate(attrs)
|
||||
|
||||
|
||||
# TODO: Remove entire serializer in 3.3, replace with normal serializer
|
||||
class InventoryDetailSerializer(InventorySerializer):
|
||||
|
||||
class Meta:
|
||||
fields = ('*', 'can_run_ad_hoc_commands')
|
||||
|
||||
can_run_ad_hoc_commands = serializers.SerializerMethodField()
|
||||
def get_fields(self):
|
||||
fields = super(InventoryDetailSerializer, self).get_fields()
|
||||
if self.version == 1:
|
||||
fields['can_run_ad_hoc_commands'] = serializers.SerializerMethodField()
|
||||
return fields
|
||||
|
||||
def get_can_run_ad_hoc_commands(self, obj):
|
||||
view = self.context.get('view', None)
|
||||
@@ -1551,11 +1570,11 @@ class InventorySourceOptionsSerializer(BaseSerializer):
|
||||
errors['inventory'] = _("Must provide an inventory.")
|
||||
else:
|
||||
dest_inventory = self.instance.inventory
|
||||
if source_script.organization != dest_inventory.organization:
|
||||
if dest_inventory and source_script.organization != dest_inventory.organization:
|
||||
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
|
||||
except Exception as exc:
|
||||
except Exception:
|
||||
errors['source_script'] = _("'source_script' doesn't exist.")
|
||||
logger.error(str(exc))
|
||||
logger.exception('Problem processing source_script validation.')
|
||||
|
||||
if errors:
|
||||
raise serializers.ValidationError(errors)
|
||||
@@ -1670,7 +1689,7 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
return value
|
||||
|
||||
def validate_inventory(self, value):
|
||||
if value.kind == 'smart':
|
||||
if value and value.kind == 'smart':
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
|
||||
return value
|
||||
|
||||
@@ -2141,6 +2160,14 @@ class CredentialSerializer(BaseSerializer):
|
||||
return value
|
||||
return super(CredentialSerializer, self).to_internal_value(data)
|
||||
|
||||
def validate_credential_type(self, credential_type):
|
||||
if self.instance and credential_type.pk != self.instance.credential_type.pk:
|
||||
raise ValidationError(
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality'
|
||||
' of the resources using it.'),
|
||||
)
|
||||
return credential_type
|
||||
|
||||
|
||||
class CredentialSerializerCreate(CredentialSerializer):
|
||||
|
||||
@@ -2307,8 +2334,13 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
|
||||
if obj.vault_credential:
|
||||
res['vault_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.vault_credential.pk})
|
||||
if self.version > 1:
|
||||
view = 'api:%s_extra_credentials_list' % camelcase_to_underscore(obj.__class__.__name__)
|
||||
res['extra_credentials'] = self.reverse(view, kwargs={'pk': obj.pk})
|
||||
if isinstance(obj, UnifiedJobTemplate):
|
||||
res['extra_credentials'] = self.reverse(
|
||||
'api:job_template_extra_credentials_list',
|
||||
kwargs={'pk': obj.pk}
|
||||
)
|
||||
elif isinstance(obj, UnifiedJob):
|
||||
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
|
||||
else:
|
||||
cloud_cred = obj.cloud_credential
|
||||
if cloud_cred:
|
||||
@@ -2490,6 +2522,22 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
    def validate_extra_vars(self, value):
        return vars_validate_or_raise(value)

    def get_summary_fields(self, obj):
        summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
        if 'pk' in self.context['view'].kwargs and self.version > 1: # TODO: remove version check in 3.3
            extra_creds = []
            for cred in obj.extra_credentials.all():
                extra_creds.append({
                    'id': cred.pk,
                    'name': cred.name,
                    'description': cred.description,
                    'kind': cred.kind,
                    'credential_type_id': cred.credential_type_id
                })
            summary_fields['extra_credentials'] = extra_creds
        return summary_fields


class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):

@@ -2525,7 +2573,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
        if obj.job_template:
            res['job_template'] = self.reverse('api:job_template_detail',
                                               kwargs={'pk': obj.job_template.pk})
        if obj.can_start or True:
        if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
            res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
        if obj.can_cancel or True:
            res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
@@ -2577,6 +2625,21 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
            ret['extra_vars'] = obj.display_extra_vars()
        return ret

    def get_summary_fields(self, obj):
        summary_fields = super(JobSerializer, self).get_summary_fields(obj)
        if 'pk' in self.context['view'].kwargs and self.version > 1: # TODO: remove version check in 3.3
            extra_creds = []
            for cred in obj.extra_credentials.all():
                extra_creds.append({
                    'id': cred.pk,
                    'name': cred.name,
                    'description': cred.description,
                    'kind': cred.kind,
                    'credential_type_id': cred.credential_type_id
                })
            summary_fields['extra_credentials'] = extra_creds
        return summary_fields


class JobCancelSerializer(JobSerializer):

@@ -2630,7 +2693,7 @@ class JobRelaunchSerializer(JobSerializer):
            raise serializers.ValidationError(dict(credential=[_("Credential not found or deleted.")]))
        if obj.project is None:
            raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
        if obj.inventory is None:
        if obj.inventory is None or obj.inventory.pending_deletion:
            raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
        attrs = super(JobRelaunchSerializer, self).validate(attrs)
        return attrs
@@ -3064,6 +3127,14 @@ class JobEventSerializer(BaseSerializer):
        max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
        if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
            ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
            set_count = 0
            reset_count = 0
            for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
                if m.string[m.start():m.end()] == u'\u001b[0m':
                    reset_count += 1
                else:
                    set_count += 1
            ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
        return ret

@@ -3095,6 +3166,14 @@ class AdHocCommandEventSerializer(BaseSerializer):
        max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
        if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
            ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
            set_count = 0
            reset_count = 0
            for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
                if m.string[m.start():m.end()] == u'\u001b[0m':
                    reset_count += 1
                else:
                    set_count += 1
            ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
        return ret

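Both event serializer hunks above make the same change. A minimal standalone sketch of the pattern (the regex and helper below are illustrative assumptions, not the project's ANSI_SGR_PATTERN or serializer code): when colored stdout is truncated to a display limit, a color "set" escape can be cut off from its matching reset, so everything rendered after it keeps the color; counting set versus reset codes and appending the missing resets closes any dangling styles.

import re

# Simplified stand-in for the SGR pattern the serializers use.
ANSI_SGR = re.compile(r'\x1b\[[0-9;]*m')

def truncate_ansi(stdout, max_bytes):
    # Truncate to the display limit, then balance unclosed SGR sequences.
    if max_bytes > 0 and len(stdout) >= max_bytes:
        stdout = stdout[:max_bytes - 1] + u'\u2026'
        set_count = reset_count = 0
        for m in ANSI_SGR.finditer(stdout):
            if m.group(0) == u'\u001b[0m':
                reset_count += 1
            else:
                set_count += 1
        stdout += u'\u001b[0m' * (set_count - reset_count)
    return stdout

# Example: the red "set" escape survives truncation, so one reset is appended.
print(repr(truncate_ansi(u'\u001b[31mred output that keeps going', 10)))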
@@ -3564,6 +3643,7 @@ class InstanceSerializer(BaseSerializer):

class InstanceGroupSerializer(BaseSerializer):

    consumed_capacity = serializers.SerializerMethodField()
    percent_capacity_remaining = serializers.SerializerMethodField()
    jobs_running = serializers.SerializerMethodField()
    instances = serializers.SerializerMethodField()
@@ -3581,17 +3661,37 @@ class InstanceGroupSerializer(BaseSerializer):
            res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
        return res

    def get_jobs_qs(self):
        # Store running jobs queryset in context, so it will be shared in ListView
        if 'running_jobs' not in self.context:
            self.context['running_jobs'] = UnifiedJob.objects.filter(
                status__in=('running', 'waiting'))
        return self.context['running_jobs']

    def get_capacity_dict(self):
        # Store capacity values (globally computed) in the context
        if 'capacity_map' not in self.context:
            ig_qs = None
            if self.parent: # Is ListView:
                ig_qs = self.parent.instance
            self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
                qs=ig_qs, tasks=self.get_jobs_qs(), breakdown=True)
        return self.context['capacity_map']

    def get_consumed_capacity(self, obj):
        return obj.consumed_capacity
        return self.get_capacity_dict()[obj.name]['consumed_capacity']

    def get_percent_capacity_remaining(self, obj):
        if not obj.capacity or obj.consumed_capacity == obj.capacity:
        if not obj.capacity:
            return 0.0
        else:
            return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
            return float("{0:.2f}".format(
                ((float(obj.capacity) - float(self.get_consumed_capacity(obj))) / (float(obj.capacity))) * 100)
            )

    def get_jobs_running(self, obj):
        return UnifiedJob.objects.filter(instance_group=obj, status__in=('running', 'waiting',)).count()
        jobs_qs = self.get_jobs_qs()
        return sum(1 for job in jobs_qs if job.instance_group_id == obj.id)

    def get_instances(self, obj):
        return obj.instances.count()

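The InstanceGroupSerializer hunk above replaces per-row database queries with values computed once and stashed in the serializer context, so a list view does the expensive work a single time. A small self-contained sketch of that pattern, assuming a plain callable as the expensive lookup rather than the AWX models and managers:

from rest_framework import serializers

class GroupSerializer(serializers.Serializer):
    jobs_running = serializers.SerializerMethodField()

    def _running_jobs(self):
        # The context dict is shared with the parent ListSerializer, so the
        # (assumed) expensive lookup runs once per request, not once per row.
        if 'running_jobs' not in self.context:
            self.context['running_jobs'] = self.context['fetch_running_jobs']()
        return self.context['running_jobs']

    def get_jobs_running(self, obj):
        return sum(1 for job in self._running_jobs() if job['group_id'] == obj['id'])

def fetch_running_jobs():
    # Hypothetical expensive query, called exactly once per serialization.
    return [{'group_id': 1}, {'group_id': 1}, {'group_id': 2}]

groups = [{'id': 1}, {'id': 2}]
print(GroupSerializer(groups, many=True,
                      context={'fetch_running_jobs': fetch_running_jobs}).data)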
@@ -1,19 +0,0 @@
ANSIBLE TOWER BY RED HAT END USER LICENSE AGREEMENT
This end user license agreement (“EULA”) governs the use of the Ansible Tower software and any related updates, upgrades, versions, appearance, structure and organization (the “Ansible Tower Software”), regardless of the delivery mechanism.
1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. and its affiliates (“Red Hat”) grant to you (“You”) a non-transferable, non-exclusive, worldwide, non-sublicensable, limited, revocable license to use the Ansible Tower Software for the term of the associated Red Hat Software Subscription(s) and in a quantity equal to the number of Red Hat Software Subscriptions purchased from Red Hat for the Ansible Tower Software (“License”), each as set forth on the applicable Red Hat ordering document. You acquire only the right to use the Ansible Tower Software and do not acquire any rights of ownership. Red Hat reserves all rights to the Ansible Tower Software not expressly granted to You. This License grant pertains solely to Your use of the Ansible Tower Software and is not intended to limit Your rights under, or grant You rights that supersede, the license terms of any software packages which may be made available with the Ansible Tower Software that are subject to an open source software license.
2. Intellectual Property Rights. Title to the Ansible Tower Software and each component, copy and modification, including all derivative works whether made by Red Hat, You or on Red Hat's behalf, including those made at Your suggestion and all associated intellectual property rights, are and shall remain the sole and exclusive property of Red Hat and/or it licensors. The License does not authorize You (nor may You allow any third party, specifically non-employees of Yours) to: (a) copy, distribute, reproduce, use or allow third party access to the Ansible Tower Software except as expressly authorized hereunder; (b) decompile, disassemble, reverse engineer, translate, modify, convert or apply any procedure or process to the Ansible Tower Software in order to ascertain, derive, and/or appropriate for any reason or purpose, including the Ansible Tower Software source code or source listings or any trade secret information or process contained in the Ansible Tower Software (except as permitted under applicable law); (c) execute or incorporate other software (except for approved software as appears in the Ansible Tower Software documentation or specifically approved by Red Hat in writing) into Ansible Tower Software, or create a derivative work of any part of the Ansible Tower Software; (d) remove any trademarks, trade names or titles, copyrights legends or any other proprietary marking on the Ansible Tower Software; (e) disclose the results of any benchmarking of the Ansible Tower Software (whether or not obtained with Red Hat’s assistance) to any third party; (f) attempt to circumvent any user limits or other license, timing or use restrictions that are built into, defined or agreed upon, regarding the Ansible Tower Software. You are hereby notified that the Ansible Tower Software may contain time-out devices, counter devices, and/or other devices intended to ensure the limits of the License will not be exceeded (“Limiting Devices”). If the Ansible Tower Software contains Limiting Devices, Red Hat will provide You materials necessary to use the Ansible Tower Software to the extent permitted. You may not tamper with or otherwise take any action to defeat or circumvent a Limiting Device or other control measure, including but not limited to, resetting the unit amount or using false host identification number for the purpose of extending any term of the License.
3. Evaluation Licenses. Unless You have purchased Ansible Tower Software Subscriptions from Red Hat or an authorized reseller under the terms of a commercial agreement with Red Hat, all use of the Ansible Tower Software shall be limited to testing purposes and not for production use (“Evaluation”). Unless otherwise agreed by Red Hat, Evaluation of the Ansible Tower Software shall be limited to an evaluation environment and the Ansible Tower Software shall not be used to manage any systems or virtual machines on networks being used in the operation of Your business or any other non-evaluation purpose. Unless otherwise agreed by Red Hat, You shall limit all Evaluation use to a single 30 day evaluation period and shall not download or otherwise obtain additional copies of the Ansible Tower Software or license keys for Evaluation.
4. Limited Warranty. Except as specifically stated in this Section 4, to the maximum extent permitted under applicable law, the Ansible Tower Software and the components are provided and licensed “as is” without warranty of any kind, expressed or implied, including the implied warranties of merchantability, non-infringement or fitness for a particular purpose. Red Hat warrants solely to You that the media on which the Ansible Tower Software may be furnished will be free from defects in materials and manufacture under normal use for a period of thirty (30) days from the date of delivery to You. Red Hat does not warrant that the functions contained in the Ansible Tower Software will meet Your requirements or that the operation of the Ansible Tower Software will be entirely error free, appear precisely as described in the accompanying documentation, or comply with regulatory requirements.
5. Limitation of Remedies and Liability. To the maximum extent permitted by applicable law, Your exclusive remedy under this EULA is to return any defective media within thirty (30) days of delivery along with a copy of Your payment receipt and Red Hat, at its option, will replace it or refund the money paid by You for the media. To the maximum extent permitted under applicable law, neither Red Hat nor any Red Hat authorized distributor will be liable to You for any incidental or consequential damages, including lost profits or lost savings arising out of the use or inability to use the Ansible Tower Software or any component, even if Red Hat or the authorized distributor has been advised of the possibility of such damages. In no event shall Red Hat's liability or an authorized distributor’s liability exceed the amount that You paid to Red Hat for the Ansible Tower Software during the twelve months preceding the first event giving rise to liability.
6. Export Control. In accordance with the laws of the United States and other countries, You represent and warrant that You: (a) understand that the Ansible Tower Software and its components may be subject to export controls under the U.S. Commerce Department’s Export Administration Regulations (“EAR”); (b) are not located in any country listed in Country Group E:1 in Supplement No. 1 to part 740 of the EAR; (c) will not export, re-export, or transfer the Ansible Tower Software to any prohibited destination or to any end user who has been prohibited from participating in US export transactions by any federal agency of the US government; (d) will not use or transfer the Ansible Tower Software for use in connection with the design, development or production of nuclear, chemical or biological weapons, or rocket systems, space launch vehicles, or sounding rockets or unmanned air vehicle systems; (e) understand and agree that if you are in the United States and you export or transfer the Ansible Tower Software to eligible end users, you will, to the extent required by EAR Section 740.17 obtain a license for such export or transfer and will submit semi-annual reports to the Commerce Department’s Bureau of Industry and Security, which include the name and address (including country) of each transferee; and (f) understand that countries including the United States may restrict the import, use, or export of encryption products (which may include the Ansible Tower Software) and agree that you shall be solely responsible for compliance with any such import, use, or export restrictions.
7. General. If any provision of this EULA is held to be unenforceable, that shall not affect the enforceability of the remaining provisions. This agreement shall be governed by the laws of the State of New York and of the United States, without regard to any conflict of laws provisions. The rights and obligations of the parties to this EULA shall not be governed by the United Nations Convention on the International Sale of Goods.
Copyright © 2015 Red Hat, Inc. All rights reserved. "Red Hat" and “Ansible Tower” are registered trademarks of Red Hat, Inc. All other trademarks are the property of their respective owners.
@@ -215,7 +215,7 @@ job_template_urls = patterns('awx.api.views',
job_urls = patterns('awx.api.views',
    url(r'^$', 'job_list'),
    url(r'^(?P<pk>[0-9]+)/$', 'job_detail'),
    url(r'^(?P<pk>[0-9]+)/start/$', 'job_start'),
    url(r'^(?P<pk>[0-9]+)/start/$', 'job_start'), # TODO: remove in 3.3
    url(r'^(?P<pk>[0-9]+)/cancel/$', 'job_cancel'),
    url(r'^(?P<pk>[0-9]+)/relaunch/$', 'job_relaunch'),
    url(r'^(?P<pk>[0-9]+)/job_host_summaries/$', 'job_job_host_summaries_list'),

@@ -1,4 +1,3 @@

# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

@@ -122,7 +121,7 @@ class WorkflowsEnforcementMixin(object):
    Mixin to check that license supports workflows.
    '''
    def check_permissions(self, request):
        if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS'):
        if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
            raise LicenseForbids(_('Your license does not allow use of workflows.'))
        return super(WorkflowsEnforcementMixin, self).check_permissions(request)

@@ -2384,24 +2383,24 @@ class InventoryScriptView(RetrieveAPIView):
            host = get_object_or_404(obj.hosts, name=hostname, **hosts_q)
            data = host.variables_dict
        else:
            data = OrderedDict()
            data = dict()
            if obj.variables_dict:
                all_group = data.setdefault('all', OrderedDict())
                all_group = data.setdefault('all', dict())
                all_group['vars'] = obj.variables_dict
            if obj.kind == 'smart':
                if len(obj.hosts.all()) == 0:
                    return Response({})
                else:
                    all_group = data.setdefault('all', OrderedDict())
                    smart_hosts_qs = obj.hosts.all().order_by('name')
                    all_group = data.setdefault('all', dict())
                    smart_hosts_qs = obj.hosts.all()
                    smart_hosts = list(smart_hosts_qs.values_list('name', flat=True))
                    all_group['hosts'] = smart_hosts
            else:
                # Add hosts without a group to the all group.
                groupless_hosts_qs = obj.hosts.filter(groups__isnull=True, **hosts_q).order_by('name')
                groupless_hosts_qs = obj.hosts.filter(groups__isnull=True, **hosts_q)
                groupless_hosts = list(groupless_hosts_qs.values_list('name', flat=True))
                if groupless_hosts:
                    all_group = data.setdefault('all', OrderedDict())
                    all_group = data.setdefault('all', dict())
                    all_group['hosts'] = groupless_hosts

                # Build in-memory mapping of groups and their hosts.
@@ -2409,7 +2408,6 @@ class InventoryScriptView(RetrieveAPIView):
                if 'enabled' in hosts_q:
                    group_hosts_kw['host__enabled'] = hosts_q['enabled']
                group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
                group_hosts_qs = group_hosts_qs.order_by('host__name')
                group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id', 'host__name')
                group_hosts_map = {}
                for group_id, host_id, host_name in group_hosts_qs:
@@ -2421,7 +2419,6 @@ class InventoryScriptView(RetrieveAPIView):
                    from_group__inventory_id=obj.id,
                    to_group__inventory_id=obj.id,
                )
                group_parents_qs = group_parents_qs.order_by('from_group__name')
                group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name', 'to_group_id')
                group_children_map = {}
                for from_group_id, from_group_name, to_group_id in group_parents_qs:
@@ -2430,15 +2427,15 @@ class InventoryScriptView(RetrieveAPIView):

                # Now use in-memory maps to build up group info.
                for group in obj.groups.all():
                    group_info = OrderedDict()
                    group_info = dict()
                    group_info['hosts'] = group_hosts_map.get(group.id, [])
                    group_info['children'] = group_children_map.get(group.id, [])
                    group_info['vars'] = group.variables_dict
                    data[group.name] = group_info

            if hostvars:
                data.setdefault('_meta', OrderedDict())
                data['_meta'].setdefault('hostvars', OrderedDict())
                data.setdefault('_meta', dict())
                data['_meta'].setdefault('hostvars', dict())
                for host in obj.hosts.filter(**hosts_q):
                    data['_meta']['hostvars'][host.name] = host.variables_dict

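The view above assembles the standard Ansible dynamic-inventory structure. For reference, the overall shape of the JSON it returns looks like this (the group names and values are illustrative, not taken from the diff):

inventory = {
    'all': {
        'vars': {'ansible_connection': 'local'},   # inventory-wide variables
        'hosts': ['standalone-host'],              # hosts that belong to no group
    },
    'webservers': {                                # one key per group
        'hosts': ['web1', 'web2'],
        'children': [],
        'vars': {'http_port': 80},
    },
    '_meta': {                                     # only when hostvars are requested
        'hostvars': {
            'web1': {'ansible_host': '10.0.0.1'},
        },
    },
}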
@@ -2670,6 +2667,12 @@ class InventoryUpdateList(ListAPIView):
    model = InventoryUpdate
    serializer_class = InventoryUpdateListSerializer

    def get_queryset(self):
        qs = super(InventoryUpdateList, self).get_queryset()
        # TODO: remove this defer in 3.3 when we implement https://github.com/ansible/ansible-tower/issues/5436
        qs = qs.defer('result_stdout_text')
        return qs


class InventoryUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):

@@ -2836,7 +2839,6 @@ class JobTemplateSchedulesList(SubListCreateAPIView):
class JobTemplateSurveySpec(GenericAPIView):

    model = JobTemplate
    parent_model = JobTemplate
    serializer_class = EmptySerializer
    new_in_210 = True

@@ -3085,6 +3087,8 @@ class JobTemplateCallback(GenericAPIView):
                matches.update(host_mappings[host_name])
            except socket.gaierror:
                pass
            except UnicodeError:
                pass
        return matches

    def get(self, request, *args, **kwargs):
@@ -3366,7 +3370,6 @@ class WorkflowJobTemplateDetail(WorkflowsEnforcementMixin, RetrieveUpdateDestroy
class WorkflowJobTemplateCopy(WorkflowsEnforcementMixin, GenericAPIView):

    model = WorkflowJobTemplate
    parent_model = WorkflowJobTemplate
    serializer_class = EmptySerializer
    new_in_310 = True

@@ -3739,6 +3742,13 @@ class JobList(ListCreateAPIView):
            methods.remove('POST')
        return methods

    # NOTE: Remove in 3.3, switch ListCreateAPIView to ListAPIView
    def post(self, request, *args, **kwargs):
        if get_request_version(self.request) > 1:
            return Response({"error": _("POST not allowed for Job launching in version 2 of the api")},
                            status=status.HTTP_405_METHOD_NOT_ALLOWED)
        return super(JobList, self).post(request, *args, **kwargs)


class JobDetail(UnifiedJobDeletionMixin, RetrieveUpdateDestroyAPIView):

@@ -3788,6 +3798,7 @@ class JobActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
    new_in_145 = True


# TODO: remove endpoint in 3.3
class JobStart(GenericAPIView):

    model = Job
@@ -3795,7 +3806,13 @@ class JobStart(GenericAPIView):
    is_job_start = True
    deprecated = True

    def v2_not_allowed(self):
        return Response({'detail': 'Action only possible through v1 API.'},
                        status=status.HTTP_404_NOT_FOUND)

    def get(self, request, *args, **kwargs):
        if get_request_version(request) > 1:
            return self.v2_not_allowed()
        obj = self.get_object()
        data = dict(
            can_start=obj.can_start,
@@ -3806,6 +3823,8 @@ class JobStart(GenericAPIView):
        return Response(data)

    def post(self, request, *args, **kwargs):
        if get_request_version(request) > 1:
            return self.v2_not_allowed()
        obj = self.get_object()
        if obj.can_start:
            result = obj.signal_start(**request.data)

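A compact sketch of the version-gating pattern these JobStart changes (and the JobList.post override earlier) rely on: the deprecated action keeps working on v1 of the API but answers 404 on v2 and later. The get_request_version helper below is a stand-in assumption, not AWX's versioning utility.

from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

def get_request_version(request):
    # Stand-in: AWX derives the version from the versioned URL namespace.
    return 1 if request.version in (None, 'v1') else 2

class DeprecatedStartView(APIView):
    def post(self, request, *args, **kwargs):
        # Keep the action working on v1, hide it from v2 and later.
        if get_request_version(request) > 1:
            return Response({'detail': 'Action only possible through v1 API.'},
                            status=status.HTTP_404_NOT_FOUND)
        return Response({'msg': 'started'})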
@@ -2,6 +2,8 @@
# All Rights Reserved.

# Django
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _

# Django REST Framework
@@ -9,6 +11,7 @@ from rest_framework.exceptions import APIException

# Tower
from awx.main.utils.common import get_licenser
from awx.main.utils import memoize, memoize_delete

__all__ = ['LicenseForbids', 'get_license', 'get_licensed_features',
           'feature_enabled', 'feature_exists']
@@ -23,6 +26,13 @@ def _get_validated_license_data():
    return get_licenser().validate()


@receiver(setting_changed)
def _on_setting_changed(sender, **kwargs):
    # Clear cached result above when license changes.
    if kwargs.get('setting', None) == 'LICENSE':
        memoize_delete('feature_enabled')


def get_license(show_key=False):
    """Return a dictionary representing the active license on this Tower instance."""
    license_data = _get_validated_license_data()
@@ -40,6 +50,7 @@ def get_licensed_features():
    return features


@memoize(track_function=True)
def feature_enabled(name):
    """Return True if the requested feature is enabled, False otherwise."""
    validated_license_data = _get_validated_license_data()

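The license.py hunk above memoizes feature_enabled() and clears that cache whenever the LICENSE setting changes. A generic sketch of the same idea, using functools.lru_cache (Python 3) in place of AWX's memoize/memoize_delete helpers and a stubbed validator:

import functools

from django.core.signals import setting_changed
from django.dispatch import receiver

def _validate_license():
    # Stub standing in for get_licenser().validate(); assume this is expensive.
    return {'features': {'workflows': True}}

@functools.lru_cache(maxsize=None)
def feature_enabled(name):
    return bool(_validate_license().get('features', {}).get(name, False))

@receiver(setting_changed)
def _on_setting_changed(sender, **kwargs):
    # Drop the cached answers when the license itself changes.
    if kwargs.get('setting') == 'LICENSE':
        feature_enabled.cache_clear()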
@@ -54,6 +54,13 @@ class Command(BaseCommand):
|
||||
default=False,
|
||||
help=_('Skip commenting out settings in files.'),
|
||||
)
|
||||
parser.add_argument(
|
||||
'--comment-only',
|
||||
action='store_true',
|
||||
dest='comment_only',
|
||||
default=False,
|
||||
help=_('Skip migrating and only comment out settings in files.'),
|
||||
)
|
||||
parser.add_argument(
|
||||
'--backup-suffix',
|
||||
dest='backup_suffix',
|
||||
@@ -67,6 +74,7 @@ class Command(BaseCommand):
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.skip_errors = bool(options.get('skip_errors', False))
|
||||
self.no_comment = bool(options.get('no_comment', False))
|
||||
self.comment_only = bool(options.get('comment_only', False))
|
||||
self.backup_suffix = options.get('backup_suffix', '')
|
||||
self.categories = options.get('category', None) or ['all']
|
||||
self.style.HEADING = self.style.MIGRATE_HEADING
|
||||
@@ -103,7 +111,7 @@ class Command(BaseCommand):
|
||||
def _get_settings_file_patterns(self):
|
||||
if MODE == 'development':
|
||||
return [
|
||||
'/etc/tower/settings.py',
|
||||
'/etc/tower/settings.py',
|
||||
'/etc/tower/conf.d/*.py',
|
||||
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'settings', 'local_*.py')
|
||||
]
|
||||
@@ -360,14 +368,15 @@ class Command(BaseCommand):
|
||||
if filename:
|
||||
self._display_diff_summary(filename, lines_added, lines_removed)
|
||||
|
||||
def _migrate_settings(self, registered_settings):
|
||||
patterns = self._get_settings_file_patterns()
|
||||
|
||||
# Determine which settings need to be commented/migrated.
|
||||
def _discover_settings(self, registered_settings):
|
||||
if self.verbosity >= 1:
|
||||
self.stdout.write(self.style.HEADING('Discovering settings to be migrated and commented:'))
|
||||
|
||||
# Determine which settings need to be commented/migrated.
|
||||
to_migrate = collections.OrderedDict()
|
||||
to_comment = collections.OrderedDict()
|
||||
patterns = self._get_settings_file_patterns()
|
||||
|
||||
for name in registered_settings:
|
||||
comment_error, migrate_error = None, None
|
||||
files_to_comment = []
|
||||
@@ -398,8 +407,9 @@ class Command(BaseCommand):
|
||||
self._display_tbd(name, files_to_comment, migrate_value, comment_error, migrate_error)
|
||||
if self.verbosity == 1 and not to_migrate and not to_comment:
|
||||
self.stdout.write(' No settings found to migrate or comment!')
|
||||
return (to_migrate, to_comment)
|
||||
|
||||
# Now migrate those settings to the database.
|
||||
def _migrate(self, to_migrate):
|
||||
if self.verbosity >= 1:
|
||||
if self.dry_run:
|
||||
self.stdout.write(self.style.HEADING('Migrating settings to database (dry-run):'))
|
||||
@@ -407,6 +417,8 @@ class Command(BaseCommand):
|
||||
self.stdout.write(self.style.HEADING('Migrating settings to database:'))
|
||||
if not to_migrate:
|
||||
self.stdout.write(' No settings to migrate!')
|
||||
|
||||
# Now migrate those settings to the database.
|
||||
for name, db_value in to_migrate.items():
|
||||
display_value = json.dumps(db_value, indent=4)
|
||||
setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
|
||||
@@ -422,7 +434,7 @@ class Command(BaseCommand):
|
||||
setting.save(update_fields=['value'])
|
||||
self._display_migrate(name, action, display_value)
|
||||
|
||||
# Now comment settings in settings files.
|
||||
def _comment(self, to_comment):
|
||||
if self.verbosity >= 1:
|
||||
if bool(self.dry_run or self.no_comment):
|
||||
self.stdout.write(self.style.HEADING('Commenting settings in files (dry-run):'))
|
||||
@@ -430,6 +442,8 @@ class Command(BaseCommand):
|
||||
self.stdout.write(self.style.HEADING('Commenting settings in files:'))
|
||||
if not to_comment:
|
||||
self.stdout.write(' No settings to comment!')
|
||||
|
||||
# Now comment settings in settings files.
|
||||
if to_comment:
|
||||
to_comment_patterns = []
|
||||
license_file_to_comment = None
|
||||
@@ -457,3 +471,10 @@ class Command(BaseCommand):
|
||||
if custom_logo_file_to_comment:
|
||||
diffs.extend(self._comment_custom_logo_file(dry_run=False))
|
||||
self._display_comment(diffs)
|
||||
|
||||
def _migrate_settings(self, registered_settings):
|
||||
to_migrate, to_comment = self._discover_settings(registered_settings)
|
||||
|
||||
if not bool(self.comment_only):
|
||||
self._migrate(to_migrate)
|
||||
self._comment(to_comment)
|
||||
|
||||
@@ -74,6 +74,10 @@ class Setting(CreatedModifiedModel):
|
||||
def get_cache_key(self, key):
|
||||
return key
|
||||
|
||||
@classmethod
|
||||
def get_cache_id_key(self, key):
|
||||
return '{}_ID'.format(key)
|
||||
|
||||
|
||||
import awx.conf.signals # noqa
|
||||
|
||||
|
||||
@@ -69,6 +69,12 @@ def _log_database_error():
|
||||
pass
|
||||
|
||||
|
||||
def filter_sensitive(registry, key, value):
|
||||
if registry.is_setting_encrypted(key):
|
||||
return '$encrypted$'
|
||||
return value
|
||||
|
||||
|
||||
class EncryptedCacheProxy(object):
|
||||
|
||||
def __init__(self, cache, registry, encrypter=None, decrypter=None):
|
||||
@@ -105,9 +111,13 @@ class EncryptedCacheProxy(object):
|
||||
six.text_type(value)
|
||||
except UnicodeDecodeError:
|
||||
value = value.decode('utf-8')
|
||||
logger.debug('cache get(%r, %r) -> %r', key, empty, filter_sensitive(self.registry, key, value))
|
||||
return value
|
||||
|
||||
def set(self, key, value, **kwargs):
|
||||
def set(self, key, value, log=True, **kwargs):
|
||||
if log is True:
|
||||
logger.debug('cache set(%r, %r, %r)', key, filter_sensitive(self.registry, key, value),
|
||||
SETTING_CACHE_TIMEOUT)
|
||||
self.cache.set(
|
||||
key,
|
||||
self._handle_encryption(self.encrypter, key, value),
|
||||
@@ -115,8 +125,13 @@ class EncryptedCacheProxy(object):
|
||||
)
|
||||
|
||||
def set_many(self, data, **kwargs):
|
||||
filtered_data = dict(
|
||||
(key, filter_sensitive(self.registry, key, value))
|
||||
for key, value in data.items()
|
||||
)
|
||||
logger.debug('cache set_many(%r, %r)', filtered_data, SETTING_CACHE_TIMEOUT)
|
||||
for key, value in data.items():
|
||||
self.set(key, value, **kwargs)
|
||||
self.set(key, value, log=False, **kwargs)
|
||||
|
||||
def _handle_encryption(self, method, key, value):
|
||||
TransientSetting = namedtuple('TransientSetting', ['pk', 'value'])
|
||||
@@ -124,9 +139,16 @@ class EncryptedCacheProxy(object):
|
||||
if value is not empty and self.registry.is_setting_encrypted(key):
|
||||
# If the setting exists in the database, we'll use its primary key
|
||||
# as part of the AES key when encrypting/decrypting
|
||||
obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
|
||||
if obj_id is empty:
|
||||
logger.info('Efficiency notice: Corresponding id not stored in cache %s',
|
||||
Setting.get_cache_id_key(key))
|
||||
obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
|
||||
elif obj_id == SETTING_CACHE_NONE:
|
||||
obj_id = None
|
||||
return method(
|
||||
TransientSetting(
|
||||
pk=getattr(self._get_setting_from_db(key), 'pk', None),
|
||||
pk=obj_id,
|
||||
value=value
|
||||
),
|
||||
'value'
|
||||
@@ -241,11 +263,13 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
# to indicate from the cache that the setting is not configured without
|
||||
# a database lookup.
|
||||
settings_to_cache = get_settings_to_cache(self.registry)
|
||||
setting_ids = {}
|
||||
# Load all settings defined in the database.
|
||||
for setting in Setting.objects.filter(key__in=settings_to_cache.keys(), user__isnull=True).order_by('pk'):
|
||||
if settings_to_cache[setting.key] != SETTING_CACHE_NOTSET:
|
||||
continue
|
||||
if self.registry.is_setting_encrypted(setting.key):
|
||||
setting_ids[setting.key] = setting.id
|
||||
try:
|
||||
value = decrypt_field(setting, 'value')
|
||||
except ValueError, e:
|
||||
@@ -264,12 +288,18 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
field = self.registry.get_setting_field(key)
|
||||
try:
|
||||
settings_to_cache[key] = get_cache_value(field.get_default())
|
||||
if self.registry.is_setting_encrypted(key):
|
||||
# No database pk, so None will be passed to encryption algorithm
|
||||
setting_ids[key] = SETTING_CACHE_NOTSET
|
||||
except SkipField:
|
||||
pass
|
||||
# Generate a cache key for each setting and store them all at once.
|
||||
settings_to_cache = dict([(Setting.get_cache_key(k), v) for k, v in settings_to_cache.items()])
|
||||
for k, id_val in setting_ids.items():
|
||||
logger.debug('Saving id in cache for encrypted setting %s, %s',
|
||||
Setting.get_cache_id_key(k), id_val)
|
||||
self.cache.cache.set(Setting.get_cache_id_key(k), id_val)
|
||||
settings_to_cache['_awx_conf_preload_expires'] = self._awx_conf_preload_expires
|
||||
logger.debug('cache set_many(%r, %r)', settings_to_cache, SETTING_CACHE_TIMEOUT)
|
||||
self.cache.set_many(settings_to_cache, timeout=SETTING_CACHE_TIMEOUT)
|
||||
|
||||
def _get_local(self, name):
|
||||
@@ -279,7 +309,6 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
cache_value = self.cache.get(cache_key, default=empty)
|
||||
except ValueError:
|
||||
cache_value = empty
|
||||
logger.debug('cache get(%r, %r) -> %r', cache_key, empty, cache_value)
|
||||
if cache_value == SETTING_CACHE_NOTSET:
|
||||
value = empty
|
||||
elif cache_value == SETTING_CACHE_NONE:
|
||||
@@ -293,6 +322,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
field = self.registry.get_setting_field(name)
|
||||
if value is empty:
|
||||
setting = None
|
||||
setting_id = None
|
||||
if not field.read_only or name in (
|
||||
# these two values are read-only - however - we *do* want
|
||||
# to fetch their value from the database
|
||||
@@ -303,6 +333,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
if setting:
|
||||
if getattr(field, 'encrypted', False):
|
||||
value = decrypt_field(setting, 'value')
|
||||
setting_id = setting.id
|
||||
else:
|
||||
value = setting.value
|
||||
else:
|
||||
@@ -310,15 +341,17 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
if SETTING_CACHE_DEFAULTS:
|
||||
try:
|
||||
value = field.get_default()
|
||||
if getattr(field, 'encrypted', False):
|
||||
setting_id = SETTING_CACHE_NONE
|
||||
except SkipField:
|
||||
pass
|
||||
# If None implies not set, convert when reading the value.
|
||||
if value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
|
||||
value = SETTING_CACHE_NOTSET
|
||||
if cache_value != value:
|
||||
logger.debug('cache set(%r, %r, %r)', cache_key,
|
||||
get_cache_value(value),
|
||||
SETTING_CACHE_TIMEOUT)
|
||||
if setting_id:
|
||||
logger.debug('Saving id in cache for encrypted setting %s', cache_key)
|
||||
self.cache.cache.set(Setting.get_cache_id_key(cache_key), setting_id)
|
||||
self.cache.set(cache_key, get_cache_value(value), timeout=SETTING_CACHE_TIMEOUT)
|
||||
if value == SETTING_CACHE_NOTSET and not SETTING_CACHE_DEFAULTS:
|
||||
try:
|
||||
|
||||
awx/conf/tests/functional/conftest.py (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
import pytest
|
||||
|
||||
from django.core.urlresolvers import resolve
|
||||
from django.utils.six.moves.urllib.parse import urlparse
|
||||
from django.contrib.auth.models import User
|
||||
|
||||
from rest_framework.test import (
|
||||
APIRequestFactory,
|
||||
force_authenticate,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def normal_user():
|
||||
try:
|
||||
user = User.objects.get(username='conf-normal')
|
||||
except User.DoesNotExist:
|
||||
user = User(username='conf-normal', is_superuser=False, password='conf-normal')
|
||||
user.save()
|
||||
return user
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def admin():
|
||||
try:
|
||||
user = User.objects.get(username='conf-admin')
|
||||
except User.DoesNotExist:
|
||||
user = User(username='conf-admin', is_superuser=True, password='conf-admin')
|
||||
user.save()
|
||||
return user
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def api_request(admin):
|
||||
def rf(verb, url, data=None, user=admin):
|
||||
view, view_args, view_kwargs = resolve(urlparse(url)[2])
|
||||
request = getattr(APIRequestFactory(), verb)(url, data=data, format='json')
|
||||
if user:
|
||||
force_authenticate(request, user=user)
|
||||
response = view(request, *view_args, **view_kwargs)
|
||||
response.render()
|
||||
return response
|
||||
return rf
|
||||
awx/conf/tests/functional/test_api.py (new file, 350 lines)
@@ -0,0 +1,350 @@
|
||||
import pytest
|
||||
import mock
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils.encryption import decrypt_field
|
||||
from awx.conf import fields
|
||||
from awx.conf.registry import settings_registry
|
||||
from awx.conf.models import Setting
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def dummy_setting():
|
||||
class context_manager(object):
|
||||
def __init__(self, name, **kwargs):
|
||||
self.name = name
|
||||
self.kwargs = kwargs
|
||||
|
||||
def __enter__(self):
|
||||
settings_registry.register(self.name, **(self.kwargs))
|
||||
|
||||
def __exit__(self, *args):
|
||||
settings_registry.unregister(self.name)
|
||||
|
||||
return context_manager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def dummy_validate():
|
||||
class context_manager(object):
|
||||
def __init__(self, category_slug, func):
|
||||
self.category_slug = category_slug
|
||||
self.func = func
|
||||
|
||||
def __enter__(self):
|
||||
settings_registry.register_validate(self.category_slug, self.func)
|
||||
|
||||
def __exit__(self, *args):
|
||||
settings_registry.unregister_validate(self.category_slug)
|
||||
|
||||
return context_manager
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_non_admin_user_does_not_see_categories(api_request, dummy_setting, normal_user):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_category_list',
|
||||
kwargs={'version': 'v2'})
|
||||
)
|
||||
assert response.data['results']
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_category_list',
|
||||
kwargs={'version': 'v2'}),
|
||||
user=normal_user
|
||||
)
|
||||
assert not response.data['results']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch(
|
||||
'awx.conf.views.VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE',
|
||||
{
|
||||
1: set([]),
|
||||
2: set(['foobar']),
|
||||
}
|
||||
)
|
||||
def test_version_specific_category_slug_to_exclude_does_not_show_up(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_category_list',
|
||||
kwargs={'version': 'v2'})
|
||||
)
|
||||
for item in response.data['results']:
|
||||
assert item['slug'] != 'foobar'
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_category_list',
|
||||
kwargs={'version': 'v1'})
|
||||
)
|
||||
contains = False
|
||||
for item in response.data['results']:
|
||||
if item['slug'] != 'foobar':
|
||||
contains = True
|
||||
break
|
||||
assert contains
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_detail_retrieve(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR_1',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), dummy_setting(
|
||||
'FOO_BAR_2',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.status_code == 200
|
||||
assert 'FOO_BAR_1' in response.data and response.data['FOO_BAR_1'] is None
|
||||
assert 'FOO_BAR_2' in response.data and response.data['FOO_BAR_2'] is None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_detail_invalid_retrieve(api_request, dummy_setting, normal_user):
|
||||
with dummy_setting(
|
||||
'FOO_BAR_1',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), dummy_setting(
|
||||
'FOO_BAR_2',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'barfoo'})
|
||||
)
|
||||
assert response.status_code == 404
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
user = normal_user
|
||||
)
|
||||
assert response.status_code == 403
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_signleton_retrieve_hierachy(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
default=0,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 0
|
||||
s = Setting(key='FOO_BAR', value=1)
|
||||
s.save()
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_signleton_retrieve_readonly(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
read_only=True,
|
||||
default=2,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 2
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': 3}
|
||||
)
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 3
|
||||
api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': 4}
|
||||
)
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 4
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
read_only=True,
|
||||
default=4,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': 5}
|
||||
)
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 4
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_dont_change_encripted_mark(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.CharField,
|
||||
encrypted=True,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': 'password'}
|
||||
)
|
||||
assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == '$encrypted$'
|
||||
api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': '$encrypted$'}
|
||||
)
|
||||
assert decrypt_field(Setting.objects.get(key='FOO_BAR'), 'value') == 'password'
|
||||
api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': 'new_pw'}
|
||||
)
|
||||
assert decrypt_field(Setting.objects.get(key='FOO_BAR'), 'value') == 'new_pw'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_runs_custom_validate(api_request, dummy_setting, dummy_validate):
|
||||
|
||||
def func_raising_exception(serializer, attrs):
|
||||
raise serializers.ValidationError('Error')
|
||||
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), dummy_validate(
|
||||
'foobar', func_raising_exception
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
response = api_request(
|
||||
'patch',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
|
||||
data={'FOO_BAR': 23}
|
||||
)
|
||||
assert response.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_delete(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
api_request(
|
||||
'delete',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert not response.data['FOO_BAR']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
read_only=True,
|
||||
default=23,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
api_request(
|
||||
'delete',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
|
||||
)
|
||||
assert response.data['FOO_BAR'] == 23
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_logging_test(api_request):
|
||||
with mock.patch('awx.conf.views.BaseHTTPSHandler.perform_test') as mock_func:
|
||||
api_request(
|
||||
'post',
|
||||
reverse('api:setting_logging_test'),
|
||||
data={'LOG_AGGREGATOR_HOST': 'http://foobar', 'LOG_AGGREGATOR_TYPE': 'logstash'}
|
||||
)
|
||||
test_arguments = mock_func.call_args[0][0]
|
||||
assert test_arguments.LOG_AGGREGATOR_HOST == 'http://foobar'
|
||||
assert test_arguments.LOG_AGGREGATOR_TYPE == 'logstash'
|
||||
assert test_arguments.LOG_AGGREGATOR_LEVEL == 'DEBUG'
|
||||
@@ -264,7 +264,7 @@ def test_setting_from_db_with_unicode(settings, mocker, encrypted):
|
||||
# this simulates a bug in python-memcached; see https://github.com/linsomniac/python-memcached/issues/79
|
||||
value = six.u('Iñtërnâtiônàlizætiøn').encode('utf-8')
|
||||
|
||||
setting_from_db = mocker.Mock(key='AWX_SOME_SETTING', value=value)
|
||||
setting_from_db = mocker.Mock(id=1, key='AWX_SOME_SETTING', value=value)
|
||||
mocks = mocker.Mock(**{
|
||||
'order_by.return_value': mocker.Mock(**{
|
||||
'__iter__': lambda self: iter([setting_from_db]),
|
||||
@@ -391,7 +391,20 @@ def test_charfield_properly_sets_none(settings, mocker):
|
||||
)
|
||||
|
||||
|
||||
def test_settings_use_an_encrypted_cache(settings):
|
||||
def test_settings_use_cache(settings, mocker):
|
||||
settings.registry.register(
|
||||
'AWX_VAR',
|
||||
field_class=fields.CharField,
|
||||
category=_('System'),
|
||||
category_slug='system'
|
||||
)
|
||||
settings.cache.set('AWX_VAR', 'foobar')
|
||||
settings.cache.set('_awx_conf_preload_expires', 100)
|
||||
# Will fail test if database is used
|
||||
getattr(settings, 'AWX_VAR')
|
||||
|
||||
|
||||
def test_settings_use_an_encrypted_cache(settings, mocker):
|
||||
settings.registry.register(
|
||||
'AWX_ENCRYPTED',
|
||||
field_class=fields.CharField,
|
||||
@@ -402,6 +415,11 @@ def test_settings_use_an_encrypted_cache(settings):
|
||||
assert isinstance(settings.cache, EncryptedCacheProxy)
|
||||
assert settings.cache.__dict__['encrypter'] == encrypt_field
|
||||
assert settings.cache.__dict__['decrypter'] == decrypt_field
|
||||
settings.cache.set('AWX_ENCRYPTED_ID', 402)
|
||||
settings.cache.set('AWX_ENCRYPTED', 'foobar')
|
||||
settings.cache.set('_awx_conf_preload_expires', 100)
|
||||
# Will fail test if database is used
|
||||
getattr(settings, 'AWX_ENCRYPTED')
|
||||
|
||||
|
||||
def test_sensitive_cache_data_is_encrypted(settings, mocker):
|
||||
|
||||
Several large file diffs are suppressed in this view, including the new 4893-line awx/locale/nl/LC_MESSAGES/django.po.
@@ -17,12 +17,17 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from rest_framework.exceptions import ParseError, PermissionDenied, ValidationError
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import * # noqa
|
||||
from awx.main.utils import (
|
||||
get_object_or_400,
|
||||
get_pk_from_dict,
|
||||
to_python_boolean,
|
||||
get_licenser,
|
||||
)
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.models.unified_jobs import ACTIVE_STATES
|
||||
from awx.main.models.mixins import ResourceMixin
|
||||
|
||||
from awx.conf.license import LicenseForbids
|
||||
from awx.conf.license import LicenseForbids, feature_enabled
|
||||
|
||||
__all__ = ['get_user_queryset', 'check_user_access', 'check_user_access_with_errors',
|
||||
'user_accessible_objects', 'consumer_access',
|
||||
@@ -36,6 +41,36 @@ access_registry = {
|
||||
}
|
||||
|
||||
|
||||
def get_object_from_data(field, Model, data, obj=None):
|
||||
"""
|
||||
Utility method to obtain related object in data according to fallbacks:
|
||||
- if data contains key with pointer to Django object, return that
|
||||
- if contains integer, get object from database
|
||||
- if this does not work, raise exception
|
||||
"""
|
||||
try:
|
||||
raw_value = data[field]
|
||||
except KeyError:
|
||||
# Calling method needs to deal with non-existence of key
|
||||
raise ParseError(_("Required related field %s for permission check." % field))
|
||||
|
||||
if isinstance(raw_value, Model):
|
||||
return raw_value
|
||||
elif raw_value is None:
|
||||
return None
|
||||
else:
|
||||
try:
|
||||
new_pk = int(raw_value)
|
||||
# Avoid database query by comparing pk to model for similarity
|
||||
if obj and new_pk == getattr(obj, '%s_id' % field, None):
|
||||
return getattr(obj, field)
|
||||
else:
|
||||
# Get the new resource from the database
|
||||
return get_object_or_400(Model, pk=new_pk)
|
||||
except (TypeError, ValueError):
|
||||
raise ParseError(_("Bad data found in related field %s." % field))
|
||||
|
||||
|
||||
class StateConflict(ValidationError):
|
||||
status_code = 409
|
||||
|
||||
@@ -105,7 +140,14 @@ def get_user_capabilities(user, instance, **kwargs):
|
||||
convenient for the user interface to consume and hide or show various
|
||||
actions in the interface.
|
||||
'''
|
||||
access_class = access_registry[instance.__class__]
|
||||
cls = instance.__class__
|
||||
# When `.defer()` is used w/ the Django ORM, the result is a subclass of
|
||||
# the original that represents e.g.,
|
||||
# awx.main.models.ad_hoc_commands.AdHocCommand_Deferred_result_stdout_text
|
||||
# We want to do the access registry lookup keyed on the base class name.
|
||||
if getattr(cls, '_deferred', False):
|
||||
cls = instance.__class__.__bases__[0]
|
||||
access_class = access_registry[cls]
|
||||
return access_class(user).get_user_capabilities(instance, **kwargs)
|
||||
|
||||
|
||||
@@ -205,24 +247,8 @@ class BaseAccess(object):
|
||||
# Use reference object's related fields, if given
|
||||
new = getattr(data['reference_obj'], field)
|
||||
elif data and field in data:
|
||||
# Obtain the resource specified in `data`
|
||||
raw_value = data[field]
|
||||
if isinstance(raw_value, Model):
|
||||
new = raw_value
|
||||
elif raw_value is None:
|
||||
new = None
|
||||
else:
|
||||
try:
|
||||
new_pk = int(raw_value)
|
||||
# Avoid database query by comparing pk to model for similarity
|
||||
if obj and new_pk == getattr(obj, '%s_id' % field, None):
|
||||
changed = False
|
||||
else:
|
||||
# Get the new resource from the database
|
||||
new = get_object_or_400(Model, pk=new_pk)
|
||||
except (TypeError, ValueError):
|
||||
raise ParseError(_("Bad data found in related field %s." % field))
|
||||
elif data is None or field not in data:
|
||||
new = get_object_from_data(field, Model, data, obj=obj)
|
||||
else:
|
||||
changed = False
|
||||
|
||||
# Obtain existing related resource
|
||||
@@ -304,6 +330,10 @@ class BaseAccess(object):
|
||||
if validation_errors:
|
||||
user_capabilities[display_method] = False
|
||||
continue
|
||||
elif isinstance(obj, (WorkflowJobTemplate, WorkflowJob)):
|
||||
if not feature_enabled('workflows'):
|
||||
user_capabilities[display_method] = (display_method == 'delete')
|
||||
continue
|
||||
elif display_method == 'copy' and isinstance(obj, WorkflowJobTemplate) and obj.organization_id is None:
|
||||
user_capabilities[display_method] = self.user.is_superuser
|
||||
continue
|
||||
@@ -377,9 +407,11 @@ class InstanceAccess(BaseAccess):
|
||||
|
||||
def get_queryset(self):
|
||||
if self.user.is_superuser or self.user.is_system_auditor:
|
||||
return Instance.objects.all().distinct()
|
||||
qs = Instance.objects.all().distinct()
|
||||
else:
|
||||
return Instance.objects.filter(rampart_groups__in=self.user.get_queryset(InstanceGroup)).distinct()
|
||||
qs = Instance.objects.filter(
|
||||
rampart_groups__in=self.user.get_queryset(InstanceGroup)).distinct()
|
||||
return qs.prefetch_related('rampart_groups')
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
@@ -397,9 +429,11 @@ class InstanceGroupAccess(BaseAccess):
|
||||
|
||||
def get_queryset(self):
|
||||
if self.user.is_superuser or self.user.is_system_auditor:
|
||||
return InstanceGroup.objects.all()
|
||||
qs = InstanceGroup.objects.all()
|
||||
else:
|
||||
return InstanceGroup.objects.filter(organization__in=Organization.accessible_objects(self.user, 'admin_role'))
|
||||
qs = InstanceGroup.objects.filter(
|
||||
organization__in=Organization.accessible_pk_qs(self.user, 'admin_role'))
|
||||
return qs.prefetch_related('instances')
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
@@ -459,8 +493,10 @@ class UserAccess(BaseAccess):
|
||||
|
||||
def can_change(self, obj, data):
|
||||
if data is not None and ('is_superuser' in data or 'is_system_auditor' in data):
|
||||
if (to_python_boolean(data.get('is_superuser', 'false'), allow_none=True) or
|
||||
to_python_boolean(data.get('is_system_auditor', 'false'), allow_none=True)) and not self.user.is_superuser:
|
||||
if to_python_boolean(data.get('is_superuser', 'false'), allow_none=True) and \
|
||||
not self.user.is_superuser:
|
||||
return False
|
||||
if to_python_boolean(data.get('is_system_auditor', 'false'), allow_none=True) and not (self.user.is_superuser or self.user == obj):
|
||||
return False
|
||||
# A user can be changed if they are themselves, or by org admins or
|
||||
# superusers. Change permission implies changing only certain fields
|
||||
@@ -506,6 +542,8 @@ class OrganizationAccess(BaseAccess):
|
||||
I can change or delete organizations when:
|
||||
- I am a superuser.
|
||||
- I'm an admin of that organization.
|
||||
I can associate/disassociate instance groups when:
|
||||
- I am a superuser.
|
||||
'''
|
||||
|
||||
model = Organization
|
||||
@@ -537,7 +575,7 @@ class OrganizationAccess(BaseAccess):
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if relationship == "instance_groups":
|
||||
if self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.admin_role:
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
return False
|
||||
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
@@ -596,9 +634,18 @@ class InventoryAccess(BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_admin(self, obj, data):
|
||||
# Host filter may only be modified by org admin level
|
||||
org_admin_mandatory = False
|
||||
new_host_filter = data.get('host_filter', None) if data else None
|
||||
if new_host_filter and new_host_filter != obj.host_filter:
|
||||
org_admin_mandatory = True
|
||||
# Verify that the user has access to the new organization if moving an
|
||||
# inventory to a new organization. Otherwise, just check for admin permission.
|
||||
return self.check_related('organization', Organization, data, obj=obj) and self.user in obj.admin_role
|
||||
return (
|
||||
self.check_related('organization', Organization, data, obj=obj,
|
||||
mandatory=org_admin_mandatory) and
|
||||
self.user in obj.admin_role
|
||||
)
|
||||
|
||||
@check_superuser
|
||||
def can_update(self, obj):
|
||||
@@ -834,6 +881,10 @@ class InventoryUpdateAccess(BaseAccess):
|
||||
def get_queryset(self):
|
||||
qs = InventoryUpdate.objects.distinct()
|
||||
qs = qs.select_related('created_by', 'modified_by', 'inventory_source__inventory')
|
||||
qs = qs.prefetch_related(
|
||||
'unified_job_template',
|
||||
'instance_group'
|
||||
)
|
||||
inventory_sources_qs = self.user.get_queryset(InventorySource)
|
||||
return qs.filter(inventory_source__in=inventory_sources_qs)
|
||||
|
||||
@@ -921,17 +972,14 @@ class CredentialAccess(BaseAccess):
|
||||
def can_add(self, data):
|
||||
if not data: # So the browseable API will work
|
||||
return True
|
||||
user_pk = get_pk_from_dict(data, 'user')
|
||||
if user_pk:
|
||||
user_obj = get_object_or_400(User, pk=user_pk)
|
||||
if data and data.get('user', None):
|
||||
user_obj = get_object_from_data('user', User, data)
|
||||
return check_user_access(self.user, User, 'change', user_obj, None)
|
||||
team_pk = get_pk_from_dict(data, 'team')
|
||||
if team_pk:
|
||||
team_obj = get_object_or_400(Team, pk=team_pk)
|
||||
if data and data.get('team', None):
|
||||
team_obj = get_object_from_data('team', Team, data)
|
||||
return check_user_access(self.user, Team, 'change', team_obj, None)
|
||||
organization_pk = get_pk_from_dict(data, 'organization')
|
||||
if organization_pk:
|
||||
organization_obj = get_object_or_400(Organization, pk=organization_pk)
|
||||
if data and data.get('organization', None):
|
||||
organization_obj = get_object_from_data('organization', Organization, data)
|
||||
return check_user_access(self.user, Organization, 'change', organization_obj, None)
|
||||
return False
|
||||
|
||||
@@ -1080,11 +1128,17 @@ class ProjectUpdateAccess(BaseAccess):
|
||||
|
||||
def get_queryset(self):
|
||||
if self.user.is_superuser or self.user.is_system_auditor:
|
||||
return self.model.objects.all()
|
||||
qs = ProjectUpdate.objects.distinct()
|
||||
qs = self.model.objects.all()
|
||||
else:
|
||||
qs = self.model.objects.filter(
|
||||
project__in=Project.accessible_pk_qs(self.user, 'read_role')
|
||||
)
|
||||
qs = qs.select_related('created_by', 'modified_by', 'project')
|
||||
project_ids = set(self.user.get_queryset(Project).values_list('id', flat=True))
|
||||
return qs.filter(project_id__in=project_ids)
|
||||
qs = qs.prefetch_related(
|
||||
'unified_job_template',
|
||||
'instance_group'
|
||||
)
|
||||
return qs
|
||||
|
||||
@check_superuser
|
||||
def can_cancel(self, obj):
|
||||
@@ -1148,9 +1202,8 @@ class JobTemplateAccess(BaseAccess):
|
||||
if reference_obj:
|
||||
return getattr(reference_obj, field, None)
|
||||
else:
|
||||
pk = get_pk_from_dict(data, field)
|
||||
if pk:
|
||||
return get_object_or_400(Class, pk=pk)
|
||||
if data and data.get(field, None):
|
||||
return get_object_from_data(field, Class, data)
|
||||
else:
|
||||
return None
|
||||
|
||||
@@ -1236,23 +1289,6 @@ class JobTemplateAccess(BaseAccess):
|
||||
return False
|
||||
return True
|
||||
|
||||
def can_update_sensitive_fields(self, obj, data):
|
||||
project_id = data.get('project', obj.project.id if obj.project else None)
|
||||
inventory_id = data.get('inventory', obj.inventory.id if obj.inventory else None)
|
||||
credential_id = data.get('credential', obj.credential.id if obj.credential else None)
|
||||
vault_credential_id = data.get('credential', obj.vault_credential.id if obj.vault_credential else None)
|
||||
|
||||
if project_id and self.user not in Project.objects.get(pk=project_id).use_role:
|
||||
return False
|
||||
if inventory_id and self.user not in Inventory.objects.get(pk=inventory_id).use_role:
|
||||
return False
|
||||
if credential_id and self.user not in Credential.objects.get(pk=credential_id).use_role:
|
||||
return False
|
||||
if vault_credential_id and self.user not in Credential.objects.get(pk=vault_credential_id).use_role:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def can_delete(self, obj):
|
||||
is_delete_allowed = self.user.is_superuser or self.user in obj.admin_role
|
||||
if not is_delete_allowed:
|
||||
@@ -1304,7 +1340,11 @@ class JobAccess(BaseAccess):
|
||||
qs = self.model.objects
|
||||
qs = qs.select_related('created_by', 'modified_by', 'job_template', 'inventory',
|
||||
'project', 'credential', 'job_template')
|
||||
qs = qs.prefetch_related('unified_job_template')
|
||||
qs = qs.prefetch_related(
|
||||
'unified_job_template',
|
||||
'instance_group',
|
||||
Prefetch('labels', queryset=Label.objects.all().order_by('name'))
|
||||
)
|
||||
if self.user.is_superuser or self.user.is_system_auditor:
|
||||
return qs.all()
|
||||
|
||||
@@ -1358,9 +1398,8 @@ class JobAccess(BaseAccess):
|
||||
add_data = dict(data.items())
|
||||
|
||||
# If a job template is provided, the user should have read access to it.
|
||||
job_template_pk = get_pk_from_dict(data, 'job_template')
|
||||
if job_template_pk:
|
||||
job_template = get_object_or_400(JobTemplate, pk=job_template_pk)
|
||||
if data and data.get('job_template', None):
|
||||
job_template = get_object_from_data('job_template', JobTemplate, data)
|
||||
add_data.setdefault('inventory', job_template.inventory.pk)
|
||||
add_data.setdefault('project', job_template.project.pk)
|
||||
add_data.setdefault('job_type', job_template.job_type)
|
||||
@@ -1802,7 +1841,9 @@ class WorkflowJobAccess(BaseAccess):
|
||||
def can_cancel(self, obj):
|
||||
if not obj.can_cancel:
|
||||
return False
|
||||
return self.can_delete(obj) or self.user == obj.created_by
|
||||
if self.user == obj.created_by or self.can_delete(obj):
|
||||
return True
|
||||
return obj.workflow_job_template is not None and self.user in obj.workflow_job_template.admin_role
|
||||
|
||||
|
||||
class AdHocCommandAccess(BaseAccess):
|
||||
@@ -2042,6 +2083,8 @@ class UnifiedJobAccess(BaseAccess):
|
||||
# 'job_template__project',
|
||||
# 'job_template__credential',
|
||||
#)
|
||||
# TODO: remove this defer in 3.3 when we implement https://github.com/ansible/ansible-tower/issues/5436
|
||||
qs = qs.defer('result_stdout_text')
|
||||
return qs.all()
|
||||
|
||||
|
||||
@@ -2170,10 +2213,13 @@ class LabelAccess(BaseAccess):
|
||||
|
||||
def get_queryset(self):
|
||||
if self.user.is_superuser or self.user.is_system_auditor:
|
||||
return self.model.objects.all()
|
||||
return self.model.objects.all().filter(
|
||||
organization__in=Organization.accessible_objects(self.user, 'read_role')
|
||||
)
|
||||
qs = self.model.objects.all()
|
||||
else:
|
||||
qs = self.model.objects.all().filter(
|
||||
organization__in=Organization.accessible_pk_qs(self.user, 'read_role')
|
||||
)
|
||||
qs = qs.prefetch_related('modified_by', 'created_by', 'organization')
|
||||
return qs
|
||||
|
||||
@check_superuser
|
||||
def can_read(self, obj):
|
||||
|
||||
@@ -311,7 +311,7 @@ register(
|
||||
min_value=0,
|
||||
default=0,
|
||||
label=_('Default Inventory Update Timeout'),
|
||||
help_text=_('Maximum time to allow inventory updates to run. Use value of 0 to indicate that no '
|
||||
help_text=_('Maximum time in seconds to allow inventory updates to run. Use value of 0 to indicate that no '
|
||||
'timeout should be imposed. A timeout set on an individual inventory source will override this.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
@@ -323,7 +323,7 @@ register(
|
||||
min_value=0,
|
||||
default=0,
|
||||
label=_('Default Project Update Timeout'),
|
||||
help_text=_('Maximum time to allow project updates to run. Use value of 0 to indicate that no '
|
||||
help_text=_('Maximum time in seconds to allow project updates to run. Use value of 0 to indicate that no '
|
||||
'timeout should be imposed. A timeout set on an individual project will override this.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
@@ -446,7 +446,7 @@ register(
|
||||
register(
|
||||
'LOG_AGGREGATOR_PROTOCOL',
|
||||
field_class=fields.ChoiceField,
|
||||
choices=['https', 'tcp', 'udp'],
|
||||
choices=[('https', 'HTTPS'), ('tcp', 'TCP'), ('udp', 'UDP')],
|
||||
default='https',
|
||||
label=_('Logging Aggregator Protocol'),
|
||||
help_text=_('Protocol used to communicate with log aggregator.'),
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import re
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
CLOUD_PROVIDERS = ('azure', 'azure_rm', 'ec2', 'gce', 'rax', 'vmware', 'openstack', 'satellite6', 'cloudforms')
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'satellite6', 'cloudforms')
|
||||
SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
|
||||
PRIVILEGE_ESCALATION_METHODS = [ ('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')), ('dzdo', _('DZDO')), ('pmrun', _('Pmrun')), ('runas', _('Runas'))]
|
||||
ANSI_SGR_PATTERN = re.compile(r'\x1b\[[0-9;]*m')
|
||||
|
||||
awx/main/exceptions.py (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
class AwxTaskError(Exception):
|
||||
"""Base exception for errors in unified job runs"""
|
||||
def __init__(self, task, message=None):
|
||||
if message is None:
|
||||
message = "Execution error running {}".format(task.log_format)
|
||||
super(AwxTaskError, self).__init__(message)
|
||||
self.task = task
|
||||
|
||||
|
||||
class TaskCancel(AwxTaskError):
|
||||
"""Canceled flag caused run_pexpect to kill the job run"""
|
||||
def __init__(self, task, rc):
|
||||
super(TaskCancel, self).__init__(
|
||||
task, message="{} was canceled (rc={})".format(task.log_format, rc))
|
||||
self.rc = rc
|
||||
|
||||
|
||||
class TaskError(AwxTaskError):
|
||||
"""Userspace error (non-zero exit code) in run_pexpect subprocess"""
|
||||
def __init__(self, task, rc):
|
||||
super(TaskError, self).__init__(
|
||||
task, message="%s encountered an error (rc=%s), please see task stdout for details.".format(task.log_format, rc))
|
||||
self.rc = rc
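
The three exception classes above give task-runner code one way to distinguish cancellation from failure. A minimal usage sketch follows; FakeTask and run_task are invented stand-ins, and the import assumes the module lands at awx/main/exceptions.py as shown in the file header.

# Hypothetical caller of the exceptions defined above; only the exception
# classes themselves come from the new module.
from awx.main.exceptions import AwxTaskError, TaskCancel, TaskError


class FakeTask(object):
    log_format = 'job 42 (example)'


def run_task(task, rc, canceled=False):
    if canceled:
        raise TaskCancel(task, rc)
    if rc != 0:
        raise TaskError(task, rc)


try:
    run_task(FakeTask(), rc=2)
except TaskCancel as exc:
    print('canceled (rc=%s): %s' % (exc.rc, exc))
except AwxTaskError as exc:  # TaskError and any other task failure
    print('failed (rc=%s): %s' % (exc.rc, exc))
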
@@ -6,15 +6,8 @@ from django.core.management.base import NoArgsCommand
|
||||
|
||||
|
||||
class Command(NoArgsCommand):
|
||||
"""Return 0 if licensed; 1 if unlicensed
|
||||
"""
|
||||
"""Returns license type, e.g., 'enterprise', 'open', 'none'"""
|
||||
|
||||
def handle(self, **options):
|
||||
super(Command, self).__init__()
|
||||
|
||||
license_info = get_licenser().validate()
|
||||
if license_info['valid_key'] is True:
|
||||
return 0
|
||||
else:
|
||||
return 1
|
||||
|
||||
return get_licenser().validate().get('license_type', 'none')
|
||||
|
||||
@@ -65,7 +65,8 @@ class Command(NoArgsCommand):
|
||||
#jobs_qs = Job.objects.exclude(status__in=('pending', 'running'))
|
||||
#jobs_qs = jobs_qs.filter(created__lte=self.cutoff)
|
||||
skipped, deleted = 0, 0
|
||||
for job in Job.objects.all():
|
||||
jobs = Job.objects.filter(created__lt=self.cutoff)
|
||||
for job in jobs.iterator():
|
||||
job_display = '"%s" (%d host summaries, %d events)' % \
|
||||
(unicode(job),
|
||||
job.job_host_summaries.count(), job.job_events.count())
|
||||
@@ -73,21 +74,20 @@ class Command(NoArgsCommand):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s job %s', action_text, job.status, job_display)
|
||||
skipped += 1
|
||||
elif job.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, job_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, job_display)
|
||||
if not self.dry_run:
|
||||
job.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += Job.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_ad_hoc_commands(self):
|
||||
skipped, deleted = 0, 0
|
||||
for ad_hoc_command in AdHocCommand.objects.all():
|
||||
ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff)
|
||||
for ad_hoc_command in ad_hoc_commands.iterator():
|
||||
ad_hoc_command_display = '"%s" (%d events)' % \
|
||||
(unicode(ad_hoc_command),
|
||||
ad_hoc_command.ad_hoc_command_events.count())
|
||||
@@ -95,21 +95,20 @@ class Command(NoArgsCommand):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s ad hoc command %s', action_text, ad_hoc_command.status, ad_hoc_command_display)
|
||||
skipped += 1
|
||||
elif ad_hoc_command.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, ad_hoc_command_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, ad_hoc_command_display)
|
||||
if not self.dry_run:
|
||||
ad_hoc_command.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_project_updates(self):
|
||||
skipped, deleted = 0, 0
|
||||
for pu in ProjectUpdate.objects.all():
|
||||
project_updates = ProjectUpdate.objects.filter(created__lt=self.cutoff)
|
||||
for pu in project_updates.iterator():
|
||||
pu_display = '"%s" (type %s)' % (unicode(pu), unicode(pu.launch_type))
|
||||
if pu.status in ('pending', 'waiting', 'running'):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
@@ -119,21 +118,20 @@ class Command(NoArgsCommand):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, pu_display)
|
||||
skipped += 1
|
||||
elif pu.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, pu_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, pu_display)
|
||||
if not self.dry_run:
|
||||
pu.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_inventory_updates(self):
|
||||
skipped, deleted = 0, 0
|
||||
for iu in InventoryUpdate.objects.all():
|
||||
inventory_updates = InventoryUpdate.objects.filter(created__lt=self.cutoff)
|
||||
for iu in inventory_updates.iterator():
|
||||
iu_display = '"%s" (source %s)' % (unicode(iu), unicode(iu.source))
|
||||
if iu.status in ('pending', 'waiting', 'running'):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
@@ -143,36 +141,33 @@ class Command(NoArgsCommand):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, iu_display)
|
||||
skipped += 1
|
||||
elif iu.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, iu_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, iu_display)
|
||||
if not self.dry_run:
|
||||
iu.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_management_jobs(self):
|
||||
skipped, deleted = 0, 0
|
||||
for sj in SystemJob.objects.all():
|
||||
system_jobs = SystemJob.objects.filter(created__lt=self.cutoff)
|
||||
for sj in system_jobs.iterator():
|
||||
sj_display = '"%s" (type %s)' % (unicode(sj), unicode(sj.job_type))
|
||||
if sj.status in ('pending', 'waiting', 'running'):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s system_job %s', action_text, sj.status, sj_display)
|
||||
skipped += 1
|
||||
elif sj.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, sj_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, sj_display)
|
||||
if not self.dry_run:
|
||||
sj.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def init_logging(self):
|
||||
@@ -187,7 +182,8 @@ class Command(NoArgsCommand):
|
||||
|
||||
def cleanup_workflow_jobs(self):
|
||||
skipped, deleted = 0, 0
|
||||
for workflow_job in WorkflowJob.objects.all():
|
||||
workflow_jobs = WorkflowJob.objects.filter(created__lt=self.cutoff)
|
||||
for workflow_job in workflow_jobs.iterator():
|
||||
workflow_job_display = '"{}" ({} nodes)'.format(
|
||||
unicode(workflow_job),
|
||||
workflow_job.workflow_nodes.count())
|
||||
@@ -195,21 +191,20 @@ class Command(NoArgsCommand):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s job %s', action_text, workflow_job.status, workflow_job_display)
|
||||
skipped += 1
|
||||
elif workflow_job.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, workflow_job_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, workflow_job_display)
|
||||
if not self.dry_run:
|
||||
workflow_job.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += WorkflowJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
def cleanup_notifications(self):
|
||||
skipped, deleted = 0, 0
|
||||
for notification in Notification.objects.all():
|
||||
notifications = Notification.objects.filter(created__lt=self.cutoff)
|
||||
for notification in notifications.iterator():
|
||||
notification_display = '"{}" (started {}, {} type, {} sent)'.format(
|
||||
unicode(notification), unicode(notification.created),
|
||||
notification.notification_type, notification.notifications_sent)
|
||||
@@ -217,16 +212,14 @@ class Command(NoArgsCommand):
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s notification %s', action_text, notification.status, notification_display)
|
||||
skipped += 1
|
||||
elif notification.created >= self.cutoff:
|
||||
action_text = 'would skip' if self.dry_run else 'skipping'
|
||||
self.logger.debug('%s %s', action_text, notification_display)
|
||||
skipped += 1
|
||||
else:
|
||||
action_text = 'would delete' if self.dry_run else 'deleting'
|
||||
self.logger.info('%s %s', action_text, notification_display)
|
||||
if not self.dry_run:
|
||||
notification.delete()
|
||||
deleted += 1
|
||||
|
||||
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
@transaction.atomic
|
||||
@@ -255,3 +248,4 @@ class Command(NoArgsCommand):
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
|
||||
|
||||
@@ -17,6 +17,11 @@ class Command(BaseCommand):
|
||||
Deprovision a Tower cluster node
|
||||
"""
|
||||
|
||||
help = (
|
||||
'Remove instance from the database. '
|
||||
'Specify `--hostname` to use this command.'
|
||||
)
|
||||
|
||||
option_list = BaseCommand.option_list + (
|
||||
make_option('--hostname', dest='hostname', type='string',
|
||||
help='Hostname used during provisioning'),
|
||||
|
||||
@@ -83,6 +83,7 @@ class AnsibleInventoryLoader(object):
|
||||
env = dict(os.environ.items())
|
||||
env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
|
||||
env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
|
||||
env['ANSIBLE_INVENTORY_UNPARSED_FAILED'] = '1'
|
||||
venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
|
||||
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
|
||||
for python_ver in ["python2.7", "python2.6"]:
|
||||
@@ -177,6 +178,40 @@ class AnsibleInventoryLoader(object):
|
||||
logger.info('Reading Ansible inventory source: %s', self.source)
|
||||
data = self.command_to_json(base_args + ['--list'])
|
||||
|
||||
# TODO: remove after we run custom scripts through ansible-inventory
|
||||
if self.is_custom and ('_meta' not in data or 'hostvars' not in data['_meta']):
|
||||
# Invoke the executable once for each host name we've built up
|
||||
# to set their variables
|
||||
data.setdefault('_meta', {})
|
||||
data['_meta'].setdefault('hostvars', {})
|
||||
logger.warning('Re-calling script for hostvars individually.')
|
||||
for group_name, group_data in data.iteritems():
|
||||
if group_name == '_meta':
|
||||
continue
|
||||
|
||||
if isinstance(group_data, dict):
|
||||
group_host_list = group_data.get('hosts', [])
|
||||
elif isinstance(group_data, list):
|
||||
group_host_list = group_data
|
||||
else:
|
||||
logger.warning('Group data for "%s" is not a dict or list',
|
||||
group_name)
|
||||
group_host_list = []
|
||||
|
||||
for hostname in group_host_list:
|
||||
logger.debug('Obtaining hostvars for %s' % hostname.encode('utf-8'))
|
||||
hostdata = self.command_to_json(
|
||||
base_args + ['--host', hostname.encode("utf-8")]
|
||||
)
|
||||
if isinstance(hostdata, dict):
|
||||
data['_meta']['hostvars'][hostname] = hostdata
|
||||
else:
|
||||
logger.warning(
|
||||
'Expected dict of vars for host "%s" when '
|
||||
'calling with `--host`, got %s instead',
|
||||
hostname, str(type(hostdata))
|
||||
)
|
||||
|
||||
logger.info('Processing JSON output...')
|
||||
inventory = MemInventory(
|
||||
group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
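
The fallback above exists because old-style custom inventory scripts may omit the _meta/hostvars block from their --list output, forcing one --host call per host. A rough sketch of the data shapes involved; hostnames and variables are invented for illustration.

# `--list` output from an old-style script, with no _meta block; both the
# dict form ('web') and the bare list form ('db') are accepted by the loop above.
list_output = {
    'web': {'hosts': ['web1.example.com'], 'vars': {'http_port': 80}},
    'db': ['db1.example.com'],
}

# The loader then runs the script once per host (e.g. `--host web1.example.com`)
# and expects a dict of host variables back:
host_output = {'ansible_host': '10.0.0.5'}

# ...which it merges under data['_meta']['hostvars'][hostname]:
list_output.setdefault('_meta', {}).setdefault('hostvars', {})
list_output['_meta']['hostvars']['web1.example.com'] = host_output
print(list_output['_meta']['hostvars'])
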
@@ -193,7 +228,6 @@ def load_inventory_source(source, group_filter_re=None,
|
||||
'''
|
||||
# Sanity check: We sanitize these module names for our API but Ansible proper doesn't follow
|
||||
# good naming conventions
|
||||
source = source.replace('azure.py', 'windows_azure.py')
|
||||
source = source.replace('satellite6.py', 'foreman.py')
|
||||
source = source.replace('vmware.py', 'vmware_inventory.py')
|
||||
if not os.path.exists(source):
|
||||
@@ -479,6 +513,12 @@ class Command(NoArgsCommand):
|
||||
group_names = all_group_names[offset:(offset + self._batch_size)]
|
||||
for group_pk in groups_qs.filter(name__in=group_names).values_list('pk', flat=True):
|
||||
del_group_pks.discard(group_pk)
|
||||
if self.inventory_source.deprecated_group_id in del_group_pks: # TODO: remove in 3.3
|
||||
logger.warning(
|
||||
'Group "%s" from v1 API is not deleted by overwrite',
|
||||
self.inventory_source.deprecated_group.name
|
||||
)
|
||||
del_group_pks.discard(self.inventory_source.deprecated_group_id)
|
||||
# Now delete all remaining groups in batches.
|
||||
all_del_pks = sorted(list(del_group_pks))
|
||||
for offset in xrange(0, len(all_del_pks), self._batch_size):
|
||||
@@ -507,6 +547,12 @@ class Command(NoArgsCommand):
|
||||
group_host_count = 0
|
||||
db_groups = self.inventory_source.groups
|
||||
for db_group in db_groups.all():
|
||||
if self.inventory_source.deprecated_group_id == db_group.id: # TODO: remove in 3.3
|
||||
logger.info(
|
||||
'Group "%s" from v1 API child group/host connections preserved',
|
||||
db_group.name
|
||||
)
|
||||
continue
|
||||
# Delete child group relationships not present in imported data.
|
||||
db_children = db_group.children
|
||||
db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
|
||||
|
||||
@@ -16,6 +16,11 @@ class Command(BaseCommand):
|
||||
Register this instance with the database for HA tracking.
|
||||
"""
|
||||
|
||||
help = (
|
||||
'Add instance to the database. '
|
||||
'Specify `--hostname` to use this command.'
|
||||
)
|
||||
|
||||
option_list = BaseCommand.option_list + (
|
||||
make_option('--hostname', dest='hostname', type='string',
|
||||
help='Hostname used during provisioning'),
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
import sys
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
|
||||
from django.db import models
|
||||
from django.utils.timezone import now
|
||||
@@ -11,7 +12,9 @@ from django.conf import settings
|
||||
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
|
||||
__all__ = ['HostManager', 'InstanceManager']
|
||||
__all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager']
|
||||
|
||||
logger = logging.getLogger('awx.main.managers')
|
||||
|
||||
|
||||
class HostManager(models.Manager):
|
||||
@@ -34,6 +37,8 @@ class HostManager(models.Manager):
|
||||
hasattr(self.instance, 'kind')):
|
||||
if self.instance.kind == 'smart' and self.instance.host_filter is not None:
|
||||
q = SmartFilter.query_from_string(self.instance.host_filter)
|
||||
if self.instance.organization_id:
|
||||
q = q.filter(inventory__organization=self.instance.organization_id)
|
||||
# If we are using host_filters, disable the core_filters, this allows
|
||||
# us to access all of the available Host entries, not just the ones associated
|
||||
# with a specific FK/relation.
|
||||
@@ -41,11 +46,26 @@ class HostManager(models.Manager):
|
||||
# If we don't disable this, a filter of {'inventory': self.instance} gets automatically
|
||||
# injected by the related object mapper.
|
||||
self.core_filters = {}
|
||||
|
||||
qs = qs & q
|
||||
return qs.distinct()
|
||||
unique_by_name = qs.order_by('name', 'pk').distinct('name')
|
||||
return qs.filter(pk__in=unique_by_name)
|
||||
return qs
|
||||
|
||||
|
||||
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
|
||||
# Create IG mapping by union of all groups their instances are members of
|
||||
ig_ig_mapping = {}
|
||||
for group_name in ig_instance_mapping.keys():
|
||||
ig_ig_set = set()
|
||||
for instance_hostname in ig_instance_mapping[group_name]:
|
||||
ig_ig_set |= instance_ig_mapping[instance_hostname]
|
||||
else:
|
||||
ig_ig_set.add(group_name)  # a group always maps at least to itself (covers groups with no instances)
|
||||
ig_ig_mapping[group_name] = ig_ig_set
|
||||
return ig_ig_mapping
|
||||
|
||||
|
||||
class InstanceManager(models.Manager):
|
||||
"""A custom manager class for the Instance model.
|
||||
|
||||
@@ -77,3 +97,100 @@ class InstanceManager(models.Manager):
|
||||
def my_role(self):
|
||||
# NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
|
||||
return "tower"
|
||||
|
||||
|
||||
class InstanceGroupManager(models.Manager):
|
||||
"""A custom manager class for the Instance model.
|
||||
|
||||
Used for global capacity calculations
|
||||
"""
|
||||
|
||||
def capacity_mapping(self, qs=None):
|
||||
"""
|
||||
Another entry point, mirroring the Instance manager method of the same name
|
||||
"""
|
||||
if qs is None:
|
||||
qs = self.all().prefetch_related('instances')
|
||||
instance_ig_mapping = {}
|
||||
ig_instance_mapping = {}
|
||||
# Create dictionaries that represent basic m2m memberships
|
||||
for group in qs:
|
||||
ig_instance_mapping[group.name] = set(
|
||||
instance.hostname for instance in group.instances.all() if
|
||||
instance.capacity != 0
|
||||
)
|
||||
for inst in group.instances.all():
|
||||
if inst.capacity == 0:
|
||||
continue
|
||||
instance_ig_mapping.setdefault(inst.hostname, set())
|
||||
instance_ig_mapping[inst.hostname].add(group.name)
|
||||
# Get IG capacity overlap mapping
|
||||
ig_ig_mapping = get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping)
|
||||
|
||||
return instance_ig_mapping, ig_ig_mapping
|
||||
|
||||
@staticmethod
|
||||
def zero_out_group(graph, name, breakdown):
|
||||
if name not in graph:
|
||||
graph[name] = {}
|
||||
graph[name]['consumed_capacity'] = 0
|
||||
if breakdown:
|
||||
graph[name]['committed_capacity'] = 0
|
||||
graph[name]['running_capacity'] = 0
|
||||
|
||||
def capacity_values(self, qs=None, tasks=None, breakdown=False, graph=None):
|
||||
"""
|
||||
Returns a dictionary of capacity values for all IGs
|
||||
"""
|
||||
if qs is None: # Optionally BYOQS - bring your own queryset
|
||||
qs = self.all().prefetch_related('instances')
|
||||
instance_ig_mapping, ig_ig_mapping = self.capacity_mapping(qs=qs)
|
||||
|
||||
if tasks is None:
|
||||
tasks = self.model.unifiedjob_set.related.related_model.objects.filter(
|
||||
status__in=('running', 'waiting'))
|
||||
|
||||
if graph is None:
|
||||
graph = {group.name: {} for group in qs}
|
||||
for group_name in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
for t in tasks:
|
||||
# TODO: dock capacity for isolated job management tasks running in queue
|
||||
impact = t.task_impact
|
||||
if t.status == 'waiting' or not t.execution_node:
|
||||
# Subtract capacity from any peer groups that share instances
|
||||
if not t.instance_group:
|
||||
logger.warning('Excluded %s from capacity algorithm '
|
||||
'(missing instance_group).', t.log_format)
|
||||
impacted_groups = []
|
||||
elif t.instance_group.name not in ig_ig_mapping:
|
||||
# Waiting job in group with 0 capacity has no collateral impact
|
||||
impacted_groups = [t.instance_group.name]
|
||||
else:
|
||||
impacted_groups = ig_ig_mapping[t.instance_group.name]
|
||||
for group_name in impacted_groups:
|
||||
if group_name not in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
graph[group_name]['consumed_capacity'] += impact
|
||||
if breakdown:
|
||||
graph[group_name]['committed_capacity'] += impact
|
||||
elif t.status == 'running':
|
||||
# Subtract capacity from all groups that contain the instance
|
||||
if t.execution_node not in instance_ig_mapping:
|
||||
logger.warning('Detected %s running inside lost instance, '
|
||||
'may still be waiting for reaper.', t.log_format)
|
||||
if t.instance_group:
|
||||
impacted_groups = [t.instance_group.name]
|
||||
else:
|
||||
impacted_groups = []
|
||||
else:
|
||||
impacted_groups = instance_ig_mapping[t.execution_node]
|
||||
for group_name in impacted_groups:
|
||||
if group_name not in graph:
|
||||
self.zero_out_group(graph, group_name, breakdown)
|
||||
graph[group_name]['consumed_capacity'] += impact
|
||||
if breakdown:
|
||||
graph[group_name]['running_capacity'] += impact
|
||||
else:
|
||||
logger.error('Programming error, %s not in ["running", "waiting"]', t.log_format)
|
||||
return graph
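
The capacity bookkeeping above hinges on the group-to-group mapping built by get_ig_ig_mapping(): instance groups that share instances also share consumed capacity. A self-contained sketch of that mapping with invented group and host names.

# Stand-alone illustration of the mapping built by get_ig_ig_mapping() above;
# group and hostname data are made up.
def ig_ig_mapping_sketch(ig_instance_mapping, instance_ig_mapping):
    mapping = {}
    for group_name, hostnames in ig_instance_mapping.items():
        shared = set()
        for hostname in hostnames:
            shared |= instance_ig_mapping[hostname]
        shared.add(group_name)  # a group always maps at least to itself
        mapping[group_name] = shared
    return mapping


print(ig_ig_mapping_sketch(
    {'tower': {'node1', 'node2'}, 'isolated': {'node2'}, 'idle': set()},
    {'node1': {'tower'}, 'node2': {'tower', 'isolated'}},
))
# 'tower' and 'isolated' share node2, so each maps to {'tower', 'isolated'};
# 'idle' has no usable instances and maps only to itself.
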
@@ -9,11 +9,13 @@ import six
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User
|
||||
from django.db.models.signals import post_save
|
||||
from django.db import IntegrityError
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
from django.db import IntegrityError, connection
|
||||
from django.utils.functional import curry
|
||||
from django.shortcuts import get_object_or_404
|
||||
from django.shortcuts import get_object_or_404, redirect
|
||||
from django.apps import apps
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.core.urlresolvers import reverse
|
||||
|
||||
from awx.main.models import ActivityStream
|
||||
from awx.api.authentication import TokenAuthentication
|
||||
@@ -143,7 +145,8 @@ class URLModificationMiddleware(object):
|
||||
|
||||
def _convert_named_url(self, url_path):
|
||||
url_units = url_path.split('/')
|
||||
if len(url_units) < 6 or url_units[1] != 'api' or url_units[2] not in ['v2']:
|
||||
# If the identifier is an empty string, it is always invalid.
|
||||
if len(url_units) < 6 or url_units[1] != 'api' or url_units[2] not in ['v2'] or not url_units[4]:
|
||||
return url_path
|
||||
resource = url_units[3]
|
||||
if resource in settings.NAMED_URL_MAPPINGS:
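
The added "or not url_units[4]" guard rejects paths whose named-URL identifier is empty. A quick illustration of the split; the host identifier shown is an invented example of the named-URL format.

# Index 4 of the split path holds the named-URL identifier.
print('/api/v2/hosts/myhost++myinv++myorg/'.split('/'))
# ['', 'api', 'v2', 'hosts', 'myhost++myinv++myorg', '']  -> identifier present, convert it
print('/api/v2/hosts//'.split('/'))
# ['', 'api', 'v2', 'hosts', '', '']                      -> empty identifier, path returned unchanged
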
@@ -161,3 +164,12 @@ class URLModificationMiddleware(object):
|
||||
if request.path_info != new_path:
|
||||
request.path = request.path.replace(request.path_info, new_path)
|
||||
request.path_info = new_path
|
||||
|
||||
|
||||
class MigrationRanCheckMiddleware(object):
|
||||
|
||||
def process_request(self, request):
|
||||
executor = MigrationExecutor(connection)
|
||||
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
|
||||
if bool(plan) and 'migrations_notran' not in request.path:
|
||||
return redirect(reverse("ui:migrations_notran"))
@@ -157,7 +157,7 @@ class Migration(migrations.Migration):
|
||||
('status', models.CharField(default=b'pending', max_length=20, editable=False, choices=[(b'pending', 'Pending'), (b'successful', 'Successful'), (b'failed', 'Failed')])),
|
||||
('error', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('notifications_sent', models.IntegerField(default=0, editable=False)),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'irc', 'IRC')])),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'irc', 'IRC')])),
|
||||
('recipients', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('subject', models.TextField(default=b'', editable=False, blank=True)),
|
||||
('body', jsonfield.fields.JSONField(default=dict, blank=True)),
|
||||
@@ -174,7 +174,7 @@ class Migration(migrations.Migration):
|
||||
('modified', models.DateTimeField(default=None, editable=False)),
|
||||
('description', models.TextField(default=b'', blank=True)),
|
||||
('name', models.CharField(unique=True, max_length=512)),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'irc', 'IRC')])),
|
||||
('notification_type', models.CharField(max_length=32, choices=[(b'email', 'Email'), (b'slack', 'Slack'), (b'twilio', 'Twilio'), (b'pagerduty', 'Pagerduty'), (b'hipchat', 'HipChat'), (b'webhook', 'Webhook'), (b'mattermost', 'Mattermost'), (b'irc', 'IRC')])),
|
||||
('notification_configuration', jsonfield.fields.JSONField(default=dict)),
|
||||
('created_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
|
||||
('modified_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
|
||||
|
||||
@@ -145,12 +145,12 @@ class Migration(migrations.Migration):
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
|
||||
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
|
||||
field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a Project'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
|
||||
@@ -11,6 +11,7 @@ from awx.main.migrations import _migration_utils as migration_utils
|
||||
from awx.main.migrations import _reencrypt as reencrypt
|
||||
from awx.main.migrations import _scan_jobs as scan_jobs
|
||||
from awx.main.migrations import _credentialtypes as credentialtypes
|
||||
from awx.main.migrations import _azure_credentials as azurecreds
|
||||
import awx.main.fields
|
||||
|
||||
|
||||
@@ -24,6 +25,8 @@ class Migration(migrations.Migration):
|
||||
# Inventory Refresh
|
||||
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
|
||||
migrations.RunPython(invsrc.remove_rax_inventory_sources),
|
||||
migrations.RunPython(azurecreds.remove_azure_credentials),
|
||||
migrations.RunPython(invsrc.remove_azure_inventory_sources),
|
||||
migrations.RunPython(invsrc.remove_inventory_source_with_no_inventory_link),
|
||||
migrations.RunPython(invsrc.rename_inventory_sources),
|
||||
migrations.RunPython(reencrypt.replace_aesecb_fernet),
|
||||
|
||||
awx/main/migrations/_azure_credentials.py (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
import logging
|
||||
|
||||
from django.db.models import Q
|
||||
|
||||
logger = logging.getLogger('awx.main.migrations')
|
||||
|
||||
|
||||
def remove_azure_credentials(apps, schema_editor):
|
||||
'''Azure classic credentials are no longer supported as of 3.2; azure_rm is
supported instead.
|
||||
'''
|
||||
Credential = apps.get_model('main', 'Credential')
|
||||
logger.debug("Removing all Azure Credentials from database.")
|
||||
Credential.objects.filter(kind='azure').delete()
|
||||
|
||||
@@ -51,3 +51,12 @@ def remove_inventory_source_with_no_inventory_link(apps, schema_editor):
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
logger.debug("Removing all InventorySource that have no link to an Inventory from database.")
|
||||
InventorySource.objects.filter(Q(inventory__organization=None) & Q(deprecated_group__inventory=None)).delete()
|
||||
|
||||
|
||||
def remove_azure_inventory_sources(apps, schema_editor):
|
||||
'''Azure classic inventory sources are no longer supported as of 3.2; remove them.
|
||||
'''
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
logger.debug("Removing all Azure InventorySource from database.")
|
||||
InventorySource.objects.filter(source='azure').delete()
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ from awx.main.notifications.slack_backend import SlackBackend
|
||||
from awx.main.notifications.twilio_backend import TwilioBackend
|
||||
from awx.main.notifications.pagerduty_backend import PagerDutyBackend
|
||||
from awx.main.notifications.hipchat_backend import HipChatBackend
|
||||
from awx.main.notifications.mattermost_backend import MattermostBackend
|
||||
from awx.main.notifications.webhook_backend import WebhookBackend
|
||||
from awx.main.notifications.irc_backend import IrcBackend
|
||||
|
||||
@@ -25,6 +26,7 @@ NOTIFICATION_TYPES = [('email', _('Email'), CustomEmailBackend),
|
||||
('twilio', _('Twilio'), TwilioBackend),
|
||||
('pagerduty', _('Pagerduty'), PagerDutyBackend),
|
||||
('hipchat', _('HipChat'), HipChatBackend),
|
||||
('mattermost', _('Mattermost'), MattermostBackend),
|
||||
('webhook', _('Webhook'), WebhookBackend),
|
||||
('irc', _('IRC'), IrcBackend)]
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ def _create_fact_scan_project(ContentType, Project, org):
|
||||
ct = ContentType.objects.get_for_model(Project)
|
||||
name = "Tower Fact Scan - {}".format(org.name if org else "No Organization")
|
||||
proj = Project(name=name,
|
||||
scm_url='https://github.com/ansible/tower-fact-modules',
|
||||
scm_url='https://github.com/ansible/awx-facts-playbooks',
|
||||
scm_type='git',
|
||||
scm_update_on_launch=True,
|
||||
scm_update_cache_timeout=86400,
|
||||
|
||||
@@ -145,3 +145,17 @@ activity_stream_registrar.connect(WorkflowJob)
|
||||
|
||||
# prevent API filtering on certain Django-supplied sensitive fields
|
||||
prevent_search(User._meta.get_field('password'))
|
||||
|
||||
|
||||
# Always, always, always defer result_stdout_text for polymorphic UnifiedJob rows
|
||||
# TODO: remove this defer in 3.3 when we implement https://github.com/ansible/ansible-tower/issues/5436
|
||||
def defer_stdout(f):
|
||||
def _wrapped(*args, **kwargs):
|
||||
objs = f(*args, **kwargs)
|
||||
objs.query.deferred_loading[0].add('result_stdout_text')
|
||||
return objs
|
||||
return _wrapped
|
||||
|
||||
|
||||
for cls in UnifiedJob.__subclasses__():
|
||||
cls.base_objects.filter = defer_stdout(cls.base_objects.filter)
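
The loop above monkey-patches each UnifiedJob subclass so that base_objects.filter() always defers the large result_stdout_text column. A self-contained sketch of the same wrap-and-adjust pattern with stand-in objects; nothing below is an AWX or Django API.

# Stand-in demonstration of the decorator pattern used by defer_stdout():
# wrap a callable that returns a query-like object and mark a field as
# deferred before handing the object back. FakeQuerySet is invented.
def defer_field(field_name):
    def decorator(func):
        def _wrapped(*args, **kwargs):
            result = func(*args, **kwargs)
            result.deferred.add(field_name)  # stand-in for queryset deferred_loading
            return result
        return _wrapped
    return decorator


class FakeQuerySet(object):
    def __init__(self):
        self.deferred = set()


@defer_field('result_stdout_text')
def get_jobs():
    return FakeQuerySet()


print(get_jobs().deferred)  # set(['result_stdout_text'])
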
@@ -347,7 +347,6 @@ class AdHocCommandEvent(CreatedModifiedModel):
|
||||
return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
from awx.main.models.inventory import Host
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
update_fields = kwargs.get('update_fields', [])
|
||||
@@ -364,16 +363,16 @@ class AdHocCommandEvent(CreatedModifiedModel):
|
||||
self.host_name = self.event_data.get('host', '').strip()
|
||||
if 'host_name' not in update_fields:
|
||||
update_fields.append('host_name')
|
||||
try:
|
||||
if not self.host_id and self.host_name:
|
||||
host_qs = Host.objects.filter(inventory__ad_hoc_commands__id=self.ad_hoc_command_id, name=self.host_name)
|
||||
if not self.host_id and self.host_name:
|
||||
host_qs = self.ad_hoc_command.inventory.hosts.filter(name=self.host_name)
|
||||
try:
|
||||
host_id = host_qs.only('id').values_list('id', flat=True)
|
||||
if host_id.exists():
|
||||
self.host_id = host_id[0]
|
||||
if 'host_id' not in update_fields:
|
||||
update_fields.append('host_id')
|
||||
except (IndexError, AttributeError):
|
||||
pass
|
||||
except (IndexError, AttributeError):
|
||||
pass
|
||||
super(AdHocCommandEvent, self).save(*args, **kwargs)
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -52,7 +52,7 @@ PROJECT_UPDATE_JOB_TYPE_CHOICES = [
|
||||
(PERM_INVENTORY_CHECK, _('Check')),
|
||||
]
|
||||
|
||||
CLOUD_INVENTORY_SOURCES = ['ec2', 'rax', 'vmware', 'gce', 'azure', 'azure_rm', 'openstack', 'custom', 'satellite6', 'cloudforms', 'scm',]
|
||||
CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'custom', 'satellite6', 'cloudforms', 'scm',]
|
||||
|
||||
VERBOSITY_CHOICES = [
|
||||
(0, '0 (Normal)'),
|
||||
|
||||
@@ -57,7 +57,6 @@ class V1Credential(object):
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('cloudforms', 'Red Hat CloudForms'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure', 'Microsoft Azure Classic (deprecated)'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('openstack', 'OpenStack'),
|
||||
('insights', 'Insights'),
|
||||
@@ -690,6 +689,7 @@ def vault(cls):
|
||||
'secret': True,
|
||||
'ask_at_runtime': True
|
||||
}],
|
||||
'required': ['vault_password'],
|
||||
}
|
||||
)
|
||||
|
||||
@@ -735,7 +735,8 @@ def net(cls):
|
||||
'dependencies': {
|
||||
'ssh_key_unlock': ['ssh_key_data'],
|
||||
'authorize_password': ['authorize'],
|
||||
}
|
||||
},
|
||||
'required': ['username'],
|
||||
}
|
||||
)
|
||||
|
||||
@@ -822,7 +823,7 @@ def vmware(cls):
|
||||
'id': 'host',
|
||||
'label': 'VCenter Host',
|
||||
'type': 'string',
|
||||
'help_text': ('Enter the hostname or IP address which corresponds '
|
||||
'help_text': ('Enter the hostname or IP address that corresponds '
|
||||
'to your VMware vCenter.')
|
||||
}, {
|
||||
'id': 'username',
|
||||
@@ -850,7 +851,7 @@ def satellite6(cls):
|
||||
'id': 'host',
|
||||
'label': 'Satellite 6 URL',
|
||||
'type': 'string',
|
||||
'help_text': ('Enter the URL which corresponds to your Red Hat '
|
||||
'help_text': ('Enter the URL that corresponds to your Red Hat '
|
||||
'Satellite 6 server. For example, https://satellite.example.org')
|
||||
}, {
|
||||
'id': 'username',
|
||||
@@ -861,7 +862,8 @@ def satellite6(cls):
|
||||
'label': 'Password',
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}]
|
||||
}],
|
||||
'required': ['host', 'username', 'password'],
|
||||
}
|
||||
)
|
||||
|
||||
@@ -877,7 +879,7 @@ def cloudforms(cls):
|
||||
'id': 'host',
|
||||
'label': 'CloudForms URL',
|
||||
'type': 'string',
|
||||
'help_text': ('Enter the URL for the virtual machine which '
|
||||
'help_text': ('Enter the URL for the virtual machine that '
|
||||
'corresponds to your CloudForms instance. '
|
||||
'For example, https://cloudforms.example.org')
|
||||
}, {
|
||||
@@ -889,7 +891,8 @@ def cloudforms(cls):
|
||||
'label': 'Password',
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}]
|
||||
}],
|
||||
'required': ['host', 'username', 'password'],
|
||||
}
|
||||
)
|
||||
|
||||
@@ -912,8 +915,9 @@ def gce(cls):
|
||||
'label': 'Project',
|
||||
'type': 'string',
|
||||
'help_text': ('The Project ID is the GCE assigned identification. '
|
||||
'It is constructed as two words followed by a three '
|
||||
'digit number. Example: adjective-noun-000')
|
||||
'It is often constructed as three words or two words '
|
||||
'followed by a three-digit number. Examples: project-id-000 '
|
||||
'and another-project-id')
|
||||
}, {
|
||||
'id': 'ssh_key_data',
|
||||
'label': 'RSA Private Key',
|
||||
@@ -923,35 +927,8 @@ def gce(cls):
|
||||
'multiline': True,
|
||||
'help_text': ('Paste the contents of the PEM file associated '
|
||||
'with the service account email.')
|
||||
}]
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@CredentialType.default
|
||||
def azure(cls):
|
||||
return cls(
|
||||
kind='cloud',
|
||||
name='Microsoft Azure Classic (deprecated)',
|
||||
managed_by_tower=True,
|
||||
inputs={
|
||||
'fields': [{
|
||||
'id': 'username',
|
||||
'label': 'Subscription ID',
|
||||
'type': 'string',
|
||||
'help_text': ('Subscription ID is an Azure construct, which is '
|
||||
'mapped to a username.')
|
||||
}, {
|
||||
'id': 'ssh_key_data',
|
||||
'label': 'Management Certificate',
|
||||
'type': 'string',
|
||||
'format': 'ssh_private_key',
|
||||
'secret': True,
|
||||
'multiline': True,
|
||||
'help_text': ('Paste the contents of the PEM file that corresponds '
|
||||
'to the certificate you uploaded in the Microsoft '
|
||||
'Azure console.')
|
||||
}]
|
||||
}],
|
||||
'required': ['username', 'ssh_key_data'],
|
||||
}
|
||||
)
|
||||
|
||||
@@ -991,7 +968,8 @@ def azure_rm(cls):
|
||||
'id': 'tenant',
|
||||
'label': 'Tenant ID',
|
||||
'type': 'string'
|
||||
}]
|
||||
}],
|
||||
'required': ['subscription'],
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1022,4 +1000,3 @@ def insights(cls):
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ from django.utils.timezone import now, timedelta
|
||||
from solo.models import SingletonModel
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.managers import InstanceManager
|
||||
from awx.main.managers import InstanceManager, InstanceGroupManager
|
||||
from awx.main.models.inventory import InventoryUpdate
|
||||
from awx.main.models.jobs import Job
|
||||
from awx.main.models.projects import ProjectUpdate
|
||||
@@ -66,6 +66,8 @@ class Instance(models.Model):
|
||||
|
||||
class InstanceGroup(models.Model):
|
||||
"""A model representing a Queue/Group of AWX Instances."""
|
||||
objects = InstanceGroupManager()
|
||||
|
||||
name = models.CharField(max_length=250, unique=True)
|
||||
created = models.DateTimeField(auto_now_add=True)
|
||||
modified = models.DateTimeField(auto_now=True)
|
||||
@@ -89,12 +91,7 @@ class InstanceGroup(models.Model):
|
||||
|
||||
@property
|
||||
def capacity(self):
|
||||
return sum([x[0] for x in self.instances.values_list('capacity')])
|
||||
|
||||
@property
|
||||
def consumed_capacity(self):
|
||||
return sum(x.task_impact for x in UnifiedJob.objects.filter(instance_group=self,
|
||||
status__in=('running', 'waiting')))
|
||||
return sum([inst.capacity for inst in self.instances.all()])
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
|
||||
@@ -867,7 +867,6 @@ class InventorySourceOptions(BaseModel):
|
||||
('scm', _('Sourced from a Project')),
|
||||
('ec2', _('Amazon EC2')),
|
||||
('gce', _('Google Compute Engine')),
|
||||
('azure', _('Microsoft Azure Classic (deprecated)')),
|
||||
('azure_rm', _('Microsoft Azure Resource Manager')),
|
||||
('vmware', _('VMware vCenter')),
|
||||
('satellite6', _('Red Hat Satellite 6')),
|
||||
@@ -1087,7 +1086,7 @@ class InventorySourceOptions(BaseModel):
|
||||
return regions
|
||||
|
||||
@classmethod
|
||||
def get_azure_region_choices(self):
|
||||
def get_azure_rm_region_choices(self):
|
||||
"""Return a complete list of regions in Microsoft Azure, as a list of
|
||||
two-tuples.
|
||||
"""
|
||||
@@ -1095,14 +1094,10 @@ class InventorySourceOptions(BaseModel):
|
||||
# authenticating first (someone reading these might think there's
|
||||
# a pattern here!). Therefore, you guessed it, use a list from
|
||||
# settings.
|
||||
regions = list(getattr(settings, 'AZURE_REGION_CHOICES', []))
|
||||
regions = list(getattr(settings, 'AZURE_RM_REGION_CHOICES', []))
|
||||
regions.insert(0, ('all', 'All'))
|
||||
return regions
|
||||
|
||||
@classmethod
|
||||
def get_azure_rm_region_choices(self):
|
||||
return InventorySourceOptions.get_azure_region_choices()
|
||||
|
||||
@classmethod
|
||||
def get_vmware_region_choices(self):
|
||||
"""Return a complete list of regions in VMware, as a list of two-tuples
|
||||
@@ -1143,6 +1138,11 @@ class InventorySourceOptions(BaseModel):
|
||||
# from the instance metadata instead of those explicitly provided.
|
||||
elif self.source in CLOUD_PROVIDERS and self.source != 'ec2':
|
||||
raise ValidationError(_('Credential is required for a cloud source.'))
|
||||
elif self.source == 'custom' and cred and cred.credential_type.kind in ('scm', 'ssh', 'insights', 'vault'):
|
||||
raise ValidationError(_(
|
||||
'Credentials of type machine, source control, insights and vault are '
|
||||
'disallowed for custom inventory sources.'
|
||||
))
|
||||
return cred
|
||||
|
||||
def clean_source_regions(self):
|
||||
@@ -1400,7 +1400,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
|
||||
self.source == 'scm' and \
|
||||
InventorySource.objects.filter(
|
||||
Q(inventory=self.inventory,
|
||||
update_on_project_update=True, source='scm') &
|
||||
update_on_project_update=True, source='scm') &
|
||||
~Q(id=self.id)).exists():
|
||||
raise ValidationError(_("More than one SCM-based inventory source with update on project update per-inventory not allowed."))
|
||||
return self.update_on_project_update
|
||||
@@ -1409,7 +1409,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
|
||||
if self.update_on_project_update is True and \
|
||||
self.source == 'scm' and \
|
||||
self.update_on_launch is True:
|
||||
raise ValidationError(_("Cannot update SCM-based inventory source on launch if set to update on project update. "
|
||||
raise ValidationError(_("Cannot update SCM-based inventory source on launch if set to update on project update. "
|
||||
"Instead, configure the corresponding source project to update on launch."))
|
||||
return self.update_on_launch
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ from django.db.models import Q, Count
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from dateutil import parser
|
||||
from dateutil.tz import tzutc
|
||||
from django.utils.encoding import force_text
|
||||
from django.utils.encoding import force_text, smart_str
|
||||
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
@@ -785,10 +785,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
        if 'insights' in ansible_facts and 'system_id' in ansible_facts['insights']:
            host.insights_system_id = ansible_facts['insights']['system_id']
        host.save()
        system_tracking_logger.info('New fact for inventory {} host {}'.format(host.inventory.name, host.name),
                                    extra=dict(inventory_id=host.inventory.id, host_name=host.name,
                                               ansible_facts=host.ansible_facts,
                                               ansible_facts_modified=host.ansible_facts_modified.isoformat()))
        system_tracking_logger.info(
            'New fact for inventory {} host {}'.format(
                smart_str(host.inventory.name), smart_str(host.name)),
            extra=dict(inventory_id=host.inventory.id, host_name=host.name,
                       ansible_facts=host.ansible_facts,
                       ansible_facts_modified=host.ansible_facts_modified.isoformat()))


class JobHostSummary(CreatedModifiedModel):
@@ -830,8 +832,9 @@ class JobHostSummary(CreatedModifiedModel):
    failed = models.BooleanField(default=False, editable=False)

    def __unicode__(self):
        hostname = self.host.name if self.host else 'N/A'
        return '%s changed=%d dark=%d failures=%d ok=%d processed=%d skipped=%s' % \
            (self.host.name, self.changed, self.dark, self.failures, self.ok,
            (hostname, self.changed, self.dark, self.failures, self.ok,
             self.processed, self.skipped)

    def get_absolute_url(self, request=None):
@@ -1163,7 +1166,6 @@ class JobEvent(CreatedModifiedModel):

    def _update_hosts(self, extra_host_pks=None):
        # Update job event hosts m2m from host_name, propagate to parent events.
        from awx.main.models.inventory import Host
        extra_host_pks = set(extra_host_pks or [])
        hostnames = set()
        if self.host_name:
@@ -1174,7 +1176,7 @@ class JobEvent(CreatedModifiedModel):
                hostnames.update(v.keys())
            except AttributeError:  # In case event_data or v isn't a dict.
                pass
        qs = Host.objects.filter(inventory__jobs__id=self.job_id)
        qs = self.job.inventory.hosts.all()
        qs = qs.filter(Q(name__in=hostnames) | Q(pk__in=extra_host_pks))
        qs = qs.exclude(job_events__pk=self.id).only('id')
        for host in qs:
@@ -1185,30 +1187,32 @@ class JobEvent(CreatedModifiedModel):
            parent = parent[0]
            parent._update_hosts(qs.values_list('id', flat=True))

    def _update_host_summary_from_stats(self):
        from awx.main.models.inventory import Host
    def _hostnames(self):
        hostnames = set()
        try:
            for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
                hostnames.update(self.event_data.get(stat, {}).keys())
        except AttributeError:  # In case event_data or v isn't a dict.
        except AttributeError:  # In case event_data or v isn't a dict.
            pass
        return hostnames

    def _update_host_summary_from_stats(self, hostnames):
        with ignore_inventory_computed_fields():
            qs = Host.objects.filter(inventory__jobs__id=self.job_id,
                                     name__in=hostnames)
            qs = self.job.inventory.hosts.filter(name__in=hostnames)
            job = self.job
            for host in hostnames:
                host_stats = {}
                for stat in ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped'):
                    try:
                        host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
                    except AttributeError:  # in case event_data[stat] isn't a dict.
                    except AttributeError:  # in case event_data[stat] isn't a dict.
                        pass
                if qs.filter(name=host).exists():
                    host_actual = qs.get(name=host)
                    host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
                else:
                    host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)

                if not created:
                    update_fields = []
                    for stat, value in host_stats.items():
@@ -1217,11 +1221,8 @@ class JobEvent(CreatedModifiedModel):
                            update_fields.append(stat)
                        if update_fields:
                            host_summary.save(update_fields=update_fields)
        job.inventory.update_computed_fields()
        emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job.id))

    def save(self, *args, **kwargs):
        from awx.main.models.inventory import Host
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
@@ -1236,7 +1237,7 @@ class JobEvent(CreatedModifiedModel):
                update_fields.append(field)
        # Update host related field from host_name.
        if not self.host_id and self.host_name:
            host_qs = Host.objects.filter(inventory__jobs__id=self.job_id, name=self.host_name)
            host_qs = self.job.inventory.hosts.filter(name=self.host_name)
            host_id = host_qs.only('id').values_list('id', flat=True).first()
            if host_id != self.host_id:
                self.host_id = host_id
@@ -1249,7 +1250,12 @@ class JobEvent(CreatedModifiedModel):
                self._update_hosts()
            if self.event == 'playbook_on_stats':
                self._update_parents_failed_and_changed()
                self._update_host_summary_from_stats()

                hostnames = self._hostnames()
                self._update_host_summary_from_stats(hostnames)
                self.job.inventory.update_computed_fields()

                emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=self.job.id))

    @classmethod
    def create_from_data(self, **kwargs):
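For reference, the playbook_on_stats handling above now splits hostname collection (_hostnames) from the per-host summary updates (_update_host_summary_from_stats). The following is a minimal standalone sketch of that data flow, not part of the commit: plain dicts stand in for the Host and JobHostSummary querysets, and the event_data sample is hypothetical.

# Standalone sketch of the playbook_on_stats bookkeeping shown above.
STATS = ('changed', 'dark', 'failures', 'ok', 'processed', 'skipped')

# Hypothetical payload in the shape Ansible reports for playbook_on_stats.
event_data = {
    'ok': {'web1.example.com': 12, 'db1.example.com': 7},
    'changed': {'web1.example.com': 3},
    'failures': {'db1.example.com': 1},
}


def hostnames(event_data):
    # Mirrors JobEvent._hostnames(): union of host names across all stat buckets.
    names = set()
    for stat in STATS:
        names.update(event_data.get(stat, {}).keys())
    return names


def summarize(event_data):
    # Mirrors the per-host loop in _update_host_summary_from_stats(),
    # defaulting every stat to 0 when a host is absent from a bucket.
    return {
        host: {stat: event_data.get(stat, {}).get(host, 0) for stat in STATS}
        for host in hostnames(event_data)
    }


if __name__ == '__main__':
    for host, stats in sorted(summarize(event_data).items()):
        print('{}: {}'.format(host, stats))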
@@ -19,6 +19,7 @@ from awx.main.notifications.twilio_backend import TwilioBackend
from awx.main.notifications.pagerduty_backend import PagerDutyBackend
from awx.main.notifications.hipchat_backend import HipChatBackend
from awx.main.notifications.webhook_backend import WebhookBackend
from awx.main.notifications.mattermost_backend import MattermostBackend
from awx.main.notifications.irc_backend import IrcBackend
from awx.main.fields import JSONField

@@ -36,6 +37,7 @@ class NotificationTemplate(CommonModelNameNotUnique):
                          ('pagerduty', _('Pagerduty'), PagerDutyBackend),
                          ('hipchat', _('HipChat'), HipChatBackend),
                          ('webhook', _('Webhook'), WebhookBackend),
                          ('mattermost', _('Mattermost'), MattermostBackend),
                          ('irc', _('IRC'), IrcBackend)]
    NOTIFICATION_TYPE_CHOICES = [(x[0], x[1]) for x in NOTIFICATION_TYPES]
    CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2]) for x in NOTIFICATION_TYPES])
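For readers skimming the Mattermost registration above: NOTIFICATION_TYPES is the single source of truth, and the two derived lookups are just projections of it. A small self-contained sketch of what those comprehensions produce, using placeholder backend classes since the real ones live under awx.main.notifications:

# Sketch of the derived lookup tables built from NOTIFICATION_TYPES.
class EmailBackend(object):
    pass


class MattermostBackend(object):
    pass


NOTIFICATION_TYPES = [
    ('email', 'Email', EmailBackend),
    ('mattermost', 'Mattermost', MattermostBackend),
]

# Choices tuples for the model field: (stored value, human-readable label).
NOTIFICATION_TYPE_CHOICES = [(x[0], x[1]) for x in NOTIFICATION_TYPES]

# Lookup used when instantiating the backend for a given notification type.
CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2]) for x in NOTIFICATION_TYPES])

assert CLASS_FOR_NOTIFICATION_TYPE['mattermost'] is MattermostBackend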
@@ -9,7 +9,7 @@ import uuid

# Django
from django.conf import settings
from django.db import models
from django.db import models, connection
from django.contrib.auth.models import User
from django.utils.timezone import now as tz_now
from django.utils.translation import ugettext_lazy as _
@@ -187,7 +187,7 @@ class AuthToken(BaseModel):
        if not self.pk or not self.is_expired(now=now):
            self.expires = now + datetime.timedelta(seconds=settings.AUTH_TOKEN_EXPIRATION)
            if save:
                self.save()
                connection.on_commit(lambda: self.save(update_fields=['expires']))

    def invalidate(self, reason='timeout_reached', save=True):
        if not AuthToken.reason_long(reason):
@@ -236,7 +236,9 @@ class AuthToken(BaseModel):
        valid_n_tokens_qs = self.user.auth_tokens.filter(
            expires__gt=now,
            reason='',
        ).order_by('-created')[0:settings.AUTH_TOKEN_PER_USER]
        ).order_by('-created')
        if settings.AUTH_TOKEN_PER_USER != -1:
            valid_n_tokens_qs = valid_n_tokens_qs[0:settings.AUTH_TOKEN_PER_USER]
        valid_n_tokens = valid_n_tokens_qs.values_list('key', flat=True)

        return bool(self.key in valid_n_tokens)
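The token change above does two things: it defers the expires write until the surrounding transaction commits, and it treats AUTH_TOKEN_PER_USER = -1 as "unlimited" by skipping the slice. A hedged sketch of just the slicing rule, not part of the commit, with a plain list standing in for the ordered queryset:

# Sketch of the "newest N tokens are valid" rule, with -1 meaning unlimited.
def valid_token_keys(token_keys_newest_first, per_user_limit):
    """Return the token keys still considered valid under the per-user limit."""
    if per_user_limit != -1:
        return token_keys_newest_first[0:per_user_limit]
    return token_keys_newest_first


tokens = ['t5', 't4', 't3', 't2', 't1']           # newest first
assert valid_token_keys(tokens, 3) == ['t5', 't4', 't3']
assert valid_token_keys(tokens, -1) == tokens      # unlimited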
@@ -26,7 +26,8 @@ class HipChatBackend(AWXBaseEmailBackend):
    def __init__(self, token, color, api_url, notify, fail_silently=False, **kwargs):
        super(HipChatBackend, self).__init__(fail_silently=fail_silently)
        self.token = token
        self.color = color
        if color is not None:
            self.color = color.lower()
        self.api_url = api_url
        self.notify = notify

52
awx/main/notifications/mattermost_backend.py
Normal file
@@ -0,0 +1,52 @@
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

import logging
import requests
import json

from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from awx.main.notifications.base import AWXBaseEmailBackend

logger = logging.getLogger('awx.main.notifications.mattermost_backend')


class MattermostBackend(AWXBaseEmailBackend):

    init_parameters = {"mattermost_url": {"label": "Target URL", "type": "string"},
                       "mattermost_no_verify_ssl": {"label": "Verify SSL", "type": "bool"}}
    recipient_parameter = "mattermost_url"
    sender_parameter = None

    def __init__(self, mattermost_no_verify_ssl=False, mattermost_channel=None, mattermost_username=None,
                 mattermost_icon_url=None, fail_silently=False, **kwargs):
        super(MattermostBackend, self).__init__(fail_silently=fail_silently)
        self.mattermost_channel = mattermost_channel
        self.mattermost_username = mattermost_username
        self.mattermost_icon_url = mattermost_icon_url
        self.mattermost_no_verify_ssl = mattermost_no_verify_ssl

    def format_body(self, body):
        return body

    def send_messages(self, messages):
        sent_messages = 0
        for m in messages:
            payload = {}
            for opt, optval in {'mattermost_icon_url':'icon_url',
                                'mattermost_channel': 'channel', 'mattermost_username': 'username'}.iteritems():
                optvalue = getattr(self, opt)
                if optvalue is not None:
                    payload[optval] = optvalue.strip()

            payload['text'] = m.subject

            r = requests.post("{}".format(m.recipients()[0]),
                              data=json.dumps(payload), verify=(not self.mattermost_no_verify_ssl))
            if r.status_code >= 400:
                logger.error(smart_text(_("Error sending notification mattermost: {}").format(r.text)))
                if not self.fail_silently:
                    raise Exception(smart_text(_("Error sending notification mattermost: {}").format(r.text)))
            sent_messages += 1
        return sent_messages
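Outside of AWX, the same payload shape can be exercised directly against a Mattermost incoming webhook. A minimal usage sketch mirroring how send_messages builds and posts the JSON body; the webhook URL, channel, and username below are placeholders, not values from this commit:

import json
import requests

# Placeholder incoming-webhook URL; in AWX this comes from the notification
# template's mattermost_url ("Target URL") parameter.
WEBHOOK_URL = 'https://mattermost.example.com/hooks/xxxxxxxxxxxxxxxx'

payload = {
    'text': 'Job #42 succeeded',   # AWX sends the message subject here
    'channel': 'ops-alerts',       # optional overrides, only sent when configured
    'username': 'awx-bot',
}

response = requests.post(WEBHOOK_URL, data=json.dumps(payload), verify=True)
if response.status_code >= 400:
    raise Exception('Error sending notification mattermost: {}'.format(response.text))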
@@ -1,536 +1,4 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
# Copyright (c) 2017 Ansible, Inc.
|
||||
#
|
||||
|
||||
# Python
|
||||
from datetime import datetime, timedelta
|
||||
import logging
|
||||
import uuid
|
||||
from sets import Set
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.db import transaction, connection, DatabaseError
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.timezone import now as tz_now, utc
|
||||
from django.db.models import Q
|
||||
|
||||
# AWX
|
||||
from awx.main.models import * # noqa
|
||||
#from awx.main.scheduler.dag_simple import SimpleDAG
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main import tasks as awx_tasks
|
||||
|
||||
# Celery
|
||||
from celery.task.control import inspect
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
class TaskManager():
|
||||
|
||||
def __init__(self):
|
||||
self.graph = dict()
|
||||
for rampart_group in InstanceGroup.objects.all():
|
||||
self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
|
||||
capacity_total=rampart_group.capacity,
|
||||
capacity_used=0)
|
||||
|
||||
def is_job_blocked(self, task):
|
||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
||||
# in the old task manager this was handled as a method on each task object outside of the graph and
|
||||
# probably has the side effect of cutting down *a lot* of the logic from this task manager class
|
||||
for g in self.graph:
|
||||
if self.graph[g]['graph'].is_job_blocked(task):
|
||||
return True
|
||||
|
||||
if not task.dependent_jobs_finished():
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
inventory_updates_qs = InventoryUpdate.objects.filter(status__in=status_list).exclude(source='file').prefetch_related('inventory_source', 'instance_group')
|
||||
inventory_updates = [i for i in inventory_updates_qs]
|
||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
system_jobs = [s for s in SystemJob.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
ad_hoc_commands = [a for a in AdHocCommand.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
workflow_jobs = [w for w in WorkflowJob.objects.filter(status__in=status_list)]
|
||||
all_tasks = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands + workflow_jobs,
|
||||
key=lambda task: task.created)
|
||||
return all_tasks
|
||||
|
||||
'''
|
||||
Tasks that are running and SHOULD have a celery task.
|
||||
{
|
||||
'execution_node': [j1, j2,...],
|
||||
'execution_node': [j3],
|
||||
...
|
||||
}
|
||||
'''
|
||||
def get_running_tasks(self):
|
||||
execution_nodes = {}
|
||||
now = tz_now()
|
||||
jobs = UnifiedJob.objects.filter(Q(status='running') |
|
||||
Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
|
||||
[execution_nodes.setdefault(j.execution_node, [j]).append(j) for j in jobs]
|
||||
return execution_nodes
|
||||
|
||||
'''
|
||||
Tasks that are currently running in celery
|
||||
|
||||
Transform:
|
||||
{
|
||||
"celery@ec2-54-204-222-62.compute-1.amazonaws.com": [],
|
||||
"celery@ec2-54-163-144-168.compute-1.amazonaws.com": [{
|
||||
...
|
||||
"id": "5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
...
|
||||
}, {
|
||||
...,
|
||||
}, ...]
|
||||
}
|
||||
|
||||
to:
|
||||
{
|
||||
"ec2-54-204-222-62.compute-1.amazonaws.com": [
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8306",
|
||||
...
|
||||
]
|
||||
}
|
||||
'''
|
||||
def get_active_tasks(self):
|
||||
inspector = inspect()
|
||||
if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):
|
||||
active_task_queues = inspector.active()
|
||||
else:
|
||||
logger.warn("Ignoring celery task inspector")
|
||||
active_task_queues = None
|
||||
|
||||
queues = None
|
||||
|
||||
if active_task_queues is not None:
|
||||
queues = {}
|
||||
for queue in active_task_queues:
|
||||
active_tasks = set()
|
||||
map(lambda at: active_tasks.add(at['id']), active_task_queues[queue])
|
||||
|
||||
# celery worker name is of the form celery@myhost.com
|
||||
queue_name = queue.split('@')
|
||||
queue_name = queue_name[1 if len(queue_name) > 1 else 0]
|
||||
queues[queue_name] = active_tasks
|
||||
else:
|
||||
if not hasattr(settings, 'CELERY_UNIT_TEST'):
|
||||
return (None, None)
|
||||
|
||||
return (active_task_queues, queues)
|
||||
|
||||
def get_latest_project_update_tasks(self, all_sorted_tasks):
|
||||
project_ids = Set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
project_ids.add(task.project_id)
|
||||
return ProjectUpdate.objects.filter(id__in=project_ids)
|
||||
|
||||
def get_latest_inventory_update_tasks(self, all_sorted_tasks):
|
||||
inventory_ids = Set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return InventoryUpdate.objects.filter(id__in=inventory_ids)
|
||||
|
||||
def get_running_workflow_jobs(self):
|
||||
graph_workflow_jobs = [wf for wf in
|
||||
WorkflowJob.objects.filter(status='running')]
|
||||
return graph_workflow_jobs
|
||||
|
||||
def get_inventory_source_tasks(self, all_sorted_tasks):
|
||||
inventory_ids = Set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
|
||||
|
||||
def spawn_workflow_graph_jobs(self, workflow_jobs):
|
||||
for workflow_job in workflow_jobs:
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
spawn_nodes = dag.bfs_nodes_to_run()
|
||||
for spawn_node in spawn_nodes:
|
||||
if spawn_node.unified_job_template is None:
|
||||
continue
|
||||
kv = spawn_node.get_job_kwargs()
|
||||
job = spawn_node.unified_job_template.create_unified_job(**kv)
|
||||
spawn_node.job = job
|
||||
spawn_node.save()
|
||||
if job._resources_sufficient_for_launch():
|
||||
can_start = job.signal_start(**kv)
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
else:
|
||||
can_start = False
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
if not can_start:
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
connection.on_commit(lambda: job.websocket_emit_status('failed'))
|
||||
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
|
||||
|
||||
# See comment in tasks.py::RunWorkflowJob::run()
|
||||
def process_finished_workflow_jobs(self, workflow_jobs):
|
||||
result = []
|
||||
for workflow_job in workflow_jobs:
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
if workflow_job.cancel_flag:
|
||||
workflow_job.status = 'canceled'
|
||||
workflow_job.save()
|
||||
dag.cancel_node_jobs()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
elif dag.is_workflow_done():
|
||||
result.append(workflow_job.id)
|
||||
if workflow_job._has_failed():
|
||||
workflow_job.status = 'failed'
|
||||
else:
|
||||
workflow_job.status = 'successful'
|
||||
workflow_job.save()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
return result
|
||||
|
||||
def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
|
||||
return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]
|
||||
|
||||
def start_task(self, task, rampart_group, dependent_tasks=[]):
|
||||
from awx.main.tasks import handle_work_error, handle_work_success
|
||||
|
||||
task_actual = {
|
||||
'type': get_type_for_model(type(task)),
|
||||
'id': task.id,
|
||||
}
|
||||
dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
|
||||
|
||||
error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
|
||||
success_handler = handle_work_success.s(task_actual=task_actual)
|
||||
|
||||
task.status = 'waiting'
|
||||
|
||||
(start_status, opts) = task.pre_start()
|
||||
if not start_status:
|
||||
task.status = 'failed'
|
||||
if task.job_explanation:
|
||||
task.job_explanation += ' '
|
||||
task.job_explanation += 'Task failed pre-start check.'
|
||||
task.save()
|
||||
# TODO: run error handler to fail sub-tasks and send notifications
|
||||
else:
|
||||
if type(task) is WorkflowJob:
|
||||
task.status = 'running'
|
||||
if not task.supports_isolation() and rampart_group.controller_id:
|
||||
# non-Ansible jobs on isolated instances run on controller
|
||||
task.instance_group = rampart_group.controller
|
||||
logger.info('Submitting isolated %s to queue %s via %s.',
|
||||
task.log_format, task.instance_group_id, rampart_group.controller_id)
|
||||
else:
|
||||
task.instance_group = rampart_group
|
||||
logger.info('Submitting %s to instance group %s.', task.log_format, task.instance_group_id)
|
||||
with disable_activity_stream():
|
||||
task.celery_task_id = str(uuid.uuid4())
|
||||
task.save()
|
||||
|
||||
self.consume_capacity(task, rampart_group.name)
|
||||
|
||||
def post_commit():
|
||||
task.websocket_emit_status(task.status)
|
||||
if task.status != 'failed':
|
||||
task.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler, queue=rampart_group.name)
|
||||
|
||||
connection.on_commit(post_commit)
|
||||
|
||||
def process_running_tasks(self, running_tasks):
|
||||
map(lambda task: self.graph[task.instance_group.name]['graph'].add_job(task), running_tasks)
|
||||
|
||||
def create_project_update(self, task):
|
||||
project_task = Project.objects.get(id=task.project_id).create_project_update(launch_type='dependency')
|
||||
|
||||
# Project created 1 seconds behind
|
||||
project_task.created = task.created - timedelta(seconds=1)
|
||||
project_task.status = 'pending'
|
||||
project_task.save()
|
||||
return project_task
|
||||
|
||||
def create_inventory_update(self, task, inventory_source_task):
|
||||
inventory_task = InventorySource.objects.get(id=inventory_source_task.id).create_inventory_update(launch_type='dependency')
|
||||
|
||||
inventory_task.created = task.created - timedelta(seconds=2)
|
||||
inventory_task.status = 'pending'
|
||||
inventory_task.save()
|
||||
# inventory_sources = self.get_inventory_source_tasks([task])
|
||||
# self.process_inventory_sources(inventory_sources)
|
||||
return inventory_task
|
||||
|
||||
def capture_chain_failure_dependencies(self, task, dependencies):
|
||||
with disable_activity_stream():
|
||||
task.dependent_jobs.add(*dependencies)
|
||||
|
||||
for dep in dependencies:
|
||||
# Add task + all deps except self
|
||||
dep.dependent_jobs.add(*([task] + filter(lambda d: d != dep, dependencies)))
|
||||
|
||||
def should_update_inventory_source(self, job, inventory_source):
|
||||
now = tz_now()
|
||||
|
||||
# Already processed dependencies for this job
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("created")
|
||||
if not latest_inventory_update.exists():
|
||||
return True
|
||||
latest_inventory_update = latest_inventory_update.first()
|
||||
'''
|
||||
If there's already a inventory update utilizing this job that's about to run
|
||||
then we don't need to create one
|
||||
'''
|
||||
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
|
||||
return False
|
||||
|
||||
timeout_seconds = timedelta(seconds=latest_inventory_update.inventory_source.update_cache_timeout)
|
||||
if (latest_inventory_update.finished + timeout_seconds) < now:
|
||||
return True
|
||||
if latest_inventory_update.inventory_source.update_on_launch is True and \
|
||||
latest_inventory_update.status in ['failed', 'canceled', 'error']:
|
||||
return True
|
||||
return False
|
||||
|
||||
def should_update_related_project(self, job):
|
||||
now = tz_now()
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=job.project).order_by("created")
|
||||
if not latest_project_update.exists():
|
||||
return True
|
||||
latest_project_update = latest_project_update.first()
|
||||
if latest_project_update.status in ['failed', 'canceled']:
|
||||
return True
|
||||
|
||||
'''
|
||||
If there's already a project update utilizing this job that's about to run
|
||||
then we don't need to create one
|
||||
'''
|
||||
if latest_project_update.status in ['waiting', 'pending', 'running']:
|
||||
return False
|
||||
|
||||
'''
|
||||
If the latest project update has a created time == job_created_time-1
|
||||
then consider the project update found. This is so we don't enter an infinite loop
|
||||
of updating the project when cache timeout is 0.
|
||||
'''
|
||||
if latest_project_update.project.scm_update_cache_timeout == 0 and \
|
||||
latest_project_update.launch_type == 'dependency' and \
|
||||
latest_project_update.created == job.created - timedelta(seconds=1):
|
||||
return False
|
||||
'''
|
||||
Normal Cache Timeout Logic
|
||||
'''
|
||||
timeout_seconds = timedelta(seconds=latest_project_update.project.scm_update_cache_timeout)
|
||||
if (latest_project_update.finished + timeout_seconds) < now:
|
||||
return True
|
||||
return False
|
||||
|
||||
def generate_dependencies(self, task):
|
||||
dependencies = []
|
||||
if type(task) is Job:
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True and \
|
||||
self.should_update_related_project(task):
|
||||
project_task = self.create_project_update(task)
|
||||
dependencies.append(project_task)
|
||||
# Inventory created 2 seconds behind job
|
||||
if task.launch_type != 'callback':
|
||||
for inventory_source in [invsrc for invsrc in self.all_inventory_sources if invsrc.inventory == task.inventory]:
|
||||
if self.should_update_inventory_source(task, inventory_source):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
dependencies.append(inventory_task)
|
||||
|
||||
if len(dependencies) > 0:
|
||||
self.capture_chain_failure_dependencies(task, dependencies)
|
||||
return dependencies
|
||||
|
||||
def process_dependencies(self, dependent_task, dependency_tasks):
|
||||
for task in dependency_tasks:
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("Dependent %s is blocked from running", task.log_format)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group %s capacity <= 0", rampart_group.name)
|
||||
continue
|
||||
if not self.would_exceed_capacity(task, rampart_group.name):
|
||||
logger.debug("Starting dependent %s in group %s", task.log_format, rampart_group.name)
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
tasks_to_fail = filter(lambda t: t != task, dependency_tasks)
|
||||
tasks_to_fail += [dependent_task]
|
||||
self.start_task(task, rampart_group, tasks_to_fail)
|
||||
found_acceptable_queue = True
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("Dependent %s couldn't be scheduled on graph, waiting for next cycle", task.log_format)
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("%s is blocked from running", task.log_format)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group %s capacity <= 0", rampart_group.name)
|
||||
continue
|
||||
if not self.would_exceed_capacity(task, rampart_group.name):
|
||||
logger.debug("Starting %s in group %s", task.log_format, rampart_group.name)
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
self.start_task(task, rampart_group, task.get_jobs_fail_chain())
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("%s couldn't be scheduled on graph, waiting for next cycle", task.log_format)
|
||||
|
||||
def cleanup_inconsistent_celery_tasks(self):
|
||||
'''
|
||||
Rectify tower db <-> celery inconsistent view of jobs state
|
||||
'''
|
||||
last_cleanup = cache.get('last_celery_task_cleanup') or datetime.min.replace(tzinfo=utc)
|
||||
if (tz_now() - last_cleanup).seconds < settings.AWX_INCONSISTENT_TASK_INTERVAL:
|
||||
return
|
||||
|
||||
logger.debug("Failing inconsistent running jobs.")
|
||||
celery_task_start_time = tz_now()
|
||||
active_task_queues, active_queues = self.get_active_tasks()
|
||||
cache.set('last_celery_task_cleanup', tz_now())
|
||||
|
||||
if active_queues is None:
|
||||
logger.error('Failed to retrieve active tasks from celery')
|
||||
return None
|
||||
|
||||
'''
|
||||
Only consider failing tasks on instances for which we obtained a task
|
||||
list from celery for.
|
||||
'''
|
||||
running_tasks = self.get_running_tasks()
|
||||
for node, node_jobs in running_tasks.iteritems():
|
||||
if node in active_queues:
|
||||
active_tasks = active_queues[node]
|
||||
else:
|
||||
'''
|
||||
Node task list not found in celery. If tower thinks the node is down
|
||||
then fail all the jobs on the node.
|
||||
'''
|
||||
try:
|
||||
instance = Instance.objects.get(hostname=node)
|
||||
if instance.capacity == 0:
|
||||
active_tasks = []
|
||||
else:
|
||||
continue
|
||||
except Instance.DoesNotExist:
|
||||
logger.error("Execution node Instance {} not found in database. "
|
||||
"The node is currently executing jobs {}".format(node, [str(j) for j in node_jobs]))
|
||||
active_tasks = []
|
||||
for task in node_jobs:
|
||||
if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
|
||||
if isinstance(task, WorkflowJob):
|
||||
continue
|
||||
if task.modified > celery_task_start_time:
|
||||
continue
|
||||
task.status = 'failed'
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but was not present in',
|
||||
'Celery, so it has been marked as failed.',
|
||||
))
|
||||
try:
|
||||
task.save(update_fields=['status', 'job_explanation'])
|
||||
except DatabaseError:
|
||||
logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
|
||||
continue
|
||||
awx_tasks._send_notification_templates(task, 'failed')
|
||||
task.websocket_emit_status('failed')
|
||||
logger.error("Task {} has no record in celery. Marking as failed".format(task.log_format))
|
||||
|
||||
def calculate_capacity_used(self, tasks):
|
||||
for rampart_group in self.graph:
|
||||
self.graph[rampart_group]['capacity_used'] = 0
|
||||
for t in tasks:
|
||||
# TODO: dock capacity for isolated job management tasks running in queue
|
||||
for group_actual in InstanceGroup.objects.filter(instances__hostname=t.execution_node).values_list('name'):
|
||||
if group_actual[0] in self.graph:
|
||||
self.graph[group_actual[0]]['capacity_used'] += t.task_impact
|
||||
|
||||
def would_exceed_capacity(self, task, instance_group):
|
||||
current_capacity = self.graph[instance_group]['capacity_used']
|
||||
capacity_total = self.graph[instance_group]['capacity_total']
|
||||
if current_capacity == 0:
|
||||
return False
|
||||
return (task.task_impact + current_capacity > capacity_total)
|
||||
|
||||
def consume_capacity(self, task, instance_group):
|
||||
self.graph[instance_group]['capacity_used'] += task.task_impact
|
||||
|
||||
def get_remaining_capacity(self, instance_group):
|
||||
return (self.graph[instance_group]['capacity_total'] - self.graph[instance_group]['capacity_used'])
|
||||
|
||||
def process_tasks(self, all_sorted_tasks):
|
||||
running_tasks = filter(lambda t: t.status in ['waiting', 'running'], all_sorted_tasks)
|
||||
|
||||
self.calculate_capacity_used(running_tasks)
|
||||
|
||||
self.process_running_tasks(running_tasks)
|
||||
|
||||
pending_tasks = filter(lambda t: t.status in 'pending', all_sorted_tasks)
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
|
||||
def _schedule(self):
|
||||
finished_wfjs = []
|
||||
all_sorted_tasks = self.get_tasks()
|
||||
if len(all_sorted_tasks) > 0:
|
||||
# TODO: Deal with
|
||||
# latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks)
|
||||
# self.process_latest_project_updates(latest_project_updates)
|
||||
|
||||
# latest_inventory_updates = self.get_latest_inventory_update_tasks(all_sorted_tasks)
|
||||
# self.process_latest_inventory_updates(latest_inventory_updates)
|
||||
|
||||
self.all_inventory_sources = self.get_inventory_source_tasks(all_sorted_tasks)
|
||||
|
||||
running_workflow_tasks = self.get_running_workflow_jobs()
|
||||
finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
|
||||
|
||||
self.spawn_workflow_graph_jobs(running_workflow_tasks)
|
||||
|
||||
self.process_tasks(all_sorted_tasks)
|
||||
return finished_wfjs
|
||||
|
||||
def schedule(self):
|
||||
logger.debug("Starting Schedule")
|
||||
with transaction.atomic():
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
return
|
||||
|
||||
self.cleanup_inconsistent_celery_tasks()
|
||||
finished_wfjs = self._schedule()
|
||||
|
||||
# Operations whose queries rely on modifications made during the atomic scheduling session
|
||||
for wfj in WorkflowJob.objects.filter(id__in=finished_wfjs):
|
||||
awx_tasks._send_notification_templates(wfj, 'succeeded' if wfj.status == 'successful' else 'failed')
|
||||
from awx.main.scheduler.task_manager import TaskManager # noqa
|
||||
|
||||
625
awx/main/scheduler/task_manager.py
Normal file
@@ -0,0 +1,625 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
# Python
|
||||
from datetime import datetime, timedelta
|
||||
import logging
|
||||
import uuid
|
||||
import json
|
||||
from sets import Set
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from django.db import transaction, connection, DatabaseError
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.timezone import now as tz_now, utc
|
||||
from django.db.models import Q
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
# AWX
|
||||
from awx.main.models import (
|
||||
AdHocCommand,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
Job,
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
SystemJob,
|
||||
UnifiedJob,
|
||||
WorkflowJob,
|
||||
)
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import get_type_for_model
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main import tasks as awx_tasks
|
||||
from awx.main.utils import decrypt_field
|
||||
|
||||
# Celery
|
||||
from celery.task.control import inspect
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
class TaskManager():
|
||||
|
||||
def __init__(self):
|
||||
self.graph = dict()
|
||||
for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
|
||||
self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
|
||||
capacity_total=rampart_group.capacity,
|
||||
consumed_capacity=0)
|
||||
|
||||
def is_job_blocked(self, task):
|
||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
||||
# in the old task manager this was handled as a method on each task object outside of the graph and
|
||||
# probably has the side effect of cutting down *a lot* of the logic from this task manager class
|
||||
for g in self.graph:
|
||||
if self.graph[g]['graph'].is_job_blocked(task):
|
||||
return True
|
||||
|
||||
if not task.dependent_jobs_finished():
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
inventory_updates_qs = InventoryUpdate.objects.filter(status__in=status_list).exclude(source='file').prefetch_related('inventory_source', 'instance_group')
|
||||
inventory_updates = [i for i in inventory_updates_qs]
|
||||
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
system_jobs = [s for s in SystemJob.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
ad_hoc_commands = [a for a in AdHocCommand.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
workflow_jobs = [w for w in WorkflowJob.objects.filter(status__in=status_list)]
|
||||
all_tasks = sorted(jobs + project_updates + inventory_updates + system_jobs + ad_hoc_commands + workflow_jobs,
|
||||
key=lambda task: task.created)
|
||||
return all_tasks
|
||||
|
||||
'''
|
||||
Tasks that are running and SHOULD have a celery task.
|
||||
{
|
||||
'execution_node': [j1, j2,...],
|
||||
'execution_node': [j3],
|
||||
...
|
||||
}
|
||||
'''
|
||||
def get_running_tasks(self):
|
||||
execution_nodes = {}
|
||||
waiting_jobs = []
|
||||
now = tz_now()
|
||||
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
|
||||
jobs = UnifiedJob.objects.filter((Q(status='running') |
|
||||
Q(status='waiting', modified__lte=now - timedelta(seconds=60))) &
|
||||
~Q(polymorphic_ctype_id=workflow_ctype_id))
|
||||
for j in jobs:
|
||||
if j.execution_node:
|
||||
execution_nodes.setdefault(j.execution_node, []).append(j)
|
||||
else:
|
||||
waiting_jobs.append(j)
|
||||
return (execution_nodes, waiting_jobs)
|
||||
|
||||
'''
|
||||
Tasks that are currently running in celery
|
||||
|
||||
Transform:
|
||||
{
|
||||
"celery@ec2-54-204-222-62.compute-1.amazonaws.com": [],
|
||||
"celery@ec2-54-163-144-168.compute-1.amazonaws.com": [{
|
||||
...
|
||||
"id": "5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
...
|
||||
}, {
|
||||
...,
|
||||
}, ...]
|
||||
}
|
||||
|
||||
to:
|
||||
{
|
||||
"ec2-54-204-222-62.compute-1.amazonaws.com": [
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8304",
|
||||
"5238466a-f8c7-43b3-9180-5b78e9da8306",
|
||||
...
|
||||
]
|
||||
}
|
||||
'''
|
||||
def get_active_tasks(self):
|
||||
inspector = inspect()
|
||||
if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):
|
||||
active_task_queues = inspector.active()
|
||||
else:
|
||||
logger.warn("Ignoring celery task inspector")
|
||||
active_task_queues = None
|
||||
|
||||
queues = None
|
||||
|
||||
if active_task_queues is not None:
|
||||
queues = {}
|
||||
for queue in active_task_queues:
|
||||
active_tasks = set()
|
||||
map(lambda at: active_tasks.add(at['id']), active_task_queues[queue])
|
||||
|
||||
# celery worker name is of the form celery@myhost.com
|
||||
queue_name = queue.split('@')
|
||||
queue_name = queue_name[1 if len(queue_name) > 1 else 0]
|
||||
queues[queue_name] = active_tasks
|
||||
else:
|
||||
if not hasattr(settings, 'CELERY_UNIT_TEST'):
|
||||
return (None, None)
|
||||
|
||||
return (active_task_queues, queues)
|
||||
|
||||
def get_latest_project_update_tasks(self, all_sorted_tasks):
|
||||
project_ids = Set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
project_ids.add(task.project_id)
|
||||
return ProjectUpdate.objects.filter(id__in=project_ids)
|
||||
|
||||
def get_latest_inventory_update_tasks(self, all_sorted_tasks):
|
||||
inventory_ids = Set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return InventoryUpdate.objects.filter(id__in=inventory_ids)
|
||||
|
||||
def get_running_workflow_jobs(self):
|
||||
graph_workflow_jobs = [wf for wf in
|
||||
WorkflowJob.objects.filter(status='running')]
|
||||
return graph_workflow_jobs
|
||||
|
||||
def get_inventory_source_tasks(self, all_sorted_tasks):
|
||||
inventory_ids = Set()
|
||||
for task in all_sorted_tasks:
|
||||
if isinstance(task, Job):
|
||||
inventory_ids.add(task.inventory_id)
|
||||
return [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
|
||||
|
||||
def spawn_workflow_graph_jobs(self, workflow_jobs):
|
||||
for workflow_job in workflow_jobs:
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
spawn_nodes = dag.bfs_nodes_to_run()
|
||||
for spawn_node in spawn_nodes:
|
||||
if spawn_node.unified_job_template is None:
|
||||
continue
|
||||
kv = spawn_node.get_job_kwargs()
|
||||
job = spawn_node.unified_job_template.create_unified_job(**kv)
|
||||
spawn_node.job = job
|
||||
spawn_node.save()
|
||||
if job._resources_sufficient_for_launch():
|
||||
can_start = job.signal_start(**kv)
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
else:
|
||||
can_start = False
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
if not can_start:
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
connection.on_commit(lambda: job.websocket_emit_status('failed'))
|
||||
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
|
||||
|
||||
# See comment in tasks.py::RunWorkflowJob::run()
|
||||
def process_finished_workflow_jobs(self, workflow_jobs):
|
||||
result = []
|
||||
for workflow_job in workflow_jobs:
|
||||
dag = WorkflowDAG(workflow_job)
|
||||
if workflow_job.cancel_flag:
|
||||
workflow_job.status = 'canceled'
|
||||
workflow_job.save()
|
||||
dag.cancel_node_jobs()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
elif dag.is_workflow_done():
|
||||
result.append(workflow_job.id)
|
||||
if workflow_job._has_failed():
|
||||
workflow_job.status = 'failed'
|
||||
else:
|
||||
workflow_job.status = 'successful'
|
||||
workflow_job.save()
|
||||
connection.on_commit(lambda: workflow_job.websocket_emit_status(workflow_job.status))
|
||||
return result
|
||||
|
||||
def get_dependent_jobs_for_inv_and_proj_update(self, job_obj):
|
||||
return [{'type': j.model_to_str(), 'id': j.id} for j in job_obj.dependent_jobs.all()]
|
||||
|
||||
def start_task(self, task, rampart_group, dependent_tasks=[]):
|
||||
from awx.main.tasks import handle_work_error, handle_work_success
|
||||
|
||||
task_actual = {
|
||||
'type': get_type_for_model(type(task)),
|
||||
'id': task.id,
|
||||
}
|
||||
dependencies = [{'type': get_type_for_model(type(t)), 'id': t.id} for t in dependent_tasks]
|
||||
|
||||
error_handler = handle_work_error.s(subtasks=[task_actual] + dependencies)
|
||||
success_handler = handle_work_success.s(task_actual=task_actual)
|
||||
|
||||
task.status = 'waiting'
|
||||
|
||||
(start_status, opts) = task.pre_start()
|
||||
if not start_status:
|
||||
task.status = 'failed'
|
||||
if task.job_explanation:
|
||||
task.job_explanation += ' '
|
||||
task.job_explanation += 'Task failed pre-start check.'
|
||||
task.save()
|
||||
# TODO: run error handler to fail sub-tasks and send notifications
|
||||
else:
|
||||
if type(task) is WorkflowJob:
|
||||
task.status = 'running'
|
||||
if not task.supports_isolation() and rampart_group.controller_id:
|
||||
# non-Ansible jobs on isolated instances run on controller
|
||||
task.instance_group = rampart_group.controller
|
||||
logger.info('Submitting isolated %s to queue %s via %s.',
|
||||
task.log_format, task.instance_group_id, rampart_group.controller_id)
|
||||
else:
|
||||
task.instance_group = rampart_group
|
||||
logger.info('Submitting %s to instance group %s.', task.log_format, task.instance_group_id)
|
||||
with disable_activity_stream():
|
||||
task.celery_task_id = str(uuid.uuid4())
|
||||
task.save()
|
||||
|
||||
self.consume_capacity(task, rampart_group.name)
|
||||
|
||||
def post_commit():
|
||||
task.websocket_emit_status(task.status)
|
||||
if task.status != 'failed':
|
||||
task.start_celery_task(opts, error_callback=error_handler, success_callback=success_handler, queue=rampart_group.name)
|
||||
|
||||
connection.on_commit(post_commit)
|
||||
|
||||
def process_running_tasks(self, running_tasks):
|
||||
map(lambda task: self.graph[task.instance_group.name]['graph'].add_job(task), running_tasks)
|
||||
|
||||
def create_project_update(self, task):
|
||||
project_task = Project.objects.get(id=task.project_id).create_project_update(launch_type='dependency')
|
||||
|
||||
# Project created 1 seconds behind
|
||||
project_task.created = task.created - timedelta(seconds=1)
|
||||
project_task.status = 'pending'
|
||||
project_task.save()
|
||||
return project_task
|
||||
|
||||
def create_inventory_update(self, task, inventory_source_task):
|
||||
inventory_task = InventorySource.objects.get(id=inventory_source_task.id).create_inventory_update(launch_type='dependency')
|
||||
|
||||
inventory_task.created = task.created - timedelta(seconds=2)
|
||||
inventory_task.status = 'pending'
|
||||
inventory_task.save()
|
||||
# inventory_sources = self.get_inventory_source_tasks([task])
|
||||
# self.process_inventory_sources(inventory_sources)
|
||||
return inventory_task
|
||||
|
||||
def capture_chain_failure_dependencies(self, task, dependencies):
|
||||
with disable_activity_stream():
|
||||
task.dependent_jobs.add(*dependencies)
|
||||
|
||||
for dep in dependencies:
|
||||
# Add task + all deps except self
|
||||
dep.dependent_jobs.add(*([task] + filter(lambda d: d != dep, dependencies)))
|
||||
|
||||
def get_latest_inventory_update(self, inventory_source):
|
||||
latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
|
||||
if not latest_inventory_update.exists():
|
||||
return None
|
||||
return latest_inventory_update.first()
|
||||
|
||||
def should_update_inventory_source(self, job, latest_inventory_update):
|
||||
now = tz_now()
|
||||
|
||||
# Already processed dependencies for this job
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_inventory_update is None:
|
||||
return True
|
||||
'''
|
||||
If there's already a inventory update utilizing this job that's about to run
|
||||
then we don't need to create one
|
||||
'''
|
||||
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
|
||||
return False
|
||||
|
||||
timeout_seconds = timedelta(seconds=latest_inventory_update.inventory_source.update_cache_timeout)
|
||||
if (latest_inventory_update.finished + timeout_seconds) < now:
|
||||
return True
|
||||
if latest_inventory_update.inventory_source.update_on_launch is True and \
|
||||
latest_inventory_update.status in ['failed', 'canceled', 'error']:
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_latest_project_update(self, job):
|
||||
latest_project_update = ProjectUpdate.objects.filter(project=job.project, job_type='check').order_by("-created")
|
||||
if not latest_project_update.exists():
|
||||
return None
|
||||
return latest_project_update.first()
|
||||
|
||||
def should_update_related_project(self, job, latest_project_update):
|
||||
now = tz_now()
|
||||
if job.dependent_jobs.all():
|
||||
return False
|
||||
|
||||
if latest_project_update is None:
|
||||
return True
|
||||
|
||||
if latest_project_update.status in ['failed', 'canceled']:
|
||||
return True
|
||||
|
||||
'''
|
||||
If there's already a project update utilizing this job that's about to run
|
||||
then we don't need to create one
|
||||
'''
|
||||
if latest_project_update.status in ['waiting', 'pending', 'running']:
|
||||
return False
|
||||
|
||||
'''
|
||||
If the latest project update has a created time == job_created_time-1
|
||||
then consider the project update found. This is so we don't enter an infinite loop
|
||||
of updating the project when cache timeout is 0.
|
||||
'''
|
||||
if latest_project_update.project.scm_update_cache_timeout == 0 and \
|
||||
latest_project_update.launch_type == 'dependency' and \
|
||||
latest_project_update.created == job.created - timedelta(seconds=1):
|
||||
return False
|
||||
'''
|
||||
Normal Cache Timeout Logic
|
||||
'''
|
||||
timeout_seconds = timedelta(seconds=latest_project_update.project.scm_update_cache_timeout)
|
||||
if (latest_project_update.finished + timeout_seconds) < now:
|
||||
return True
|
||||
return False
|
||||
|
||||
def generate_dependencies(self, task):
|
||||
dependencies = []
|
||||
if type(task) is Job:
|
||||
# TODO: Can remove task.project None check after scan-job-default-playbook is removed
|
||||
if task.project is not None and task.project.scm_update_on_launch is True:
|
||||
latest_project_update = self.get_latest_project_update(task)
|
||||
if self.should_update_related_project(task, latest_project_update):
|
||||
project_task = self.create_project_update(task)
|
||||
dependencies.append(project_task)
|
||||
else:
|
||||
if latest_project_update.status in ['waiting', 'pending', 'running']:
|
||||
dependencies.append(latest_project_update)
|
||||
|
||||
# Inventory created 2 seconds behind job
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(task, field_name="start_args"))
|
||||
except ValueError:
|
||||
start_args = dict()
|
||||
for inventory_source in [invsrc for invsrc in self.all_inventory_sources if invsrc.inventory == task.inventory]:
|
||||
if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
|
||||
continue
|
||||
if not inventory_source.update_on_launch:
|
||||
continue
|
||||
latest_inventory_update = self.get_latest_inventory_update(inventory_source)
|
||||
if self.should_update_inventory_source(task, latest_inventory_update):
|
||||
inventory_task = self.create_inventory_update(task, inventory_source)
|
||||
dependencies.append(inventory_task)
|
||||
else:
|
||||
if latest_inventory_update.status in ['waiting', 'pending', 'running']:
|
||||
dependencies.append(latest_inventory_update)
|
||||
|
||||
if len(dependencies) > 0:
|
||||
self.capture_chain_failure_dependencies(task, dependencies)
|
||||
return dependencies
|
||||
|
||||
def process_dependencies(self, dependent_task, dependency_tasks):
|
||||
for task in dependency_tasks:
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("Dependent %s is blocked from running", task.log_format)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
for rampart_group in preferred_instance_groups:
|
||||
if self.get_remaining_capacity(rampart_group.name) <= 0:
|
||||
logger.debug("Skipping group %s capacity <= 0", rampart_group.name)
|
||||
continue
|
||||
if not self.would_exceed_capacity(task, rampart_group.name):
|
||||
logger.debug("Starting dependent %s in group %s", task.log_format, rampart_group.name)
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
tasks_to_fail = filter(lambda t: t != task, dependency_tasks)
|
||||
tasks_to_fail += [dependent_task]
|
||||
self.start_task(task, rampart_group, tasks_to_fail)
|
||||
found_acceptable_queue = True
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("Dependent %s couldn't be scheduled on graph, waiting for next cycle", task.log_format)
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
for task in pending_tasks:
|
||||
self.process_dependencies(task, self.generate_dependencies(task))
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("%s is blocked from running", task.log_format)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
for rampart_group in preferred_instance_groups:
|
||||
remaining_capacity = self.get_remaining_capacity(rampart_group.name)
|
||||
if remaining_capacity <= 0:
|
||||
logger.debug("Skipping group %s, remaining_capacity %s <= 0",
|
||||
rampart_group.name, remaining_capacity)
|
||||
continue
|
||||
if not self.would_exceed_capacity(task, rampart_group.name):
|
||||
logger.debug("Starting %s in group %s (remaining_capacity=%s)",
|
||||
task.log_format, rampart_group.name, remaining_capacity)
|
||||
self.graph[rampart_group.name]['graph'].add_job(task)
|
||||
self.start_task(task, rampart_group, task.get_jobs_fail_chain())
|
||||
found_acceptable_queue = True
|
||||
break
|
||||
else:
|
||||
logger.debug("Not enough capacity to run %s on %s (remaining_capacity=%s)",
|
||||
task.log_format, rampart_group.name, remaining_capacity)
|
||||
if not found_acceptable_queue:
|
||||
logger.debug("%s couldn't be scheduled on graph, waiting for next cycle", task.log_format)
|
||||
|
||||
def fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time,
|
||||
isolated=False):
|
||||
for task in node_jobs:
|
||||
if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
|
||||
if isinstance(task, WorkflowJob):
|
||||
continue
|
||||
if task.modified > celery_task_start_time:
|
||||
continue
|
||||
new_status = 'failed'
|
||||
if isolated:
|
||||
new_status = 'error'
|
||||
task.status = new_status
|
||||
if isolated:
|
||||
# TODO: cancel and reap artifacts of lost jobs from heartbeat
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but its ',
|
||||
'controller management daemon was not present in',
|
||||
'Celery, so it has been marked as failed.',
|
||||
'Task may still be running, but contactability is unknown.'
|
||||
))
|
||||
else:
|
||||
task.job_explanation += ' '.join((
|
||||
'Task was marked as running in Tower but was not present in',
|
||||
'Celery, so it has been marked as failed.',
|
||||
))
|
||||
try:
|
||||
task.save(update_fields=['status', 'job_explanation'])
|
||||
except DatabaseError:
|
||||
logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format))
|
||||
continue
|
||||
awx_tasks._send_notification_templates(task, 'failed')
|
||||
task.websocket_emit_status(new_status)
|
||||
logger.error("{}Task {} has no record in celery. Marking as failed".format(
|
||||
'Isolated ' if isolated else '', task.log_format))
|
||||
|
||||
def cleanup_inconsistent_celery_tasks(self):
|
||||
'''
|
||||
Rectify tower db <-> celery inconsistent view of jobs state
|
||||
'''
|
||||
last_cleanup = cache.get('last_celery_task_cleanup') or datetime.min.replace(tzinfo=utc)
|
||||
if (tz_now() - last_cleanup).seconds < settings.AWX_INCONSISTENT_TASK_INTERVAL:
|
||||
return
|
||||
|
||||
logger.debug("Failing inconsistent running jobs.")
|
||||
celery_task_start_time = tz_now()
|
||||
active_task_queues, active_queues = self.get_active_tasks()
|
||||
cache.set('last_celery_task_cleanup', tz_now())
|
||||
|
||||
if active_queues is None:
|
||||
logger.error('Failed to retrieve active tasks from celery')
|
||||
return None
|
||||
|
||||
'''
|
||||
Only consider failing tasks on instances for which we obtained a task
|
||||
list from celery for.
|
||||
'''
|
||||
running_tasks, waiting_tasks = self.get_running_tasks()
|
||||
all_celery_task_ids = []
|
||||
for node, node_jobs in active_queues.iteritems():
|
||||
all_celery_task_ids.extend(node_jobs)
|
||||
|
||||
self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids, celery_task_start_time)
|
||||
|
||||
for node, node_jobs in running_tasks.iteritems():
|
||||
isolated = False
|
||||
if node in active_queues:
|
||||
active_tasks = active_queues[node]
|
||||
else:
|
||||
'''
|
||||
Node task list not found in celery. We may branch into cases:
|
||||
- instance is unknown to tower, system is improperly configured
|
||||
- instance is reported as down, then fail all jobs on the node
|
||||
- instance is an isolated node, then check running tasks
|
||||
among all allowed controller nodes for management process
|
||||
- valid healthy instance not included in celery task list
|
||||
probably a netsplit case, leave it alone
|
||||
'''
|
||||
instance = Instance.objects.filter(hostname=node).first()
|
||||
|
||||
if instance is None:
|
||||
logger.error("Execution node Instance {} not found in database. "
|
||||
"The node is currently executing jobs {}".format(
|
||||
node, [j.log_format for j in node_jobs]))
|
||||
active_tasks = []
|
||||
elif instance.capacity == 0:
|
||||
active_tasks = []
|
||||
elif instance.rampart_groups.filter(controller__isnull=False).exists():
|
||||
active_tasks = all_celery_task_ids
|
||||
isolated = True
|
||||
else:
|
||||
continue
|
||||
|
||||
self.fail_jobs_if_not_in_celery(
|
||||
node_jobs, active_tasks, celery_task_start_time,
|
||||
isolated=isolated
|
||||
)
|
||||
|
||||
def calculate_capacity_consumed(self, tasks):
|
||||
self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph)
|
||||
|
||||
def would_exceed_capacity(self, task, instance_group):
|
||||
current_capacity = self.graph[instance_group]['consumed_capacity']
|
||||
capacity_total = self.graph[instance_group]['capacity_total']
|
||||
if current_capacity == 0:
|
||||
return False
|
||||
return (task.task_impact + current_capacity > capacity_total)
|
||||
|
||||
def consume_capacity(self, task, instance_group):
|
||||
logger.debug('%s consumed %s capacity units from %s with prior total of %s',
|
||||
task.log_format, task.task_impact, instance_group,
|
||||
self.graph[instance_group]['consumed_capacity'])
|
||||
self.graph[instance_group]['consumed_capacity'] += task.task_impact
|
||||
|
||||
def get_remaining_capacity(self, instance_group):
|
||||
return (self.graph[instance_group]['capacity_total'] - self.graph[instance_group]['consumed_capacity'])
|
||||
|
||||
def process_tasks(self, all_sorted_tasks):
|
||||
running_tasks = filter(lambda t: t.status in ['waiting', 'running'], all_sorted_tasks)
|
||||
|
||||
self.calculate_capacity_consumed(running_tasks)
|
||||
|
||||
self.process_running_tasks(running_tasks)
|
||||
|
||||
pending_tasks = filter(lambda t: t.status in 'pending', all_sorted_tasks)
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
|
||||
def _schedule(self):
|
||||
finished_wfjs = []
|
||||
all_sorted_tasks = self.get_tasks()
|
||||
if len(all_sorted_tasks) > 0:
|
||||
# TODO: Deal with
|
||||
# latest_project_updates = self.get_latest_project_update_tasks(all_sorted_tasks)
|
||||
# self.process_latest_project_updates(latest_project_updates)
|
||||
|
||||
# latest_inventory_updates = self.get_latest_inventory_update_tasks(all_sorted_tasks)
|
||||
# self.process_latest_inventory_updates(latest_inventory_updates)
|
||||
|
||||
self.all_inventory_sources = self.get_inventory_source_tasks(all_sorted_tasks)
|
||||
|
||||
running_workflow_tasks = self.get_running_workflow_jobs()
|
||||
finished_wfjs = self.process_finished_workflow_jobs(running_workflow_tasks)
|
||||
|
||||
self.spawn_workflow_graph_jobs(running_workflow_tasks)
|
||||
|
||||
self.process_tasks(all_sorted_tasks)
|
||||
return finished_wfjs
|
||||
|
||||
def schedule(self):
|
||||
with transaction.atomic():
|
||||
# Lock
|
||||
with advisory_lock('task_manager_lock', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
logger.debug("Not running scheduler, another task holds lock")
|
||||
return
|
||||
logger.debug("Starting Scheduler")
|
||||
|
||||
self.cleanup_inconsistent_celery_tasks()
|
||||
finished_wfjs = self._schedule()
|
||||
|
||||
# Operations whose queries rely on modifications made during the atomic scheduling session
|
||||
for wfj in WorkflowJob.objects.filter(id__in=finished_wfjs):
|
||||
awx_tasks._send_notification_templates(wfj, 'succeeded' if wfj.status == 'successful' else 'failed')
|
||||
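The scheduling loop in task_manager.py above keys its decisions off a per-instance-group capacity ledger (capacity_total vs consumed_capacity, checked by would_exceed_capacity and updated by consume_capacity). A standalone sketch of that bookkeeping, not part of the commit: a plain dict stands in for self.graph and integers stand in for each task's task_impact.

# Sketch of the instance-group capacity ledger used by TaskManager.
graph = {
    'tower': {'capacity_total': 100, 'consumed_capacity': 0},
    'isolated': {'capacity_total': 20, 'consumed_capacity': 0},
}


def would_exceed_capacity(task_impact, group):
    current = graph[group]['consumed_capacity']
    if current == 0:
        # Mirrors the special case above: an idle group always accepts one task,
        # even if that task alone is bigger than the group's total capacity.
        return False
    return task_impact + current > graph[group]['capacity_total']


def consume_capacity(task_impact, group):
    graph[group]['consumed_capacity'] += task_impact


def remaining_capacity(group):
    return graph[group]['capacity_total'] - graph[group]['consumed_capacity']


for impact in (60, 30, 30):
    if not would_exceed_capacity(impact, 'tower'):
        consume_capacity(impact, 'tower')
print(remaining_capacity('tower'))   # 10: the third task (30) would exceed 100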
@@ -3,7 +3,7 @@
import logging

# Celery
from celery import task
from celery import Task, task

# AWX
from awx.main.scheduler import TaskManager
@@ -15,6 +15,12 @@ logger = logging.getLogger('awx.main.scheduler')
# updated model, the call to schedule() may get stale data.


class LogErrorsTask(Task):
    def on_failure(self, exc, task_id, args, kwargs, einfo):
        logger.exception('Task {} encountered exception.'.format(self.name), exc_info=exc)
        super(LogErrorsTask, self).on_failure(exc, task_id, args, kwargs, einfo)


@task
def run_job_launch(job_id):
    TaskManager().schedule()
@@ -25,7 +31,7 @@ def run_job_complete(job_id):
    TaskManager().schedule()


@task
@task(base=LogErrorsTask)
def run_task_manager():
    logger.debug("Running Tower task manager.")
    TaskManager().schedule()
@@ -140,6 +140,9 @@ def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwarg
|
||||
|
||||
def sync_superuser_status_to_rbac(instance, **kwargs):
|
||||
'When the is_superuser flag is changed on a user, reflect that in the membership of the System Administrator role'
|
||||
update_fields = kwargs.get('update_fields', None)
|
||||
if update_fields and 'is_superuser' not in update_fields:
|
||||
return
|
||||
if instance.is_superuser:
|
||||
Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).members.add(instance)
|
||||
else:
|
||||
@@ -147,6 +150,8 @@ def sync_superuser_status_to_rbac(instance, **kwargs):
|
||||
|
||||
|
||||
def create_user_role(instance, **kwargs):
|
||||
if not kwargs.get('created', True):
|
||||
return
|
||||
try:
|
||||
Role.objects.get(
|
||||
content_type=ContentType.objects.get_for_model(instance),
|
||||
@@ -383,6 +388,9 @@ def activity_stream_create(sender, instance, created, **kwargs):
|
||||
# Skip recording any inventory source directly associated with a group.
|
||||
if isinstance(instance, InventorySource) and instance.deprecated_group:
|
||||
return
|
||||
_type = type(instance)
|
||||
if getattr(_type, '_deferred', False):
|
||||
return
|
||||
object1 = camelcase_to_underscore(instance.__class__.__name__)
|
||||
changes = model_to_dict(instance, model_serializer_mapping)
|
||||
# Special case where Job survey password variables need to be hidden
|
||||
@@ -416,6 +424,9 @@ def activity_stream_update(sender, instance, **kwargs):
|
||||
changes = model_instance_diff(old, new, model_serializer_mapping)
|
||||
if changes is None:
|
||||
return
|
||||
_type = type(instance)
|
||||
if getattr(_type, '_deferred', False):
|
||||
return
|
||||
object1 = camelcase_to_underscore(instance.__class__.__name__)
|
||||
activity_entry = ActivityStream(
|
||||
operation='update',
|
||||
@@ -440,6 +451,9 @@ def activity_stream_delete(sender, instance, **kwargs):
|
||||
# explicitly called with flag on in Inventory.schedule_deletion.
|
||||
if isinstance(instance, Inventory) and not kwargs.get('inventory_delete_flag', False):
|
||||
return
|
||||
_type = type(instance)
|
||||
if getattr(_type, '_deferred', False):
|
||||
return
|
||||
changes = model_to_dict(instance)
|
||||
object1 = camelcase_to_underscore(instance.__class__.__name__)
|
||||
activity_entry = ActivityStream(
|
||||
@@ -461,6 +475,9 @@ def activity_stream_associate(sender, instance, **kwargs):
|
||||
else:
|
||||
return
|
||||
obj1 = instance
|
||||
_type = type(instance)
|
||||
if getattr(_type, '_deferred', False):
|
||||
return
|
||||
object1=camelcase_to_underscore(obj1.__class__.__name__)
|
||||
obj_rel = sender.__module__ + "." + sender.__name__
|
||||
|
||||
@@ -471,6 +488,9 @@ def activity_stream_associate(sender, instance, **kwargs):
|
||||
if not obj2_actual.exists():
|
||||
continue
|
||||
obj2_actual = obj2_actual[0]
|
||||
_type = type(obj2_actual)
|
||||
if getattr(_type, '_deferred', False):
|
||||
return
|
||||
if isinstance(obj2_actual, Role) and obj2_actual.content_object is not None:
|
||||
obj2_actual = obj2_actual.content_object
|
||||
object2 = camelcase_to_underscore(obj2_actual.__class__.__name__)
|
||||
|
||||
@@ -33,7 +33,7 @@ from celery.signals import celeryd_init, worker_process_init, worker_shutdown
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction, DatabaseError, IntegrityError, OperationalError
|
||||
from django.db import transaction, DatabaseError, IntegrityError
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.encoding import smart_str
|
||||
from django.core.mail import send_mail
|
||||
@@ -50,6 +50,7 @@ from awx import __version__ as awx_application_version
|
||||
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.models.unified_jobs import ACTIVE_STATES
|
||||
from awx.main.exceptions import AwxTaskError, TaskCancel, TaskError
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.expect import run, isolated_manager
|
||||
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
|
||||
@@ -80,7 +81,10 @@ logger = logging.getLogger('awx.main.tasks')
|
||||
|
||||
class LogErrorsTask(Task):
|
||||
def on_failure(self, exc, task_id, args, kwargs, einfo):
|
||||
if isinstance(self, BaseTask):
|
||||
if isinstance(exc, AwxTaskError):
|
||||
# Error caused by user / tracked in job output
|
||||
logger.warning(str(exc))
|
||||
elif isinstance(self, BaseTask):
|
||||
logger.exception(
|
||||
'%s %s execution encountered exception.',
|
||||
get_type_for_model(self.model), args[0])
|
||||
@@ -316,7 +320,11 @@ def awx_periodic_scheduler(self):
|
||||
def _send_notification_templates(instance, status_str):
|
||||
if status_str not in ['succeeded', 'failed']:
|
||||
raise ValueError(_("status_str must be either succeeded or failed"))
|
||||
notification_templates = instance.get_notification_templates()
|
||||
try:
|
||||
notification_templates = instance.get_notification_templates()
|
||||
except:
|
||||
logger.warn("No notification template defined for emitting notification")
|
||||
notification_templates = None
|
||||
if notification_templates:
|
||||
if status_str == 'succeeded':
|
||||
notification_template_type = 'success'
|
||||
@@ -447,12 +455,12 @@ def delete_inventory(self, inventory_id, user_id):
|
||||
{'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'}
|
||||
)
|
||||
logger.debug('Deleted inventory %s as user %s.' % (inventory_id, user_id))
|
||||
except OperationalError:
|
||||
logger.warning('Database error deleting inventory {}, but will retry.'.format(inventory_id))
|
||||
self.retry(countdown=10)
|
||||
except Inventory.DoesNotExist:
|
||||
logger.error("Delete Inventory failed due to missing inventory: " + str(inventory_id))
|
||||
return
|
||||
except DatabaseError:
|
||||
logger.warning('Database error deleting inventory {}, but will retry.'.format(inventory_id))
|
||||
self.retry(countdown=10)
|
||||
|
||||
|
||||
def with_path_cleanup(f):
|
||||
@@ -478,6 +486,7 @@ class BaseTask(LogErrorsTask):
|
||||
model = None
|
||||
abstract = True
|
||||
cleanup_paths = []
|
||||
proot_show_paths = []
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
"""Reload the model instance from the database and update the
|
||||
@@ -524,10 +533,6 @@ class BaseTask(LogErrorsTask):
|
||||
logger.error('Failed to update %s after %d retries.',
|
||||
self.model._meta.object_name, _attempt)
|
||||
|
||||
def signal_finished(self, pk):
|
||||
pass
|
||||
# notify_task_runner(dict(complete=pk))
|
||||
|
||||
def get_path_to(self, *args):
|
||||
'''
|
||||
Return absolute path relative to this file.
|
||||
@@ -764,7 +769,10 @@ class BaseTask(LogErrorsTask):
|
||||
'''
|
||||
Run the job/task and capture its output.
|
||||
'''
|
||||
instance = self.update_model(pk, status='running')
|
||||
execution_node = settings.CLUSTER_HOST_ID
|
||||
if isolated_host is not None:
|
||||
execution_node = isolated_host
|
||||
instance = self.update_model(pk, status='running', execution_node=execution_node)
|
||||
|
||||
instance.websocket_emit_status("running")
|
||||
status, rc, tb = 'error', None, ''
|
||||
@@ -793,6 +801,7 @@ class BaseTask(LogErrorsTask):
|
||||
# May have to serialize the value
|
||||
kwargs['private_data_files'] = self.build_private_data_files(instance, **kwargs)
|
||||
kwargs['passwords'] = self.build_passwords(instance, **kwargs)
|
||||
kwargs['proot_show_paths'] = self.proot_show_paths
|
||||
args = self.build_args(instance, **kwargs)
|
||||
safe_args = self.build_safe_args(instance, **kwargs)
|
||||
output_replacements = self.build_output_replacements(instance, **kwargs)
|
||||
@@ -850,12 +859,7 @@ class BaseTask(LogErrorsTask):
|
||||
pexpect_timeout=getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
proot_cmd=getattr(settings, 'AWX_PROOT_CMD', 'bwrap'),
|
||||
)
|
||||
execution_node = settings.CLUSTER_HOST_ID
|
||||
if isolated_host is not None:
|
||||
execution_node = isolated_host
|
||||
instance = self.update_model(instance.pk, status='running',
|
||||
execution_node=execution_node,
|
||||
output_replacements=output_replacements)
|
||||
instance = self.update_model(instance.pk, output_replacements=output_replacements)
|
||||
if isolated_host:
|
||||
manager_instance = isolated_manager.IsolatedManager(
|
||||
args, cwd, env, stdout_handle, ssh_key_path, **_kw
|
||||
@@ -900,12 +904,9 @@ class BaseTask(LogErrorsTask):
|
||||
# Raising an exception will mark the job as 'failed' in celery
|
||||
# and will stop a task chain from continuing to execute
|
||||
if status == 'canceled':
|
||||
raise Exception("%s was canceled (rc=%s)" % (instance.log_format, str(rc)))
|
||||
raise TaskCancel(instance, rc)
|
||||
else:
|
||||
raise Exception("%s encountered an error (rc=%s), please see task stdout for details." %
|
||||
(instance.log_format, str(rc)))
|
||||
if not hasattr(settings, 'CELERY_UNIT_TEST'):
|
||||
self.signal_finished(pk)
|
||||
raise TaskError(instance, rc)
|
||||
|
||||
def get_ssh_key_path(self, instance, **kwargs):
|
||||
'''
|
||||
@@ -1054,9 +1055,6 @@ class RunJob(BaseTask):
|
||||
env['GCE_EMAIL'] = cloud_cred.username
|
||||
env['GCE_PROJECT'] = cloud_cred.project
|
||||
env['GCE_PEM_FILE_PATH'] = cred_files.get(cloud_cred, '')
|
||||
elif cloud_cred and cloud_cred.kind == 'azure':
|
||||
env['AZURE_SUBSCRIPTION_ID'] = cloud_cred.username
|
||||
env['AZURE_CERT_PATH'] = cred_files.get(cloud_cred, '')
|
||||
elif cloud_cred and cloud_cred.kind == 'azure_rm':
|
||||
if len(cloud_cred.client) and len(cloud_cred.tenant):
|
||||
env['AZURE_CLIENT_ID'] = cloud_cred.client
|
||||
@@ -1071,6 +1069,7 @@ class RunJob(BaseTask):
|
||||
env['VMWARE_USER'] = cloud_cred.username
|
||||
env['VMWARE_PASSWORD'] = decrypt_field(cloud_cred, 'password')
|
||||
env['VMWARE_HOST'] = cloud_cred.host
|
||||
env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS)
|
||||
elif cloud_cred and cloud_cred.kind == 'openstack':
|
||||
env['OS_CLIENT_CONFIG_FILE'] = cred_files.get(cloud_cred, '')
|
||||
|
||||
@@ -1288,6 +1287,10 @@ class RunProjectUpdate(BaseTask):
|
||||
name = 'awx.main.tasks.run_project_update'
|
||||
model = ProjectUpdate
|
||||
|
||||
@property
|
||||
def proot_show_paths(self):
|
||||
return [settings.PROJECTS_ROOT]
|
||||
|
||||
def build_private_data(self, project_update, **kwargs):
|
||||
'''
|
||||
Return SSH private key data needed for this project update.
|
||||
@@ -1301,7 +1304,7 @@ class RunProjectUpdate(BaseTask):
|
||||
}
|
||||
}
|
||||
'''
|
||||
handle, self.revision_path = tempfile.mkstemp(dir=settings.AWX_PROOT_BASE_PATH)
|
||||
handle, self.revision_path = tempfile.mkstemp(dir=settings.PROJECTS_ROOT)
|
||||
self.cleanup_paths.append(self.revision_path)
|
||||
private_data = {'credentials': {}}
|
||||
if project_update.credential:
|
||||
@@ -1594,6 +1597,12 @@ class RunProjectUpdate(BaseTask):
|
||||
if status == 'successful' and instance.launch_type != 'sync':
|
||||
self._update_dependent_inventories(instance, dependent_inventory_sources)
|
||||
|
||||
def should_use_proot(self, instance, **kwargs):
|
||||
'''
|
||||
Return whether this task should use proot.
|
||||
'''
|
||||
return getattr(settings, 'AWX_PROOT_ENABLED', False)
|
||||
|
||||
|
||||
class RunInventoryUpdate(BaseTask):
|
||||
|
||||
@@ -1616,8 +1625,8 @@ class RunInventoryUpdate(BaseTask):
|
||||
If no private data is needed, return None.
|
||||
"""
|
||||
private_data = {'credentials': {}}
|
||||
# If this is Microsoft Azure or GCE, return the RSA key
|
||||
if inventory_update.source in ('azure', 'gce'):
|
||||
# If this is GCE, return the RSA key
|
||||
if inventory_update.source == 'gce':
|
||||
credential = inventory_update.credential
|
||||
private_data['credentials'][credential] = decrypt_field(credential, 'ssh_key_data')
|
||||
return private_data
|
||||
@@ -1705,7 +1714,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
section = 'vmware'
|
||||
cp.add_section(section)
|
||||
cp.set('vmware', 'cache_max_age', 0)
|
||||
|
||||
cp.set('vmware', 'validate_certs', str(settings.VMWARE_VALIDATE_CERTS))
|
||||
cp.set('vmware', 'username', credential.username)
|
||||
cp.set('vmware', 'password', decrypt_field(credential, 'password'))
|
||||
cp.set('vmware', 'server', credential.host)
|
||||
@@ -1779,7 +1788,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
cp.set(section, 'group_by_resource_group', 'yes')
|
||||
cp.set(section, 'group_by_location', 'yes')
|
||||
cp.set(section, 'group_by_tag', 'yes')
|
||||
if inventory_update.source_regions:
|
||||
if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
|
||||
cp.set(
|
||||
section, 'locations',
|
||||
','.join([x.strip() for x in inventory_update.source_regions.split(',')])
|
||||
@@ -1847,9 +1856,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
env['EC2_INI_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'vmware':
|
||||
env['VMWARE_INI_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'azure':
|
||||
env['AZURE_SUBSCRIPTION_ID'] = passwords.get('source_username', '')
|
||||
env['AZURE_CERT_PATH'] = cloud_credential
|
||||
elif inventory_update.source == 'azure_rm':
|
||||
if len(passwords.get('source_client', '')) and \
|
||||
len(passwords.get('source_tenant', '')):
|
||||
|
||||
@@ -748,6 +748,7 @@ def test_falsey_field_data(get, post, organization, admin, field_value):
|
||||
'credential_type': net.pk,
|
||||
'organization': organization.id,
|
||||
'inputs': {
|
||||
'username': 'joe-user', # username is required
|
||||
'authorize': field_value
|
||||
}
|
||||
}
|
||||
@@ -922,6 +923,25 @@ def test_vault_create_ok(post, organization, admin, version, params):
|
||||
assert decrypt_field(cred, 'vault_password') == 'some_password'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_vault_password_required(post, organization, admin):
|
||||
vault = CredentialType.defaults['vault']()
|
||||
vault.save()
|
||||
response = post(
|
||||
reverse('api:credential_list', kwargs={'version': 'v2'}),
|
||||
{
|
||||
'credential_type': vault.pk,
|
||||
'organization': organization.id,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {}
|
||||
},
|
||||
admin
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert response.data['inputs'] == {'vault_password': ['required for Vault']}
|
||||
assert Credential.objects.count() == 0
|
||||
|
||||
|
||||
#
|
||||
# Net Credentials
|
||||
#
|
||||
@@ -1051,43 +1071,6 @@ def test_gce_create_ok(post, organization, admin, version, params):
|
||||
assert decrypt_field(cred, 'ssh_key_data') == EXAMPLE_PRIVATE_KEY
|
||||
|
||||
|
||||
#
|
||||
# Azure Classic
|
||||
#
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('version, params', [
|
||||
['v1', {
|
||||
'kind': 'azure',
|
||||
'name': 'Best credential ever',
|
||||
'username': 'some_username',
|
||||
'ssh_key_data': EXAMPLE_PRIVATE_KEY
|
||||
}],
|
||||
['v2', {
|
||||
'credential_type': 1,
|
||||
'name': 'Best credential ever',
|
||||
'inputs': {
|
||||
'username': 'some_username',
|
||||
'ssh_key_data': EXAMPLE_PRIVATE_KEY
|
||||
}
|
||||
}]
|
||||
])
|
||||
def test_azure_create_ok(post, organization, admin, version, params):
|
||||
azure = CredentialType.defaults['azure']()
|
||||
azure.save()
|
||||
params['organization'] = organization.id
|
||||
response = post(
|
||||
reverse('api:credential_list', kwargs={'version': version}),
|
||||
params,
|
||||
admin
|
||||
)
|
||||
assert response.status_code == 201
|
||||
|
||||
assert Credential.objects.count() == 1
|
||||
cred = Credential.objects.all()[:1].get()
|
||||
assert cred.inputs['username'] == 'some_username'
|
||||
assert decrypt_field(cred, 'ssh_key_data') == EXAMPLE_PRIVATE_KEY
|
||||
|
||||
|
||||
#
|
||||
# Azure Resource Manager
|
||||
#
|
||||
@@ -1426,6 +1409,34 @@ def test_field_removal(put, organization, admin, credentialtype_ssh, version, pa
|
||||
assert 'password' not in cred.inputs
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_credential_type_immutable_in_v2(patch, organization, admin, credentialtype_ssh, credentialtype_aws):
|
||||
cred = Credential(
|
||||
credential_type=credentialtype_ssh,
|
||||
name='Best credential ever',
|
||||
organization=organization,
|
||||
inputs={
|
||||
'username': u'jim',
|
||||
'password': u'pass'
|
||||
}
|
||||
)
|
||||
cred.save()
|
||||
|
||||
response = patch(
|
||||
reverse('api:credential_detail', kwargs={'version': 'v2', 'pk': cred.pk}),
|
||||
{
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': u'jim',
|
||||
'password': u'pass'
|
||||
}
|
||||
},
|
||||
admin
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert 'credential_type' in response.data
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('version, params', [
|
||||
['v1', {
|
||||
|
||||
@@ -204,22 +204,6 @@ def test_delete_inventory_group(delete, group, alice, role_field, expected_statu
|
||||
delete(reverse('api:group_detail', kwargs={'pk': group.id}), alice, expect=expected_status_code)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_inventory_smarthost(post, get, inventory, admin_user, organization):
|
||||
data = { 'name': 'Host 1', 'description': 'Test Host'}
|
||||
smart_inventory = Inventory(name='smart',
|
||||
kind='smart',
|
||||
organization=organization,
|
||||
host_filter='inventory_sources__source=ec2')
|
||||
smart_inventory.save()
|
||||
post(reverse('api:inventory_hosts_list', kwargs={'pk': smart_inventory.id}), data, admin_user)
|
||||
resp = get(reverse('api:inventory_hosts_list', kwargs={'pk': smart_inventory.id}), admin_user)
|
||||
jdata = json.loads(resp.content)
|
||||
|
||||
assert getattr(smart_inventory, 'kind') == 'smart'
|
||||
assert jdata['count'] == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_inventory_smartgroup(post, get, inventory, admin_user, organization):
|
||||
data = { 'name': 'Group 1', 'description': 'Test Group'}
|
||||
|
||||
@@ -73,6 +73,7 @@ class TestJobTemplateCopyEdit:
|
||||
|
||||
fake_view = FakeView()
|
||||
fake_view.request = request
|
||||
fake_view.kwargs = {'pk': '42'}
|
||||
context = {}
|
||||
context['view'] = fake_view
|
||||
context['request'] = request
|
||||
|
||||
@@ -7,35 +7,21 @@ from awx.main.models import Inventory, Host
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_empty_inventory(post, get, admin_user, organization, group_factory):
|
||||
inventory = Inventory(name='basic_inventory',
|
||||
kind='',
|
||||
inventory = Inventory(name='basic_inventory',
|
||||
kind='',
|
||||
organization=organization)
|
||||
inventory.save()
|
||||
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': inventory.pk}), admin_user)
|
||||
jdata = json.loads(resp.content)
|
||||
|
||||
|
||||
assert inventory.hosts.count() == 0
|
||||
assert jdata == {}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_empty_smart_inventory(post, get, admin_user, organization, group_factory):
|
||||
smart_inventory = Inventory(name='smart',
|
||||
kind='smart',
|
||||
organization=organization,
|
||||
host_filter='enabled=True')
|
||||
smart_inventory.save()
|
||||
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': smart_inventory.pk}), admin_user)
|
||||
smartjdata = json.loads(resp.content)
|
||||
|
||||
assert smart_inventory.hosts.count() == 0
|
||||
assert smartjdata == {}
|
||||
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_ungrouped_hosts(post, get, admin_user, organization, group_factory):
|
||||
inventory = Inventory(name='basic_inventory',
|
||||
kind='',
|
||||
inventory = Inventory(name='basic_inventory',
|
||||
kind='',
|
||||
organization=organization)
|
||||
inventory.save()
|
||||
Host.objects.create(name='first_host', inventory=inventory)
|
||||
@@ -44,32 +30,3 @@ def test_ungrouped_hosts(post, get, admin_user, organization, group_factory):
|
||||
jdata = json.loads(resp.content)
|
||||
assert inventory.hosts.count() == 2
|
||||
assert len(jdata['all']['hosts']) == 2
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_grouped_hosts_smart_inventory(post, get, admin_user, organization, group_factory):
|
||||
inventory = Inventory(name='basic_inventory',
|
||||
kind='',
|
||||
organization=organization)
|
||||
inventory.save()
|
||||
groupA = group_factory('test_groupA')
|
||||
host1 = Host.objects.create(name='first_host', inventory=inventory)
|
||||
host2 = Host.objects.create(name='second_host', inventory=inventory)
|
||||
Host.objects.create(name='third_host', inventory=inventory)
|
||||
groupA.hosts.add(host1)
|
||||
groupA.hosts.add(host2)
|
||||
smart_inventory = Inventory(name='smart_inventory',
|
||||
kind='smart',
|
||||
organization=organization,
|
||||
host_filter='enabled=True')
|
||||
smart_inventory.save()
|
||||
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': inventory.pk}), admin_user)
|
||||
jdata = json.loads(resp.content)
|
||||
resp = get(reverse('api:inventory_script_view', kwargs={'version': 'v2', 'pk': smart_inventory.pk}), admin_user)
|
||||
smartjdata = json.loads(resp.content)
|
||||
|
||||
assert getattr(smart_inventory, 'kind') == 'smart'
|
||||
assert inventory.hosts.count() == 3
|
||||
assert len(jdata['all']['hosts']) == 1
|
||||
assert smart_inventory.hosts.count() == 3
|
||||
assert len(smartjdata['all']['hosts']) == 3
|
||||
|
||||
@@ -10,6 +10,7 @@ from awx.main.models import (
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -104,40 +105,22 @@ def setup_inventory_groups(inventory, group_factory):
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestHostManager:
|
||||
def test_host_filter_change(self, setup_ec2_gce, organization):
|
||||
smart_inventory = Inventory(name='smart',
|
||||
kind='smart',
|
||||
organization=organization,
|
||||
host_filter='inventory_sources__source=ec2')
|
||||
smart_inventory.save()
|
||||
assert len(smart_inventory.hosts.all()) == 2
|
||||
|
||||
smart_inventory.host_filter = 'inventory_sources__source=gce'
|
||||
smart_inventory.save()
|
||||
assert len(smart_inventory.hosts.all()) == 1
|
||||
|
||||
def test_host_filter_not_smart(self, setup_ec2_gce, organization):
|
||||
smart_inventory = Inventory(name='smart',
|
||||
organization=organization,
|
||||
host_filter='inventory_sources__source=ec2')
|
||||
assert len(smart_inventory.hosts.all()) == 0
|
||||
|
||||
def test_host_objects_manager(self, setup_ec2_gce, organization):
|
||||
smart_inventory = Inventory(kind='smart',
|
||||
name='smart',
|
||||
organization=organization,
|
||||
host_filter='inventory_sources__source=ec2')
|
||||
smart_inventory.save()
|
||||
def test_host_distinctness(self, setup_inventory_groups, organization):
|
||||
"""
|
||||
Two criteria would both yield the same host; check that we only get one copy here.
|
||||
"""
|
||||
assert (
|
||||
list(SmartFilter.query_from_string('name=single_host or name__startswith=single_')) ==
|
||||
[Host.objects.get(name='single_host')]
|
||||
)
|
||||
|
||||
hosts = smart_inventory.hosts.all()
|
||||
assert len(hosts) == 2
|
||||
assert hosts[0].inventory_sources.first().source == 'ec2'
|
||||
assert hosts[1].inventory_sources.first().source == 'ec2'
|
||||
|
||||
def test_host_objects_no_dupes(self, setup_inventory_groups, organization):
|
||||
smart_inventory = Inventory(name='smart',
|
||||
kind='smart',
|
||||
organization=organization,
|
||||
host_filter='groups__name=test_groupA or groups__name=test_groupB')
|
||||
smart_inventory.save()
|
||||
assert len(smart_inventory.hosts.all()) == 1
|
||||
# Things we can not easily test due to SQLite backend:
|
||||
# 2 organizations with host of same name only has 1 entry in smart inventory
|
||||
# smart inventory in 1 organization does not include host from another
|
||||
# smart inventory correctly returns hosts in filter in same organization
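The host_filter strings used in the tests above (for example 'groups__name=test_groupA or groups__name=test_groupB') resolve to ORM lookups. A rough, illustrative sketch of how such an expression could be turned into Django Q objects; this is far simpler than AWX's actual SmartFilter grammar:

# Illustrative only: a toy translation of "field=value or field=value" host
# filters into Django Q objects; AWX's SmartFilter supports a richer grammar.
from django.db.models import Q


def simple_host_filter(expr):
    q = Q()
    for term in expr.split(' or '):
        field, value = term.split('=', 1)
        q |= Q(**{field.strip(): value.strip()})
    return q


# Hypothetical usage; .distinct() keeps a host matching both terms from
# appearing twice, mirroring the "no dupes" tests above:
# Host.objects.filter(simple_host_filter('groups__name=test_groupA or groups__name=test_groupB')).distinct()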
|
||||
|
||||
awx/main/tests/functional/task_management/test_capacity.py (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
import pytest
|
||||
|
||||
from django.test import TransactionTestCase
|
||||
|
||||
from awx.main.models import (
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestCapacityMapping(TransactionTestCase):
|
||||
|
||||
def sample_cluster(self):
|
||||
ig_small = InstanceGroup.objects.create(name='ig_small')
|
||||
ig_large = InstanceGroup.objects.create(name='ig_large')
|
||||
tower = InstanceGroup.objects.create(name='tower')
|
||||
i1 = Instance.objects.create(hostname='i1', capacity=200)
|
||||
i2 = Instance.objects.create(hostname='i2', capacity=200)
|
||||
i3 = Instance.objects.create(hostname='i3', capacity=200)
|
||||
ig_small.instances.add(i1)
|
||||
ig_large.instances.add(i2, i3)
|
||||
tower.instances.add(i2)
|
||||
return [tower, ig_large, ig_small]
|
||||
|
||||
def test_mapping(self):
|
||||
self.sample_cluster()
|
||||
with self.assertNumQueries(2):
|
||||
inst_map, ig_map = InstanceGroup.objects.capacity_mapping()
|
||||
assert inst_map['i1'] == set(['ig_small'])
|
||||
assert inst_map['i2'] == set(['ig_large', 'tower'])
|
||||
assert ig_map['ig_small'] == set(['ig_small'])
|
||||
assert ig_map['ig_large'] == set(['ig_large', 'tower'])
|
||||
assert ig_map['tower'] == set(['ig_large', 'tower'])
|
||||
@@ -69,20 +69,10 @@ def test_multi_group_with_shared_dependency(instance_factory, default_instance_g
|
||||
pu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j1, ig1, [])
|
||||
j1.finished = j1.created + timedelta(seconds=2)
|
||||
j1.status = "successful"
|
||||
j1.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
pu = p.project_updates.last()
|
||||
TaskManager.start_task.assert_called_once_with(pu, default_instance_group, [j2])
|
||||
pu.finished = pu.created + timedelta(seconds=1)
|
||||
pu.status = "successful"
|
||||
pu.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j2, ig2, [])
|
||||
|
||||
TaskManager.start_task.assert_any_call(j1, ig1, [])
|
||||
TaskManager.start_task.assert_any_call(j2, ig2, [])
|
||||
assert TaskManager.start_task.call_count == 2
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -1,14 +1,17 @@
|
||||
import pytest
|
||||
import mock
|
||||
import json
|
||||
from datetime import timedelta, datetime
|
||||
|
||||
from django.core.cache import cache
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
from awx.main.scheduler import TaskManager
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
Instance,
|
||||
WorkflowJob,
|
||||
)
|
||||
|
||||
|
||||
@@ -153,7 +156,36 @@ def test_single_job_dependencies_inventory_update_launch(default_instance_group,
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, default_instance_group, [])
|
||||
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_dependency_with_already_updated(default_instance_group, job_template_factory, mocker, inventory_source_factory):
|
||||
objects = job_template_factory('jt', organization='org1', project='proj',
|
||||
inventory='inv', credential='cred',
|
||||
jobs=["job_should_start"])
|
||||
j = objects.jobs["job_should_start"]
|
||||
j.status = 'pending'
|
||||
j.save()
|
||||
i = objects.inventory
|
||||
ii = inventory_source_factory("ec2")
|
||||
ii.source = "ec2"
|
||||
ii.update_on_launch = True
|
||||
ii.update_cache_timeout = 0
|
||||
ii.save()
|
||||
i.inventory_sources.add(ii)
|
||||
j.start_args = json.dumps(dict(inventory_sources_already_updated=[ii.id]))
|
||||
j.save()
|
||||
j.start_args = encrypt_field(j, field_name="start_args")
|
||||
j.save()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
tm = TaskManager()
|
||||
with mock.patch.object(TaskManager, "create_inventory_update", wraps=tm.create_inventory_update) as mock_iu:
|
||||
tm.schedule()
|
||||
mock_iu.assert_not_called()
|
||||
with mock.patch("awx.main.scheduler.TaskManager.start_task"):
|
||||
TaskManager().schedule()
|
||||
TaskManager.start_task.assert_called_once_with(j, default_instance_group, [])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_shared_dependencies_launch(default_instance_group, job_template_factory, mocker, inventory_source_factory):
|
||||
@@ -230,27 +262,29 @@ class TestReaper():
|
||||
Instance.objects.create(hostname='host4_offline', capacity=0)
|
||||
|
||||
j1 = Job.objects.create(status='pending', execution_node='host1')
|
||||
j2 = Job.objects.create(status='waiting', celery_task_id='considered_j2', execution_node='host1')
|
||||
j3 = Job.objects.create(status='waiting', celery_task_id='considered_j3', execution_node='host1')
|
||||
j2 = Job.objects.create(status='waiting', celery_task_id='considered_j2')
|
||||
j3 = Job.objects.create(status='waiting', celery_task_id='considered_j3')
|
||||
j3.modified = now - timedelta(seconds=60)
|
||||
j3.save(update_fields=['modified'])
|
||||
j4 = Job.objects.create(status='running', celery_task_id='considered_j4', execution_node='host1')
|
||||
j5 = Job.objects.create(status='waiting', celery_task_id='reapable_j5', execution_node='host1')
|
||||
j5 = Job.objects.create(status='waiting', celery_task_id='reapable_j5')
|
||||
j5.modified = now - timedelta(seconds=60)
|
||||
j5.save(update_fields=['modified'])
|
||||
j6 = Job.objects.create(status='waiting', celery_task_id='considered_j6', execution_node='host2')
|
||||
j6 = Job.objects.create(status='waiting', celery_task_id='considered_j6')
|
||||
j6.modified = now - timedelta(seconds=60)
|
||||
j6.save(update_fields=['modified'])
|
||||
j7 = Job.objects.create(status='running', celery_task_id='considered_j7', execution_node='host2')
|
||||
j8 = Job.objects.create(status='running', celery_task_id='reapable_j7', execution_node='host2')
|
||||
j9 = Job.objects.create(status='waiting', celery_task_id='host3_j8', execution_node='host3_split')
|
||||
j9 = Job.objects.create(status='waiting', celery_task_id='reapable_j8')
|
||||
j9.modified = now - timedelta(seconds=60)
|
||||
j9.save(update_fields=['modified'])
|
||||
j10 = Job.objects.create(status='running', execution_node='host3_split')
|
||||
j10 = Job.objects.create(status='running', celery_task_id='host3_j10', execution_node='host3_split')
|
||||
|
||||
j11 = Job.objects.create(status='running', celery_task_id='host4_j11', execution_node='host4_offline')
|
||||
|
||||
js = [j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11]
|
||||
j12 = WorkflowJob.objects.create(status='running', celery_task_id='workflow_job', execution_node='host1')
|
||||
|
||||
js = [j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12]
|
||||
for j in js:
|
||||
j.save = mocker.Mock(wraps=j.save)
|
||||
j.websocket_emit_status = mocker.Mock()
|
||||
@@ -263,12 +297,16 @@ class TestReaper():
|
||||
@pytest.fixture
|
||||
def running_tasks(self, all_jobs):
|
||||
return {
|
||||
'host1': all_jobs[2:5],
|
||||
'host2': all_jobs[5:8],
|
||||
'host3_split': all_jobs[8:10],
|
||||
'host1': [all_jobs[3]],
|
||||
'host2': [all_jobs[7], all_jobs[8]],
|
||||
'host3_split': [all_jobs[9]],
|
||||
'host4_offline': [all_jobs[10]],
|
||||
}
|
||||
|
||||
@pytest.fixture
|
||||
def waiting_tasks(self, all_jobs):
|
||||
return [all_jobs[2], all_jobs[4], all_jobs[5], all_jobs[8]]
|
||||
|
||||
@pytest.fixture
|
||||
def reapable_jobs(self, all_jobs):
|
||||
return [all_jobs[4], all_jobs[7], all_jobs[10]]
|
||||
@@ -287,10 +325,10 @@ class TestReaper():
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.tasks._send_notification_templates')
|
||||
@mock.patch.object(TaskManager, 'get_active_tasks', lambda self: ([], []))
|
||||
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, mocker):
|
||||
def test_cleanup_inconsistent_task(self, notify, active_tasks, considered_jobs, reapable_jobs, running_tasks, waiting_tasks, mocker):
|
||||
tm = TaskManager()
|
||||
|
||||
tm.get_running_tasks = mocker.Mock(return_value=running_tasks)
|
||||
tm.get_running_tasks = mocker.Mock(return_value=(running_tasks, waiting_tasks))
|
||||
tm.get_active_tasks = mocker.Mock(return_value=active_tasks)
|
||||
|
||||
tm.cleanup_inconsistent_celery_tasks()
|
||||
@@ -299,7 +337,7 @@ class TestReaper():
|
||||
if j not in reapable_jobs:
|
||||
j.save.assert_not_called()
|
||||
|
||||
assert notify.call_count == 3
|
||||
assert notify.call_count == 4
|
||||
notify.assert_has_calls([mock.call(j, 'failed') for j in reapable_jobs], any_order=True)
|
||||
|
||||
for j in reapable_jobs:
|
||||
@@ -314,20 +352,23 @@ class TestReaper():
|
||||
tm = TaskManager()
|
||||
|
||||
# Ensure the query grabs the expected jobs
|
||||
execution_nodes_jobs = tm.get_running_tasks()
|
||||
execution_nodes_jobs, waiting_jobs = tm.get_running_tasks()
|
||||
assert 'host1' in execution_nodes_jobs
|
||||
assert 'host2' in execution_nodes_jobs
|
||||
assert 'host3_split' in execution_nodes_jobs
|
||||
|
||||
assert all_jobs[2] in execution_nodes_jobs['host1']
|
||||
assert all_jobs[3] in execution_nodes_jobs['host1']
|
||||
assert all_jobs[4] in execution_nodes_jobs['host1']
|
||||
|
||||
assert all_jobs[5] in execution_nodes_jobs['host2']
|
||||
assert all_jobs[6] in execution_nodes_jobs['host2']
|
||||
assert all_jobs[7] in execution_nodes_jobs['host2']
|
||||
|
||||
assert all_jobs[8] in execution_nodes_jobs['host3_split']
|
||||
assert all_jobs[9] in execution_nodes_jobs['host3_split']
|
||||
|
||||
assert all_jobs[10] in execution_nodes_jobs['host4_offline']
|
||||
|
||||
assert all_jobs[11] not in execution_nodes_jobs['host1']
|
||||
|
||||
assert all_jobs[2] in waiting_jobs
|
||||
assert all_jobs[4] in waiting_jobs
|
||||
assert all_jobs[5] in waiting_jobs
|
||||
assert all_jobs[8] in waiting_jobs
|
||||
|
||||
@@ -19,7 +19,6 @@ EXAMPLE_ENCRYPTED_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nProc-Type: 4,ENCRY
|
||||
def test_default_cred_types():
|
||||
assert sorted(CredentialType.defaults.keys()) == [
|
||||
'aws',
|
||||
'azure',
|
||||
'azure_rm',
|
||||
'cloudforms',
|
||||
'gce',
|
||||
@@ -226,7 +225,7 @@ def test_credential_creation_validation_failure(organization_factory, inputs):
|
||||
[EXAMPLE_PRIVATE_KEY.replace('=', '\u003d'), None, True], # automatically fix JSON-encoded GCE keys
|
||||
])
|
||||
def test_ssh_key_data_validation(organization, kind, ssh_key_data, ssh_key_unlock, valid):
|
||||
inputs = {}
|
||||
inputs = {'username': 'joe-user'}
|
||||
if ssh_key_data:
|
||||
inputs['ssh_key_data'] = ssh_key_data
|
||||
if ssh_key_unlock:
|
||||
|
||||
@@ -269,22 +269,6 @@ def test_gce_migration():
|
||||
assert Credential.objects.count() == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_azure_classic_migration():
|
||||
cred = Credential(name='My Credential')
|
||||
with migrate(cred, 'azure'):
|
||||
cred.__dict__.update({
|
||||
'username': 'bob',
|
||||
'ssh_key_data': EXAMPLE_PRIVATE_KEY
|
||||
})
|
||||
|
||||
assert cred.credential_type.name == 'Microsoft Azure Classic (deprecated)'
|
||||
assert cred.inputs['username'] == 'bob'
|
||||
assert cred.inputs['ssh_key_data'].startswith('$encrypted$')
|
||||
assert decrypt_field(cred, 'ssh_key_data') == EXAMPLE_PRIVATE_KEY
|
||||
assert Credential.objects.count() == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_azure_rm_migration():
|
||||
cred = Credential(name='My Credential')
|
||||
|
||||
@@ -35,3 +35,13 @@ def test_inv_src_rename(inventory_source_factory):
|
||||
inv_src01.refresh_from_db()
|
||||
# inv-is-t1 is generated in the inventory_source_factory
|
||||
assert inv_src01.name == 't1 - inv-is-t1 - 0'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_azure_inv_src_removal(inventory_source):
|
||||
inventory_source.source = 'azure'
|
||||
inventory_source.save()
|
||||
|
||||
assert InventorySource.objects.filter(pk=inventory_source.pk).exists()
|
||||
invsrc.remove_azure_inventory_sources(apps, None)
|
||||
assert not InventorySource.objects.filter(pk=inventory_source.pk).exists()
|
||||
|
||||
@@ -50,7 +50,7 @@ def test_ig_associability(organization, default_instance_group, admin, system_au
|
||||
organization.instance_groups.add(default_instance_group)
|
||||
|
||||
assert admin_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
|
||||
assert oadmin_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
|
||||
assert not oadmin_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
|
||||
assert not auditor_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
|
||||
assert not omember_access.can_unattach(organization, default_instance_group, 'instance_groups', None)
|
||||
|
||||
|
||||
@@ -174,3 +174,17 @@ def test_inventory_source_org_admin_schedule_access(org_admin, inventory_source)
|
||||
assert access.get_queryset()
|
||||
assert access.can_read(schedule)
|
||||
assert access.can_change(schedule, {'rrule': 'DTSTART:20151117T050000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=2'})
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def smart_inventory(organization):
|
||||
return organization.inventories.create(name="smart-inv", kind="smart")
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestSmartInventory:
|
||||
|
||||
def test_host_filter_edit(self, smart_inventory, rando, org_admin):
|
||||
assert InventoryAccess(org_admin).can_admin(smart_inventory, {'host_filter': 'search=foo'})
|
||||
smart_inventory.admin_role.members.add(rando)
|
||||
assert not InventoryAccess(rando).can_admin(smart_inventory, {'host_filter': 'search=foo'})
|
||||
|
||||
@@ -44,6 +44,12 @@ def test_system_auditor_is_system_auditor(system_auditor):
|
||||
assert system_auditor.is_system_auditor
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_system_auditor_can_modify_self(system_auditor):
|
||||
access = UserAccess(system_auditor)
|
||||
assert access.can_change(obj=system_auditor, data=dict(is_system_auditor='true'))
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_user_queryset(user):
|
||||
u = user('pete', False)
|
||||
|
||||
@@ -104,6 +104,11 @@ class TestWorkflowJobAccess:
|
||||
access = WorkflowJobAccess(rando)
|
||||
assert access.can_cancel(workflow_job)
|
||||
|
||||
def test_admin_cancel_access(self, wfjt, workflow_job, rando):
|
||||
wfjt.admin_role.members.add(rando)
|
||||
access = WorkflowJobAccess(rando)
|
||||
assert access.can_cancel(workflow_job)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestWFJTCopyAccess:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
|
||||
import mock
|
||||
import pytest
|
||||
|
||||
|
||||
@@ -39,6 +39,7 @@ def get_summary_fields_assert():
|
||||
def get_summary_fields_mock_and_run():
|
||||
def fn(serializer_class, model_obj):
|
||||
serializer = serializer_class()
|
||||
serializer.context['view'] = mock.Mock(kwargs={})
|
||||
return serializer.get_summary_fields(model_obj)
|
||||
return fn
|
||||
|
||||
|
||||
@@ -108,6 +108,7 @@ class TestJobTemplateSerializerGetSummaryFields():
|
||||
request.user = user
|
||||
view = JobTemplateDetail()
|
||||
view.request = request
|
||||
view.kwargs = {}
|
||||
serializer.context['view'] = view
|
||||
|
||||
with mocker.patch("awx.api.serializers.role_summary_fields_generator", return_value='Can eat pie'):
|
||||
|
||||
@@ -242,28 +242,28 @@ class TestResourceAccessList:
|
||||
), method='GET')
|
||||
|
||||
|
||||
def mock_view(self):
|
||||
def mock_view(self, parent=None):
|
||||
view = ResourceAccessList()
|
||||
view.parent_model = Organization
|
||||
view.kwargs = {'pk': 4}
|
||||
if parent:
|
||||
view.get_parent_object = lambda: parent
|
||||
return view
|
||||
|
||||
|
||||
def test_parent_access_check_failed(self, mocker, mock_organization):
|
||||
with mocker.patch('awx.api.permissions.get_object_or_400', return_value=mock_organization):
|
||||
mock_access = mocker.MagicMock(__name__='for logger', return_value=False)
|
||||
with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
|
||||
with pytest.raises(PermissionDenied):
|
||||
self.mock_view().check_permissions(self.mock_request())
|
||||
mock_access.assert_called_once_with(mock_organization)
|
||||
mock_access = mocker.MagicMock(__name__='for logger', return_value=False)
|
||||
with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
|
||||
with pytest.raises(PermissionDenied):
|
||||
self.mock_view(parent=mock_organization).check_permissions(self.mock_request())
|
||||
mock_access.assert_called_once_with(mock_organization)
|
||||
|
||||
|
||||
def test_parent_access_check_worked(self, mocker, mock_organization):
|
||||
with mocker.patch('awx.api.permissions.get_object_or_400', return_value=mock_organization):
|
||||
mock_access = mocker.MagicMock(__name__='for logger', return_value=True)
|
||||
with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
|
||||
self.mock_view().check_permissions(self.mock_request())
|
||||
mock_access.assert_called_once_with(mock_organization)
|
||||
mock_access = mocker.MagicMock(__name__='for logger', return_value=True)
|
||||
with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
|
||||
self.mock_view(parent=mock_organization).check_permissions(self.mock_request())
|
||||
mock_access.assert_called_once_with(mock_organization)
|
||||
|
||||
|
||||
def test_related_search_reverse_FK_field():
|
||||
|
||||
@@ -220,8 +220,8 @@ class TestHostInsights():
|
||||
class TestInventoryHostsList(object):
|
||||
|
||||
def test_host_list_smart_inventory(self, mocker):
|
||||
Inventory = namedtuple('Inventory', ['kind', 'host_filter', 'hosts'])
|
||||
obj = Inventory(kind='smart', host_filter='localhost', hosts=HostManager())
|
||||
Inventory = namedtuple('Inventory', ['kind', 'host_filter', 'hosts', 'organization_id'])
|
||||
obj = Inventory(kind='smart', host_filter='localhost', hosts=HostManager(), organization_id=None)
|
||||
obj.hosts.instance = obj
|
||||
|
||||
with mock.patch.object(InventoryHostsList, 'get_parent_object', return_value=obj):
|
||||
|
||||
awx/main/tests/unit/test_capacity.py (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.models import InstanceGroup
|
||||
|
||||
|
||||
class FakeObject(object):
|
||||
def __init__(self, **kwargs):
|
||||
for k, v in kwargs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
|
||||
class Job(FakeObject):
|
||||
task_impact = 43
|
||||
|
||||
def log_format(self):
|
||||
return 'job 382 (fake)'
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_cluster():
|
||||
def stand_up_cluster():
|
||||
|
||||
class Instances(FakeObject):
|
||||
def add(self, *args):
|
||||
for instance in args:
|
||||
self.obj.instance_list.append(instance)
|
||||
|
||||
def all(self):
|
||||
return self.obj.instance_list
|
||||
|
||||
class InstanceGroup(FakeObject):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(InstanceGroup, self).__init__(**kwargs)
|
||||
self.instance_list = []
|
||||
|
||||
@property
|
||||
def instances(self):
|
||||
mgr = Instances(obj=self)
|
||||
return mgr
|
||||
|
||||
|
||||
class Instance(FakeObject):
|
||||
pass
|
||||
|
||||
|
||||
ig_small = InstanceGroup(name='ig_small')
|
||||
ig_large = InstanceGroup(name='ig_large')
|
||||
tower = InstanceGroup(name='tower')
|
||||
i1 = Instance(hostname='i1', capacity=200)
|
||||
i2 = Instance(hostname='i2', capacity=200)
|
||||
i3 = Instance(hostname='i3', capacity=200)
|
||||
ig_small.instances.add(i1)
|
||||
ig_large.instances.add(i2, i3)
|
||||
tower.instances.add(i2)
|
||||
return [tower, ig_large, ig_small]
|
||||
return stand_up_cluster
|
||||
|
||||
|
||||
def test_committed_capacity(sample_cluster):
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
tasks = [
|
||||
Job(status='waiting', instance_group=tower),
|
||||
Job(status='waiting', instance_group=ig_large),
|
||||
Job(status='waiting', instance_group=ig_small)
|
||||
]
|
||||
capacities = InstanceGroup.objects.capacity_values(
|
||||
qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True
|
||||
)
|
||||
# Jobs submitted to either tower or ig_large must count toward both
|
||||
assert capacities['tower']['committed_capacity'] == 43 * 2
|
||||
assert capacities['ig_large']['committed_capacity'] == 43 * 2
|
||||
assert capacities['ig_small']['committed_capacity'] == 43
|
||||
|
||||
|
||||
def test_running_capacity(sample_cluster):
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
tasks = [
|
||||
Job(status='running', execution_node='i1'),
|
||||
Job(status='running', execution_node='i2'),
|
||||
Job(status='running', execution_node='i3')
|
||||
]
|
||||
capacities = InstanceGroup.objects.capacity_values(
|
||||
qs=[tower, ig_large, ig_small], tasks=tasks, breakdown=True
|
||||
)
|
||||
# Tower is only given 1 instance
|
||||
assert capacities['tower']['running_capacity'] == 43
|
||||
# Large IG has 2 instances
|
||||
assert capacities['ig_large']['running_capacity'] == 43 * 2
|
||||
assert capacities['ig_small']['running_capacity'] == 43
|
||||
|
||||
|
||||
def test_offline_node_running(sample_cluster):
|
||||
"""
|
||||
Assure that the algorithm doesn't explode if a job is marked running
on an offline node
|
||||
"""
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
ig_small.instance_list[0].capacity = 0
|
||||
tasks = [Job(status='running', execution_node='i1', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(
|
||||
qs=[tower, ig_large, ig_small], tasks=tasks)
|
||||
assert capacities['ig_small']['consumed_capacity'] == 43
|
||||
|
||||
|
||||
def test_offline_node_waiting(sample_cluster):
|
||||
"""
|
||||
Same but for a waiting job
|
||||
"""
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
ig_small.instance_list[0].capacity = 0
|
||||
tasks = [Job(status='waiting', instance_group=ig_small)]
|
||||
capacities = InstanceGroup.objects.capacity_values(
|
||||
qs=[tower, ig_large, ig_small], tasks=tasks)
|
||||
assert capacities['ig_small']['consumed_capacity'] == 43
|
||||
|
||||
|
||||
def test_RBAC_reduced_filter(sample_cluster):
|
||||
"""
|
||||
User can see jobs that are running in `ig_small` and `ig_large` IGs,
|
||||
but user does not have permission to see those actual instance groups.
|
||||
Verify that this does not blow everything up.
|
||||
"""
|
||||
tower, ig_large, ig_small = sample_cluster()
|
||||
tasks = [
|
||||
Job(status='waiting', instance_group=tower),
|
||||
Job(status='waiting', instance_group=ig_large),
|
||||
Job(status='waiting', instance_group=ig_small)
|
||||
]
|
||||
capacities = InstanceGroup.objects.capacity_values(
|
||||
qs=[tower], tasks=tasks, breakdown=True
|
||||
)
|
||||
# Cross-links between groups not visible to current user,
|
||||
# so a naive accounting of capacities is returned instead
|
||||
assert capacities['tower']['committed_capacity'] == 43
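The committed/running figures computed above feed the scheduler-side helpers shown earlier in this diff (would_exceed_capacity, consume_capacity). A minimal sketch of how a placement loop might consume a capacity graph of that shape; the names mirror the TaskManager helpers but this is not AWX code:

# Sketch only: choosing an instance group for a task from a capacity graph of
# the shape produced by capacity_values(); task.task_impact is assumed to be set.
def pick_instance_group(task, graph, preferred_groups):
    for group in preferred_groups:
        remaining = graph[group]['capacity_total'] - graph[group]['consumed_capacity']
        if task.task_impact <= remaining:
            graph[group]['consumed_capacity'] += task.task_impact
            return group
    return None  # nothing fits; the task stays pending for this cycle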
|
||||
@@ -19,10 +19,10 @@ from django.core.cache import cache
|
||||
class TestCleanupInconsistentCeleryTasks():
|
||||
@mock.patch.object(cache, 'get', return_value=None)
|
||||
@mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {}))
|
||||
@mock.patch.object(TaskManager, 'get_running_tasks', return_value={'host1': [Job(id=2), Job(id=3),]})
|
||||
@mock.patch.object(InstanceGroup.objects, 'all', return_value=[])
|
||||
@mock.patch.object(Instance.objects, 'get', side_effect=Instance.DoesNotExist)
|
||||
@mock.patch('awx.main.scheduler.logger')
|
||||
@mock.patch.object(TaskManager, 'get_running_tasks', return_value=({'host1': [Job(id=2), Job(id=3),]}, []))
|
||||
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
|
||||
@mock.patch.object(Instance.objects, 'filter', return_value=mock.MagicMock(first=lambda: None))
|
||||
@mock.patch('awx.main.scheduler.task_manager.logger')
|
||||
def test_instance_does_not_exist(self, logger_mock, *args):
|
||||
logger_mock.error = mock.MagicMock(side_effect=RuntimeError("mocked"))
|
||||
tm = TaskManager()
|
||||
@@ -31,22 +31,39 @@ class TestCleanupInconsistentCeleryTasks():
|
||||
|
||||
assert "mocked" in str(excinfo.value)
|
||||
logger_mock.error.assert_called_once_with("Execution node Instance host1 not found in database. "
|
||||
"The node is currently executing jobs ['None-2-new', "
|
||||
"'None-3-new']")
|
||||
"The node is currently executing jobs ['job 2 (new)', "
|
||||
"'job 3 (new)']")
|
||||
|
||||
@mock.patch.object(cache, 'get', return_value=None)
|
||||
@mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {'host1': []}))
|
||||
@mock.patch.object(InstanceGroup.objects, 'all', return_value=[])
|
||||
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
|
||||
@mock.patch.object(TaskManager, 'get_running_tasks')
|
||||
@mock.patch('awx.main.scheduler.logger')
|
||||
@mock.patch('awx.main.scheduler.task_manager.logger')
|
||||
def test_save_failed(self, logger_mock, get_running_tasks, *args):
|
||||
logger_mock.error = mock.MagicMock()
|
||||
job = Job(id=2, modified=tz_now(), status='running', celery_task_id='blah', execution_node='host1')
|
||||
job.websocket_emit_status = mock.MagicMock()
|
||||
get_running_tasks.return_value = {'host1': [job]}
|
||||
get_running_tasks.return_value = ({'host1': [job]}, [])
|
||||
tm = TaskManager()
|
||||
|
||||
with mock.patch.object(job, 'save', side_effect=DatabaseError):
|
||||
tm.cleanup_inconsistent_celery_tasks()
|
||||
job.save.assert_called_once()
|
||||
logger_mock.error.assert_called_once_with("Task job 2 (failed) DB error in marking failed. Job possibly deleted.")
|
||||
|
||||
@mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[])
|
||||
@mock.patch('awx.main.scheduler.task_manager.inspect')
|
||||
def test_multiple_active_instances_sanity_check(self, inspect_mock, *args):
|
||||
class MockInspector:
|
||||
pass
|
||||
|
||||
mock_inspector = MockInspector()
|
||||
mock_inspector.active = lambda: {
|
||||
'celery@host1': [],
|
||||
'celery@host2': []
|
||||
}
|
||||
inspect_mock.return_value = mock_inspector
|
||||
tm = TaskManager()
|
||||
active_task_queues, queues = tm.get_active_tasks()
|
||||
assert 'host1' in queues
|
||||
assert 'host2' in queues
|
||||
|
||||
@@ -181,6 +181,8 @@ class TestJobExecution:
|
||||
EXAMPLE_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nxyz==\n-----END PRIVATE KEY-----'
|
||||
|
||||
def setup_method(self, method):
|
||||
if not os.path.exists(settings.PROJECTS_ROOT):
|
||||
os.mkdir(settings.PROJECTS_ROOT)
|
||||
self.project_path = tempfile.mkdtemp(prefix='awx_project_')
|
||||
with open(os.path.join(self.project_path, 'helloworld.yml'), 'w') as f:
|
||||
f.write('---')
|
||||
@@ -257,7 +259,7 @@ class TestGenericRun(TestJobExecution):
|
||||
with pytest.raises(Exception):
|
||||
self.task.run(self.pk)
|
||||
for c in [
|
||||
mock.call(self.pk, status='running'),
|
||||
mock.call(self.pk, execution_node=settings.CLUSTER_HOST_ID, status='running'),
|
||||
mock.call(self.pk, output_replacements=[], result_traceback=mock.ANY, status='canceled')
|
||||
]:
|
||||
assert c in self.task.update_model.call_args_list
|
||||
@@ -281,6 +283,15 @@ class TestGenericRun(TestJobExecution):
|
||||
args, cwd, env, stdout = call_args
|
||||
assert args[0] == 'bwrap'
|
||||
|
||||
def test_bwrap_virtualenvs_are_readonly(self):
|
||||
self.task.run(self.pk)
|
||||
|
||||
assert self.run_pexpect.call_count == 1
|
||||
call_args, _ = self.run_pexpect.call_args_list[0]
|
||||
args, cwd, env, stdout = call_args
|
||||
assert '--ro-bind %s %s' % (settings.ANSIBLE_VENV_PATH, settings.ANSIBLE_VENV_PATH) in ' '.join(args) # noqa
|
||||
assert '--ro-bind %s %s' % (settings.AWX_VENV_PATH, settings.AWX_VENV_PATH) in ' '.join(args) # noqa
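The assertions above check that the Ansible and AWX virtualenvs are exposed read-only inside the bubblewrap sandbox. A hedged sketch of building such an argument list; the helper name and the exact set of binds are illustrative, not AWX's implementation:

# Illustrative sketch: prepend bubblewrap arguments that bind the given paths
# read-only while leaving working paths writable. Not AWX's implementation.
def wrap_with_bwrap(cmd, ro_paths, rw_paths=()):
    args = ['bwrap']
    for path in ro_paths:
        args.extend(['--ro-bind', path, path])
    for path in rw_paths:
        args.extend(['--bind', path, path])
    return args + list(cmd)


# e.g. wrap_with_bwrap(['ansible-playbook', 'site.yml'],
#                      ro_paths=['/venv/ansible', '/venv/awx'],
#                      rw_paths=['/tmp/awx_project'])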

def test_awx_task_env(self):
patch = mock.patch('awx.main.tasks.settings.AWX_TASK_ENV', {'FOO': 'BAR'})
patch.start()
@@ -533,29 +544,6 @@ class TestJobCredentials(TestJobExecution):
self.run_pexpect.side_effect = run_pexpect_side_effect
self.task.run(self.pk)

def test_azure_credentials(self):
azure = CredentialType.defaults['azure']()
credential = Credential(
pk=1,
credential_type=azure,
inputs = {
'username': 'bob',
'ssh_key_data': self.EXAMPLE_PRIVATE_KEY
}
)
credential.inputs['ssh_key_data'] = encrypt_field(credential, 'ssh_key_data')
self.instance.extra_credentials.add(credential)

def run_pexpect_side_effect(*args, **kwargs):
args, cwd, env, stdout = args
assert env['AZURE_SUBSCRIPTION_ID'] == 'bob'
ssh_key_data = env['AZURE_CERT_PATH']
assert open(ssh_key_data, 'rb').read() == self.EXAMPLE_PRIVATE_KEY
return ['successful', 0]

self.run_pexpect.side_effect = run_pexpect_side_effect
self.task.run(self.pk)

def test_azure_rm_with_tenant(self):
azure = CredentialType.defaults['azure_rm']()
credential = Credential(
@@ -1027,29 +1015,25 @@ class TestJobCredentials(TestJobExecution):
gce_credential.inputs['ssh_key_data'] = encrypt_field(gce_credential, 'ssh_key_data')
self.instance.extra_credentials.add(gce_credential)

azure = CredentialType.defaults['azure']()
azure_credential = Credential(
azure_rm = CredentialType.defaults['azure_rm']()
azure_rm_credential = Credential(
pk=2,
credential_type=azure,
credential_type=azure_rm,
inputs = {
'username': 'joe',
'ssh_key_data': 'AZURE: %s' % self.EXAMPLE_PRIVATE_KEY
'subscription': 'some-subscription',
'username': 'bob',
'password': 'secret'
}
)
azure_credential.inputs['ssh_key_data'] = encrypt_field(azure_credential, 'ssh_key_data')
self.instance.extra_credentials.add(azure_credential)
azure_rm_credential.inputs['secret'] = encrypt_field(azure_rm_credential, 'secret')
self.instance.extra_credentials.add(azure_rm_credential)

def run_pexpect_side_effect(*args, **kwargs):
args, cwd, env, stdout = args

assert env['GCE_EMAIL'] == 'bob'
assert env['GCE_PROJECT'] == 'some-project'
ssh_key_data = env['GCE_PEM_FILE_PATH']
assert open(ssh_key_data, 'rb').read() == 'GCE: %s' % self.EXAMPLE_PRIVATE_KEY

assert env['AZURE_SUBSCRIPTION_ID'] == 'joe'
ssh_key_data = env['AZURE_CERT_PATH']
assert open(ssh_key_data, 'rb').read() == 'AZURE: %s' % self.EXAMPLE_PRIVATE_KEY
assert env['AZURE_SUBSCRIPTION_ID'] == 'some-subscription'
assert env['AZURE_AD_USER'] == 'bob'
assert env['AZURE_PASSWORD'] == 'secret'

return ['successful', 0]

@@ -1096,6 +1080,27 @@ class TestProjectUpdateCredentials(TestJobExecution):
]
}

def test_bwrap_exposes_projects_root(self):
ssh = CredentialType.defaults['ssh']()
self.instance.scm_type = 'git'
self.instance.credential = Credential(
pk=1,
credential_type=ssh,
)
self.task.run(self.pk)

assert self.run_pexpect.call_count == 1
call_args, call_kwargs = self.run_pexpect.call_args_list[0]
args, cwd, env, stdout = call_args

assert ' '.join(args).startswith('bwrap')
' '.join([
'--bind',
settings.PROJECTS_ROOT,
settings.PROJECTS_ROOT,
]) in ' '.join(args)
assert '"scm_revision_output": "/projects/tmp' in ' '.join(args)

def test_username_and_password_auth(self, scm_type):
ssh = CredentialType.defaults['ssh']()
self.instance.scm_type = scm_type
@@ -1246,31 +1251,6 @@ class TestInventoryUpdateCredentials(TestJobExecution):
self.run_pexpect.side_effect = run_pexpect_side_effect
self.task.run(self.pk)

def test_azure_source(self):
azure = CredentialType.defaults['azure']()
self.instance.source = 'azure'
self.instance.credential = Credential(
pk=1,
credential_type=azure,
inputs = {
'username': 'bob',
'ssh_key_data': self.EXAMPLE_PRIVATE_KEY
}
)
self.instance.credential.inputs['ssh_key_data'] = encrypt_field(
self.instance.credential, 'ssh_key_data'
)

def run_pexpect_side_effect(*args, **kwargs):
args, cwd, env, stdout = args
assert env['AZURE_SUBSCRIPTION_ID'] == 'bob'
ssh_key_data = env['AZURE_CERT_PATH']
assert open(ssh_key_data, 'rb').read() == self.EXAMPLE_PRIVATE_KEY
return ['successful', 0]

self.run_pexpect.side_effect = run_pexpect_side_effect
self.task.run(self.pk)

def test_gce_source(self):
gce = CredentialType.defaults['gce']()
self.instance.source = 'gce'

@@ -6,6 +6,8 @@ import os
import pytest
from uuid import uuid4

from django.core.cache import cache

from awx.main.utils import common

from awx.main.models import (
@@ -18,6 +20,14 @@ from awx.main.models import (
)


@pytest.fixture(autouse=True)
def clear_cache():
'''
Clear cache (local memory) for each test to prevent using cached settings.
'''
cache.clear()


@pytest.mark.parametrize('input_, output', [
({"foo": "bar"}, {"foo": "bar"}),
('{"foo": "bar"}', {"foo": "bar"}),
@@ -49,3 +59,59 @@ def test_set_environ():
])
def test_get_type_for_model(model, name):
assert common.get_type_for_model(model) == name


@pytest.fixture
def memoized_function(mocker):
@common.memoize(track_function=True)
def myfunction(key, value):
if key not in myfunction.calls:
myfunction.calls[key] = 0

myfunction.calls[key] += 1

if myfunction.calls[key] == 1:
return value
else:
return '%s called %s times' % (value, myfunction.calls[key])
myfunction.calls = dict()
return myfunction


def test_memoize_track_function(memoized_function):
assert memoized_function('scott', 'scotterson') == 'scotterson'
assert cache.get('myfunction') == {u'scott-scotterson': 'scotterson'}
assert memoized_function('scott', 'scotterson') == 'scotterson'

assert memoized_function.calls['scott'] == 1

assert memoized_function('john', 'smith') == 'smith'
assert cache.get('myfunction') == {u'scott-scotterson': 'scotterson', u'john-smith': 'smith'}
assert memoized_function('john', 'smith') == 'smith'

assert memoized_function.calls['john'] == 1


def test_memoize_delete(memoized_function):
assert memoized_function('john', 'smith') == 'smith'
assert memoized_function('john', 'smith') == 'smith'
assert memoized_function.calls['john'] == 1

assert cache.get('myfunction') == {u'john-smith': 'smith'}

common.memoize_delete('myfunction')

assert cache.get('myfunction') is None

assert memoized_function('john', 'smith') == 'smith called 2 times'
assert memoized_function.calls['john'] == 2


def test_memoize_parameter_error():
@common.memoize(cache_key='foo', track_function=True)
def fn():
return

with pytest.raises(common.IllegalArgumentError):
fn()

@@ -61,10 +61,14 @@ def could_be_playbook(project_path, dir_path, filename):
def could_be_inventory(project_path, dir_path, filename):
# Decisions based exclusively on filename
inventory_path = os.path.join(dir_path, filename)
inventory_rel_path = os.path.relpath(inventory_path, smart_str(project_path))
suspected_ext = os.path.splitext(filename)[-1]
if suspected_ext in ['.yml', '.yaml', '.ini'] or os.access(inventory_path, os.X_OK):
if filename in ['inventory', 'hosts']:
# Users commonly name their inventory files these names
return inventory_rel_path
elif suspected_ext == '.ini' or os.access(inventory_path, os.X_OK):
# Files with any of these extensions are always included
return os.path.relpath(inventory_path, smart_str(project_path))
return inventory_rel_path
elif '.' in suspected_ext:
# If not using those extensions, inventory must have _no_ extension
return None
@@ -79,4 +83,4 @@ def could_be_inventory(project_path, dir_path, filename):
return None
except IOError:
return None
return os.path.relpath(inventory_path, smart_str(project_path))
return inventory_rel_path

@@ -17,6 +17,7 @@ import threading
import contextlib
import tempfile
import six
import psutil

# Decorator
from decorator import decorator
@@ -34,7 +35,7 @@ from django.apps import apps

logger = logging.getLogger('awx.main.utils')

__all__ = ['get_object_or_400', 'get_object_or_403', 'camelcase_to_underscore', 'memoize',
__all__ = ['get_object_or_400', 'get_object_or_403', 'camelcase_to_underscore', 'memoize', 'memoize_delete',
'get_ansible_version', 'get_ssh_version', 'get_licenser', 'get_awx_version', 'update_scm_url',
'get_type_for_model', 'get_model_for_type', 'copy_model_by_class',
'copy_m2m_relationships' ,'cache_list_capabilities', 'to_python_boolean',
@@ -44,7 +45,7 @@ __all__ = ['get_object_or_400', 'get_object_or_403', 'camelcase_to_underscore',
'callback_filter_out_ansible_extra_vars', 'get_search_fields', 'get_system_task_capacity',
'wrap_args_with_proot', 'build_proot_temp_dir', 'check_proot_installed', 'model_to_dict',
'model_instance_diff', 'timestamp_apiformat', 'parse_yaml_or_json', 'RequireDebugTrueOrTest',
'has_model_field_prefetched', 'set_environ']
'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError',]


def get_object_or_400(klass, *args, **kwargs):
@@ -107,22 +108,48 @@ class RequireDebugTrueOrTest(logging.Filter):
return settings.DEBUG or 'test' in sys.argv


def memoize(ttl=60, cache_key=None):
class IllegalArgumentError(ValueError):
pass


def memoize(ttl=60, cache_key=None, track_function=False):
'''
Decorator to wrap a function and cache its result.
'''
from django.core.cache import cache


def _memoizer(f, *args, **kwargs):
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)
if cache_key and track_function:
raise IllegalArgumentError("Can not specify cache_key when track_function is True")

if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
key = slugify("%s" % f.__name__)
cache_dict = cache.get(key) or dict()
if cache_dict_key not in cache_dict:
value = f(*args, **kwargs)
cache_dict[cache_dict_key] = value
cache.set(key, cache_dict, ttl)
else:
value = cache_dict[cache_dict_key]
else:
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)

return value
return decorator(_memoizer)


def memoize_delete(function_name):
from django.core.cache import cache

return cache.delete(function_name)


@memoize()
def get_ansible_version():
'''
@@ -581,15 +608,11 @@ def get_system_task_capacity():
from django.conf import settings
if hasattr(settings, 'SYSTEM_TASK_CAPACITY'):
return settings.SYSTEM_TASK_CAPACITY
try:
out = subprocess.check_output(['free', '-m'])
except subprocess.CalledProcessError:
logger.exception('Problem obtaining capacity from system.')
return 0
total_mem_value = out.split()[7]
if int(total_mem_value) <= 2048:
mem = psutil.virtual_memory()
total_mem_value = mem.total / 1024 / 1024
if total_mem_value <= 2048:
return 50
return 50 + ((int(total_mem_value) / 1024) - 2) * 75
return 50 + ((total_mem_value / 1024) - 2) * 75


_inventory_updates = threading.local()
@@ -699,8 +722,13 @@ def wrap_args_with_proot(args, cwd, **kwargs):
show_paths = [cwd, kwargs['private_data_dir']]
else:
show_paths = [cwd]
show_paths.extend([settings.ANSIBLE_VENV_PATH, settings.AWX_VENV_PATH])
for venv in (
settings.ANSIBLE_VENV_PATH,
settings.AWX_VENV_PATH
):
new_args.extend(['--ro-bind', venv, venv])
show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or [])
show_paths.extend(kwargs.get('proot_show_paths', []))
for path in sorted(set(show_paths)):
if not os.path.exists(path):
continue
@@ -724,7 +752,10 @@ def get_pk_from_dict(_dict, key):
Helper for obtaining a pk from user data dict or None if not present.
'''
try:
return int(_dict[key])
val = _dict[key]
if isinstance(val, object) and hasattr(val, 'id'):
return val.id # return id if given model object
return int(val)
except (TypeError, KeyError, ValueError):
return None

@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import distutils.spawn
import optparse
from operator import attrgetter

@@ -316,10 +317,9 @@ class InventoryCLI(CLI):

if __name__ == '__main__':
import imp
import subprocess
import sys
with open(__file__) as f:
imp.load_source('ansible.cli.inventory', __file__ + '.py', f)
ansible_path = subprocess.check_output(['which', 'ansible']).strip()
ansible_path = distutils.spawn.find_executable('ansible')
sys.argv[0] = 'ansible-inventory'
execfile(ansible_path)

@@ -49,6 +49,7 @@ Command line arguments:
- tenant
- ad_user
- password
- cloud_environment

Environment variables:
- AZURE_PROFILE
@@ -58,6 +59,7 @@ Environment variables:
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
- AZURE_CLOUD_ENVIRONMENT

Run for Specific Host
-----------------------
@@ -190,22 +192,27 @@ import json
import os
import re
import sys
import inspect
import traceback


from packaging.version import Version

from os.path import expanduser
import ansible.module_utils.six.moves.urllib.parse as urlparse

HAS_AZURE = True
HAS_AZURE_EXC = None

try:
from msrestazure.azure_exceptions import CloudError
from msrestazure import azure_cloud
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.compute import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
@@ -218,7 +225,8 @@ AZURE_CREDENTIAL_ENV_MAPPING = dict(
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD'
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
)

AZURE_CONFIG_SETTINGS = dict(
@@ -232,7 +240,7 @@ AZURE_CONFIG_SETTINGS = dict(
group_by_tag='AZURE_GROUP_BY_TAG'
)

AZURE_MIN_VERSION = "0.30.0rc5"
AZURE_MIN_VERSION = "2.0.0"


def azure_id_to_dict(id):
@@ -249,6 +257,7 @@ class AzureRM(object):

def __init__(self, args):
self._args = args
self._cloud_environment = None
self._compute_client = None
self._resource_client = None
self._network_client = None
@@ -262,6 +271,26 @@ class AzureRM(object):
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")

# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))

if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
@@ -272,16 +301,23 @@ class AzureRM(object):
self.credentials.get('tenant') is not None:
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'])
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment)
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common'
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment)
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password.")

def log(self, msg):
if self.debug:
print (msg + u'\n')
print(msg + u'\n')

def fail(self, msg):
raise Exception(msg)
@@ -341,6 +377,10 @@ class AzureRM(object):
self.log('Received credentials from parameters.')
return arg_credentials

if arg_credentials['ad_user'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials

# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
@@ -372,7 +412,12 @@ class AzureRM(object):
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
self._network_client = NetworkManagementClient(
self.azure_credentials,
self.subscription_id,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-06-01'
)
self._register('Microsoft.Network')
return self._network_client

@@ -380,14 +425,24 @@ class AzureRM(object):
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
self._resource_client = ResourceManagementClient(
self.azure_credentials,
self.subscription_id,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-05-10'
)
return self._resource_client

@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
self._compute_client = ComputeManagementClient(
self.azure_credentials,
self.subscription_id,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-03-30'
)
self._register('Microsoft.Compute')
return self._compute_client

@@ -440,7 +495,7 @@ class AzureInventory(object):
self.include_powerstate = False

self.get_inventory()
print (self._json_format_dict(pretty=self._args.pretty))
print(self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)

def _parse_cli_args(self):
@@ -448,13 +503,13 @@ class AzureInventory(object):
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
help='Pretty print JSON output(default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
@@ -465,10 +520,12 @@ class AzureInventory(object):
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad-user', action='store',
parser.add_argument('--ad_user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--cloud_environment', action='store',
help='Azure Cloud Environment name or metadata discovery URL')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
@@ -486,8 +543,7 @@ class AzureInventory(object):
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group)
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
str(exc)))
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
@@ -510,7 +566,7 @@ class AzureInventory(object):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)

#TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
@@ -538,7 +594,7 @@ class AzureInventory(object):
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=machine.os_profile.computer_name,
computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
provisioning_state=machine.provisioning_state,
)

@@ -559,7 +615,7 @@ class AzureInventory(object):
)

# Add windows details
if machine.os_profile.windows_configuration is not None:
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
@@ -790,13 +846,10 @@ class AzureInventory(object):

def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC))

if Version(azure_compute_version) < Version(AZURE_MIN_VERSION):
sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
"Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version))
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))

AzureInventory()


if __name__ == '__main__':
main()

@@ -1,284 +0,0 @@
#!/usr/bin/env python

'''
Windows Azure external inventory script
=======================================

Generates inventory that Ansible can understand by making API request to
Windows Azure using the azure python library.

NOTE: This script assumes Ansible is being executed where azure is already
installed.

pip install azure

Adapted from the ansible Linode plugin by Dan Slimmon.
'''

# (c) 2013, John Whitbeck
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

######################################################################

# Standard imports
import re
import sys
import argparse
import os
from urlparse import urlparse
from time import time
try:
import json
except ImportError:
import simplejson as json

try:
from azure.servicemanagement import ServiceManagementService
except ImportError as e:
sys.exit("ImportError: {0}".format(str(e)))

# Imports for ansible
import ConfigParser

class AzureInventory(object):
def __init__(self):
"""Main execution path."""
# Inventory grouped by display group
self.inventory = {}
# Index of deployment name -> host
self.index = {}
self.host_metadata = {}

# Cache setting defaults.
# These can be overridden in settings (see `read_settings`).
cache_dir = os.path.expanduser('~')
self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache')
self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index')
self.cache_max_age = 0

# Read settings and parse CLI arguments
self.read_settings()
self.read_environment()
self.parse_cli_args()

# Initialize Azure ServiceManagementService
self.sms = ServiceManagementService(self.subscription_id, self.cert_path)

# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()

if self.args.list_images:
data_to_print = self.json_format_dict(self.get_images(), True)
elif self.args.list or self.args.host:
# Display list of nodes for inventory
if len(self.inventory) == 0:
data = json.loads(self.get_inventory_from_cache())
else:
data = self.inventory

if self.args.host:
data_to_print = self.get_host(self.args.host)
else:
# Add the `['_meta']['hostvars']` information.
hostvars = {}
if len(data) > 0:
for host in set([h for hosts in data.values() for h in hosts if h]):
hostvars[host] = self.get_host(host, jsonify=False)
data['_meta'] = {'hostvars': hostvars}

# JSONify the data.
data_to_print = self.json_format_dict(data, pretty=True)
print(data_to_print)

def get_host(self, hostname, jsonify=True):
"""Return information about the given hostname, based on what
the Windows Azure API provides.
"""
if hostname not in self.host_metadata:
return "No host found: %s" % json.dumps(self.host_metadata)
if jsonify:
return json.dumps(self.host_metadata[hostname])
return self.host_metadata[hostname]

def get_images(self):
images = []
for image in self.sms.list_os_images():
if str(image.label).lower().find(self.args.list_images.lower()) >= 0:
images.append(vars(image))
return json.loads(json.dumps(images, default=lambda o: o.__dict__))

def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False

def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini')

# Credentials related
if config.has_option('azure', 'subscription_id'):
self.subscription_id = config.get('azure', 'subscription_id')
if config.has_option('azure', 'cert_path'):
self.cert_path = config.get('azure', 'cert_path')

# Cache related
if config.has_option('azure', 'cache_path'):
cache_path = os.path.expandvars(os.path.expanduser(config.get('azure', 'cache_path')))
self.cache_path_cache = os.path.join(cache_path, 'ansible-azure.cache')
self.cache_path_index = os.path.join(cache_path, 'ansible-azure.index')
if config.has_option('azure', 'cache_max_age'):
self.cache_max_age = config.getint('azure', 'cache_max_age')

def read_environment(self):
''' Reads the settings from environment variables '''
# Credentials
if os.getenv("AZURE_SUBSCRIPTION_ID"):
self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
if os.getenv("AZURE_CERT_PATH"):
self.cert_path = os.getenv("AZURE_CERT_PATH")

def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on Azure',
)
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--list-images', action='store',
help='Get all available images.')
parser.add_argument('--refresh-cache',
action='store_true', default=False,
help='Force refresh of thecache by making API requests to Azure '
'(default: False - use cache files)',
)
parser.add_argument('--host', action='store',
help='Get all information about an instance.')
self.args = parser.parse_args()

def do_api_calls_update_cache(self):
"""Do API calls, and save data in cache files."""
self.add_cloud_services()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)

def add_cloud_services(self):
"""Makes an Azure API call to get the list of cloud services."""
try:
for cloud_service in self.sms.list_hosted_services():
self.add_deployments(cloud_service)
except Exception as e:
sys.exit("Error: Failed to access cloud services - {0}".format(str(e)))

def add_deployments(self, cloud_service):
"""Makes an Azure API call to get the list of virtual machines
associated with a cloud service.
"""
try:
for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
self.add_deployment(cloud_service, deployment)
except Exception as e:
sys.exit("Error: Failed to access deployments - {0}".format(str(e)))

def add_deployment(self, cloud_service, deployment):
"""Adds a deployment to the inventory and index"""
for role in deployment.role_instance_list.role_instances:
try:
# Default port 22 unless port found with name 'SSH'
port = '22'
for ie in role.instance_endpoints.instance_endpoints:
if ie.name == 'SSH':
port = ie.public_port
break
except AttributeError as e:
pass
finally:
self.add_instance(role.instance_name, deployment, port, cloud_service, role.instance_status)

def add_instance(self, hostname, deployment, ssh_port, cloud_service, status):
"""Adds an instance to the inventory and index"""

dest = urlparse(deployment.url).hostname

# Add to index
self.index[hostname] = deployment.name

self.host_metadata[hostname] = dict(ansible_ssh_host=dest,
ansible_ssh_port=int(ssh_port),
instance_status=status,
private_id=deployment.private_id)

# List of all azure deployments
self.push(self.inventory, "azure", hostname)

# Inventory: Group by service name
self.push(self.inventory, self.to_safe(cloud_service.service_name), hostname)

if int(ssh_port) == 22:
self.push(self.inventory, "Cloud_services", hostname)

# Inventory: Group by region
self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), hostname)

def push(self, my_dict, key, element):
"""Pushed an element onto an array that may not have been defined in the dict."""
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]

def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON object."""
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory

def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)

def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()

def to_safe(self, word):
"""Escapes any characters that would be invalid in an ansible group name."""
return re.sub("[^A-Za-z0-9\-]", "_", word)

def json_format_dict(self, data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string."""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)


AzureInventory()
@@ -74,7 +74,7 @@ def rpm_package_list():
def deb_package_list():
import apt
apt_cache = apt.Cache()
installed_packages = []
installed_packages = {}
apt_installed_packages = [pk for pk in apt_cache.keys() if apt_cache[pk].is_installed]
for package in apt_installed_packages:
ac_pkg = apt_cache[package].installed

@@ -491,7 +491,7 @@ else:
}

# Social Auth configuration.
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STRATEGY = 'awx.sso.strategies.django_strategy.AWXDjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL # noqa
SOCIAL_AUTH_PIPELINE = (
@@ -716,7 +716,7 @@ VMWARE_GROUP_FILTER = r'^.+$'
VMWARE_HOST_FILTER = r'^.+$'
VMWARE_EXCLUDE_EMPTY_GROUPS = True


VMWARE_VALIDATE_CERTS = False
# ---------------------------
# -- Google Compute Engine --
# ---------------------------
@@ -770,14 +770,12 @@ GCE_HOST_FILTER = r'^.+$'
GCE_EXCLUDE_EMPTY_GROUPS = True
GCE_INSTANCE_ID_VAR = None


# -------------------
# -- Microsoft Azure --
# -------------------

# --------------------------------------
# -- Microsoft Azure Resource Manager --
# --------------------------------------
# It's not possible to get zones in Azure without authenticating, so we
# provide a list here.
AZURE_REGION_CHOICES = [
AZURE_RM_REGION_CHOICES = [
('eastus', _('US East')),
('eastus2', _('US East 2')),
('centralus', _('US Central')),
@@ -804,23 +802,8 @@ AZURE_REGION_CHOICES = [
('koreacentral', _('Korea Central')),
('koreasouth', _('Korea South')),
]
AZURE_REGIONS_BLACKLIST = []
AZURE_RM_REGIONS_BLACKLIST = []

# Inventory variable name/value for determining whether a host is active
# in Microsoft Azure.
AZURE_ENABLED_VAR = 'instance_status'
AZURE_ENABLED_VALUE = 'ReadyRole'

# Filter for allowed group and host names when importing inventory from
# Microsoft Azure.
AZURE_GROUP_FILTER = r'^.+$'
AZURE_HOST_FILTER = r'^.+$'
AZURE_EXCLUDE_EMPTY_GROUPS = True
AZURE_INSTANCE_ID_VAR = 'private_id'

# --------------------------------------
# -- Microsoft Azure Resource Manager --
# --------------------------------------
AZURE_RM_GROUP_FILTER = r'^.+$'
AZURE_RM_HOST_FILTER = r'^.+$'
AZURE_RM_ENABLED_VAR = 'powerstate'

@@ -87,6 +87,8 @@ settings_files = os.path.join(settings_dir, '*.py')
settings_file = os.environ.get('AWX_SETTINGS_FILE',
'/etc/tower/settings.py')

MIDDLEWARE_CLASSES = ('awx.main.middleware.MigrationRanCheckMiddleware',) + MIDDLEWARE_CLASSES

# Attempt to load settings from /etc/tower/settings.py first, followed by
# /etc/tower/conf.d/*.py.
try:

@@ -136,8 +136,7 @@ class LDAPBackend(BaseLDAPBackend):
def _decorate_enterprise_user(user, provider):
user.set_unusable_password()
user.save()
enterprise_auth = UserEnterpriseAuth(user=user, provider=provider)
enterprise_auth.save()
enterprise_auth, _ = UserEnterpriseAuth.objects.get_or_create(user=user, provider=provider)
return enterprise_auth


@@ -241,7 +240,9 @@ class TowerSAMLIdentityProvider(BaseSAMLIdentityProvider):
another attribute to use.
"""
key = self.conf.get(conf_key, default_attribute)
value = attributes[key][0] if key in attributes else None
value = attributes[key] if key in attributes else None
if isinstance(value, list):
value = value[0]
if conf_key in ('attr_first_name', 'attr_last_name', 'attr_username', 'attr_email') and value is None:
logger.warn("Could not map user detail '%s' from SAML attribute '%s'; "
"update SOCIAL_AUTH_SAML_ENABLED_IDPS['%s']['%s'] with the correct SAML attribute.",
@@ -267,16 +268,12 @@ class SAMLAuth(BaseSAMLAuth):
if not feature_enabled('enterprise_auth'):
logger.error("Unable to authenticate, license does not support SAML authentication")
return None
created = False
try:
user = User.objects.get(username=kwargs.get('username', ''))
if user and not user.is_in_enterprise_category('saml'):
return None
except User.DoesNotExist:
created = True
user = super(SAMLAuth, self).authenticate(*args, **kwargs)
if user and created:
# Comes from https://github.com/omab/python-social-auth/blob/v0.2.21/social/backends/base.py#L91
if getattr(user, 'is_new', False):
_decorate_enterprise_user(user, 'saml')
elif user and not user.is_in_enterprise_category('saml'):
return None
return user

def get_user(self, user_id):