Mirror of https://github.com/ansible/awx.git, synced 2026-02-06 12:04:44 -03:30
Compare commits
697 Commits
697 commits in the compared range, 69589821ce through 6cd4b1c666 (the mirror snapshot captured only the SHA column; author, date, and commit-message metadata are absent).
.gitignore (vendored, 1 line changed)
@@ -35,6 +35,7 @@ rsyslog.pid
/tower-license
/tower-license/**
tools/prometheus/data
tools/docker-compose/Dockerfile

# Tower setup playbook testing
setup/test/roles/postgresql
CHANGELOG.md (31 lines changed)
@@ -2,6 +2,35 @@

This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.

## 13.0.0 (Jun 23, 2020)
- Added import and export subcommands to the awx-cli tool, replacing send and receive from the old tower-cli (https://github.com/ansible/awx/pull/6125).
- Removed scripts as a means of running inventory updates of built-in types (https://github.com/ansible/awx/pull/6911)
- Ansible 2.8 is now partially unsupported; some inventory source types are known to no longer work.
- Fixed an issue where the vmware inventory source ssl_verify source variable was not recognized (https://github.com/ansible/awx/pull/7360)
- Fixed a bug that caused redis' listen socket to have too-permissive file permissions (https://github.com/ansible/awx/pull/7317)
- Fixed a bug that caused rsyslogd's configuration file to have world-readable file permissions, potentially leaking secrets (CVE-2020-10782)

## 12.0.0 (Jun 9, 2020)
- Removed memcached as a dependency of AWX (https://github.com/ansible/awx/pull/7240)
- Moved to a single container image build instead of separate awx_web and awx_task images. The container image is just `awx` (https://github.com/ansible/awx/pull/7228)
- Official AWX container image builds now use a two-stage container build process that notably reduces the size of our published images (https://github.com/ansible/awx/pull/7017)
- Removed support for HipChat notifications ([EoL announcement](https://www.atlassian.com/partnerships/slack/faq#faq-98b17ca3-247f-423b-9a78-70a91681eff0)); all previously-created HipChat notification templates will be deleted due to this removal.
- Fixed a bug which broke AWX installations with oc version 4.3 (https://github.com/ansible/awx/pull/6948/files)
- Fixed a performance issue that caused notable delay of stdout processing for playbooks run against large numbers of hosts (https://github.com/ansible/awx/issues/6991)
- Fixed a bug that caused the CyberArk AIM credential plugin to hang forever in some environments (https://github.com/ansible/awx/issues/6986)
- Fixed a bug that caused ANY/ALL convergence settings not to properly save when editing approval nodes in the UI (https://github.com/ansible/awx/issues/6998)
- Fixed a bug that broke support for the satellite6_group_prefix source variable (https://github.com/ansible/awx/issues/7031)
- Fixed a bug that prevented changes to workflow node convergence settings when approval nodes were in use (https://github.com/ansible/awx/issues/7063)
- Fixed a bug that caused notifications to fail on newer versions of Mattermost (https://github.com/ansible/awx/issues/7264)
- Fixed a bug (by upgrading to 0.8.1 of the foreman collection) that prevented host_filters from working properly with Foreman-based inventory (https://github.com/ansible/awx/issues/7225)
- Fixed a bug that prevented the usage of the Conjur credential plugin with secrets that contain spaces (https://github.com/ansible/awx/issues/7191)
- Fixed a bug in awx-manage run_wsbroadcast --status in kubernetes (https://github.com/ansible/awx/pull/7009)
- Fixed a bug that broke notification toggles for system jobs in the UI (https://github.com/ansible/awx/pull/7042)
- Fixed a bug that broke local pip installs of awxkit (https://github.com/ansible/awx/issues/7107)
- Fixed a bug that prevented PagerDuty notifications from sending for workflow job template approvals (https://github.com/ansible/awx/issues/7094)
- Fixed a bug that broke external log aggregation support for URL paths that include the = character (such as the tokens for SumoLogic) (https://github.com/ansible/awx/issues/7139)
- Fixed a bug that prevented organization admins from removing labels from workflow job templates (https://github.com/ansible/awx/pull/7143)

## 11.2.0 (Apr 29, 2020)

- Inventory updates now use collection-based plugins by default (in Ansible 2.9+):
@@ -103,7 +132,7 @@ This is a list of high-level changes for each release of AWX. A full list of com
- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
- Fixed a bug that can cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448

## 9.1.1 (Jan 14, 2020)
@@ -157,8 +157,7 @@ If you start a second terminal session, you can take a look at the running conta
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
44251b476f98 gcr.io/ansible-tower-engineering/awx_devel:devel "/entrypoint.sh /bin…" 27 seconds ago Up 23 seconds 0.0.0.0:6899->6899/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 0.0.0.0:8080->8080/tcp, 22/tcp, 0.0.0.0:8888->8888/tcp tools_awx_run_9e820694d57e
b049a43817b4 memcached:alpine "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:6379->6379/tcp tools_redis_1
40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds
b66a506d3007 postgres:10 "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:5432->5432/tcp tools_postgres_1
```
**NOTE**
INSTALL.md (18 lines changed)
@@ -80,9 +80,11 @@ Before you can run a deployment, you'll need the following installed in your loc
+ We use this module instead of `docker-py` because it is what the `docker-compose` Python module requires.
- [GNU Make](https://www.gnu.org/software/make/)
- [Git](https://git-scm.com/) Requires Version 1.8.4+
- [Node 10.x LTS version](https://nodejs.org/en/download/)
- [NPM 6.x LTS](https://docs.npmjs.com/)
- Python 3.6+
- [Node 10.x LTS version](https://nodejs.org/en/download/)
+ This is only required if you're [building your own container images](#official-vs-building-images) with `use_container_for_build=false`
- [NPM 6.x LTS](https://docs.npmjs.com/)
+ This is only required if you're [building your own container images](#official-vs-building-images) with `use_container_for_build=false`

### System Requirements

@@ -107,7 +109,7 @@ In the sections below, you'll find deployment details and instructions for each

### Official vs Building Images

When installing AWX you have the option of building your own images or using the images provided on DockerHub (see [awx_web](https://hub.docker.com/r/ansible/awx_web/) and [awx_task](https://hub.docker.com/r/ansible/awx_task/))
When installing AWX you have the option of building your own image or using the image provided on DockerHub (see [awx](https://hub.docker.com/r/ansible/awx/))

This is controlled by the following variables in the `inventory` file

@@ -120,12 +122,16 @@ If these variables are present then all deployments will use these hosted images

*dockerhub_base*

> The base location on DockerHub where the images are hosted (by default this pulls container images named `ansible/awx_web:tag` and `ansible/awx_task:tag`)
> The base location on DockerHub where the images are hosted (by default this pulls a container image named `ansible/awx:tag`)

*dockerhub_version*

> Multiple versions are provided. `latest` always pulls the most recent. You may also select version numbers at different granularities: 1, 1.0, 1.0.1, 1.0.0.123

*use_container_for_build*

> Use a local distribution build container image for building the AWX package. This is helpful if you don't want to bother installing the build-time dependencies as it is taken care of already.

## Upgrading from previous versions

@@ -475,11 +481,11 @@ Before starting the install process, review the [inventory](./installer/inventor

*host_port*

> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. If undefined no port will be exposed. Defaults to *80*.

*host_port_ssl*

> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container for SSL support. Defaults to *443*, only works if you also set `ssl_certificate` (see below).
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container for SSL support. If undefined no port will be exposed. Defaults to *443*, only works if you also set `ssl_certificate` (see below).

*ssl_certificate*
Makefile (52 lines changed)
@@ -6,12 +6,14 @@ PACKER ?= packer
PACKER_BUILD_OPTS ?= -var 'official=$(OFFICIAL)' -var 'aw_repo_url=$(AW_REPO_URL)'
NODE ?= node
NPM_BIN ?= npm
CHROMIUM_BIN=/tmp/chrome-linux/chrome
DEPS_SCRIPT ?= packaging/bundle/deps.py
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
IMAGE_REPOSITORY_AUTH ?=
IMAGE_REPOSITORY_BASE ?= https://gcr.io
VERSION := $(shell cat VERSION)
PYCURL_SSL_LIBRARY ?= openssl

# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
@@ -24,7 +26,7 @@ CELERY_SCHEDULE_FILE ?= /var/lib/awx/beat.db
DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
# Python packages to install only from source (not from binary wheels)
# Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio,pycurl
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==19.3.1 setuptools==41.6.0
@@ -173,9 +175,9 @@ virtualenv_awx:
# --ignore-install flag is not used because *.txt files should specify exact versions
requirements_ansible: virtualenv_ansible
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
else \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
fi
$(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt
# Same effect as using --system-site-packages flag on venv creation
@@ -183,9 +185,9 @@ requirements_ansible: virtualenv_ansible

requirements_ansible_py3: virtualenv_ansible_py3
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
else \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
fi
$(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
# Same effect as using --system-site-packages flag on venv creation
@@ -353,19 +355,19 @@ swagger: reports
check: flake8 pep8 # pyflakes pylint

awx-link:
cp -R /tmp/awx.egg-info /awx_devel/ || true
sed -i "s/placeholder/$(shell cat VERSION)/" /awx_devel/awx.egg-info/PKG-INFO
[ -d "/awx_devel/awx.egg-info" ] || python3 /awx_devel/setup.py egg_info_dev
cp -f /tmp/awx.egg-link /venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link

TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests

# Run all API unit tests.
test:
@if [ "$(VENV_BASE)" ]; then \
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py2,py3
cmp VERSION awxkit/VERSION || "VERSION and awxkit/VERSION *must* match"
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'

COLLECTION_TEST_DIRS ?= awx_collection/test/awx
@@ -375,10 +377,15 @@ COLLECTION_NAMESPACE ?= awx
COLLECTION_INSTALL = ~/.ansible/collections/ansible_collections/$(COLLECTION_NAMESPACE)/$(COLLECTION_PACKAGE)

test_collection:
@if [ "$(VENV_BASE)" ]; then \
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
PYTHONPATH=$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
py.test $(COLLECTION_TEST_DIRS) -v
# The python path needs to be modified so that the tests can find Ansible within the container
# First we will use anything explicitly set as PYTHONPATH
# Second we will load any libraries out of the virtualenv (if it's unspecified that should be ok because python should not load out of an empty directory)
# Finally we will add the system path so that the tests can find the ansible libraries

flake8_collection:
flake8 awx_collection/ # Different settings, in main exclude list
@@ -393,7 +400,7 @@ symlink_collection:
ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)

build_collection:
ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION) -e '{"awx_template_version":false}'
ansible-galaxy collection build awx_collection --force --output-path=awx_collection

install_collection: build_collection
@@ -547,11 +554,13 @@ jshint: $(UI_DEPS_FLAG_FILE)
$(NPM_BIN) run --prefix awx/ui jshint
$(NPM_BIN) run --prefix awx/ui lint

ui-zuul-lint-and-test: $(UI_DEPS_FLAG_FILE)
$(NPM_BIN) run --prefix awx/ui jshint
$(NPM_BIN) run --prefix awx/ui lint
$(NPM_BIN) --prefix awx/ui run test:ci
$(NPM_BIN) --prefix awx/ui run unit
ui-zuul-lint-and-test:
CHROMIUM_BIN=$(CHROMIUM_BIN) ./awx/ui/build/zuul_download_chromium.sh
CHROMIUM_BIN=$(CHROMIUM_BIN) PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 $(NPM_BIN) --unsafe-perm --prefix awx/ui ci --no-save awx/ui
CHROMIUM_BIN=$(CHROMIUM_BIN) $(NPM_BIN) run --prefix awx/ui jshint
CHROMIUM_BIN=$(CHROMIUM_BIN) $(NPM_BIN) run --prefix awx/ui lint
CHROME_BIN=$(CHROMIUM_BIN) $(NPM_BIN) --prefix awx/ui run test:ci
CHROME_BIN=$(CHROMIUM_BIN) $(NPM_BIN) --prefix awx/ui run unit

# END UI TASKS
# --------------------------------------
@@ -640,7 +649,7 @@ docker-compose-runtest: awx/projects
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh

docker-compose-build-swagger: awx/projects
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports --no-deps awx /start_tests.sh swagger

detect-schema-change: genschema
curl https://s3.amazonaws.com/awx-public-ci-files/schema.json -o reference-schema.json
@@ -650,17 +659,16 @@ detect-schema-change: genschema
docker-compose-clean: awx/projects
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf

docker-compose-build: awx-devel-build

# Base development image build
awx-devel-build:
docker-compose-build:
ansible localhost -m template -a "src=installer/roles/image_build/templates/Dockerfile.j2 dest=tools/docker-compose/Dockerfile" -e build_dev=True
docker build -t ansible/awx_devel -f tools/docker-compose/Dockerfile \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
docker tag ansible/awx_devel $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
#docker push $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)

# For use when developing on "isolated" AWX deployments
docker-compose-isolated-build: awx-devel-build
docker-compose-isolated-build: docker-compose-build
docker build -t ansible/awx_isolated -f tools/docker-isolated/Dockerfile .
docker tag ansible/awx_isolated $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
#docker push $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
@@ -30,6 +30,7 @@ except ImportError:
HAS_DJANGO = False
else:
from django.db.backends.base import schema
from django.db.models import indexes
from django.db.backends.utils import names_digest

@@ -50,6 +51,7 @@ if HAS_DJANGO is True:
return h.hexdigest()[:length]

schema.names_digest = names_digest
indexes.names_digest = names_digest


def find_commands(management_dir):
@@ -980,7 +980,7 @@ class CopyAPIView(GenericAPIView):
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
new_obj.admin_role.members.add(request.user)
if sub_objs:
# store the copied object dict into memcached, because it's
# store the copied object dict into cache, because it's
# often too large for postgres' notification bus
# (which has a default maximum message size of 8k)
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
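The comment in this hunk explains why the copied object is stashed in the cache rather than sent over postgres' notification bus, which has an 8 kB default message limit. A minimal sketch of that stash-and-pass-a-key pattern using Django's low-level cache API; the key prefix and timeout here are illustrative assumptions, not AWX's actual values:

```python
# Sketch only: assumes a configured Django cache backend.
import json
import uuid

from django.core.cache import cache

def stash_payload(payload, timeout=3600):
    # Store the large payload under a random key; only the short key needs to
    # travel over the size-limited notification bus or task queue.
    key = 'deep-copy-{}'.format(uuid.uuid4())
    cache.set(key, json.dumps(payload), timeout)
    return key

def pop_payload(key):
    raw = cache.get(key)
    cache.delete(key)
    return json.loads(raw) if raw is not None else None
```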
@@ -126,7 +126,7 @@ SUMMARIZABLE_FK_FIELDS = {
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'source_script': DEFAULT_SUMMARY_FIELDS,
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': ('id', 'name', 'controller_id', 'is_containerized'),
@@ -806,7 +806,9 @@ class UnifiedJobSerializer(BaseSerializer):
td = now() - obj.started
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])

# Because this string is saved in the db in the source language,
# it must be marked for translation after it is pulled from the db, not when set
ret['job_explanation'] = _(obj.job_explanation)
return ret
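The elapsed computation kept in this hunk is the classic manual "total seconds of a timedelta" formula; the snippet below only illustrates that it matches datetime.timedelta.total_seconds():

```python
from datetime import timedelta

# Illustrative check of the equivalence, not part of the serializer.
td = timedelta(days=1, seconds=5, microseconds=250000)
manual = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
assert manual == td.total_seconds() == 86405.25
```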
@@ -2306,6 +2308,7 @@ class RoleSerializer(BaseSerializer):
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
ret['summary_fields']['resource_id'] = obj.object_id

return ret
@@ -2641,9 +2644,17 @@ class CredentialSerializerCreate(CredentialSerializer):
owner_fields.add(field)
else:
attrs.pop(field)

if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})

if len(owner_fields) > 1:
received = ", ".join(sorted(owner_fields))
raise serializers.ValidationError({"detail": _(
"Only one of 'user', 'team', or 'organization' should be provided, "
"received {} fields.".format(received)
)})

if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
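The validation added above enforces that exactly one owner field accompanies a new credential. A standalone, illustrative version of the same rule (plain ValueError in place of DRF's ValidationError; not the serializer's actual code):

```python
def validate_owner_fields(attrs):
    # A credential must name exactly one owner among user, team, organization.
    owner_fields = {f for f in ('user', 'team', 'organization') if attrs.get(f)}
    if not owner_fields:
        raise ValueError("Missing 'user', 'team', or 'organization'.")
    if len(owner_fields) > 1:
        received = ", ".join(sorted(owner_fields))
        raise ValueError(
            "Only one of 'user', 'team', or 'organization' should be provided, "
            "received {} fields.".format(received)
        )
    return owner_fields.pop()

# validate_owner_fields({'team': 42})            -> 'team'
# validate_owner_fields({'user': 1, 'team': 42}) -> ValueError
```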
@@ -2756,16 +2767,11 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
if obj.organization_id:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization_id})
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})

return res
@@ -2934,7 +2940,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
@@ -2945,10 +2950,6 @@
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields

@@ -3023,7 +3024,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
@@ -3034,10 +3034,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields

@@ -3612,7 +3608,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
ujt = self.instance.unified_job_template
if ujt is None:
ret = {}
for fd in ('workflow_job_template', 'identifier'):
for fd in ('workflow_job_template', 'identifier', 'all_parents_must_converge'):
if fd in attrs:
ret[fd] = attrs[fd]
return ret
@@ -3899,15 +3895,23 @@ class ProjectUpdateEventSerializer(JobEventSerializer):
return UriCleaner.remove_sensitive(obj.stdout)

def get_event_data(self, obj):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
# the project update playbook uses the git, hg, or svn modules
# to clone repositories, and those modules are prone to printing
# raw SCM URLs in their stdout (which *could* contain passwords)
# attempt to detect and filter HTTP basic auth passwords in the stdout
# of these types of events
if obj.event_data.get('task_action') in ('git', 'hg', 'svn'):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
)
)
)
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
else:
return obj.event_data


class AdHocCommandEventSerializer(BaseSerializer):
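The rewritten get_event_data only pays for URL sanitization on events emitted by the git, hg, or svn modules. A rough sketch of that conditional-sanitization idea; the regex below is an illustrative stand-in for UriCleaner.remove_sensitive, not AWX's implementation:

```python
import json
import re

# Blank out the userinfo portion of credentialed URLs, e.g.
# https://user:secret@host/repo.git -> https://$encrypted$:$encrypted$@host/repo.git
REDACT = re.compile(r'(\w+://)[^/\s:@]+:[^/\s:@]+@')

def sanitize_scm_event(event_data):
    # Only git/hg/svn task events are likely to echo credentialed SCM URLs,
    # so everything else is returned untouched (cheap common case).
    if event_data.get('task_action') not in ('git', 'hg', 'svn'):
        return event_data
    try:
        cleaned = REDACT.sub(r'\g<1>$encrypted$:$encrypted$@', json.dumps(event_data))
        return json.loads(cleaned)
    except Exception:
        return {}
```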
@@ -23,9 +23,7 @@ from awx.api.views import (
UnifiedJobList,
HostAnsibleFactsDetail,
JobCredentialsList,
JobExtraCredentialsList,
JobTemplateCredentialsList,
JobTemplateExtraCredentialsList,
SchedulePreview,
ScheduleZoneInfo,
OAuth2ApplicationList,
@@ -83,9 +81,7 @@ v2_urls = [
url(r'^credential_types/', include(credential_type_urls)),
url(r'^credential_input_sources/', include(credential_input_source_urls)),
url(r'^hosts/(?P<pk>[0-9]+)/ansible_facts/$', HostAnsibleFactsDetail.as_view(), name='host_ansible_facts_detail'),
url(r'^jobs/(?P<pk>[0-9]+)/extra_credentials/$', JobExtraCredentialsList.as_view(), name='job_extra_credentials_list'),
url(r'^jobs/(?P<pk>[0-9]+)/credentials/$', JobCredentialsList.as_view(), name='job_credentials_list'),
url(r'^job_templates/(?P<pk>[0-9]+)/extra_credentials/$', JobTemplateExtraCredentialsList.as_view(), name='job_template_extra_credentials_list'),
url(r'^job_templates/(?P<pk>[0-9]+)/credentials/$', JobTemplateCredentialsList.as_view(), name='job_template_credentials_list'),
url(r'^schedules/preview/$', SchedulePreview.as_view(), name='schedule_rrule'),
url(r'^schedules/zoneinfo/$', ScheduleZoneInfo.as_view(), name='schedule_zoneinfo'),

@@ -12,7 +12,7 @@ import socket
import sys
import time
from base64 import b64encode
from collections import OrderedDict, Iterable
from collections import OrderedDict


# Django
@@ -2337,70 +2337,24 @@ class JobTemplateLaunch(RetrieveAPIView):
old field structure to launch endpoint
TODO: delete this method with future API version changes
'''
ignored_fields = {}
modern_data = data.copy()

id_fd = '{}_id'.format('inventory')
if 'inventory' not in modern_data and id_fd in modern_data:
modern_data['inventory'] = modern_data[id_fd]

# Automatically convert legacy launch credential arguments into a list of `.credentials`
if 'credentials' in modern_data and 'extra_credentials' in modern_data:
raise ParseError({"error": _(
"'credentials' cannot be used in combination with 'extra_credentials'."
)})

if 'extra_credentials' in modern_data:
# make a list of the current credentials
existing_credentials = obj.credentials.all()
template_credentials = list(existing_credentials) # save copy of existing
new_credentials = []
if 'extra_credentials' in modern_data:
existing_credentials = [
cred for cred in existing_credentials
if cred.credential_type.kind not in ('cloud', 'net')
]
prompted_value = modern_data.pop('extra_credentials')

# validate type, since these are not covered by a serializer
if not isinstance(prompted_value, Iterable):
msg = _(
"Incorrect type. Expected a list received {}."
).format(prompted_value.__class__.__name__)
raise ParseError({'extra_credentials': [msg], 'credentials': [msg]})

# add the deprecated credential specified in the request
if not isinstance(prompted_value, Iterable) or isinstance(prompted_value, str):
prompted_value = [prompted_value]

# If user gave extra_credentials, special case to use exactly
# the given list without merging with JT credentials
if prompted_value:
obj._deprecated_credential_launch = True # signal to not merge credentials
new_credentials.extend(prompted_value)

# combine the list of "new" and the filtered list of "old"
new_credentials.extend([cred.pk for cred in existing_credentials])
if new_credentials:
# If provided list doesn't contain the pre-existing credentials
# defined on the template, add them back here
for cred_obj in template_credentials:
if cred_obj.pk not in new_credentials:
new_credentials.append(cred_obj.pk)
modern_data['credentials'] = new_credentials

# credential passwords were historically provided as top-level attributes
if 'credential_passwords' not in modern_data:
modern_data['credential_passwords'] = data.copy()

return (modern_data, ignored_fields)
return modern_data


def post(self, request, *args, **kwargs):
obj = self.get_object()

try:
modern_data, ignored_fields = self.modernize_launch_payload(
modern_data = self.modernize_launch_payload(
data=request.data, obj=obj
)
except ParseError as exc:
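modernize_launch_payload above folds legacy launch fields (the <name>_id aliases and the deprecated extra_credentials list) into the modern payload shape. A much-simplified, illustrative helper showing the same conversion idea — it ignores the cloud/net credential filtering the real view performs:

```python
def modernize(data, template_credential_pks=()):
    # Illustrative only; field names mirror the view, the helper is not AWX code.
    modern = dict(data)
    if 'inventory' not in modern and 'inventory_id' in modern:
        modern['inventory'] = modern.pop('inventory_id')
    if 'extra_credentials' in modern:
        if 'credentials' in modern:
            raise ValueError("'credentials' cannot be used in combination with 'extra_credentials'.")
        prompted = list(modern.pop('extra_credentials'))
        # keep template credentials the caller did not re-specify
        modern['credentials'] = prompted + [pk for pk in template_credential_pks if pk not in prompted]
    return modern

# modernize({'inventory_id': 3, 'extra_credentials': [7]}, template_credential_pks=[1, 7])
#   -> {'inventory': 3, 'credentials': [7, 1]}
```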
@@ -2410,8 +2364,6 @@
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

ignored_fields.update(serializer._ignored_fields)

if not request.user.can_access(models.JobLaunchConfig, 'add', serializer.validated_data, template=obj):
raise PermissionDenied()

@@ -2427,11 +2379,11 @@
data = OrderedDict()
if isinstance(new_job, models.WorkflowJob):
data['workflow_job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
data['ignored_fields'] = self.sanitize_for_response(serializer._ignored_fields)
data.update(serializers.WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
else:
data['job'] = new_job.id
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
data['ignored_fields'] = self.sanitize_for_response(serializer._ignored_fields)
data.update(serializers.JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
headers = {'Location': new_job.get_absolute_url(request)}
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
@@ -2711,22 +2663,6 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)


class JobTemplateExtraCredentialsList(JobTemplateCredentialsList):

deprecated = True

def get_queryset(self):
sublist_qs = super(JobTemplateExtraCredentialsList, self).get_queryset()
sublist_qs = sublist_qs.filter(credential_type__kind__in=['cloud', 'net'])
return sublist_qs

def is_valid_relation(self, parent, sub, created=False):
valid = super(JobTemplateExtraCredentialsList, self).is_valid_relation(parent, sub, created)
if sub.credential_type.kind not in ('cloud', 'net'):
return {'error': _('Extra credentials must be network or cloud.')}
return valid


class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):

model = models.Label
@@ -3543,16 +3479,6 @@ class JobCredentialsList(SubListAPIView):
relationship = 'credentials'


class JobExtraCredentialsList(JobCredentialsList):

deprecated = True

def get_queryset(self):
sublist_qs = super(JobExtraCredentialsList, self).get_queryset()
sublist_qs = sublist_qs.filter(credential_type__kind__in=['cloud', 'net'])
return sublist_qs


class JobLabelList(SubListAPIView):

model = models.Label

@@ -4,12 +4,11 @@ import os
import logging
import django
from awx import __version__ as tower_version

# Prepare the AWX environment.
from awx import prepare_env, MODE
from channels.routing import get_default_application # noqa
prepare_env() # NOQA

from channels.routing import get_default_application


"""

@@ -11,7 +11,7 @@ from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction, connection
from django.db.utils import Error as DBError
from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property

# Django REST Framework
@@ -31,18 +31,18 @@ logger = logging.getLogger('awx.conf.settings')
# Store a special value to indicate when a setting is not set in the database.
SETTING_CACHE_NOTSET = '___notset___'

# Cannot store None in memcached; use a special value instead to indicate None.
# Cannot store None in cache; use a special value instead to indicate None.
# If the special value for None is the same as the "not set" value, then a value
# of None will be equivalent to the setting not being set (and will raise an
# AttributeError if there is no other default defined).
# SETTING_CACHE_NONE = '___none___'
SETTING_CACHE_NONE = SETTING_CACHE_NOTSET

# Cannot store empty list/tuple in memcached; use a special value instead to
# Cannot store empty list/tuple in cache; use a special value instead to
# indicate an empty list.
SETTING_CACHE_EMPTY_LIST = '___[]___'

# Cannot store empty dict in memcached; use a special value instead to indicate
# Cannot store empty dict in cache; use a special value instead to indicate
# an empty dict.
SETTING_CACHE_EMPTY_DICT = '___{}___'
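The sentinel constants exist because the cache backend cannot distinguish "key missing" from None or an empty container. A sketch of how such sentinels are typically applied on the way into and out of the cache; the helper names are illustrative, not awx.conf's API:

```python
SETTING_CACHE_NOTSET = '___notset___'
SETTING_CACHE_NONE = SETTING_CACHE_NOTSET
SETTING_CACHE_EMPTY_LIST = '___[]___'
SETTING_CACHE_EMPTY_DICT = '___{}___'

def encode_for_cache(value):
    # Replace values the cache cannot represent unambiguously with sentinels.
    if value is None:
        return SETTING_CACHE_NONE
    if value == []:
        return SETTING_CACHE_EMPTY_LIST
    if value == {}:
        return SETTING_CACHE_EMPTY_DICT
    return value

def decode_from_cache(value):
    # Map sentinels back; NOTSET and None collapse to "treat as unset" by design.
    if value in (SETTING_CACHE_NOTSET, SETTING_CACHE_NONE):
        return None
    if value == SETTING_CACHE_EMPTY_LIST:
        return []
    if value == SETTING_CACHE_EMPTY_DICT:
        return {}
    return value
```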
@@ -74,10 +74,19 @@ def _ctit_db_wrapper(trans_safe=False):
logger.debug('Obtaining database settings in spite of broken transaction.')
transaction.set_rollback(False)
yield
except DBError:
except DBError as exc:
if trans_safe:
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
logger.exception('Database settings are not available, using defaults.')
level = logger.exception
if isinstance(exc, ProgrammingError):
if 'relation' in str(exc) and 'does not exist' in str(exc):
# this generally means we can't fetch Tower configuration
# because the database hasn't actually finished migrating yet;
# this is usually a sign that a service in a container (such as ws_broadcast)
# has come up *before* the database has finished migrating, and
# especially that the conf.settings table doesn't exist yet
level = logger.debug
level('Database settings are not available, using defaults.')
else:
logger.exception('Error modifying something related to database settings.')
finally:
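The rewritten handler picks the logging callable first (exception vs. debug) and emits a single message through it, so the expected "relation does not exist while migrations are still running" case does not spam full tracebacks. A compact sketch of the same pattern; the exception classes below stand in for Django's DBError/ProgrammingError:

```python
import logging

logger = logging.getLogger('awx.conf.settings')

class DBError(Exception):
    pass

class ProgrammingError(DBError):
    pass

def fetch_settings_or_default(fetch):
    try:
        return fetch()
    except DBError as exc:
        level = logger.exception  # default: full traceback
        if isinstance(exc, ProgrammingError) and 'relation' in str(exc) and 'does not exist' in str(exc):
            # expected while migrations are still running; don't log a traceback
            level = logger.debug
        level('Database settings are not available, using defaults.')
        return None
```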
@@ -29,9 +29,10 @@ def reg(request):
# as "defined in a settings file". This is analogous to manually
# specifying a setting on the filesystem (e.g., in a local_settings.py in
# development, or in /etc/tower/conf.d/<something>.py)
defaults = request.node.get_marker('defined_in_file')
if defaults:
settings.configure(**defaults.kwargs)
for marker in request.node.own_markers:
if marker.name == 'defined_in_file':
settings.configure(**marker.kwargs)

settings._wrapped = SettingsWrapper(settings._wrapped,
cache,
registry)

@@ -41,13 +41,16 @@ def settings(request):
cache = LocMemCache(str(uuid4()), {}) # make a new random cache each time
settings = LazySettings()
registry = SettingsRegistry(settings)
defaults = {}

# @pytest.mark.defined_in_file can be used to mark specific setting values
# as "defined in a settings file". This is analogous to manually
# specifying a setting on the filesystem (e.g., in a local_settings.py in
# development, or in /etc/tower/conf.d/<something>.py)
in_file_marker = request.node.get_marker('defined_in_file')
defaults = in_file_marker.kwargs if in_file_marker else {}
for marker in request.node.own_markers:
if marker.name == 'defined_in_file':
defaults = marker.kwargs

defaults['DEFAULTS_SNAPSHOT'] = {}
settings.configure(**defaults)
settings._wrapped = SettingsWrapper(settings._wrapped,
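Both fixtures above switch from the removed request.node.get_marker() API to iterating request.node.own_markers (pytest 4 and later). A minimal, self-contained example of the same lookup pattern:

```python
import pytest

@pytest.fixture
def defined_in_file_settings(request):
    # pytest >= 4: node.get_marker() is gone; inspect own_markers instead.
    defaults = {}
    for marker in request.node.own_markers:
        if marker.name == 'defined_in_file':
            defaults = dict(marker.kwargs)
    return defaults

# Register 'defined_in_file' in pytest.ini to silence the unknown-marker warning.
@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_marker_is_visible(defined_in_file_settings):
    assert defined_in_file_settings == {'AWX_SOME_SETTING': 'DEFAULT'}
```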
@@ -63,15 +66,6 @@ def test_unregistered_setting(settings):
assert settings.cache.get('DEBUG') is None


def test_cached_settings_unicode_is_auto_decoded(settings):
# https://github.com/linsomniac/python-memcached/issues/79
# https://github.com/linsomniac/python-memcached/blob/288c159720eebcdf667727a859ef341f1e908308/memcache.py#L961

value = 'Iñtërnâtiônàlizætiøn' # this simulates what python-memcached does on cache.set()
settings.cache.set('DEBUG', value)
assert settings.cache.get('DEBUG') == 'Iñtërnâtiônàlizætiøn'


def test_read_only_setting(settings):
settings.registry.register(
'AWX_READ_ONLY',
@@ -251,31 +245,6 @@ def test_setting_from_db(settings, mocker):
assert settings.cache.get('AWX_SOME_SETTING') == 'FROM_DB'


@pytest.mark.parametrize('encrypted', (True, False))
def test_setting_from_db_with_unicode(settings, mocker, encrypted):
settings.registry.register(
'AWX_SOME_SETTING',
field_class=fields.CharField,
category=_('System'),
category_slug='system',
default='DEFAULT',
encrypted=encrypted
)
# this simulates a bug in python-memcached; see https://github.com/linsomniac/python-memcached/issues/79
value = 'Iñtërnâtiônàlizætiøn'

setting_from_db = mocker.Mock(id=1, key='AWX_SOME_SETTING', value=value)
mocks = mocker.Mock(**{
'order_by.return_value': mocker.Mock(**{
'__iter__': lambda self: iter([setting_from_db]),
'first.return_value': setting_from_db
}),
})
with mocker.patch('awx.conf.models.Setting.objects.filter', return_value=mocks):
assert settings.AWX_SOME_SETTING == 'Iñtërnâtiônàlizætiøn'
assert settings.cache.get('AWX_SOME_SETTING') == 'Iñtërnâtiônàlizætiøn'


@pytest.mark.defined_in_file(AWX_SOME_SETTING='DEFAULT')
def test_read_only_setting_assignment(settings):
"read-only settings cannot be overwritten"

@@ -10,6 +10,7 @@ import socket
from socket import SHUT_RDWR

# Django
from django.db import connection
from django.conf import settings
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
@@ -130,7 +131,8 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
setting.save(update_fields=['value'])
settings_change_list.append(key)
if settings_change_list:
handle_setting_changes.delay(settings_change_list)
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))


def destroy(self, request, *args, **kwargs):
instance = self.get_object()
@@ -145,7 +147,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
setting.delete()
settings_change_list.append(setting.key)
if settings_change_list:
handle_setting_changes.delay(settings_change_list)
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))

# When TOWER_URL_BASE is deleted from the API, reset it to the hostname
# used to make the request as a default.
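Both handlers now defer dispatching the settings-changed task until the database transaction actually commits, so a worker cannot pick up the task and read stale, uncommitted rows. The same idea with Django's public transaction.on_commit API, sketched around a hypothetical dispatch callable standing in for handle_setting_changes.delay:

```python
from django.db import transaction

def save_settings_and_notify(changed_keys, dispatch):
    # `dispatch` is a stand-in for handle_setting_changes.delay; the callback
    # only runs if (and after) the surrounding transaction commits.
    with transaction.atomic():
        # ... persist Setting rows here ...
        if changed_keys:
            transaction.on_commit(lambda: dispatch(changed_keys))
```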
Seven further file diffs were suppressed because they are too large.
@@ -495,7 +495,7 @@ class NotificationAttachMixin(BaseAccess):
             # due to this special case, we use symmetrical logic with attach permission
             return self._can_attach(notification_template=sub_obj, resource_obj=obj)
         return super(NotificationAttachMixin, self).can_unattach(
-            obj, sub_obj, relationship, relationship, data=data
+            obj, sub_obj, relationship, data=data
         )
|
||||
|
||||
|
||||
|
||||
@@ -246,18 +246,6 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_ISOLATED_VERBOSITY',
|
||||
field_class=fields.IntegerField,
|
||||
min_value=0,
|
||||
max_value=5,
|
||||
label=_('Verbosity level for isolated node management tasks'),
|
||||
help_text=_('This can be raised to aid in debugging connection issues for isolated task execution'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
default=0
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_ISOLATED_CHECK_INTERVAL',
|
||||
field_class=fields.IntegerField,
|
||||
@@ -435,6 +423,19 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_SHOW_PLAYBOOK_LINKS',
|
||||
field_class=fields.BooleanField,
|
||||
default=False,
|
||||
label=_('Follow symlinks'),
|
||||
help_text=_(
|
||||
'Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead '
|
||||
'to infinite recursion if a link points to a parent directory of itself.'
|
||||
),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
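
Since the help text above warns about infinite recursion when symlinks are followed, a scanner behind this kind of option usually tracks the real paths it has already visited. A hedged, standalone sketch of that guard (not AWX's playbook scanner):

import os

def find_playbooks(root, follow_symlinks=False):
    # yield .yml/.yaml paths under root while guarding against symlink loops
    seen = set()
    for dirpath, dirnames, filenames in os.walk(root, followlinks=follow_symlinks):
        real = os.path.realpath(dirpath)
        if real in seen:
            dirnames[:] = []  # already visited via another link: prune this branch
            continue
        seen.add(real)
        for name in filenames:
            if name.endswith(('.yml', '.yaml')):
                yield os.path.join(dirpath, name)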
|
||||
|
||||
register(
|
||||
'PRIMARY_GALAXY_URL',
|
||||
field_class=fields.URLField,
|
||||
@@ -777,16 +778,6 @@ register(
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_AUDIT',
|
||||
field_class=fields.BooleanField,
|
||||
allow_null=True,
|
||||
default=False,
|
||||
label=_('Enabled external log aggregation auditing'),
|
||||
help_text=_('When enabled, all external logs emitted by Tower will also be written to /var/log/tower/external.log. This is an experimental setting intended to be used for debugging external log aggregation issues (and may be subject to change in the future).'), # noqa
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
|
||||
field_class=fields.IntegerField,
|
||||
@@ -822,15 +813,6 @@ register(
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'BROKER_DURABILITY',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Message Durability'),
|
||||
help_text=_('When set (the default), underlying queues will be persisted to disk. Disable this to enable higher message bus throughput.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||
|
||||
@@ -10,8 +10,7 @@ __all__ = [
|
||||
'ANSI_SGR_PATTERN', 'CAN_CANCEL', 'ACTIVE_STATES', 'STANDARD_INVENTORY_UPDATE_ENV'
|
||||
]
|
||||
|
||||
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'cloudforms', 'tower')
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
|
||||
SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
|
||||
PRIVILEGE_ESCALATION_METHODS = [
|
||||
('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')),
|
||||
|
||||
@@ -104,7 +104,7 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
         logger.info(f"client '{self.channel_name}' joined the broadcast group.")

     async def disconnect(self, code):
-        logger.info("client '{self.channel_name}' disconnected from the broadcast group.")
+        logger.info(f"client '{self.channel_name}' disconnected from the broadcast group.")
         await self.channel_layer.group_discard(settings.BROADCAST_WEBSOCKET_GROUP_NAME, self.channel_name)

     async def internal_message(self, event):
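
The consumer above follows the usual Django Channels group pattern: join a group on connect, leave it on disconnect. A minimal, self-contained sketch (the class and group name are illustrative, not AWX's BROADCAST_WEBSOCKET_GROUP_NAME setting):

from channels.generic.websocket import AsyncJsonWebsocketConsumer

class ExampleBroadcastConsumer(AsyncJsonWebsocketConsumer):
    group_name = 'example-broadcast'

    async def connect(self):
        # join the shared group so channel_layer.group_send() reaches this client
        await self.channel_layer.group_add(self.group_name, self.channel_name)
        await self.accept()

    async def disconnect(self, code):
        # mirror of connect(): leave the group when the socket closes
        await self.channel_layer.group_discard(self.group_name, self.channel_name)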
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
from .plugin import CredentialPlugin
|
||||
from .plugin import CredentialPlugin, CertFiles
|
||||
|
||||
from urllib.parse import quote, urlencode, urljoin
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import requests
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
create_temporary_fifo,
|
||||
)
|
||||
|
||||
aim_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
@@ -81,21 +76,13 @@ def aim_backend(**kwargs):
|
||||
request_qs = '?' + urlencode(query_params, quote_via=quote)
|
||||
request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
|
||||
|
||||
cert = None
|
||||
if client_cert and client_key:
|
||||
cert = (
|
||||
create_temporary_fifo(client_cert.encode()),
|
||||
create_temporary_fifo(client_key.encode())
|
||||
with CertFiles(client_cert, client_key) as cert:
|
||||
res = requests.get(
|
||||
request_url + request_qs,
|
||||
timeout=30,
|
||||
cert=cert,
|
||||
verify=verify,
|
||||
)
|
||||
elif client_cert:
|
||||
cert = create_temporary_fifo(client_cert.encode())
|
||||
|
||||
res = requests.get(
|
||||
request_url + request_qs,
|
||||
timeout=30,
|
||||
cert=cert,
|
||||
verify=verify,
|
||||
)
|
||||
res.raise_for_status()
|
||||
return res.json()['Content']
|
||||
|
||||
|
||||
@@ -1,16 +1,11 @@
|
||||
from .plugin import CredentialPlugin
|
||||
from .plugin import CredentialPlugin, CertFiles
|
||||
|
||||
import base64
|
||||
from urllib.parse import urljoin, quote_plus
|
||||
from urllib.parse import urljoin, quote
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import requests
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
create_temporary_fifo,
|
||||
)
|
||||
|
||||
|
||||
conjur_inputs = {
|
||||
'fields': [{
|
||||
@@ -55,9 +50,9 @@ conjur_inputs = {
|
||||
def conjur_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
api_key = kwargs['api_key']
|
||||
account = quote_plus(kwargs['account'])
|
||||
username = quote_plus(kwargs['username'])
|
||||
secret_path = quote_plus(kwargs['secret_path'])
|
||||
account = quote(kwargs['account'], safe='')
|
||||
username = quote(kwargs['username'], safe='')
|
||||
secret_path = quote(kwargs['secret_path'], safe='')
|
||||
version = kwargs.get('secret_version')
|
||||
cacert = kwargs.get('cacert', None)
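
The change above replaces quote_plus with quote(..., safe=''): quote_plus is meant for query strings (spaces become '+'), while quote with an empty safe set percent-encodes everything, including '/' characters that would otherwise break the Conjur URL path. The difference, straight from the standard library:

from urllib.parse import quote, quote_plus

value = 'team a/secret name'
print(quote_plus(value))      # team+a%2Fsecret+name     (query-string style '+')
print(quote(value, safe=''))  # team%20a%2Fsecret%20name (path-safe %20, '/' escaped too)
print(quote(value))           # team%20a/secret%20name   (default leaves '/' alone)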
|
||||
|
||||
@@ -65,22 +60,20 @@ def conjur_backend(**kwargs):
|
||||
'headers': {'Content-Type': 'text/plain'},
|
||||
'data': api_key
|
||||
}
|
||||
if cacert:
|
||||
auth_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# https://www.conjur.org/api.html#authentication-authenticate-post
|
||||
resp = requests.post(
|
||||
urljoin(url, '/'.join(['authn', account, username, 'authenticate'])),
|
||||
**auth_kwargs
|
||||
)
|
||||
with CertFiles(cacert) as cert:
|
||||
# https://www.conjur.org/api.html#authentication-authenticate-post
|
||||
auth_kwargs['verify'] = cert
|
||||
resp = requests.post(
|
||||
urljoin(url, '/'.join(['authn', account, username, 'authenticate'])),
|
||||
**auth_kwargs
|
||||
)
|
||||
resp.raise_for_status()
|
||||
token = base64.b64encode(resp.content).decode('utf-8')
|
||||
|
||||
lookup_kwargs = {
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token)},
|
||||
}
|
||||
if cacert:
|
||||
lookup_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
|
||||
path = urljoin(url, '/'.join([
|
||||
@@ -92,7 +85,9 @@ def conjur_backend(**kwargs):
|
||||
if version:
|
||||
path = '?'.join([path, version])
|
||||
|
||||
resp = requests.get(path, timeout=30, **lookup_kwargs)
|
||||
with CertFiles(cacert) as cert:
|
||||
lookup_kwargs['verify'] = cert
|
||||
resp = requests.get(path, timeout=30, **lookup_kwargs)
|
||||
resp.raise_for_status()
|
||||
return resp.text
|
||||
|
||||
|
||||
@@ -3,16 +3,11 @@ import os
|
||||
import pathlib
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
from .plugin import CredentialPlugin, CertFiles
|
||||
|
||||
import requests
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
create_temporary_fifo,
|
||||
)
|
||||
|
||||
base_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
@@ -129,14 +124,13 @@ def approle_auth(**kwargs):
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
request_kwargs = {'timeout': 30}
|
||||
if cacert:
|
||||
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# AppRole Login
|
||||
request_kwargs['json'] = {'role_id': role_id, 'secret_id': secret_id}
|
||||
sess = requests.Session()
|
||||
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
resp.raise_for_status()
|
||||
token = resp.json()['auth']['client_token']
|
||||
return token
|
||||
@@ -152,8 +146,6 @@ def kv_backend(**kwargs):
|
||||
api_version = kwargs['api_version']
|
||||
|
||||
request_kwargs = {'timeout': 30}
|
||||
if cacert:
|
||||
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
sess = requests.Session()
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
@@ -180,7 +172,9 @@ def kv_backend(**kwargs):
|
||||
path_segments = [secret_path]
|
||||
|
||||
request_url = urljoin(url, '/'.join(['v1'] + path_segments)).rstrip('/')
|
||||
response = sess.get(request_url, **request_kwargs)
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
response = sess.get(request_url, **request_kwargs)
|
||||
response.raise_for_status()
|
||||
|
||||
json = response.json()
|
||||
@@ -205,8 +199,6 @@ def ssh_backend(**kwargs):
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
request_kwargs = {'timeout': 30}
|
||||
if cacert:
|
||||
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
request_kwargs['json'] = {'public_key': kwargs['public_key']}
|
||||
if kwargs.get('valid_principals'):
|
||||
@@ -218,7 +210,10 @@ def ssh_backend(**kwargs):
|
||||
sess.headers['X-Vault-Token'] = token
|
||||
# https://www.vaultproject.io/api/secret/ssh/index.html#sign-ssh-key
|
||||
request_url = '/'.join([url, secret_path, 'sign', role]).rstrip('/')
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
|
||||
resp.raise_for_status()
|
||||
return resp.json()['data']['signed_key']
|
||||
|
||||
@@ -1,3 +1,45 @@
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
CredentialPlugin = namedtuple('CredentialPlugin', ['name', 'inputs', 'backend'])
|
||||
|
||||
|
||||
class CertFiles():
|
||||
"""
|
||||
A context manager used for writing a certificate and (optional) key
|
||||
to $TMPDIR, and cleaning up afterwards.
|
||||
|
||||
This is particularly useful as a shared resource for credential plugins
|
||||
that want to pull cert/key data out of the database and persist it
|
||||
temporarily to the file system so that it can loaded into the openssl
|
||||
certificate chain (generally, for HTTPS requests plugins make via the
|
||||
Python requests library)
|
||||
|
||||
with CertFiles(cert_data, key_data) as cert:
|
||||
# cert is string representing a path to the cert or pemfile
|
||||
# temporarily written to disk
|
||||
requests.post(..., cert=cert)
|
||||
"""
|
||||
|
||||
certfile = None
|
||||
|
||||
def __init__(self, cert, key=None):
|
||||
self.cert = cert
|
||||
self.key = key
|
||||
|
||||
def __enter__(self):
|
||||
if not self.cert:
|
||||
return None
|
||||
self.certfile = tempfile.NamedTemporaryFile('wb', delete=False)
|
||||
self.certfile.write(self.cert.encode())
|
||||
if self.key:
|
||||
self.certfile.write(b'\n')
|
||||
self.certfile.write(self.key.encode())
|
||||
self.certfile.flush()
|
||||
return str(self.certfile.name)
|
||||
|
||||
def __exit__(self, *args):
|
||||
if self.certfile and os.path.exists(self.certfile.name):
|
||||
os.remove(self.certfile.name)
|
||||
|
||||
@@ -24,7 +24,7 @@ class RecordedQueryLog(object):
|
||||
try:
|
||||
self.threshold = cache.get('awx-profile-sql-threshold')
|
||||
except Exception:
|
||||
# if we can't reach memcached, just assume profiling's off
|
||||
# if we can't reach the cache, just assume profiling's off
|
||||
self.threshold = None
|
||||
|
||||
def append(self, query):
|
||||
@@ -110,7 +110,7 @@ class RecordedQueryLog(object):
|
||||
class DatabaseWrapper(BaseDatabaseWrapper):
|
||||
"""
|
||||
This is a special subclass of Django's postgres DB backend which - based on
|
||||
the value of a special flag in memcached - captures slow queries and
|
||||
the value of a special flag in cache - captures slow queries and
|
||||
writes profile and Python stack metadata to the disk.
|
||||
"""
|
||||
|
||||
@@ -133,19 +133,19 @@ class DatabaseWrapper(BaseDatabaseWrapper):
|
||||
# is the same mechanism used by libraries like the django-debug-toolbar)
|
||||
#
|
||||
# in _this_ implementation, we represent it as a property which will
|
||||
# check memcache for a special flag to be set (when the flag is set, it
|
||||
# check the cache for a special flag to be set (when the flag is set, it
|
||||
# means we should start recording queries because somebody called
|
||||
# `awx-manage profile_sql`)
|
||||
#
|
||||
# it's worth noting that this property is wrapped w/ @memoize because
|
||||
# Django references this attribute _constantly_ (in particular, once
|
||||
# per executed query); doing a memcached.get() _at most_ once per
|
||||
# per executed query); doing a cache.get() _at most_ once per
|
||||
# second is a good enough window to detect when profiling is turned
|
||||
# on/off by a system administrator
|
||||
try:
|
||||
threshold = cache.get('awx-profile-sql-threshold')
|
||||
except Exception:
|
||||
# if we can't reach memcached, just assume profiling's off
|
||||
# if we can't reach the cache, just assume profiling's off
|
||||
threshold = None
|
||||
self.queries_log.threshold = threshold
|
||||
return threshold is not None
|
||||
|
||||
@@ -222,7 +222,7 @@ class WorkerPool(object):
|
||||
idx = len(self.workers)
|
||||
# It's important to close these because we're _about_ to fork, and we
|
||||
# don't want the forked processes to inherit the open sockets
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
# for the DB and cache connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
worker = PoolWorker(self.queue_size, self.target, (idx,) + self.target_args)
|
||||
|
||||
@@ -8,6 +8,7 @@ import sys
|
||||
import redis
|
||||
import json
|
||||
import psycopg2
|
||||
import time
|
||||
from uuid import UUID
|
||||
from queue import Empty as QueueEmpty
|
||||
|
||||
@@ -34,6 +35,7 @@ class WorkerSignalHandler:
|
||||
|
||||
def __init__(self):
|
||||
self.kill_now = False
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGINT, self.exit_gracefully)
|
||||
|
||||
def exit_gracefully(self, *args, **kwargs):
|
||||
@@ -116,18 +118,23 @@ class AWXConsumerRedis(AWXConsumerBase):
|
||||
super(AWXConsumerRedis, self).run(*args, **kwargs)
|
||||
self.worker.on_start()
|
||||
|
||||
queue = redis.Redis.from_url(settings.BROKER_URL)
|
||||
time_to_sleep = 1
|
||||
while True:
|
||||
try:
|
||||
res = queue.blpop(self.queues)
|
||||
res = json.loads(res[1])
|
||||
self.process_task(res)
|
||||
except redis.exceptions.RedisError:
|
||||
logger.exception("encountered an error communicating with redis")
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
logger.exception("failed to decode JSON message from redis")
|
||||
if self.should_stop:
|
||||
return
|
||||
queue = redis.Redis.from_url(settings.BROKER_URL)
|
||||
while True:
|
||||
try:
|
||||
res = queue.blpop(self.queues)
|
||||
time_to_sleep = 1
|
||||
res = json.loads(res[1])
|
||||
self.process_task(res)
|
||||
except redis.exceptions.RedisError:
|
||||
time_to_sleep = min(time_to_sleep * 2, 30)
|
||||
logger.exception(f"encountered an error communicating with redis. Reconnect attempt in {time_to_sleep} seconds")
|
||||
time.sleep(time_to_sleep)
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
logger.exception("failed to decode JSON message from redis")
|
||||
if self.should_stop:
|
||||
return
|
||||
|
||||
|
||||
class AWXConsumerPG(AWXConsumerBase):
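
The reworked loop above reconnects to Redis with capped exponential backoff instead of spinning on a dead connection, and resets the delay once a message comes through. The same idea in isolation, as a hedged sketch rather than the AWX consumer itself:

import time

def consume_forever(fetch, handle, max_sleep=30):
    # call fetch() in a loop, backing off exponentially while it keeps failing
    sleep_for = 1
    while True:
        try:
            item = fetch()
            sleep_for = 1  # healthy again: reset the backoff window
            handle(item)
        except ConnectionError:
            sleep_for = min(sleep_for * 2, max_sleep)
            time.sleep(sleep_for)  # 2s, 4s, 8s, ... capped at max_sleep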
|
||||
|
||||
@@ -58,7 +58,7 @@ class IsolatedManager(object):
|
||||
os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
for host in hosts:
|
||||
inventory['all']['hosts'][host] = {
|
||||
"ansible_connection": "kubectl",
|
||||
"ansible_connection": "community.kubernetes.kubectl",
|
||||
"ansible_kubectl_config": path,
|
||||
}
|
||||
else:
|
||||
@@ -74,6 +74,7 @@ class IsolatedManager(object):
|
||||
env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
|
||||
env['ANSIBLE_HOST_KEY_CHECKING'] = str(settings.AWX_ISOLATED_HOST_KEY_CHECKING)
|
||||
env['ANSIBLE_LIBRARY'] = os.path.join(os.path.dirname(awx.__file__), 'plugins', 'isolated')
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
|
||||
set_pythonpath(os.path.join(settings.ANSIBLE_VENV_PATH, 'lib'), env)
|
||||
|
||||
def finished_callback(runner_obj):
|
||||
@@ -109,7 +110,6 @@ class IsolatedManager(object):
|
||||
'cancel_callback': self.canceled_callback,
|
||||
'settings': {
|
||||
'job_timeout': settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
|
||||
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
'suppress_ansible_output': True,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ class Command(BaseCommand):
|
||||
if clear:
|
||||
for i in range(12):
|
||||
sys.stdout.write('\x1b[1A\x1b[2K')
|
||||
for l in lines:
|
||||
print(l)
|
||||
for line in lines:
|
||||
print(line)
|
||||
clear = True
|
||||
time.sleep(.25)
|
||||
|
||||
@@ -169,7 +169,7 @@ class AnsibleInventoryLoader(object):
|
||||
self.tmp_private_dir = build_proot_temp_dir()
|
||||
logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
|
||||
kwargs['proot_temp_dir'] = self.tmp_private_dir
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.INVENTORY_COLLECTIONS_ROOT]
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.AWX_ANSIBLE_COLLECTIONS_PATHS]
|
||||
logger.debug("Running from `{}` working directory.".format(cwd))
|
||||
|
||||
if self.venv_path != settings.ANSIBLE_VENV_PATH:
|
||||
@@ -271,7 +271,7 @@ class Command(BaseCommand):
|
||||
logging.DEBUG, 0]))
|
||||
logger.setLevel(log_levels.get(self.verbosity, 0))
|
||||
|
||||
def _get_instance_id(self, from_dict, default=''):
|
||||
def _get_instance_id(self, variables, default=''):
|
||||
'''
|
||||
Retrieve the instance ID from the given dict of host variables.
|
||||
|
||||
@@ -279,15 +279,23 @@ class Command(BaseCommand):
|
||||
the lookup will traverse into nested dicts, equivalent to:
|
||||
|
||||
from_dict.get('foo', {}).get('bar', default)
|
||||
|
||||
Multiple ID variables may be specified as 'foo.bar,foobar', so that
|
||||
it will first try to find 'bar' inside of 'foo', and if unable,
|
||||
will try to find 'foobar' as a fallback
|
||||
'''
|
||||
instance_id = default
|
||||
if getattr(self, 'instance_id_var', None):
|
||||
for key in self.instance_id_var.split('.'):
|
||||
if not hasattr(from_dict, 'get'):
|
||||
instance_id = default
|
||||
for single_instance_id in self.instance_id_var.split(','):
|
||||
from_dict = variables
|
||||
for key in single_instance_id.split('.'):
|
||||
if not hasattr(from_dict, 'get'):
|
||||
instance_id = default
|
||||
break
|
||||
instance_id = from_dict.get(key, default)
|
||||
from_dict = instance_id
|
||||
if instance_id:
|
||||
break
|
||||
instance_id = from_dict.get(key, default)
|
||||
from_dict = instance_id
|
||||
return smart_text(instance_id)
|
||||
|
||||
def _get_enabled(self, from_dict, default=None):
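
To make the comma-and-dot lookup described in the docstring above concrete, here is a standalone version of the same idea (an illustration, not the Command method itself):

def lookup_instance_id(variables, id_spec, default=''):
    # try each comma-separated candidate; within one, descend the dotted keys
    for candidate in id_spec.split(','):
        value = variables
        for key in candidate.split('.'):
            if not hasattr(value, 'get'):
                value = default
                break
            value = value.get(key, default)
        if value:
            return value
    return default

# lookup_instance_id({'foo': {'bar': 'i-123'}}, 'foo.bar,foobar') -> 'i-123'
# lookup_instance_id({'foobar': 'i-999'}, 'foo.bar,foobar')       -> 'i-999'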
|
||||
@@ -422,7 +430,7 @@ class Command(BaseCommand):
|
||||
for mem_host in self.all_group.all_hosts.values():
|
||||
instance_id = self._get_instance_id(mem_host.variables)
|
||||
if not instance_id:
|
||||
logger.warning('Host "%s" has no "%s" variable',
|
||||
logger.warning('Host "%s" has no "%s" variable(s)',
|
||||
mem_host.name, self.instance_id_var)
|
||||
continue
|
||||
mem_host.instance_id = instance_id
|
||||
@@ -649,11 +657,12 @@ class Command(BaseCommand):
|
||||
if group_name in existing_group_names:
|
||||
continue
|
||||
mem_group = self.all_group.all_groups[group_name]
|
||||
group_desc = mem_group.variables.pop('_awx_description', 'imported')
|
||||
group = self.inventory.groups.update_or_create(
|
||||
name=group_name,
|
||||
defaults={
|
||||
'variables':json.dumps(mem_group.variables),
|
||||
'description':'imported'
|
||||
'description':group_desc
|
||||
}
|
||||
)[0]
|
||||
logger.debug('Group "%s" added', group.name)
|
||||
@@ -776,8 +785,9 @@ class Command(BaseCommand):
|
||||
# Create any new hosts.
|
||||
for mem_host_name in sorted(mem_host_names_to_update):
|
||||
mem_host = self.all_group.all_hosts[mem_host_name]
|
||||
host_attrs = dict(variables=json.dumps(mem_host.variables),
|
||||
description='imported')
|
||||
import_vars = mem_host.variables
|
||||
host_desc = import_vars.pop('_awx_description', 'imported')
|
||||
host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
|
||||
enabled = self._get_enabled(mem_host.variables)
|
||||
if enabled is not None:
|
||||
host_attrs['enabled'] = enabled
|
||||
@@ -1078,7 +1088,7 @@ class Command(BaseCommand):
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('update computed fields took %d queries',
|
||||
len(connection.queries) - queries_before2)
|
||||
# Check if the license is valid.
|
||||
# Check if the license is valid.
|
||||
# If the license is not valid, a CommandError will be thrown,
|
||||
# and inventory update will be marked as invalid.
|
||||
# with transaction.atomic() will roll back the changes.
|
||||
|
||||
@@ -82,7 +82,7 @@ class Command(BaseCommand):
|
||||
OAuth2Application.objects.filter(pk=app.pk).update(client_secret=encrypted)
|
||||
|
||||
def _settings(self):
|
||||
# don't update memcached, the *actual* value isn't changing
|
||||
# don't update the cache, the *actual* value isn't changing
|
||||
post_save.disconnect(on_post_save_setting, sender=Setting)
|
||||
for setting in Setting.objects.filter().order_by('pk'):
|
||||
if settings_registry.is_setting_encrypted(setting.key):
|
||||
|
||||
@@ -16,31 +16,24 @@ class InstanceNotFound(Exception):
|
||||
super(InstanceNotFound, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
class RegisterQueue:
|
||||
def __init__(self, queuename, controller, instance_percent, inst_min, hostname_list):
|
||||
self.instance_not_found_err = None
|
||||
self.queuename = queuename
|
||||
self.controller = controller
|
||||
self.instance_percent = instance_percent
|
||||
self.instance_min = inst_min
|
||||
self.hostname_list = hostname_list
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--queuename', dest='queuename', type=str,
|
||||
help='Queue to create/update')
|
||||
parser.add_argument('--hostnames', dest='hostnames', type=str,
|
||||
help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)')
|
||||
parser.add_argument('--controller', dest='controller', type=str,
|
||||
default='', help='The controlling group (makes this an isolated group)')
|
||||
parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0,
|
||||
help='The percentage of active instances that will be assigned to this group'),
|
||||
parser.add_argument('--instance_minimum', dest='instance_minimum', type=int, default=0,
|
||||
help='The minimum number of instance that will be retained for this group from available instances')
|
||||
|
||||
|
||||
def get_create_update_instance_group(self, queuename, instance_percent, instance_min):
|
||||
def get_create_update_instance_group(self):
|
||||
created = False
|
||||
changed = False
|
||||
|
||||
(ig, created) = InstanceGroup.objects.get_or_create(name=queuename)
|
||||
if ig.policy_instance_percentage != instance_percent:
|
||||
ig.policy_instance_percentage = instance_percent
|
||||
(ig, created) = InstanceGroup.objects.get_or_create(name=self.queuename)
|
||||
if ig.policy_instance_percentage != self.instance_percent:
|
||||
ig.policy_instance_percentage = self.instance_percent
|
||||
changed = True
|
||||
if ig.policy_instance_minimum != instance_min:
|
||||
ig.policy_instance_minimum = instance_min
|
||||
if ig.policy_instance_minimum != self.instance_min:
|
||||
ig.policy_instance_minimum = self.instance_min
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
@@ -48,12 +41,12 @@ class Command(BaseCommand):
|
||||
|
||||
return (ig, created, changed)
|
||||
|
||||
def update_instance_group_controller(self, ig, controller):
|
||||
def update_instance_group_controller(self, ig):
|
||||
changed = False
|
||||
control_ig = None
|
||||
|
||||
if controller:
|
||||
control_ig = InstanceGroup.objects.filter(name=controller).first()
|
||||
if self.controller:
|
||||
control_ig = InstanceGroup.objects.filter(name=self.controller).first()
|
||||
|
||||
if control_ig and ig.controller_id != control_ig.pk:
|
||||
ig.controller = control_ig
|
||||
@@ -62,10 +55,10 @@ class Command(BaseCommand):
|
||||
|
||||
return (control_ig, changed)
|
||||
|
||||
def add_instances_to_group(self, ig, hostname_list):
|
||||
def add_instances_to_group(self, ig):
|
||||
changed = False
|
||||
|
||||
instance_list_unique = set([x.strip() for x in hostname_list if x])
|
||||
instance_list_unique = set([x.strip() for x in self.hostname_list if x])
|
||||
instances = []
|
||||
for inst_name in instance_list_unique:
|
||||
instance = Instance.objects.filter(hostname=inst_name)
|
||||
@@ -86,43 +79,61 @@ class Command(BaseCommand):
|
||||
|
||||
return (instances, changed)
|
||||
|
||||
def handle(self, **options):
|
||||
instance_not_found_err = None
|
||||
queuename = options.get('queuename')
|
||||
if not queuename:
|
||||
raise CommandError("Specify `--queuename` to use this command.")
|
||||
ctrl = options.get('controller')
|
||||
inst_per = options.get('instance_percent')
|
||||
inst_min = options.get('instance_minimum')
|
||||
hostname_list = []
|
||||
if options.get('hostnames'):
|
||||
hostname_list = options.get('hostnames').split(",")
|
||||
|
||||
def register(self):
|
||||
with advisory_lock('cluster_policy_lock'):
|
||||
with transaction.atomic():
|
||||
changed2 = False
|
||||
changed3 = False
|
||||
(ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
|
||||
(ig, created, changed1) = self.get_create_update_instance_group()
|
||||
if created:
|
||||
print("Creating instance group {}".format(ig.name))
|
||||
elif not created:
|
||||
print("Instance Group already registered {}".format(ig.name))
|
||||
|
||||
if ctrl:
|
||||
(ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
|
||||
if self.controller:
|
||||
(ig_ctrl, changed2) = self.update_instance_group_controller(ig)
|
||||
if changed2:
|
||||
print("Set controller group {} on {}.".format(ctrl, queuename))
|
||||
print("Set controller group {} on {}.".format(self.controller, self.queuename))
|
||||
|
||||
try:
|
||||
(instances, changed3) = self.add_instances_to_group(ig, hostname_list)
|
||||
(instances, changed3) = self.add_instances_to_group(ig)
|
||||
for i in instances:
|
||||
print("Added instance {} to {}".format(i.hostname, ig.name))
|
||||
except InstanceNotFound as e:
|
||||
instance_not_found_err = e
|
||||
self.instance_not_found_err = e
|
||||
|
||||
if any([changed1, changed2, changed3]):
|
||||
print('(changed: True)')
|
||||
|
||||
if instance_not_found_err:
|
||||
print(instance_not_found_err.message)
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--queuename', dest='queuename', type=str,
|
||||
help='Queue to create/update')
|
||||
parser.add_argument('--hostnames', dest='hostnames', type=str,
|
||||
help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)')
|
||||
parser.add_argument('--controller', dest='controller', type=str,
|
||||
default='', help='The controlling group (makes this an isolated group)')
|
||||
parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0,
|
||||
help='The percentage of active instances that will be assigned to this group'),
|
||||
parser.add_argument('--instance_minimum', dest='instance_minimum', type=int, default=0,
|
||||
help='The minimum number of instance that will be retained for this group from available instances')
|
||||
|
||||
|
||||
def handle(self, **options):
|
||||
queuename = options.get('queuename')
|
||||
if not queuename:
|
||||
raise CommandError("Specify `--queuename` to use this command.")
|
||||
ctrl = options.get('controller')
|
||||
inst_per = options.get('instance_percent')
|
||||
instance_min = options.get('instance_minimum')
|
||||
hostname_list = []
|
||||
if options.get('hostnames'):
|
||||
hostname_list = options.get('hostnames').split(",")
|
||||
|
||||
rq = RegisterQueue(queuename, ctrl, inst_per, instance_min, hostname_list)
|
||||
rq.register()
|
||||
if rq.instance_not_found_err:
|
||||
print(rq.instance_not_found_err.message)
|
||||
sys.exit(1)
|
||||
|
||||
@@ -44,7 +44,7 @@ class Command(BaseCommand):
|
||||
|
||||
# It's important to close these because we're _about_ to fork, and we
|
||||
# don't want the forked processes to inherit the open sockets
|
||||
# for the DB and memcached connections (that way lies race conditions)
|
||||
# for the DB and cache connections (that way lies race conditions)
|
||||
django_connection.close()
|
||||
django_cache.close()
|
||||
|
||||
|
||||
@@ -5,17 +5,19 @@ import asyncio
|
||||
import datetime
|
||||
import re
|
||||
import redis
|
||||
import time
|
||||
from datetime import datetime as dt
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection
|
||||
from django.db.models import Q
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
BroadcastWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.wsbroadcast import BroadcastWebsocketManager
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
@@ -91,6 +93,36 @@ class Command(BaseCommand):
|
||||
return host_stats
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
# it's necessary to delay this import in case
|
||||
# database migrations are still running
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
executor = MigrationExecutor(connection)
|
||||
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
|
||||
registered = False
|
||||
|
||||
if not migrating:
|
||||
try:
|
||||
Instance.objects.me()
|
||||
registered = True
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
if migrating or not registered:
|
||||
# In containerized deployments, migrations happen in the task container,
|
||||
# and the services running there don't start until migrations are
|
||||
# finished.
|
||||
# *This* service runs in the web container, and it's possible that it can
|
||||
# start _before_ migrations are finished, thus causing issues with the ORM
|
||||
# queries it makes (specifically, conf.settings queries).
|
||||
# This block is meant to serve as a sort of bail-out for the situation
|
||||
# where migrations aren't yet finished (similar to the migration
|
||||
# detection middleware that the uwsgi processes have) or when instance
|
||||
# registration isn't done yet
|
||||
logger.error('AWX is currently installing/upgrading. Trying again in 5s...')
|
||||
time.sleep(5)
|
||||
return
|
||||
|
||||
if options.get('status'):
|
||||
try:
|
||||
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
|
||||
@@ -107,6 +139,7 @@ class Command(BaseCommand):
|
||||
break
|
||||
else:
|
||||
data[family.name] = family.samples[0].value
|
||||
|
||||
me = Instance.objects.me()
|
||||
hostnames = [i.hostname for i in Instance.objects.exclude(Q(hostname=me.hostname) | Q(rampart_groups__controller__isnull=False))]
|
||||
|
||||
|
||||
@@ -44,20 +44,6 @@ class HostManager(models.Manager):
|
||||
inventory_sources__source='tower'
|
||||
).filter(inventory__organization=org_id).values('name').distinct().count()
|
||||
|
||||
def active_counts_by_org(self):
|
||||
"""Return the counts of active, unique hosts for each organization.
|
||||
Construction of query involves:
|
||||
- remove any ordering specified in model's Meta
|
||||
- Exclude hosts sourced from another Tower
|
||||
- Consider only hosts where the canonical inventory is owned by each organization
|
||||
- Restrict the query to only count distinct names
|
||||
- Return the counts
|
||||
"""
|
||||
return self.order_by().exclude(
|
||||
inventory_sources__source='tower'
|
||||
).values('inventory__organization').annotate(
|
||||
inventory__organization__count=models.Count('name', distinct=True))
|
||||
|
||||
def get_queryset(self):
|
||||
"""When the parent instance of the host query set has a `kind=smart` and a `host_filter`
|
||||
set. Use the `host_filter` to generate the queryset for the hosts.
|
||||
@@ -149,8 +135,11 @@ class InstanceManager(models.Manager):
|
||||
|
||||
def get_or_register(self):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
pod_ip = os.environ.get('MY_POD_IP')
|
||||
return self.register(ip_address=pod_ip)
|
||||
registered = self.register(ip_address=pod_ip)
|
||||
RegisterQueue('tower', None, 100, 0, []).register()
|
||||
return registered
|
||||
else:
|
||||
return (False, self.me())
|
||||
|
||||
|
||||
awx/main/migrations/0115_v370_schedule_set_null.py (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Generated by Django 2.2.11 on 2020-05-04 02:26
|
||||
|
||||
import awx.main.utils.polymorphic
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0114_v370_remove_deprecated_manual_inventory_sources'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='schedule',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, to='main.Schedule'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='next_schedule',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplate_as_next_schedule+', to='main.Schedule'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,34 @@
|
||||
# Generated by Django 2.2.11 on 2020-05-19 02:27
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def remove_hipchat_notifications(apps, schema_editor):
|
||||
'''
|
||||
HipChat notifications are no longer in service, remove any that are found.
|
||||
'''
|
||||
Notification = apps.get_model('main', 'Notification')
|
||||
Notification.objects.filter(notification_type='hipchat').delete()
|
||||
NotificationTemplate = apps.get_model('main', 'NotificationTemplate')
|
||||
NotificationTemplate.objects.filter(notification_type='hipchat').delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0115_v370_schedule_set_null'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(remove_hipchat_notifications),
|
||||
migrations.AlterField(
|
||||
model_name='notification',
|
||||
name='notification_type',
|
||||
field=models.CharField(choices=[('email', 'Email'), ('grafana', 'Grafana'), ('irc', 'IRC'), ('mattermost', 'Mattermost'), ('pagerduty', 'Pagerduty'), ('rocketchat', 'Rocket.Chat'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('webhook', 'Webhook')], max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='notificationtemplate',
|
||||
name='notification_type',
|
||||
field=models.CharField(choices=[('email', 'Email'), ('grafana', 'Grafana'), ('irc', 'IRC'), ('mattermost', 'Mattermost'), ('pagerduty', 'Pagerduty'), ('rocketchat', 'Rocket.Chat'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('webhook', 'Webhook')], max_length=32),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0117_v400_remove_cloudforms_inventory.py (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
# Generated by Django 2.2.11 on 2020-05-01 13:25
|
||||
|
||||
from django.db import migrations, models
|
||||
from awx.main.migrations._inventory_source import create_scm_script_substitute
|
||||
|
||||
|
||||
def convert_cloudforms_to_scm(apps, schema_editor):
|
||||
create_scm_script_substitute(apps, 'cloudforms')
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0116_v400_remove_hipchat_notifications'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(convert_cloudforms_to_scm),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
|
||||
),
|
||||
]
|
||||
@@ -1,6 +1,9 @@
|
||||
import logging
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.main.utils.common import parse_yaml_or_json
|
||||
|
||||
@@ -87,3 +90,44 @@ def back_out_new_instance_id(apps, source, new_id):
|
||||
modified_ct, source
|
||||
))
|
||||
|
||||
|
||||
def create_scm_script_substitute(apps, source):
|
||||
"""Only applies for cloudforms in practice, but written generally.
|
||||
Given a source type, this will replace all inventory sources of that type
|
||||
with SCM inventory sources that source the script from Ansible core
|
||||
"""
|
||||
# the revision in the Ansible 2.9 stable branch this project will start out as
|
||||
# it can still be updated manually later (but staying within 2.9 branch), if desired
|
||||
ansible_rev = '6f83b9aff42331e15c55a171de0a8b001208c18c'
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
Project = apps.get_model('main', 'Project')
|
||||
if not InventorySource.objects.filter(source=source).exists():
|
||||
logger.debug('No sources of type {} to migrate'.format(source))
|
||||
return
|
||||
proj_name = 'Replacement project for {} type sources - {}'.format(source, uuid4())
|
||||
right_now = now()
|
||||
project = Project.objects.create(
|
||||
name=proj_name,
|
||||
created=right_now,
|
||||
modified=right_now,
|
||||
description='Created by migration',
|
||||
polymorphic_ctype=ContentType.objects.get(model='project'),
|
||||
# project-specific fields
|
||||
scm_type='git',
|
||||
scm_url='https://github.com/ansible/ansible.git',
|
||||
scm_branch='stable-2.9',
|
||||
scm_revision=ansible_rev
|
||||
)
|
||||
ct = 0
|
||||
for inv_src in InventorySource.objects.filter(source=source).iterator():
|
||||
inv_src.source = 'scm'
|
||||
inv_src.source_project = project
|
||||
inv_src.source_path = 'contrib/inventory/{}.py'.format(source)
|
||||
inv_src.scm_last_revision = ansible_rev
|
||||
inv_src.save(update_fields=['source', 'source_project', 'source_path', 'scm_last_revision'])
|
||||
logger.debug('Changed inventory source {} to scm type'.format(inv_src.pk))
|
||||
ct += 1
|
||||
if ct:
|
||||
logger.info('Changed total of {} inventory sources from {} type to scm'.format(ct, source))
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ from crum import get_current_user
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import encrypt_field, parse_yaml_or_json
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
|
||||
__all__ = ['prevent_search', 'VarsDictProperty', 'BaseModel', 'CreatedModifiedModel',
|
||||
'PasswordFieldsModel', 'PrimordialModel', 'CommonModel',
|
||||
@@ -50,7 +51,7 @@ PROJECT_UPDATE_JOB_TYPE_CHOICES = [
|
||||
(PERM_INVENTORY_CHECK, _('Check')),
|
||||
]
|
||||
|
||||
CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'rhv', 'custom', 'satellite6', 'cloudforms', 'scm', 'tower',]
|
||||
CLOUD_INVENTORY_SOURCES = list(CLOUD_PROVIDERS) + ['scm', 'custom']
|
||||
|
||||
VERBOSITY_CHOICES = [
|
||||
(0, '0 (Normal)'),
|
||||
|
||||
@@ -7,7 +7,7 @@ from collections import defaultdict
|
||||
from django.db import models, DatabaseError, connection
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc
|
||||
from django.utils.timezone import utc, now
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.encoding import force_text
|
||||
|
||||
@@ -338,7 +338,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
|
||||
if isinstance(self, JobEvent):
|
||||
hostnames = self._hostnames()
|
||||
self._update_host_summary_from_stats(hostnames)
|
||||
self._update_host_summary_from_stats(set(hostnames))
|
||||
if self.job.inventory:
|
||||
try:
|
||||
self.job.inventory.update_computed_fields()
|
||||
@@ -407,11 +407,14 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
host_map = kwargs.pop('host_map', {})
|
||||
|
||||
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
||||
workflow_job_id = kwargs.pop('workflow_job_id', None)
|
||||
event = cls(**kwargs)
|
||||
if workflow_job_id:
|
||||
setattr(event, 'workflow_job_id', workflow_job_id)
|
||||
setattr(event, 'host_map', host_map)
|
||||
event._update_from_event_data()
|
||||
return event
|
||||
|
||||
@@ -484,29 +487,47 @@ class JobEvent(BasePlaybookEvent):
|
||||
if not self.job or not self.job.inventory:
|
||||
logger.info('Event {} missing job or inventory, host summaries not updated'.format(self.pk))
|
||||
return
|
||||
qs = self.job.inventory.hosts.filter(name__in=hostnames)
|
||||
job = self.job
|
||||
|
||||
from awx.main.models import Host, JobHostSummary # circular import
|
||||
all_hosts = Host.objects.filter(
|
||||
pk__in=self.host_map.values()
|
||||
).only('id')
|
||||
existing_host_ids = set(h.id for h in all_hosts)
|
||||
|
||||
summaries = dict()
|
||||
for host in hostnames:
|
||||
host_id = self.host_map.get(host, None)
|
||||
if host_id not in existing_host_ids:
|
||||
host_id = None
|
||||
host_stats = {}
|
||||
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
|
||||
try:
|
||||
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
|
||||
except AttributeError: # in case event_data[stat] isn't a dict.
|
||||
pass
|
||||
if qs.filter(name=host).exists():
|
||||
host_actual = qs.get(name=host)
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
|
||||
else:
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
|
||||
summary = JobHostSummary(
|
||||
created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats
|
||||
)
|
||||
summary.failed = bool(summary.dark or summary.failures)
|
||||
summaries[(host_id, host)] = summary
|
||||
|
||||
JobHostSummary.objects.bulk_create(summaries.values())
|
||||
|
||||
# update the last_job_id and last_job_host_summary_id
|
||||
# in single queries
|
||||
host_mapping = dict(
|
||||
(summary['host_id'], summary['id'])
|
||||
for summary in JobHostSummary.objects.filter(job_id=job.id).values('id', 'host_id')
|
||||
)
|
||||
for h in all_hosts:
|
||||
# if the hostname *shows up* in the playbook_on_stats event
|
||||
if h.name in hostnames:
|
||||
h.last_job_id = job.id
|
||||
if h.id in host_mapping:
|
||||
h.last_job_host_summary_id = host_mapping[h.id]
|
||||
Host.objects.bulk_update(all_hosts, ['last_job_id', 'last_job_host_summary_id'])
|
||||
|
||||
if not created:
|
||||
update_fields = []
|
||||
for stat, value in host_stats.items():
|
||||
if getattr(host_summary, stat) != value:
|
||||
setattr(host_summary, stat, value)
|
||||
update_fields.append(stat)
|
||||
if update_fields:
|
||||
host_summary.save(update_fields=update_fields)
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
|
||||
@@ -4,16 +4,13 @@
|
||||
# Python
|
||||
import datetime
|
||||
import time
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import copy
|
||||
import os.path
|
||||
from urllib.parse import urljoin
|
||||
import yaml
|
||||
import configparser
|
||||
import tempfile
|
||||
from io import StringIO
|
||||
from distutils.version import LooseVersion as Version
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
@@ -59,7 +56,7 @@ from awx.main.models.notifications import (
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.models.credential.injectors import _openstack_data
|
||||
from awx.main.utils import _inventory_updates, region_sorting, get_licenser
|
||||
from awx.main.utils import _inventory_updates, region_sorting
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
|
||||
|
||||
@@ -828,7 +825,6 @@ class InventorySourceOptions(BaseModel):
|
||||
('azure_rm', _('Microsoft Azure Resource Manager')),
|
||||
('vmware', _('VMware vCenter')),
|
||||
('satellite6', _('Red Hat Satellite 6')),
|
||||
('cloudforms', _('Red Hat CloudForms')),
|
||||
('openstack', _('OpenStack')),
|
||||
('rhv', _('Red Hat Virtualization')),
|
||||
('tower', _('Ansible Tower')),
|
||||
@@ -1068,11 +1064,6 @@ class InventorySourceOptions(BaseModel):
|
||||
"""Red Hat Satellite 6 region choices (not implemented)"""
|
||||
return [('all', 'All')]
|
||||
|
||||
@classmethod
|
||||
def get_cloudforms_region_choices(self):
|
||||
"""Red Hat CloudForms region choices (not implemented)"""
|
||||
return [('all', 'All')]
|
||||
|
||||
@classmethod
|
||||
def get_rhv_region_choices(self):
|
||||
"""No region supprt"""
|
||||
@@ -1601,19 +1592,12 @@ class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):
|
||||
return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
|
||||
# TODO: move to awx/main/models/inventory/injectors.py
|
||||
class PluginFileInjector(object):
|
||||
# if plugin_name is not given, no inventory plugin functionality exists
|
||||
plugin_name = None # Ansible core name used to reference plugin
|
||||
# if initial_version is None, but we have plugin name, injection logic exists,
|
||||
# but it is vaporware, meaning we do not use it for some reason in Ansible core
|
||||
initial_version = None # at what version do we switch to the plugin
|
||||
ini_env_reference = None # env var name that points to old ini config file
|
||||
# base injector should be one of None, "managed", or "template"
|
||||
# this dictates which logic to borrow from playbook injectors
|
||||
base_injector = None
|
||||
# every source should have collection, but these are set here
|
||||
# so that a source without a collection will have null values
|
||||
# every source should have collection, these are for the collection name
|
||||
namespace = None
|
||||
collection = None
|
||||
collection_migration = '2.9' # Starting with this version, we use collections
|
||||
@@ -1629,12 +1613,6 @@ class PluginFileInjector(object):
|
||||
"""
|
||||
return '{0}.yml'.format(self.plugin_name)
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
"""Name of the script located in awx/plugins/inventory
|
||||
"""
|
||||
return '{0}.py'.format(self.__class__.__name__)
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
"""Default implementation of inventory plugin file contents.
|
||||
There are some valid cases when all parameters can be obtained from
|
||||
@@ -1643,10 +1621,7 @@ class PluginFileInjector(object):
|
||||
"""
|
||||
if self.plugin_name is None:
|
||||
raise NotImplementedError('At minimum the plugin name is needed for inventory plugin use.')
|
||||
if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
|
||||
proper_name = f'{self.namespace}.{self.collection}.{self.plugin_name}'
|
||||
else:
|
||||
proper_name = self.plugin_name
|
||||
proper_name = f'{self.namespace}.{self.collection}.{self.plugin_name}'
|
||||
return {'plugin': proper_name}
|
||||
|
||||
def inventory_contents(self, inventory_update, private_data_dir):
|
||||
@@ -1658,17 +1633,8 @@ class PluginFileInjector(object):
|
||||
width=1000
|
||||
)
|
||||
|
||||
def should_use_plugin(self):
|
||||
return bool(
|
||||
self.plugin_name and self.initial_version and
|
||||
Version(self.ansible_version) >= Version(self.initial_version)
|
||||
)
|
||||
|
||||
def build_env(self, inventory_update, env, private_data_dir, private_data_files):
|
||||
if self.should_use_plugin():
|
||||
injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
else:
|
||||
injector_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
env.update(injector_env)
|
||||
# Preserves current behavior for Ansible change in default planned for 2.10
|
||||
env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
|
||||
@@ -1676,7 +1642,6 @@ class PluginFileInjector(object):
|
||||
|
||||
def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
"""By default, we will apply the standard managed_by_tower injectors
|
||||
for the script injection
|
||||
"""
|
||||
injected_env = {}
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
@@ -1703,52 +1668,18 @@ class PluginFileInjector(object):
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
|
||||
if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.INVENTORY_COLLECTIONS_ROOT
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
|
||||
return env
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
injected_env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
|
||||
|
||||
# Put in env var reference to private ini data files, if relevant
|
||||
if self.ini_env_reference:
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
cred_data = private_data_files['credentials']
|
||||
injected_env[self.ini_env_reference] = cred_data[credential]
|
||||
|
||||
return injected_env
|
||||
|
||||
def build_private_data(self, inventory_update, private_data_dir):
|
||||
if self.should_use_plugin():
|
||||
return self.build_plugin_private_data(inventory_update, private_data_dir)
|
||||
else:
|
||||
return self.build_script_private_data(inventory_update, private_data_dir)
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
return None
|
||||
return self.build_plugin_private_data(inventory_update, private_data_dir)
|
||||
|
||||
def build_plugin_private_data(self, inventory_update, private_data_dir):
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def dump_cp(cp, credential):
|
||||
"""Dump config parser data and return it as a string.
|
||||
Helper method intended for use by build_script_private_data
|
||||
"""
|
||||
if cp.sections():
|
||||
f = StringIO()
|
||||
cp.write(f)
|
||||
private_data = {'credentials': {}}
|
||||
private_data['credentials'][credential] = f.getvalue()
|
||||
return private_data
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
class azure_rm(PluginFileInjector):
|
||||
plugin_name = 'azure_rm'
|
||||
initial_version = '2.8' # Driven by unsafe group names issue, hostvars, host names
|
||||
ini_env_reference = 'AZURE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'azure'
|
||||
collection = 'azcollection'
|
||||
@@ -1859,32 +1790,9 @@ class azure_rm(PluginFileInjector):
|
||||
ret['exclude_host_filters'].append("location not in {}".format(repr(python_regions)))
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
section = 'azure'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'include_powerstate', 'yes')
|
||||
cp.set(section, 'group_by_resource_group', 'yes')
|
||||
cp.set(section, 'group_by_location', 'yes')
|
||||
cp.set(section, 'group_by_tag', 'yes')
|
||||
|
||||
if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
|
||||
cp.set(
|
||||
section, 'locations',
|
||||
','.join([x.strip() for x in inventory_update.source_regions.split(',')])
|
||||
)
|
||||
|
||||
azure_rm_opts = dict(inventory_update.source_vars_dict.items())
|
||||
for k, v in azure_rm_opts.items():
|
||||
cp.set(section, k, str(v))
|
||||
return self.dump_cp(cp, inventory_update.get_cloud_credential())
|
||||
|
||||
|
||||
class ec2(PluginFileInjector):
|
||||
plugin_name = 'aws_ec2'
|
||||
# blocked by https://github.com/ansible/ansible/issues/54059
|
||||
initial_version = '2.9' # Driven by unsafe group names issue, parent_group templating, hostvars
|
||||
ini_env_reference = 'EC2_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'amazon'
|
||||
collection = 'aws'
|
||||
@@ -2107,46 +2015,9 @@ class ec2(PluginFileInjector):
|
||||
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
# Build custom ec2.ini for ec2 inventory script to use.
|
||||
section = 'ec2'
|
||||
cp.add_section(section)
|
||||
ec2_opts = dict(inventory_update.source_vars_dict.items())
|
||||
regions = inventory_update.source_regions or 'all'
|
||||
regions = ','.join([x.strip() for x in regions.split(',')])
|
||||
regions_blacklist = ','.join(settings.EC2_REGIONS_BLACKLIST)
|
||||
ec2_opts['regions'] = regions
|
||||
ec2_opts.setdefault('regions_exclude', regions_blacklist)
|
||||
ec2_opts.setdefault('destination_variable', 'public_dns_name')
|
||||
ec2_opts.setdefault('vpc_destination_variable', 'ip_address')
|
||||
ec2_opts.setdefault('route53', 'False')
|
||||
ec2_opts.setdefault('all_instances', 'True')
|
||||
ec2_opts.setdefault('all_rds_instances', 'False')
|
||||
ec2_opts.setdefault('include_rds_clusters', 'False')
|
||||
ec2_opts.setdefault('rds', 'False')
|
||||
ec2_opts.setdefault('nested_groups', 'True')
|
||||
ec2_opts.setdefault('elasticache', 'False')
|
||||
ec2_opts.setdefault('stack_filters', 'False')
|
||||
if inventory_update.instance_filters:
|
||||
ec2_opts.setdefault('instance_filters', inventory_update.instance_filters)
|
||||
group_by = [x.strip().lower() for x in inventory_update.group_by.split(',') if x.strip()]
|
||||
for choice in inventory_update.get_ec2_group_by_choices():
|
||||
value = bool((group_by and choice[0] in group_by) or (not group_by and choice[0] != 'instance_id'))
|
||||
ec2_opts.setdefault('group_by_%s' % choice[0], str(value))
|
||||
if 'cache_path' not in ec2_opts:
|
||||
cache_path = tempfile.mkdtemp(prefix='ec2_cache', dir=private_data_dir)
|
||||
ec2_opts['cache_path'] = cache_path
|
||||
ec2_opts.setdefault('cache_max_age', '300')
|
||||
for k, v in ec2_opts.items():
|
||||
cp.set(section, k, str(v))
|
||||
return self.dump_cp(cp, inventory_update.get_cloud_credential())
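
# Sketch of the group_by toggle a few lines above (illustrative values; the
# choices come from inventory_update.get_ec2_group_by_choices()):
#
#     group_by = ['region', 'vpc_id']   # user selected specific groupings
#     # -> group_by_region = True, group_by_ami_id = False, ...
#
#     group_by = []                     # nothing selected
#     # -> every choice except 'instance_id' defaults to True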
|
||||
|
||||
|
||||
class gce(PluginFileInjector):
|
||||
plugin_name = 'gcp_compute'
|
||||
initial_version = '2.8' # Driven by unsafe group names issue, hostvars
|
||||
ini_env_reference = 'GCE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'google'
|
||||
collection = 'cloud'
|
||||
@@ -2157,17 +2028,6 @@ class gce(PluginFileInjector):
|
||||
ret['ANSIBLE_JINJA2_NATIVE'] = str(True)
|
||||
return ret
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(gce, self).get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
cred = inventory_update.get_cloud_credential()
|
||||
# these environment keys are unique to the script operation, and are not
|
||||
# concepts in the modern inventory plugin or gce Ansible module
|
||||
# email and project are redundant with the creds file
|
||||
env['GCE_EMAIL'] = cred.get_input('username', default='')
|
||||
env['GCE_PROJECT'] = cred.get_input('project', default='')
|
||||
env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else '' # noqa
|
||||
return env
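
# Rough shape of the script-only variables set above (values illustrative):
#
#     GCE_EMAIL   -> service account username taken from the credential
#     GCE_PROJECT -> project taken from the credential
#     GCE_ZONE    -> e.g. 'us-east4-a,us-west1-b', or '' when source_regions is 'all'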
|
||||
|
||||
def _compat_compose_vars(self):
|
||||
# missing: gce_image, gce_uuid
|
||||
# https://github.com/ansible/ansible/issues/51884
|
||||
@@ -2240,28 +2100,13 @@ class gce(PluginFileInjector):
|
||||
ret['zones'] = inventory_update.source_regions.split(',')
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
# by default, the GCE inventory source caches results on disk for
|
||||
# 5 minutes; disable this behavior
|
||||
cp.add_section('cache')
|
||||
cp.set('cache', 'cache_max_age', '0')
|
||||
return self.dump_cp(cp, inventory_update.get_cloud_credential())
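
# The ini content handed to dump_cp here is just two lines:
#
#     [cache]
#     cache_max_age = 0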
|
||||
|
||||
|
||||
class vmware(PluginFileInjector):
|
||||
plugin_name = 'vmware_vm_inventory'
|
||||
initial_version = '2.9'
|
||||
ini_env_reference = 'VMWARE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'community'
|
||||
collection = 'vmware'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'vmware_inventory.py' # exception
|
||||
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(vmware, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['strict'] = False
|
||||
@@ -2274,7 +2119,7 @@ class vmware(PluginFileInjector):
|
||||
"customValue", # optional
|
||||
"datastore",
|
||||
"effectiveRole",
|
||||
"guestHeartbeatStatus", # optonal
|
||||
"guestHeartbeatStatus", # optional
|
||||
"layout", # optional
|
||||
"layoutEx", # optional
|
||||
"name",
|
||||
@@ -2286,7 +2131,6 @@ class vmware(PluginFileInjector):
|
||||
"resourcePool",
|
||||
"rootSnapshot",
|
||||
"snapshot", # optional
|
||||
"tag",
|
||||
"triggeredAlarmState",
|
||||
"value"
|
||||
]
|
||||
@@ -2355,7 +2199,7 @@ class vmware(PluginFileInjector):
|
||||
})
|
||||
else:
|
||||
# default groups from script
|
||||
- for entry in ('guest.guestId', '"templates" if config.template else "guests"'):
|
||||
+ for entry in ('config.guestId', '"templates" if config.template else "guests"'):
|
||||
ret['keyed_groups'].append({
|
||||
'prefix': '', 'separator': '',
|
||||
'key': entry
|
||||
@@ -2363,57 +2207,16 @@ class vmware(PluginFileInjector):
|
||||
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
# Allow custom options to vmware inventory script.
|
||||
section = 'vmware'
|
||||
cp.add_section(section)
|
||||
cp.set('vmware', 'cache_max_age', '0')
|
||||
cp.set('vmware', 'validate_certs', str(settings.VMWARE_VALIDATE_CERTS))
|
||||
cp.set('vmware', 'username', credential.get_input('username', default=''))
|
||||
cp.set('vmware', 'password', credential.get_input('password', default=''))
|
||||
cp.set('vmware', 'server', credential.get_input('host', default=''))
|
||||
|
||||
vmware_opts = dict(inventory_update.source_vars_dict.items())
|
||||
if inventory_update.instance_filters:
|
||||
vmware_opts.setdefault('host_filters', inventory_update.instance_filters)
|
||||
if inventory_update.group_by:
|
||||
vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)
|
||||
|
||||
for k, v in vmware_opts.items():
|
||||
cp.set(section, k, str(v))
|
||||
|
||||
return self.dump_cp(cp, credential)
|
||||
|
||||
|
||||
class openstack(PluginFileInjector):
|
||||
ini_env_reference = 'OS_CLIENT_CONFIG_FILE'
|
||||
plugin_name = 'openstack'
|
||||
# minimum version of 2.7.8 may be theoretically possible
|
||||
initial_version = '2.8' # Driven by consistency with other sources
|
||||
namespace = 'openstack'
|
||||
collection = 'cloud'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'openstack_inventory.py' # exception
|
||||
|
||||
- def _get_clouds_dict(self, inventory_update, cred, private_data_dir, mk_cache=True):
|
||||
+ def _get_clouds_dict(self, inventory_update, cred, private_data_dir):
|
||||
openstack_data = _openstack_data(cred)
|
||||
|
||||
openstack_data['clouds']['devstack']['private'] = inventory_update.source_vars_dict.get('private', True)
|
||||
if mk_cache:
|
||||
# Retrieve cache path from inventory update vars if available,
|
||||
# otherwise create a temporary cache path only for this update.
|
||||
cache = inventory_update.source_vars_dict.get('cache', {})
|
||||
if not isinstance(cache, dict):
|
||||
cache = {}
|
||||
if not cache.get('path', ''):
|
||||
cache_path = tempfile.mkdtemp(prefix='openstack_cache', dir=private_data_dir)
|
||||
cache['path'] = cache_path
|
||||
openstack_data['cache'] = cache
|
||||
ansible_variables = {
|
||||
'use_hostnames': True,
|
||||
'expand_hostvars': False,
|
||||
@@ -2430,27 +2233,16 @@ class openstack(PluginFileInjector):
|
||||
openstack_data['ansible'] = ansible_variables
|
||||
return openstack_data
|
||||
|
||||
- def build_script_private_data(self, inventory_update, private_data_dir, mk_cache=True):
|
||||
+ def build_plugin_private_data(self, inventory_update, private_data_dir):
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
private_data = {'credentials': {}}
|
||||
|
||||
- openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir, mk_cache=mk_cache)
|
||||
+ openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir)
|
||||
private_data['credentials'][credential] = yaml.safe_dump(
|
||||
openstack_data, default_flow_style=False, allow_unicode=True
|
||||
)
|
||||
return private_data
|
||||
|
||||
def build_plugin_private_data(self, inventory_update, private_data_dir):
|
||||
# Credentials can be passed in the same way as the script did
|
||||
# but do not create the tmp cache file
|
||||
return self.build_script_private_data(inventory_update, private_data_dir, mk_cache=False)
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
script_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
env.update(script_env)
|
||||
return env
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
def use_host_name_for_name(a_bool_maybe):
|
||||
if not isinstance(a_bool_maybe, bool):
|
||||
@@ -2485,84 +2277,48 @@ class openstack(PluginFileInjector):
|
||||
ret['inventory_hostname'] = use_host_name_for_name(source_vars['use_hostnames'])
|
||||
return ret
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
cred_data = private_data_files['credentials']
|
||||
env['OS_CLIENT_CONFIG_FILE'] = cred_data[credential]
|
||||
return env
|
||||
|
||||
|
||||
class rhv(PluginFileInjector):
|
||||
"""ovirt uses the custom credential templating, and that is all
|
||||
"""
|
||||
plugin_name = 'ovirt'
|
||||
base_injector = 'template'
|
||||
initial_version = '2.9'
|
||||
namespace = 'ovirt'
|
||||
- collection = 'ovirt_collection'
|
||||
+ collection = 'ovirt'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'ovirt4.py' # exception
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(rhv, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['ovirt_insecure'] = False # Default changed from script
|
||||
# TODO: process strict option upstream
|
||||
ret['compose'] = {
|
||||
'ansible_host': '(devices.values() | list)[0][0] if devices else None'
|
||||
}
|
||||
ret['keyed_groups'] = []
|
||||
for key in ('cluster', 'status'):
|
||||
ret['keyed_groups'].append({'prefix': key, 'separator': '_', 'key': key})
|
||||
ret['keyed_groups'].append({'prefix': 'tag', 'separator': '_', 'key': 'tags'})
|
||||
ret['ovirt_hostname_preference'] = ['name', 'fqdn']
|
||||
source_vars = inventory_update.source_vars_dict
|
||||
for key, value in source_vars.items():
|
||||
if key == 'plugin':
|
||||
continue
|
||||
ret[key] = value
|
||||
return ret
|
||||
|
||||
|
||||
class satellite6(PluginFileInjector):
|
||||
plugin_name = 'foreman'
|
||||
ini_env_reference = 'FOREMAN_INI_PATH'
|
||||
initial_version = '2.9'
|
||||
# No base injector, because this does not work in playbooks. Bug??
|
||||
namespace = 'theforeman'
|
||||
collection = 'foreman'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'foreman.py' # exception
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
section = 'foreman'
|
||||
cp.add_section(section)
|
||||
|
||||
group_patterns = '[]'
|
||||
group_prefix = 'foreman_'
|
||||
want_hostcollections = 'False'
|
||||
want_ansible_ssh_host = 'False'
|
||||
rich_params = 'False'
|
||||
want_facts = 'True'
|
||||
foreman_opts = dict(inventory_update.source_vars_dict.items())
|
||||
foreman_opts.setdefault('ssl_verify', 'False')
|
||||
for k, v in foreman_opts.items():
|
||||
if k == 'satellite6_group_patterns' and isinstance(v, str):
|
||||
group_patterns = v
|
||||
elif k == 'satellite6_group_prefix' and isinstance(v, str):
|
||||
group_prefix = v
|
||||
elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
|
||||
want_hostcollections = v
|
||||
elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
want_ansible_ssh_host = v
|
||||
elif k == 'satellite6_rich_params' and isinstance(v, bool):
|
||||
rich_params = v
|
||||
elif k == 'satellite6_want_facts' and isinstance(v, bool):
|
||||
want_facts = v
|
||||
else:
|
||||
cp.set(section, k, str(v))
|
||||
|
||||
if credential:
|
||||
cp.set(section, 'url', credential.get_input('host', default=''))
|
||||
cp.set(section, 'user', credential.get_input('username', default=''))
|
||||
cp.set(section, 'password', credential.get_input('password', default=''))
|
||||
|
||||
section = 'ansible'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'group_patterns', group_patterns)
|
||||
cp.set(section, 'want_facts', str(want_facts))
|
||||
cp.set(section, 'want_hostcollections', str(want_hostcollections))
|
||||
cp.set(section, 'group_prefix', group_prefix)
|
||||
cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
|
||||
cp.set(section, 'rich_params', str(rich_params))
|
||||
|
||||
section = 'cache'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'path', '/tmp')
|
||||
cp.set(section, 'max_age', '0')
|
||||
|
||||
return self.dump_cp(cp, credential)
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
# this assumes that this is merged
|
||||
# https://github.com/ansible/ansible/pull/52693
|
||||
@@ -2576,95 +2332,119 @@ class satellite6(PluginFileInjector):
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['validate_certs'] = False
|
||||
|
||||
group_patterns = '[]'
|
||||
group_prefix = 'foreman_'
|
||||
want_hostcollections = False
|
||||
want_ansible_ssh_host = False
|
||||
want_facts = True
|
||||
|
||||
foreman_opts = inventory_update.source_vars_dict.copy()
|
||||
for k, v in foreman_opts.items():
|
||||
- if k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
+ if k == 'satellite6_group_patterns' and isinstance(v, str):
|
||||
group_patterns = v
|
||||
elif k == 'satellite6_group_prefix' and isinstance(v, str):
|
||||
group_prefix = v
|
||||
elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
|
||||
want_hostcollections = v
|
||||
elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
want_ansible_ssh_host = v
|
||||
elif k == 'satellite6_want_facts' and isinstance(v, bool):
|
||||
want_facts = v
|
||||
# add backwards support for ssl_verify
|
||||
# plugin uses new option, validate_certs, instead
|
||||
elif k == 'ssl_verify' and isinstance(v, bool):
|
||||
ret['validate_certs'] = v
|
||||
else:
|
||||
ret[k] = str(v)
|
||||
|
||||
# Compatibility content
|
||||
group_by_hostvar = {
|
||||
"environment": {"prefix": "foreman_environment_",
|
||||
"environment": {"prefix": "{}environment_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | "
|
||||
"regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')"}, # NOQA: W605
|
||||
"location": {"prefix": "foreman_location_",
|
||||
"regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')"},
|
||||
"location": {"prefix": "{}location_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"organization": {"prefix": "foreman_organization_",
|
||||
"key": "foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"},
|
||||
"organization": {"prefix": "{}organization_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"lifecycle_environment": {"prefix": "foreman_lifecycle_environment_",
|
||||
"key": "foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"},
|
||||
"lifecycle_environment": {"prefix": "{}lifecycle_environment_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['lifecycle_environment_name'] | "
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"content_view": {"prefix": "foreman_content_view_",
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"},
|
||||
"content_view": {"prefix": "{}content_view_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['content_view_name'] | "
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"}
|
||||
}
|
||||
ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by_hostvar]
|
||||
ret['legacy_hostvars'] = True
|
||||
ret['want_facts'] = True
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"}
|
||||
}
|
||||
|
||||
ret['legacy_hostvars'] = True # convert hostvar structure to the form used by the script
|
||||
ret['want_params'] = True
|
||||
ret['group_prefix'] = group_prefix
|
||||
ret['want_hostcollections'] = want_hostcollections
|
||||
ret['want_facts'] = want_facts
|
||||
|
||||
if want_ansible_ssh_host:
|
||||
ret['compose'] = {'ansible_ssh_host': "foreman['ip6'] | default(foreman['ip'], true)"}
|
||||
ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by_hostvar]
|
||||
|
||||
def form_keyed_group(group_pattern):
|
||||
"""
|
||||
Converts foreman group_pattern to
|
||||
inventory plugin keyed_group
|
||||
|
||||
e.g. {app_param}-{tier_param}-{dc_param}
|
||||
becomes
|
||||
"%s-%s-%s" | format(app_param, tier_param, dc_param)
|
||||
"""
|
||||
if type(group_pattern) is not str:
|
||||
return None
|
||||
params = re.findall('{[^}]*}', group_pattern)
|
||||
if len(params) == 0:
|
||||
return None
|
||||
|
||||
param_names = []
|
||||
for p in params:
|
||||
param_names.append(p[1:-1].strip()) # strip braces and space
|
||||
|
||||
# form keyed_group key by
|
||||
# replacing curly braces with '%s'
|
||||
# (for use with jinja's format filter)
|
||||
key = group_pattern
|
||||
for p in params:
|
||||
key = key.replace(p, '%s', 1)
|
||||
|
||||
# apply jinja filter to key
|
||||
key = '"{}" | format({})'.format(key, ', '.join(param_names))
|
||||
|
||||
keyed_group = {'key': key,
|
||||
'separator': ''}
|
||||
return keyed_group
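
# Worked example of the conversion performed by form_keyed_group:
#
#     form_keyed_group('{app_param}-{tier_param}-{dc_param}')
#     # -> {'key': '"%s-%s-%s" | format(app_param, tier_param, dc_param)',
#     #     'separator': ''}
#
#     form_keyed_group('no_params_here')   # -> None, pattern is skipped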
|
||||
|
||||
try:
|
||||
group_patterns = json.loads(group_patterns)
|
||||
|
||||
if type(group_patterns) is list:
|
||||
for group_pattern in group_patterns:
|
||||
keyed_group = form_keyed_group(group_pattern)
|
||||
if keyed_group:
|
||||
ret['keyed_groups'].append(keyed_group)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning('Could not parse group_patterns. Expected JSON-formatted string, found: {}'
|
||||
.format(group_patterns))
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
class cloudforms(PluginFileInjector):
|
||||
# plugin_name = 'FIXME' # contribute inventory plugin to Ansible
|
||||
ini_env_reference = 'CLOUDFORMS_INI_PATH'
|
||||
# Also no base_injector because this does not work in playbooks
|
||||
# namespace = '' # does not have a collection
|
||||
# collection = ''
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
section = 'cloudforms'
|
||||
cp.add_section(section)
|
||||
|
||||
if credential:
|
||||
cp.set(section, 'url', credential.get_input('host', default=''))
|
||||
cp.set(section, 'username', credential.get_input('username', default=''))
|
||||
cp.set(section, 'password', credential.get_input('password', default=''))
|
||||
cp.set(section, 'ssl_verify', "false")
|
||||
|
||||
cloudforms_opts = dict(inventory_update.source_vars_dict.items())
|
||||
for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags', 'suffix', 'prefer_ipv4']:
|
||||
if opt in cloudforms_opts:
|
||||
cp.set(section, opt, str(cloudforms_opts[opt]))
|
||||
|
||||
section = 'cache'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'max_age', "0")
|
||||
cache_path = tempfile.mkdtemp(
|
||||
prefix='cloudforms_cache',
|
||||
dir=private_data_dir
|
||||
)
|
||||
cp.set(section, 'path', cache_path)
|
||||
|
||||
return self.dump_cp(cp, credential)
|
||||
|
||||
|
||||
class tower(PluginFileInjector):
|
||||
plugin_name = 'tower'
|
||||
base_injector = 'template'
|
||||
initial_version = '2.8' # Driven by "include_metadata" hostvars
|
||||
namespace = 'awx'
|
||||
collection = 'awx'
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(tower, self).get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
env['TOWER_INVENTORY'] = inventory_update.instance_filters
|
||||
env['TOWER_LICENSE_TYPE'] = get_licenser().validate().get('license_type', 'unlicensed')
|
||||
return env
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(tower, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
# Credentials injected as env vars, same as script
|
||||
|
||||
@@ -439,13 +439,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
field = self._meta.get_field(field_name)
|
||||
if isinstance(field, models.ManyToManyField):
|
||||
old_value = set(old_value.all())
|
||||
- if getattr(self, '_deprecated_credential_launch', False):
|
||||
- # TODO: remove this code branch when support for `extra_credentials` goes away
|
||||
- new_value = set(kwargs[field_name])
|
||||
- else:
|
||||
- new_value = set(kwargs[field_name]) - old_value
|
||||
- if not new_value:
|
||||
- continue
|
||||
+ new_value = set(kwargs[field_name]) - old_value
|
||||
+ if not new_value:
|
||||
+ continue
|
||||
|
||||
if new_value == old_value:
|
||||
# no-op case: Fields the same as template's value
|
||||
@@ -1133,20 +1129,6 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
self.failed = bool(self.dark or self.failures)
|
||||
update_fields.append('failed')
|
||||
super(JobHostSummary, self).save(*args, **kwargs)
|
||||
self.update_host_last_job_summary()
|
||||
|
||||
def update_host_last_job_summary(self):
|
||||
update_fields = []
|
||||
if self.host is None:
|
||||
return
|
||||
if self.host.last_job_id != self.job_id:
|
||||
self.host.last_job_id = self.job_id
|
||||
update_fields.append('last_job_id')
|
||||
if self.host.last_job_host_summary_id != self.id:
|
||||
self.host.last_job_host_summary_id = self.id
|
||||
update_fields.append('last_job_host_summary_id')
|
||||
if update_fields:
|
||||
self.host.save(update_fields=update_fields)
|
||||
|
||||
|
||||
class SystemJobOptions(BaseModel):
|
||||
|
||||
@@ -566,7 +566,6 @@ class WebhookMixin(models.Model):
|
||||
|
||||
def update_webhook_status(self, status):
|
||||
if not self.webhook_credential:
|
||||
logger.debug("No credential configured to post back webhook status, skipping.")
|
||||
return
|
||||
|
||||
status_api = self.extra_vars_dict.get('tower_webhook_status_api')
|
||||
|
||||
@@ -23,7 +23,6 @@ from awx.main.notifications.email_backend import CustomEmailBackend
|
||||
from awx.main.notifications.slack_backend import SlackBackend
|
||||
from awx.main.notifications.twilio_backend import TwilioBackend
|
||||
from awx.main.notifications.pagerduty_backend import PagerDutyBackend
|
||||
from awx.main.notifications.hipchat_backend import HipChatBackend
|
||||
from awx.main.notifications.webhook_backend import WebhookBackend
|
||||
from awx.main.notifications.mattermost_backend import MattermostBackend
|
||||
from awx.main.notifications.grafana_backend import GrafanaBackend
|
||||
@@ -44,7 +43,6 @@ class NotificationTemplate(CommonModelNameNotUnique):
|
||||
('twilio', _('Twilio'), TwilioBackend),
|
||||
('pagerduty', _('Pagerduty'), PagerDutyBackend),
|
||||
('grafana', _('Grafana'), GrafanaBackend),
|
||||
('hipchat', _('HipChat'), HipChatBackend),
|
||||
('webhook', _('Webhook'), WebhookBackend),
|
||||
('mattermost', _('Mattermost'), MattermostBackend),
|
||||
('rocketchat', _('Rocket.Chat'), RocketChatBackend),
|
||||
|
||||
@@ -199,7 +199,7 @@ class ProjectOptions(models.Model):
|
||||
results = []
|
||||
project_path = self.get_project_path()
|
||||
if project_path:
|
||||
for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=True):
|
||||
for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=settings.AWX_SHOW_PLAYBOOK_LINKS):
|
||||
if skip_directory(dirpath):
|
||||
continue
|
||||
for filename in filenames:
|
||||
|
||||
@@ -150,7 +150,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
default=None,
|
||||
editable=False,
|
||||
related_name='%(class)s_as_next_schedule+',
|
||||
on_delete=models.SET_NULL,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
)
|
||||
status = models.CharField(
|
||||
max_length=32,
|
||||
@@ -413,9 +413,8 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
if 'extra_vars' in validated_kwargs:
|
||||
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
|
||||
|
||||
- if not getattr(self, '_deprecated_credential_launch', False):
|
||||
- # Create record of provided prompts for relaunch and rescheduling
|
||||
- unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||
+ # Create record of provided prompts for relaunch and rescheduling
|
||||
+ unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||
|
||||
# manually issue the create activity stream entry _after_ M2M relations
|
||||
# have been associated to the UJ
|
||||
@@ -587,7 +586,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
on_delete=models.SET_NULL,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
)
|
||||
dependent_jobs = models.ManyToManyField(
|
||||
'self',
|
||||
|
||||
@@ -13,6 +13,19 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
|
||||
|
||||
DEFAULT_APPROVAL_RUNNING_MSG = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG
|
||||
DEFAULT_APPROVAL_RUNNING_BODY = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_BODY
|
||||
|
||||
DEFAULT_APPROVAL_APPROVED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG
|
||||
DEFAULT_APPROVAL_APPROVED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_BODY
|
||||
|
||||
DEFAULT_APPROVAL_TIMEOUT_MSG = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG
|
||||
DEFAULT_APPROVAL_TIMEOUT_BODY = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_BODY
|
||||
|
||||
DEFAULT_APPROVAL_DENIED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG
|
||||
DEFAULT_APPROVAL_DENIED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_BODY
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.grafana_backend')
|
||||
|
||||
@@ -25,31 +38,13 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_BODY = "{{ job_metadata }}"
|
||||
default_messages = {
|
||||
"started": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"success": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"error": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"workflow_approval": {
|
||||
"running": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG, "body": None
|
||||
},
|
||||
"approved": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG, "body": None
|
||||
},
|
||||
"timed_out": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG, "body": None
|
||||
},
|
||||
"denied": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG, "body": None
|
||||
}
|
||||
}
|
||||
}
|
||||
default_messages = {"started": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
|
||||
"success": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
|
||||
"error": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
|
||||
"workflow_approval": {"running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": DEFAULT_APPROVAL_RUNNING_BODY},
|
||||
"approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG,"body": DEFAULT_APPROVAL_APPROVED_BODY},
|
||||
"timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": DEFAULT_APPROVAL_TIMEOUT_BODY},
|
||||
"denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": DEFAULT_APPROVAL_DENIED_BODY}}}
|
||||
|
||||
def __init__(self, grafana_key,dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True,
|
||||
fail_silently=False, **kwargs):
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import logging
|
||||
|
||||
import requests
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.hipchat_backend')
|
||||
|
||||
|
||||
class HipChatBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"token": {"label": "Token", "type": "password"},
|
||||
"rooms": {"label": "Destination Rooms", "type": "list"},
|
||||
"color": {"label": "Notification Color", "type": "string"},
|
||||
"api_url": {"label": "API Url (e.g: https://mycompany.hipchat.com)", "type": "string"},
|
||||
"notify": {"label": "Notify room", "type": "bool"},
|
||||
"message_from": {"label": "Label to be shown with notification", "type": "string"}}
|
||||
recipient_parameter = "rooms"
|
||||
sender_parameter = "message_from"
|
||||
|
||||
def __init__(self, token, color, api_url, notify, fail_silently=False, **kwargs):
|
||||
super(HipChatBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.token = token
|
||||
if color is not None:
|
||||
self.color = color.lower()
|
||||
self.api_url = api_url
|
||||
self.notify = notify
|
||||
|
||||
def send_messages(self, messages):
|
||||
sent_messages = 0
|
||||
|
||||
for m in messages:
|
||||
for rcp in m.recipients():
|
||||
r = requests.post("{}/v2/room/{}/notification".format(self.api_url, rcp),
|
||||
params={"auth_token": self.token},
|
||||
verify=False,
|
||||
json={"color": self.color,
|
||||
"message": m.subject,
|
||||
"notify": self.notify,
|
||||
"from": m.from_email,
|
||||
"message_format": "text"})
|
||||
if r.status_code != 204:
|
||||
logger.error(smart_text(_("Error sending messages: {}").format(r.text)))
|
||||
if not self.fail_silently:
|
||||
raise Exception(smart_text(_("Error sending message to hipchat: {}").format(r.text)))
|
||||
sent_messages += 1
|
||||
return sent_messages
|
||||
@@ -3,7 +3,6 @@
|
||||
|
||||
import logging
|
||||
import requests
|
||||
import json
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
@@ -45,7 +44,7 @@ class MattermostBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
payload['text'] = m.subject
|
||||
|
||||
r = requests.post("{}".format(m.recipients()[0]),
|
||||
- data=json.dumps(payload), verify=(not self.mattermost_no_verify_ssl))
|
||||
+ json=payload, verify=(not self.mattermost_no_verify_ssl))
|
||||
if r.status_code >= 400:
|
||||
logger.error(smart_text(_("Error sending notification mattermost: {}").format(r.text)))
|
||||
if not self.fail_silently:
|
||||
|
||||
@@ -11,9 +11,20 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
DEFAULT_BODY = CustomNotificationBase.DEFAULT_BODY
|
||||
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
|
||||
|
||||
DEFAULT_APPROVAL_RUNNING_MSG = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG
|
||||
DEFAULT_APPROVAL_RUNNING_BODY = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_BODY
|
||||
|
||||
DEFAULT_APPROVAL_APPROVED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG
|
||||
DEFAULT_APPROVAL_APPROVED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_BODY
|
||||
|
||||
DEFAULT_APPROVAL_TIMEOUT_MSG = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG
|
||||
DEFAULT_APPROVAL_TIMEOUT_BODY = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_BODY
|
||||
|
||||
DEFAULT_APPROVAL_DENIED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG
|
||||
DEFAULT_APPROVAL_DENIED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_BODY
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.pagerduty_backend')
|
||||
|
||||
|
||||
@@ -30,10 +41,10 @@ class PagerDutyBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
default_messages = {"started": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"success": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"error": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"workflow_approval": {"running": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"approved": {"message": DEFAULT_MSG,"body": DEFAULT_BODY},
|
||||
"timed_out": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"denied": {"message": DEFAULT_MSG, "body": DEFAULT_BODY}}}
|
||||
"workflow_approval": {"running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": DEFAULT_APPROVAL_RUNNING_BODY},
|
||||
"approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG,"body": DEFAULT_APPROVAL_APPROVED_BODY},
|
||||
"timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": DEFAULT_APPROVAL_TIMEOUT_BODY},
|
||||
"denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": DEFAULT_APPROVAL_DENIED_BODY}}}
|
||||
|
||||
def __init__(self, subdomain, token, fail_silently=False, **kwargs):
|
||||
super(PagerDutyBackend, self).__init__(fail_silently=fail_silently)
|
||||
|
||||
@@ -1,14 +1,37 @@
|
||||
import redis
|
||||
import logging
|
||||
|
||||
from django.conf.urls import url
|
||||
from django.conf import settings
|
||||
|
||||
from channels.auth import AuthMiddlewareStack
|
||||
from channels.routing import ProtocolTypeRouter, URLRouter
|
||||
|
||||
from . import consumers
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.routing')
|
||||
|
||||
|
||||
class AWXProtocolTypeRouter(ProtocolTypeRouter):
|
||||
def __init__(self, *args, **kwargs):
|
||||
try:
|
||||
r = redis.Redis.from_url(settings.BROKER_URL)
|
||||
for k in r.scan_iter('asgi:*', 500):
|
||||
logger.debug(f"cleaning up Redis key {k}")
|
||||
r.delete(k)
|
||||
except redis.exceptions.RedisError as e:
|
||||
logger.warn("encountered an error communicating with redis.")
|
||||
raise e
|
||||
super().__init__(*args, **kwargs)
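
# Note on the block above: scan_iter walks Redis in batches of 500 and deletes
# any leftover 'asgi:*' channel-layer keys (presumably left by a previous
# process) so websocket group state starts clean; a Redis error is logged and
# re-raised, making startup fail fast.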
|
||||
|
||||
|
||||
websocket_urlpatterns = [
|
||||
url(r'websocket/$', consumers.EventConsumer),
|
||||
url(r'websocket/broadcast/$', consumers.BroadcastConsumer),
|
||||
]
|
||||
|
||||
- application = ProtocolTypeRouter({
|
||||
+ application = AWXProtocolTypeRouter({
|
||||
'websocket': AuthMiddlewareStack(
|
||||
URLRouter(websocket_urlpatterns)
|
||||
),
|
||||
|
||||
@@ -152,8 +152,8 @@ class SimpleDAG(object):
|
||||
return self._get_children_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
for l in self.node_from_edges_by_label.keys():
|
||||
nodes.extend(self._get_children_by_label(this_ord, l))
|
||||
for label_obj in self.node_from_edges_by_label.keys():
|
||||
nodes.extend(self._get_children_by_label(this_ord, label_obj))
|
||||
return nodes
|
||||
|
||||
def _get_parents_by_label(self, node_index, label):
|
||||
@@ -168,8 +168,8 @@ class SimpleDAG(object):
|
||||
return self._get_parents_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
for l in self.node_to_edges_by_label.keys():
|
||||
nodes.extend(self._get_parents_by_label(this_ord, l))
|
||||
for label_obj in self.node_to_edges_by_label.keys():
|
||||
nodes.extend(self._get_parents_by_label(this_ord, label_obj))
|
||||
return nodes
|
||||
|
||||
def get_root_nodes(self):
|
||||
|
||||
@@ -10,7 +10,7 @@ import random
|
||||
|
||||
# Django
|
||||
from django.db import transaction, connection
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.translation import ugettext_lazy as _, gettext_noop
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
# AWX
|
||||
@@ -114,7 +114,7 @@ class TaskManager():
|
||||
logger.info('Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
|
||||
job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))
|
||||
display_list = [spawn_node.unified_job_template] + workflow_ancestors
|
||||
job.job_explanation = _(
|
||||
job.job_explanation = gettext_noop(
|
||||
"Workflow Job spawned from workflow could not start because it "
|
||||
"would result in recursion (spawn order, most recent first: {})"
|
||||
).format(', '.join(['<{}>'.format(tmp) for tmp in display_list]))
|
||||
@@ -123,8 +123,8 @@ class TaskManager():
|
||||
job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))
|
||||
if not job._resources_sufficient_for_launch():
|
||||
can_start = False
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
job.job_explanation = gettext_noop("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
if can_start:
|
||||
if workflow_job.start_args:
|
||||
start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
|
||||
@@ -132,8 +132,8 @@ class TaskManager():
|
||||
start_args = {}
|
||||
can_start = job.signal_start(**start_args)
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
job.job_explanation = gettext_noop("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
if not can_start:
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
@@ -173,7 +173,7 @@ class TaskManager():
|
||||
workflow_job.status = new_status
|
||||
if reason:
|
||||
logger.info(reason)
|
||||
workflow_job.job_explanation = _("No error handling paths found, marking workflow as failed")
|
||||
workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
|
||||
update_fields.append('job_explanation')
|
||||
workflow_job.start_args = '' # blank field to remove encrypted passwords
|
||||
workflow_job.save(update_fields=update_fields)
|
||||
@@ -581,3 +581,4 @@ class TaskManager():
|
||||
logger.debug("Starting Scheduler")
|
||||
with task_manager_bulk_reschedule():
|
||||
self._schedule()
|
||||
logger.debug("Finishing Scheduler")
|
||||
|
||||
@@ -150,9 +150,9 @@ def rbac_activity_stream(instance, sender, **kwargs):
|
||||
|
||||
|
||||
def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
|
||||
for l in instance.labels.all():
|
||||
if l.is_candidate_for_detach():
|
||||
l.delete()
|
||||
for label in instance.labels.all():
|
||||
if label.is_candidate_for_detach():
|
||||
label.delete()
|
||||
|
||||
|
||||
def save_related_job_templates(sender, instance, **kwargs):
|
||||
@@ -393,7 +393,7 @@ def activity_stream_create(sender, instance, created, **kwargs):
|
||||
'{} ({})'.format(c.name, c.id)
|
||||
for c in instance.credentials.iterator()
|
||||
]
|
||||
changes['labels'] = [l.name for l in instance.labels.iterator()]
|
||||
changes['labels'] = [label.name for label in instance.labels.iterator()]
|
||||
if 'extra_vars' in changes:
|
||||
changes['extra_vars'] = instance.display_extra_vars()
|
||||
if type(instance) == OAuth2AccessToken:
|
||||
|
||||
@@ -31,7 +31,7 @@ from django.db.models.fields.related import ForeignKey
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.encoding import smart_str
|
||||
from django.contrib.auth.models import User
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.translation import ugettext_lazy as _, gettext_noop
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
|
||||
@@ -50,7 +50,7 @@ import ansible_runner
|
||||
|
||||
# AWX
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
|
||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.redact import UriCleaner
|
||||
from awx.main.models import (
|
||||
@@ -67,7 +67,7 @@ from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.isolated import manager as isolated_manager
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.utils import (get_ssh_version, update_scm_url,
|
||||
from awx.main.utils import (update_scm_url,
|
||||
ignore_inventory_computed_fields,
|
||||
ignore_inventory_group_removal, extract_ansible_vars, schedule_task_manager,
|
||||
get_awx_version)
|
||||
@@ -141,7 +141,7 @@ def dispatch_startup():
|
||||
# and Tower fall out of use/support, we can probably just _assume_ that
|
||||
# everybody has moved to bigint, and remove this code entirely
|
||||
enforce_bigint_pk_migration()
|
||||
|
||||
|
||||
# Update Tower's rsyslog.conf file based on loggins settings in the db
|
||||
reconfigure_rsyslog()
|
||||
|
||||
@@ -288,7 +288,7 @@ def handle_setting_changes(setting_keys):
|
||||
setting.startswith('LOG_AGGREGATOR')
|
||||
for setting in setting_keys
|
||||
]):
|
||||
connection.on_commit(reconfigure_rsyslog)
|
||||
reconfigure_rsyslog()
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
@@ -358,6 +358,9 @@ def gather_analytics():
|
||||
from rest_framework.fields import DateTimeField
|
||||
if not settings.INSIGHTS_TRACKING_STATE:
|
||||
return
|
||||
if not (settings.AUTOMATION_ANALYTICS_URL and settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD):
|
||||
logger.debug('Not gathering analytics, configuration is invalid')
|
||||
return
|
||||
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
|
||||
if last_gather:
|
||||
last_time = DateTimeField().to_internal_value(last_gather.value)
|
||||
@@ -558,7 +561,8 @@ def awx_periodic_scheduler():
|
||||
continue
|
||||
if not can_start:
|
||||
new_unified_job.status = 'failed'
|
||||
new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
|
||||
new_unified_job.job_explanation = gettext_noop("Scheduled job could not start because it \
|
||||
was not in the right state or required manual credentials")
|
||||
new_unified_job.save(update_fields=['status', 'job_explanation'])
|
||||
new_unified_job.websocket_emit_status("failed")
|
||||
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
||||
@@ -897,21 +901,14 @@ class BaseTask(object):
|
||||
private_data = self.build_private_data(instance, private_data_dir)
|
||||
private_data_files = {'credentials': {}}
|
||||
if private_data is not None:
|
||||
ssh_ver = get_ssh_version()
|
||||
ssh_too_old = True if ssh_ver == "unknown" else Version(ssh_ver) < Version("6.0")
|
||||
openssh_keys_supported = ssh_ver != "unknown" and Version(ssh_ver) >= Version("6.5")
|
||||
for credential, data in private_data.get('credentials', {}).items():
|
||||
# Bail out now if a private key was provided in OpenSSH format
|
||||
# and we're running an earlier version (<6.5).
|
||||
if 'OPENSSH PRIVATE KEY' in data and not openssh_keys_supported:
|
||||
raise RuntimeError(OPENSSH_KEY_ERROR)
|
||||
# OpenSSH formatted keys must have a trailing newline to be
|
||||
# accepted by ssh-add.
|
||||
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
|
||||
data += '\n'
|
||||
# For credentials used with ssh-add, write to a named pipe which
|
||||
# will be read then closed, instead of leaving the SSH key on disk.
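# A generic sketch of that named-pipe approach (an illustration, not the exact
# helper used here); the key is streamed to ssh-add through a FIFO so it never
# rests on disk:
#
#     import os, stat, threading
#     os.mkfifo(pipe_path, stat.S_IRUSR | stat.S_IWUSR)
#     # opening a FIFO for writing blocks until the reader (ssh-add) attaches
#     threading.Thread(target=lambda: open(pipe_path, 'w').write(key_data)).start()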
|
||||
- if credential and credential.credential_type.namespace in ('ssh', 'scm') and not ssh_too_old:
|
||||
+ if credential and credential.credential_type.namespace in ('ssh', 'scm'):
|
||||
try:
|
||||
os.mkdir(os.path.join(private_data_dir, 'env'))
|
||||
except OSError as e:
|
||||
@@ -1016,8 +1013,6 @@ class BaseTask(object):
|
||||
'resource_profiling_memory_poll_interval': mem_poll_interval,
|
||||
'resource_profiling_pid_poll_interval': pid_poll_interval,
|
||||
'resource_profiling_results_dir': results_dir})
|
||||
else:
|
||||
logger.debug('Resource profiling not enabled for task')
|
||||
|
||||
return resource_profiling_params
|
||||
|
||||
@@ -1222,6 +1217,8 @@ class BaseTask(object):
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
if event_data.get('event') == 'playbook_on_stats':
|
||||
event_data['host_map'] = self.host_map
|
||||
|
||||
if isinstance(self, RunProjectUpdate):
|
||||
# it's common for Ansible's SCM modules to print
|
||||
@@ -1232,10 +1229,12 @@ class BaseTask(object):
|
||||
# this is a _little_ expensive to filter
|
||||
# with regex, but project updates don't have many events,
|
||||
# so it *should* have a negligible performance impact
|
||||
task = event_data.get('event_data', {}).get('task_action')
|
||||
try:
|
||||
event_data_json = json.dumps(event_data)
|
||||
event_data_json = UriCleaner.remove_sensitive(event_data_json)
|
||||
event_data = json.loads(event_data_json)
|
||||
if task in ('git', 'hg', 'svn'):
|
||||
event_data_json = json.dumps(event_data)
|
||||
event_data_json = UriCleaner.remove_sensitive(event_data_json)
|
||||
event_data = json.loads(event_data_json)
|
||||
except json.JSONDecodeError:
|
||||
pass
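
# Approximate effect of UriCleaner.remove_sensitive on SCM output (illustrative
# input; the exact masking comes from awx.main.redact):
#
#     UriCleaner.remove_sensitive('cloning https://user:s3cret@git.example.com/repo.git')
#     # -> the userinfo portion of the URL is masked before the event is saved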
|
||||
|
||||
@@ -1421,7 +1420,6 @@ class BaseTask(object):
|
||||
'status_handler': self.status_handler,
|
||||
'settings': {
|
||||
'job_timeout': self.get_instance_timeout(self.instance),
|
||||
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
'suppress_ansible_output': True,
|
||||
**process_isolation_params,
|
||||
**resource_profiling_params,
|
||||
@@ -2169,7 +2167,10 @@ class RunProjectUpdate(BaseTask):
|
||||
scm_branch = project_update.scm_branch
|
||||
branch_override = bool(scm_branch and project_update.scm_branch != project_update.project.scm_branch)
|
||||
if project_update.job_type == 'run' and (not branch_override):
|
||||
- scm_branch = project_update.project.scm_revision
|
||||
+ if project_update.project.scm_revision:
|
||||
+ scm_branch = project_update.project.scm_revision
|
||||
+ elif not scm_branch:
|
||||
+ raise RuntimeError('Could not determine a revision to run from project.')
|
||||
elif not scm_branch:
|
||||
scm_branch = {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')
|
||||
extra_vars.update({
|
||||
@@ -2280,7 +2281,11 @@ class RunProjectUpdate(BaseTask):
|
||||
def acquire_lock(self, instance, blocking=True):
|
||||
lock_path = instance.get_lock_file()
|
||||
if lock_path is None:
|
||||
raise RuntimeError(u'Invalid lock file path')
|
||||
# If from migration or someone blanked local_path for any other reason, recoverable by save
|
||||
instance.save()
|
||||
lock_path = instance.get_lock_file()
|
||||
if lock_path is None:
|
||||
raise RuntimeError(u'Invalid lock file path')
|
||||
|
||||
try:
|
||||
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
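# A hedged sketch of how an advisory lock is typically taken on an fd like the
# one opened above (generic illustration; names and flags are assumptions):
#
#     import fcntl
#     flags = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB
#     fcntl.lockf(fd, flags)   # raises BlockingIOError if non-blocking and already held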
|
||||
@@ -2412,7 +2417,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
|
||||
@property
|
||||
def proot_show_paths(self):
|
||||
- return [self.get_path_to('..', 'plugins', 'inventory'), settings.INVENTORY_COLLECTIONS_ROOT]
|
||||
+ return [self.get_path_to('..', 'plugins', 'inventory'), settings.AWX_ANSIBLE_COLLECTIONS_PATHS]
|
||||
|
||||
def build_private_data(self, inventory_update, private_data_dir):
|
||||
"""
|
||||
@@ -2462,11 +2467,8 @@ class RunInventoryUpdate(BaseTask):
|
||||
|
||||
if injector is not None:
|
||||
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
|
||||
- # All CLOUD_PROVIDERS sources implement as either script or auto plugin
|
||||
- if injector.should_use_plugin():
|
||||
- env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
|
||||
- else:
|
||||
- env['ANSIBLE_INVENTORY_ENABLED'] = 'script'
|
||||
+ # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
|
||||
+ env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
|
||||
|
||||
if inventory_update.source in ['scm', 'custom']:
|
||||
for env_k in inventory_update.source_vars_dict:
|
||||
@@ -2554,7 +2556,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
args.append('--exclude-empty-groups')
|
||||
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
|
||||
args.extend(['--instance-id-var',
|
||||
getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper()),])
|
||||
"'{}'".format(getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())),])
|
||||
# Add arguments for the source inventory script
|
||||
args.append('--source')
|
||||
args.append(self.pseudo_build_inventory(inventory_update, private_data_dir))
|
||||
@@ -2582,16 +2584,12 @@ class RunInventoryUpdate(BaseTask):
|
||||
injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))
|
||||
|
||||
if injector is not None:
|
||||
if injector.should_use_plugin():
|
||||
content = injector.inventory_contents(inventory_update, private_data_dir)
|
||||
# must be a statically named file
|
||||
inventory_path = os.path.join(private_data_dir, injector.filename)
|
||||
with open(inventory_path, 'w') as f:
|
||||
f.write(content)
|
||||
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
else:
|
||||
# Use the vendored script path
|
||||
inventory_path = self.get_path_to('..', 'plugins', 'inventory', injector.script_name)
|
||||
content = injector.inventory_contents(inventory_update, private_data_dir)
|
||||
# must be a statically named file
|
||||
inventory_path = os.path.join(private_data_dir, injector.filename)
|
||||
with open(inventory_path, 'w') as f:
|
||||
f.write(content)
|
||||
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
elif src == 'scm':
|
||||
inventory_path = os.path.join(private_data_dir, 'project', inventory_update.source_path)
|
||||
elif src == 'custom':
|
||||
@@ -2615,12 +2613,6 @@ class RunInventoryUpdate(BaseTask):
|
||||
src = inventory_update.source
|
||||
if src == 'scm' and inventory_update.source_project_update:
|
||||
return os.path.join(private_data_dir, 'project')
|
||||
if src in CLOUD_PROVIDERS:
|
||||
injector = None
|
||||
if src in InventorySource.injectors:
|
||||
injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))
|
||||
if (not injector) or (not injector.should_use_plugin()):
|
||||
return self.get_path_to('..', 'plugins', 'inventory')
|
||||
return private_data_dir
|
||||
|
||||
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
|
||||
|
||||
@@ -107,11 +107,6 @@ def workflow_job_template_factory():
|
||||
return create_workflow_job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def get_ssh_version(mocker):
|
||||
return mocker.patch('awx.main.tasks.get_ssh_version', return_value='OpenSSH_6.9p1, LibreSSL 2.1.8')
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):
|
||||
return job_template_with_survey_passwords_factory(persisted=False)
|
||||
@@ -136,8 +131,8 @@ def mock_cache():
|
||||
|
||||
def pytest_runtest_teardown(item, nextitem):
|
||||
# clear Django cache at the end of every test ran
|
||||
# NOTE: this should not be memcache, see test_cache in test_env.py
|
||||
# this is a local test cache, so we want every test to start with empty cache
|
||||
# NOTE: this should not be memcache (as it is deprecated), nor should it be redis.
|
||||
# This is a local test cache, so we want every test to start with an empty cache
|
||||
cache.clear()
|
||||
|
||||
|
||||
|
||||
@@ -1 +1,20 @@
|
||||
plugin: ovirt.ovirt_collection.ovirt
|
||||
base_source_var: value_of_var
|
||||
compose:
|
||||
ansible_host: (devices.values() | list)[0][0] if devices else None
|
||||
groups:
|
||||
dev: '"dev" in tags'
|
||||
keyed_groups:
|
||||
- key: cluster
|
||||
prefix: cluster
|
||||
separator: _
|
||||
- key: status
|
||||
prefix: status
|
||||
separator: _
|
||||
- key: tags
|
||||
prefix: tag
|
||||
separator: _
|
||||
ovirt_hostname_preference:
|
||||
- name
|
||||
- fqdn
|
||||
ovirt_insecure: false
|
||||
plugin: ovirt.ovirt.ovirt
|
||||
|
||||
@@ -1,22 +1,30 @@
|
||||
base_source_var: value_of_var
|
||||
compose:
|
||||
ansible_ssh_host: foreman['ip6'] | default(foreman['ip'], true)
|
||||
group_prefix: foo_group_prefix
|
||||
keyed_groups:
|
||||
- key: foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')
|
||||
prefix: foreman_environment_
|
||||
- key: foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')
|
||||
prefix: foo_group_prefixenvironment_
|
||||
separator: ''
|
||||
- key: foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_location_
|
||||
- key: foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixlocation_
|
||||
separator: ''
|
||||
- key: foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_organization_
|
||||
- key: foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixorganization_
|
||||
separator: ''
|
||||
- key: foreman['content_facet_attributes']['lifecycle_environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_lifecycle_environment_
|
||||
- key: foreman['content_facet_attributes']['lifecycle_environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixlifecycle_environment_
|
||||
separator: ''
|
||||
- key: foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_content_view_
|
||||
- key: foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixcontent_view_
|
||||
separator: ''
|
||||
- key: '"%s-%s-%s" | format(app, tier, color)'
|
||||
separator: ''
|
||||
- key: '"%s-%s" | format(app, color)'
|
||||
separator: ''
|
||||
legacy_hostvars: true
|
||||
plugin: theforeman.foreman.foreman
|
||||
validate_certs: false
|
||||
want_facts: true
|
||||
want_hostcollections: true
|
||||
want_params: true
|
||||
|
||||
@@ -43,7 +43,6 @@ properties:
|
||||
- resourcePool
|
||||
- rootSnapshot
|
||||
- snapshot
|
||||
- tag
|
||||
- triggeredAlarmState
|
||||
- value
|
||||
- capability
|
||||
|
||||
@@ -1,9 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AZURE_CLIENT_ID": "fooo",
"AZURE_CLOUD_ENVIRONMENT": "fooo",
"AZURE_INI_PATH": "{{ file_reference }}",
"AZURE_SECRET": "fooo",
"AZURE_SUBSCRIPTION_ID": "fooo",
"AZURE_TENANT": "fooo"
}
@@ -1,11 +0,0 @@
[azure]
include_powerstate = yes
group_by_resource_group = yes
group_by_location = yes
group_by_tag = yes
locations = southcentralus,westus
base_source_var = value_of_var
use_private_ip = True
resource_groups = foo_resources,bar_resources
tags = Creator:jmarshall, peanutbutter:jelly

@@ -1,4 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"CLOUDFORMS_INI_PATH": "{{ file_reference }}"
}
@@ -1 +0,0 @@
<directory>
@@ -1,16 +0,0 @@
[cloudforms]
url = https://foo.invalid
username = fooo
password = fooo
ssl_verify = false
version = 2.4
purge_actions = maybe
clean_group_keys = this_key
nest_tags = yes
suffix = .ppt
prefer_ipv4 = yes

[cache]
max_age = 0
path = {{ cache_dir }}

@@ -1,7 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AWS_ACCESS_KEY_ID": "fooo",
"AWS_SECRET_ACCESS_KEY": "fooo",
"AWS_SECURITY_TOKEN": "fooo",
"EC2_INI_PATH": "{{ file_reference }}"
}
@@ -1 +0,0 @@
<directory>
@@ -1,34 +0,0 @@
[ec2]
base_source_var = value_of_var
boto_profile = /tmp/my_boto_stuff
iam_role_arn = arn:aws:iam::123456789012:role/test-role
hostname_variable = public_dns_name
destination_variable = public_dns_name
regions = us-east-2,ap-south-1
regions_exclude = us-gov-west-1,cn-north-1
vpc_destination_variable = ip_address
route53 = False
all_instances = True
all_rds_instances = False
include_rds_clusters = False
rds = False
nested_groups = True
elasticache = False
stack_filters = False
instance_filters = foobaa
group_by_ami_id = False
group_by_availability_zone = True
group_by_aws_account = False
group_by_instance_id = False
group_by_instance_state = False
group_by_platform = False
group_by_instance_type = True
group_by_key_pair = False
group_by_region = True
group_by_security_group = False
group_by_tag_keys = True
group_by_tag_none = False
group_by_vpc_id = False
cache_path = {{ cache_dir }}
cache_max_age = 300

@@ -1,12 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
"GCE_EMAIL": "fooo",
"GCE_INI_PATH": "{{ file_reference_0 }}",
"GCE_PROJECT": "fooo",
"GCE_ZONE": "us-east4-a,us-west1-b",
"GCP_AUTH_KIND": "serviceaccount",
"GCP_ENV_TYPE": "tower",
"GCP_PROJECT": "fooo",
"GCP_SERVICE_ACCOUNT_FILE": "{{ file_reference }}"
}
@@ -1,7 +0,0 @@
{
"type": "service_account",
"private_key": "{{private_key}}",
"client_email": "fooo",
"project_id": "fooo",
"token_uri": "https://oauth2.googleapis.com/token"
}
@@ -1,3 +0,0 @@
[cache]
cache_max_age = 0

@@ -1,4 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
}
@@ -1 +0,0 @@
<directory>
@@ -1,17 +0,0 @@
ansible:
expand_hostvars: true
fail_on_errors: true
use_hostnames: false
cache:
path: {{ cache_dir }}
clouds:
devstack:
auth:
auth_url: https://foo.invalid
domain_name: fooo
password: fooo
project_domain_name: fooo
project_name: fooo
username: fooo
private: false
verify: false
@@ -1,7 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"OVIRT_INI_PATH": "{{ file_reference }}",
"OVIRT_PASSWORD": "fooo",
"OVIRT_URL": "https://foo.invalid",
"OVIRT_USERNAME": "fooo"
}
@@ -1,5 +0,0 @@
[ovirt]
ovirt_url=https://foo.invalid
ovirt_username=fooo
ovirt_password=fooo
ovirt_ca_file=fooo
@@ -1,4 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"FOREMAN_INI_PATH": "{{ file_reference }}"
}
@@ -1,19 +0,0 @@
[foreman]
base_source_var = value_of_var
ssl_verify = False
url = https://foo.invalid
user = fooo
password = fooo

[ansible]
group_patterns = foo_group_patterns
want_facts = True
want_hostcollections = True
group_prefix = foo_group_prefix
want_ansible_ssh_host = True
rich_params = True

[cache]
path = /tmp
max_age = 0

@@ -1,9 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"TOWER_HOST": "https://foo.invalid",
"TOWER_INVENTORY": "42",
"TOWER_LICENSE_TYPE": "open",
"TOWER_PASSWORD": "fooo",
"TOWER_USERNAME": "fooo",
"TOWER_VERIFY_SSL": "False"
}
@@ -1,8 +0,0 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"VMWARE_HOST": "https://foo.invalid",
"VMWARE_INI_PATH": "{{ file_reference }}",
"VMWARE_PASSWORD": "fooo",
"VMWARE_USER": "fooo",
"VMWARE_VALIDATE_CERTS": "False"
}
@@ -1,11 +0,0 @@
[vmware]
cache_max_age = 0
validate_certs = False
username = fooo
password = fooo
server = https://foo.invalid
base_source_var = value_of_var
alias_pattern = {{ config.foo }}
host_filters = {{ config.zoo == "DC0_H0_VM0" }}
groupby_patterns = {{ config.asdf }}

@@ -50,8 +50,6 @@ class TestSwaggerGeneration():
data.update(response.accepted_renderer.get_customizations() or {})

data['host'] = None
if not pytest.config.getoption("--genschema"):
data['modified'] = datetime.datetime.utcnow().isoformat()
data['schemes'] = ['https']
data['consumes'] = ['application/json']

@@ -79,10 +77,14 @@ class TestSwaggerGeneration():
data['paths'] = revised_paths
self.__class__.JSON = data

def test_sanity(self, release):
def test_sanity(self, release, request):
JSON = self.__class__.JSON
JSON['info']['version'] = release


if not request.config.getoption('--genschema'):
JSON['modified'] = datetime.datetime.utcnow().isoformat()

# Make some basic assertions about the rendered JSON so we can
# be sure it doesn't break across DRF upgrades and view/serializer
# changes.
@@ -105,9 +107,6 @@ class TestSwaggerGeneration():
'get', 'put', 'patch', 'delete'
]

# Test deprecated paths
assert paths['/api/v2/jobs/{id}/extra_credentials/']['get']['deprecated'] is True

@pytest.mark.parametrize('path', [
'/api/',
'/api/v2/',
@@ -118,7 +117,7 @@ class TestSwaggerGeneration():
# hit a couple important endpoints so we always have example data
get(path, user=admin, expect=200)

def test_autogen_response_examples(self, swagger_autogen):
def test_autogen_response_examples(self, swagger_autogen, request):
for pattern, node in TestSwaggerGeneration.JSON['paths'].items():
pattern = pattern.replace('{id}', '[0-9]+')
pattern = pattern.replace(r'{category_slug}', r'[a-zA-Z0-9\-]+')
@@ -141,7 +140,7 @@ class TestSwaggerGeneration():
for param in node[method].get('parameters'):
if param['in'] == 'body':
node[method]['parameters'].remove(param)
if pytest.config.getoption("--genschema"):
if request.config.getoption("--genschema"):
pytest.skip("In schema generator skipping swagger generator", allow_module_level=True)
else:
node[method].setdefault('parameters', []).append({
@@ -177,7 +176,7 @@ class TestSwaggerGeneration():
data
)
data = re.sub(
r'"action_node": "awx-[^"]+"',
r'"action_node": "[^"]+"',
'"action_node": "awx"',
data
)

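The swagger test hunks above swap the removed `pytest.config` global for the `request` fixture when reading the `--genschema` command-line flag. A self-contained sketch of that pattern, assuming the flag is registered in a local conftest.py (the names here are illustrative, not AWX's files):

```python
# conftest.py (sketch)
def pytest_addoption(parser):
    # register the custom flag the tests later read back via request.config
    parser.addoption("--genschema", action="store_true", default=False,
                     help="generate the schema instead of asserting on swagger output")


# test_example.py (sketch)
import pytest

def test_respects_genschema_flag(request):
    # request.config.getoption() replaces the removed pytest.config.getoption()
    if request.config.getoption("--genschema"):
        pytest.skip("schema generation only")
    assert True
```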
@@ -319,11 +319,11 @@ def create_organization(name, roles=None, persisted=True, **kwargs):
users = generate_users(org, teams, False, persisted, users=kwargs.get('users'))

if 'labels' in kwargs:
for l in kwargs['labels']:
if type(l) is Label:
labels[l.name] = l
for label_obj in kwargs['labels']:
if type(label_obj) is Label:
labels[label_obj.name] = label_obj
else:
labels[l] = mk_label(l, organization=org, persisted=persisted)
labels[label_obj] = mk_label(label_obj, organization=org, persisted=persisted)

if 'notification_templates' in kwargs:
for nt in kwargs['notification_templates']:

@@ -88,7 +88,7 @@ def test_copy_tables_unified_job_query(
with tempfile.TemporaryDirectory() as tmpdir:
collectors.copy_tables(time_start, tmpdir, subset="unified_jobs")
with open(os.path.join(tmpdir, "unified_jobs_table.csv")) as f:
lines = "".join([l for l in f])
lines = "".join([line for line in f])

assert project_update_name in lines
assert inventory_update_name in lines
@@ -139,9 +139,9 @@ def test_copy_tables_workflow_job_node_query(sqlite_copy_expert, workflow_job):
reader = csv.reader(f)
# Pop the headers
next(reader)
lines = [l for l in reader]
lines = [line for line in reader]

ids = [int(l[0]) for l in lines]
ids = [int(line[0]) for line in lines]

assert ids == list(
workflow_job.workflow_nodes.all().values_list("id", flat=True)

@@ -60,6 +60,36 @@ def test_credential_validation_error_with_bad_user(post, admin, credentialtype_s
assert response.data['user'][0] == 'Incorrect type. Expected pk value, received str.'


@pytest.mark.django_db
def test_credential_validation_error_with_no_owner_field(post, admin, credentialtype_ssh):
params = {
'credential_type': credentialtype_ssh.id,
'inputs': {'username': 'someusername'},
'name': 'Some name',
}
response = post(reverse('api:credential_list'), params, admin)
assert response.status_code == 400
assert response.data['detail'][0] == "Missing 'user', 'team', or 'organization'."


@pytest.mark.django_db
def test_credential_validation_error_with_multiple_owner_fields(post, admin, alice, team, organization, credentialtype_ssh):
params = {
'credential_type': credentialtype_ssh.id,
'inputs': {'username': 'someusername'},
'team': team.id,
'user': alice.id,
'organization': organization.id,
'name': 'Some name',
}
response = post(reverse('api:credential_list'), params, admin)
assert response.status_code == 400
assert response.data['detail'][0] == (
"Only one of 'user', 'team', or 'organization' should be provided, "
"received organization, team, user fields."
)


@pytest.mark.django_db
def test_create_user_credential_via_user_credentials_list(post, get, alice, credentialtype_ssh):
params = {

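The two new credential tests above pin down an owner-field rule: a credential posted to this list must name exactly one of user, team, or organization. A rough standalone sketch of that check (plain Python, not AWX's serializer code):

```python
def validate_owner_fields(data: dict) -> None:
    """Require exactly one of 'user', 'team', or 'organization' (illustrative only)."""
    owners = sorted(f for f in ("user", "team", "organization") if data.get(f) is not None)
    if not owners:
        raise ValueError("Missing 'user', 'team', or 'organization'.")
    if len(owners) > 1:
        raise ValueError(
            "Only one of 'user', 'team', or 'organization' should be provided, "
            "received %s fields." % ", ".join(owners)
        )

validate_owner_fields({"user": 1})                    # passes
# validate_owner_fields({})                           # raises: missing owner
# validate_owner_fields({"user": 1, "team": 2})       # raises: multiple owners
```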
@@ -24,41 +24,6 @@ def job_template(job_template, project, inventory):
return job_template


@pytest.mark.django_db
def test_extra_credentials_filtering(get, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(vault_credential)
job_template.credentials.add(credential)
url = reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': job_template.pk}
)
resp = get(url, admin, expect=200)
assert resp.data['count'] == 1
assert resp.data['results'][0]['id'] == credential.pk


@pytest.mark.django_db
def test_extra_credentials_requires_cloud_or_net(get, post, job_template, admin,
machine_credential, vault_credential, credential,
net_credential):
url = reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': job_template.pk}
)

for cred in (machine_credential, vault_credential):
resp = post(url, {'associate': True, 'id': cred.pk}, admin, expect=400)
assert 'Extra credentials must be network or cloud.' in smart_str(resp.content)

post(url, {'associate': True, 'id': credential.pk}, admin, expect=204)
assert get(url, admin).data['count'] == 1

post(url, {'associate': True, 'id': net_credential.pk}, admin, expect=204)
assert get(url, admin).data['count'] == 2


@pytest.mark.django_db
def test_prevent_multiple_machine_creds(get, post, job_template, admin, machine_credential):
url = reverse(
@@ -115,52 +80,6 @@ def test_prevent_multiple_machine_creds_at_launch(get, post, job_template, admin
assert 'Cannot assign multiple Machine credentials.' in smart_str(resp.content)


@pytest.mark.django_db
def test_extra_credentials_unique_by_kind(get, post, job_template, admin,
credentialtype_aws):
url = reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': job_template.pk}
)

def _new_cred(name):
return {
'name': name,
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': 'bob',
'password': 'secret',
}
}

post(url, _new_cred('First Cred'), admin, expect=201)
assert get(url, admin).data['count'] == 1

resp = post(url, _new_cred('Second Cred'), admin, expect=400)
assert 'Cannot assign multiple Amazon Web Services credentials.' in smart_str(resp.content)


@pytest.mark.django_db
def test_extra_credentials_at_launch(get, post, job_template, admin, credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']

assert len(summary_fields['credentials']) == 1


@pytest.mark.django_db
def test_modify_extra_credentials_at_launch(get, post, job_template, admin,
machine_credential, vault_credential, credential):
job_template.credentials.add(machine_credential)
job_template.credentials.add(vault_credential)
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']

summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
assert len(summary_fields['credentials']) == 3


@pytest.mark.django_db
def test_ssh_password_prompted_at_launch(get, post, job_template, admin, machine_credential):
job_template.credentials.add(machine_credential)
@@ -229,25 +148,6 @@ def test_vault_credential_with_password_at_launch(get, post, job_template, admin
signal_start.assert_called_with(vault_password='testing123')


@pytest.mark.django_db
def test_extra_creds_prompted_at_launch(get, post, job_template, admin, net_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
resp = post(url, {'extra_credentials': [net_credential.pk]}, admin, expect=201)

summary_fields = get(
reverse('api:job_detail', kwargs={'pk': resp.data['job']}),
admin
).data['summary_fields']
assert len(summary_fields['credentials']) == 1


@pytest.mark.django_db
def test_invalid_mixed_credentials_specification(get, post, job_template, admin, net_credential):
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
post(url=url, data={'credentials': [net_credential.pk], 'extra_credentials': [net_credential.pk]},
user=admin, expect=400)


@pytest.mark.django_db
def test_deprecated_credential_activity_stream(patch, admin_user, machine_credential, job_template):
job_template.credentials.add(machine_credential)

@@ -22,20 +22,6 @@ from awx.main.models import (
)


@pytest.mark.django_db
def test_extra_credentials(get, organization_factory, job_template_factory, credential):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
jt.credentials.add(credential)
jt.save()
job = jt.create_unified_job()

url = reverse('api:job_extra_credentials_list', kwargs={'pk': job.pk})
response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 1


@pytest.mark.django_db
def test_job_relaunch_permission_denied_response(
post, get, inventory, project, credential, net_credential, machine_credential):
@@ -50,7 +36,7 @@ def test_job_relaunch_permission_denied_response(
r = get(job.get_absolute_url(), jt_user, expect=200)
assert r.data['summary_fields']['user_capabilities']['start']

# Job has prompted extra_credential, launch denied w/ message
# Job has prompted credential, launch denied w/ message
job.launch_config.credentials.add(net_credential)
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail']
@@ -70,7 +56,7 @@ def test_job_relaunch_prompts_not_accepted_response(
r = get(job.get_absolute_url(), jt_user, expect=200)
assert r.data['summary_fields']['user_capabilities']['start']

# Job has prompted extra_credential, launch denied w/ message
# Job has prompted credential, launch denied w/ message
job.launch_config.credentials.add(net_credential)
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)


@@ -304,7 +304,7 @@ def test_job_launch_with_default_creds(machine_credential, vault_credential, dep
@pytest.mark.django_db
def test_job_launch_JT_enforces_unique_credentials_kinds(machine_credential, credentialtype_aws, deploy_jobtemplate):
"""
JT launching should require that extra_credentials have distinct CredentialTypes
JT launching should require that credentials have distinct CredentialTypes
"""
creds = []
for i in range(2):

@@ -45,27 +45,6 @@ def test_create(post, project, machine_credential, inventory, alice, grant_proje
)


@pytest.mark.django_db
def test_extra_credential_creation(get, post, organization_factory, job_template_factory, credentialtype_aws):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
response = post(url, {
'name': 'My Cred',
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': 'bob',
'password': 'secret',
}
}, objs.superusers.admin)
assert response.status_code == 201

response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 1


@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['scm', 'insights'])
def test_invalid_credential_kind_xfail(get, post, organization_factory, job_template_factory, kind):
@@ -87,42 +66,6 @@ def test_invalid_credential_kind_xfail(get, post, organization_factory, job_temp
assert 'Cannot assign a Credential of kind `{}`.'.format(kind) in response.data.values()


@pytest.mark.django_db
def test_extra_credential_unique_type_xfail(get, post, organization_factory, job_template_factory, credentialtype_aws):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
response = post(url, {
'name': 'My Cred',
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': 'bob',
'password': 'secret',
}
}, objs.superusers.admin)
assert response.status_code == 201

response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 1

# this request should fail because you can't assign the same type (aws)
# twice
response = post(url, {
'name': 'My Cred',
'credential_type': credentialtype_aws.pk,
'inputs': {
'username': 'joe',
'password': 'another-secret',
}
}, objs.superusers.admin)
assert response.status_code == 400

response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 1


@pytest.mark.django_db
def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, inventory, settings):
project.use_role.members.add(alice)
@@ -143,60 +86,6 @@ def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, invento
assert 'Maximum number of forks (10) exceeded' in str(response.data)


@pytest.mark.django_db
def test_attach_extra_credential(get, post, organization_factory, job_template_factory, credential):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
response = post(url, {
'associate': True,
'id': credential.id,
}, objs.superusers.admin)
assert response.status_code == 204

response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 1


@pytest.mark.django_db
def test_detach_extra_credential(get, post, organization_factory, job_template_factory, credential):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
jt.credentials.add(credential)
jt.save()

url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
response = post(url, {
'disassociate': True,
'id': credential.id,
}, objs.superusers.admin)
assert response.status_code == 204

response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 0


@pytest.mark.django_db
def test_attach_extra_credential_wrong_kind_xfail(get, post, organization_factory, job_template_factory, machine_credential):
"""Extra credentials only allow net + cloud credentials"""
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template

url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
response = post(url, {
'associate': True,
'id': machine_credential.id,
}, objs.superusers.admin)
assert response.status_code == 400

response = get(url, user=objs.superusers.admin)
assert response.data.get('count') == 0


@pytest.mark.django_db
@pytest.mark.parametrize(
"grant_project, grant_inventory, expect", [
@@ -368,57 +257,6 @@ def test_launch_with_pending_deletion_inventory_workflow(get, post, organization
assert resp.data['inventory'] == ['The inventory associated with this Workflow is being deleted.']


@pytest.mark.django_db
def test_launch_with_extra_credentials(get, post, organization_factory,
job_template_factory, machine_credential,
credential, net_credential):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
jt.ask_credential_on_launch = True
jt.save()

resp = post(
reverse('api:job_template_launch', kwargs={'pk': jt.pk}),
dict(
credentials=[machine_credential.pk, credential.pk, net_credential.pk]
),
objs.superusers.admin, expect=201
)
job_pk = resp.data.get('id')

resp = get(reverse('api:job_extra_credentials_list', kwargs={'pk': job_pk}), objs.superusers.admin)
assert resp.data.get('count') == 2

resp = get(reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk}), objs.superusers.admin)
assert resp.data.get('count') == 0


@pytest.mark.django_db
def test_launch_with_extra_credentials_not_allowed(get, post, organization_factory,
job_template_factory, machine_credential,
credential, net_credential):
objs = organization_factory("org", superusers=['admin'])
jt = job_template_factory("jt", organization=objs.organization,
inventory='test_inv', project='test_proj').job_template
jt.credentials.add(machine_credential)
jt.ask_credential_on_launch = False
jt.save()

resp = post(
reverse('api:job_template_launch', kwargs={'pk': jt.pk}),
dict(
credentials=[machine_credential.pk, credential.pk, net_credential.pk]
),
objs.superusers.admin
)
assert 'credentials' in resp.data['ignored_fields'].keys()
job_pk = resp.data.get('id')

resp = get(reverse('api:job_extra_credentials_list', kwargs={'pk': job_pk}), objs.superusers.admin)
assert resp.data.get('count') == 0


@pytest.mark.django_db
def test_jt_without_project(inventory):
data = dict(name="Test", job_type="run",

@@ -71,6 +71,18 @@ def test_node_accepts_prompted_fields(inventory, project, workflow_job_template,
user=admin_user, expect=201)


@pytest.mark.django_db
@pytest.mark.parametrize("field_name, field_value", [
('all_parents_must_converge', True),
('all_parents_must_converge', False),
])
def test_create_node_with_field(field_name, field_value, workflow_job_template, post, admin_user):
url = reverse('api:workflow_job_template_workflow_nodes_list',
kwargs={'pk': workflow_job_template.pk})
res = post(url, {field_name: field_value}, user=admin_user, expect=201)
assert res.data[field_name] == field_value


@pytest.mark.django_db
class TestApprovalNodes():
def test_approval_node_creation(self, post, approval_node, admin_user):

@@ -65,6 +65,7 @@ class TestKeyRegeneration:
assert nc['token'].startswith(PREFIX)

Slack = nt.CLASS_FOR_NOTIFICATION_TYPE[nt.notification_type]

class TestBackend(Slack):

def __init__(self, *args, **kw):

@@ -1,7 +1,9 @@
from unittest import mock
import pytest

from awx.main.models import Job, JobEvent
from django.utils.timezone import now

from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary


@pytest.mark.django_db
@@ -61,3 +63,148 @@ def test_parent_failed(emit, event):
assert events.count() == 2
for e in events.all():
assert e.failed is True


@pytest.mark.django_db
def test_host_summary_generation():
hostnames = [f'Host {i}' for i in range(100)]
inv = Inventory()
inv.save()
Host.objects.bulk_create([
Host(created=now(), modified=now(), name=h, inventory_id=inv.id)
for h in hostnames
])
j = Job(inventory=inv)
j.save()
host_map = dict((host.name, host.id) for host in inv.hosts.all())
JobEvent.create_from_data(
job_id=j.pk,
parent_uuid='abc123',
event='playbook_on_stats',
event_data={
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
'changed': {},
'dark': {},
'failures': {},
'ignored': {},
'processed': {},
'rescued': {},
'skipped': {},
},
host_map=host_map
).save()

assert j.job_host_summaries.count() == len(hostnames)
assert sorted([s.host_name for s in j.job_host_summaries.all()]) == sorted(hostnames)

for s in j.job_host_summaries.all():
assert host_map[s.host_name] == s.host_id
assert s.ok == len(s.host_name)
assert s.changed == 0
assert s.dark == 0
assert s.failures == 0
assert s.ignored == 0
assert s.processed == 0
assert s.rescued == 0
assert s.skipped == 0

for host in Host.objects.all():
assert host.last_job_id == j.id
assert host.last_job_host_summary.host == host


@pytest.mark.django_db
def test_host_summary_generation_with_deleted_hosts():
hostnames = [f'Host {i}' for i in range(10)]
inv = Inventory()
inv.save()
Host.objects.bulk_create([
Host(created=now(), modified=now(), name=h, inventory_id=inv.id)
for h in hostnames
])
j = Job(inventory=inv)
j.save()
host_map = dict((host.name, host.id) for host in inv.hosts.all())

# delete half of the hosts during the playbook run
for h in inv.hosts.all()[:5]:
h.delete()

JobEvent.create_from_data(
job_id=j.pk,
parent_uuid='abc123',
event='playbook_on_stats',
event_data={
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
'changed': {},
'dark': {},
'failures': {},
'ignored': {},
'processed': {},
'rescued': {},
'skipped': {},
},
host_map=host_map
).save()


ids = sorted([s.host_id or -1 for s in j.job_host_summaries.order_by('id').all()])
names = sorted([s.host_name for s in j.job_host_summaries.all()])
assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5',
'Host 6', 'Host 7', 'Host 8', 'Host 9']


@pytest.mark.django_db
def test_host_summary_generation_with_limit():
# Make an inventory with 10 hosts, run a playbook with a --limit
# pointed at *one* host,
# Verify that *only* that host has an associated JobHostSummary and that
# *only* that host has an updated value for .last_job.
hostnames = [f'Host {i}' for i in range(10)]
inv = Inventory()
inv.save()
Host.objects.bulk_create([
Host(created=now(), modified=now(), name=h, inventory_id=inv.id)
for h in hostnames
])
j = Job(inventory=inv)
j.save()

# host map is a data structure that tracks a mapping of host name --> ID
# for the inventory, _regardless_ of whether or not there's a limit
# applied to the actual playbook run
host_map = dict((host.name, host.id) for host in inv.hosts.all())

# by making the playbook_on_stats *only* include Host 1, we're emulating
# the behavior of a `--limit=Host 1`
matching_host = Host.objects.get(name='Host 1')
JobEvent.create_from_data(
job_id=j.pk,
parent_uuid='abc123',
event='playbook_on_stats',
event_data={
'ok': {matching_host.name: len(matching_host.name)},  # effectively, limit=Host 1
'changed': {},
'dark': {},
'failures': {},
'ignored': {},
'processed': {},
'rescued': {},
'skipped': {},
},
host_map=host_map
).save()

# since the playbook_on_stats only references one host,
# there should *only* be one JobHostSummary record (and it should
# be related to the appropriate Host)
assert JobHostSummary.objects.count() == 1
for h in Host.objects.all():
if h.name == 'Host 1':
assert h.last_job_id == j.id
assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
else:
# all other hosts in the inventory should remain untouched
assert h.last_job_id is None
assert h.last_job_host_summary_id is None

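The three host-summary tests above all drive the same bookkeeping: a `playbook_on_stats` event carries per-status dictionaries keyed by host name, and `host_map` translates those names to database IDs so one summary row can be written per reported host (with no ID when the host was deleted mid-run). A minimal sketch of that mapping with hypothetical data, not the `JobEvent` implementation:

```python
# Hypothetical inputs mirroring what the tests construct.
host_map = {"Host 0": 1, "Host 1": 2}                       # inventory name -> host id
event_data = {"ok": {"Host 0": 6, "Host 1": 6, "Host 9": 6},
              "changed": {}, "failures": {}, "dark": {}}

# One summary per host name mentioned anywhere in playbook_on_stats;
# a name missing from host_map (e.g. a deleted host) keeps host_id=None.
reported = {name for counts in event_data.values() for name in counts}
summaries = [
    {
        "host_name": name,
        "host_id": host_map.get(name),
        "ok": event_data["ok"].get(name, 0),
        "changed": event_data["changed"].get(name, 0),
    }
    for name in sorted(reported)
]

print(summaries)
```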
@@ -17,7 +17,6 @@ from awx.main.models import (
Job
)
from awx.main.constants import CLOUD_PROVIDERS
from awx.main.models.inventory import PluginFileInjector
from awx.main.utils.filters import SmartFilter


@@ -227,13 +226,6 @@ class TestSCMClean:

@pytest.mark.django_db
class TestInventorySourceInjectors:
def test_should_use_plugin(self):
class foo(PluginFileInjector):
plugin_name = 'foo_compute'
initial_version = '2.7.8'
assert not foo('2.7.7').should_use_plugin()
assert foo('2.8').should_use_plugin()

def test_extra_credentials(self, project, credential):
inventory_source = InventorySource.objects.create(
name='foo', source='custom', source_project=project
@@ -266,18 +258,6 @@ class TestInventorySourceInjectors:
injector = InventorySource.injectors[source]('2.7.7')
assert injector.filename == filename

@pytest.mark.parametrize('source,script_name', [
('ec2', 'ec2.py'),
('rhv', 'ovirt4.py'),
('satellite6', 'foreman.py'),
('openstack', 'openstack_inventory.py')
], ids=['ec2', 'rhv', 'satellite6', 'openstack'])
def test_script_filenames(self, source, script_name):
"""Ansible has several exceptions in naming of scripts
"""
injector = InventorySource.injectors[source]('2.7.7')
assert injector.script_name == script_name

def test_group_by_azure(self):
injector = InventorySource.injectors['azure_rm']('2.9')
inv_src = InventorySource(

Some files were not shown because too many files have changed in this diff.