Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 03:24:50 -03:30)
Compare commits
1151 Commits
08aff9bd2c ... 1a619de91f (only the SHA1 column of the commit list was captured in this view)
.dput.cf (6 lines, file deleted)

```
@@ -1,6 +0,0 @@
[mini_dinstall]
fqdn = localhost
method = local
incoming = FIXME/deb-repo/mini-dinstall/incoming
run_dinstall = 0
post_upload_command = mini-dinstall -b -v
```
Deleted file (20 lines; name not shown in this view)

```
@@ -1,20 +0,0 @@
root = true

[*]
end_of_line = lf
insert_final_newline = true

[Makefile]
indent_style = tab

[**.py]
indent_style = space
indent_size = 4

[**.{js,less,html}]
indent_style = space
indent_size = 4

[**.{json}]
indent_style = space
indent_size = 2
```
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 6 lines changed)

```
@@ -7,12 +7,6 @@ about: Create a report to help us improve
##### ISSUE TYPE
- Bug Report

##### COMPONENT NAME
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
- API
- UI
- Installer

##### SUMMARY
<!-- Briefly describe the problem. -->
```
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 11 lines changed)

```
@@ -7,16 +7,5 @@ about: Suggest an idea for this project
##### ISSUE TYPE
- Feature Idea

##### COMPONENT NAME
<!-- Pick the area of AWX for this issue, you can have multiple, delete the rest: -->
- API
- UI
- Installer

##### SUMMARY
<!-- Briefly describe the problem or desired enhancement. -->

##### ADDITIONAL INFORMATION

<!-- Include any links to sosreport, database dumps, screenshots or other
information. -->
```
.gitignore (vendored, 2 lines changed)

```
@@ -8,6 +8,7 @@ reference-schema.json
.tags1

# Tower
awx-dev
awx/settings/local_*.py*
awx/*.sqlite3
awx/*.sqlite3_*
@@ -29,6 +30,7 @@ awx/ui/templates/ui/index.html
awx/ui/templates/ui/installing.html
/tower-license
/tower-license/**
tools/prometheus/data

# Tower setup playbook testing
setup/test/roles/postgresql
```
Deleted file (16 lines; name not shown in this view)

```
@@ -1,16 +0,0 @@
[DEFAULT]
archivedir = FIXME/deb-repo
mail_to =
verify_sigs = false
architectures = all, amd64
archive_style = flat
generate_release = true
mail_on_success = false
release_codename = ansible-tower
release_description = Ansible Tower
release_label = ansible-tower
release_origin = ansible-tower

[trusty]

[precise]
```
Deleted file (6 lines; name not shown in this view)

```
@@ -1,6 +0,0 @@
[MASTER]

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=site-packages,ui,migrations,data
```
Changed file (name not shown in this view)

````
@@ -83,12 +83,10 @@ If you're not using Docker for Mac, or Docker for Windows, you may need, or choo
(host)$ pip install docker-compose
```

#### Node and npm
#### Frontend Development

The AWX UI requires the following:
See [the ui development documentation](awx/ui/README.md).

- Node 8.x LTS
- NPM 6.x LTS

### Build the environment
````
INSTALL.md (35 lines changed)

````
@@ -27,7 +27,7 @@ This document provides a guide for installing AWX.
- [Start the build](#start-the-build-1)
- [Accessing AWX](#accessing-awx-1)
- [SSL Termination](#ssl-termination)
- [Docker or Docker Compose](#docker-or-docker-compose)
- [Docker Compose](#docker-compose)
- [Prerequisites](#prerequisites-3)
- [Pre-build steps](#pre-build-steps-2)
- [Deploying to a remote host](#deploying-to-a-remote-host)
@@ -59,10 +59,13 @@ Before you can run a deployment, you'll need the following installed in your loc

- [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) Requires Version 2.4+
- [Docker](https://docs.docker.com/engine/installation/)
- [docker-py](https://github.com/docker/docker-py) Python module
+ A recent version
- [docker](https://pypi.org/project/docker/) Python module
+ This is incompatible with `docker-py`. If you have previously installed `docker-py`, please uninstall it.
+ We use this module instead of `docker-py` because it is what the `docker-compose` Python module requires.
- [GNU Make](https://www.gnu.org/software/make/)
- [Git](https://git-scm.com/) Requires Version 1.8.4+
- [Node 8.x LTS version](https://nodejs.org/en/download/)
- [Node 10.x LTS version](https://nodejs.org/en/download/)
- [NPM 6.x LTS](https://docs.npmjs.com/)

### System Requirements
@@ -73,7 +76,7 @@ The system that runs the AWX service will need to satisfy the following requirem
- At least 2 cpu cores
- At least 20GB of space
- Running Docker, Openshift, or Kubernetes
- If you choose to use an external PostgreSQL database, please note that the minimum version is 9.4.
- If you choose to use an external PostgreSQL database, please note that the minimum version is 9.6+.

### AWX Tunables

@@ -81,14 +84,14 @@ The system that runs the AWX service will need to satisfy the following requirem

### Choose a deployment platform

We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, docker-compose or a standalone Docker daemon. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.
We currently support running AWX as a containerized application using Docker images deployed to either an OpenShift cluster, a Kubernetes cluster, or docker-compose. The remainder of this document will walk you through the process of building the images, and deploying them to either platform.

The [installer](./installer) directory contains an [inventory](./installer/inventory) file, and a playbook, [install.yml](./installer/install.yml). You'll begin by setting variables in the inventory file according to the platform you wish to use, and then you'll start the image build and deployment process by running the playbook.

In the sections below, you'll find deployment details and instructions for each platform:
- [OpenShift](#openshift)
- [Kubernetes](#kubernetes)
- [Docker or Docker Compose](#docker-or-docker-compose).
- [Docker Compose](#docker-compose).

### Official vs Building Images

@@ -391,14 +394,14 @@ If your provider is able to allocate an IP Address from the Ingress controller t
Unlike Openshift's `Route` the Kubernetes `Ingress` doesn't yet handle SSL termination. As such the default configuration will only expose AWX through HTTP on port 80. You are responsible for configuring SSL support until support is added (either to Kubernetes or AWX itself).


## Docker or Docker-Compose
## Docker-Compose

### Prerequisites

- [Docker](https://docs.docker.com/engine/installation/) on the host where AWX will be deployed. After installing Docker, the Docker service must be started (depending on your OS, you may have to add the local user that uses Docker to the ``docker`` group, refer to the documentation for details)
- [docker-py](https://github.com/docker/docker-py) Python module.

If you're installing using Docker Compose, you'll need [Docker Compose](https://docs.docker.com/compose/install/).
- [docker-compose](https://pypi.org/project/docker-compose/) Python module.
+ This also installs the `docker` Python module, which is incompatible with `docker-py`. If you have previously installed `docker-py`, please uninstall it.
- [Docker Compose](https://docs.docker.com/compose/install/).

### Pre-build steps

@@ -441,13 +444,17 @@ Before starting the build process, review the [inventory](./installer/inventory)

> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.

*use_docker_compose*
*host_port_ssl*

> Switch to ``true`` to use Docker Compose instead of the standalone Docker install.
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container for SSL support. Defaults to *443*, only works if you also set `ssl_certificate` (see below).

*ssl_certificate*

> Optionally, provide the path to a file that contains a certificate and its private key.

*docker_compose_dir*

When using docker-compose, the `docker-compose.yml` file will be created there (default `/var/lib/awx`).
> When using docker-compose, the `docker-compose.yml` file will be created there (default `/tmp/awxcompose`).

*ca_trust_dir*

@@ -527,7 +534,7 @@ After the playbook run completes, Docker will report up to 5 running containers.
```bash
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
e240ed8209cd awx_task:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 8052/tcp awx_task
1cfd02601690 awx_web:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 0.0.0.0:80->8052/tcp awx_web
1cfd02601690 awx_web:1.0.0.8 "/tini -- /bin/sh ..." 2 minutes ago Up About a minute 0.0.0.0:443->8052/tcp awx_web
55a552142bcd memcached:alpine "docker-entrypoint..." 2 minutes ago Up 2 minutes 11211/tcp memcached
84011c072aad rabbitmq:3 "docker-entrypoint..." 2 minutes ago Up 2 minutes 4369/tcp, 5671-5672/tcp, 25672/tcp rabbitmq
97e196120ab3 postgres:9.6 "docker-entrypoint..." 2 minutes ago Up 2 minutes 5432/tcp postgres
````
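The INSTALL.md prerequisites above note that the `docker` Python module (pulled in by `docker-compose`) cannot coexist with the older `docker-py` module. A minimal cleanup sketch of that advice, assuming `pip` manages the Python environment used to run the installer:

```bash
# Remove the conflicting legacy module first (safe to run even if it is absent),
# then install the modules the installer prerequisites list.
pip uninstall -y docker-py
pip install docker docker-compose
```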
Makefile (90 lines changed)

```
@@ -60,7 +60,7 @@ I18N_FLAG_FILE = .i18n_built

.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange dbshell runserver \
receiver test test_unit test_ansible test_coverage coverage_html \
receiver test test_unit test_coverage coverage_html \
dev_build release_build release_clean sdist \
ui-docker-machine ui-docker ui-release ui-devel \
ui-test ui-deps ui-test-ci VERSION
@@ -124,8 +124,8 @@ virtualenv_ansible:
if [ ! -d "$(VENV_BASE)/ansible" ]; then \
virtualenv -p python --system-site-packages $(VENV_BASE)/ansible && \
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed six packaging appdirs && \
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed setuptools==36.0.1 && \
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed pip==9.0.1; \
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed setuptools==41.0.1 && \
$(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --ignore-installed pip==19.1.1; \
fi; \
fi

@@ -134,8 +134,8 @@ virtualenv_ansible_py3:
if [ ! -d "$(VENV_BASE)" ]; then \
mkdir $(VENV_BASE); \
fi; \
if [ ! -d "$(VENV_BASE)/ansible3" ]; then \
python3 -m venv --system-site-packages $(VENV_BASE)/ansible3; \
if [ ! -d "$(VENV_BASE)/ansible" ]; then \
$(PYTHON) -m venv --system-site-packages $(VENV_BASE)/ansible; \
fi; \
fi

@@ -145,7 +145,8 @@ virtualenv_awx:
mkdir $(VENV_BASE); \
fi; \
if [ ! -d "$(VENV_BASE)/awx" ]; then \
$(PYTHON) -m venv $(VENV_BASE)/awx; \
$(PYTHON) -m venv --system-site-packages $(VENV_BASE)/awx; \
$(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --ignore-installed docutils==0.14; \
fi; \
fi

@@ -158,22 +159,18 @@ requirements_ansible: virtualenv_ansible
$(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt

requirements_ansible_py3: virtualenv_ansible_py3
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible3/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin
$(VENV_BASE)/ansible3/bin/pip3 install ansible # can't inherit from system ansible, it's py2
$(VENV_BASE)/ansible3/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --ignore-installed -r /dev/stdin ; \
else \
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
fi
$(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt

requirements_ansible_dev:
if [ "$(VENV_BASE)" ]; then \
$(VENV_BASE)/ansible/bin/pip install pytest mock; \
fi

requirements_isolated:
if [ ! -d "$(VENV_BASE)/awx" ]; then \
$(PYTHON) -m venv $(VENV_BASE)/awx; \
fi;
echo "include-system-site-packages = true" >> $(VENV_BASE)/awx/lib/python$(PYTHON_VERSION)/pyvenv.cfg
$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_isolated.txt

# Install third-party requirements needed for AWX's environment.
requirements_awx: virtualenv_awx
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
@@ -189,7 +186,7 @@ requirements_awx_dev:

requirements: requirements_ansible requirements_awx

requirements_dev: requirements requirements_ansible_py3 requirements_awx_dev requirements_ansible_dev
requirements_dev: requirements requirements_awx_dev requirements_ansible_dev

requirements_test: requirements

@@ -221,7 +218,7 @@ init:
if [ "$(AWX_GROUP_QUEUES)" == "tower,thepentagon" ]; then \
$(MANAGEMENT_COMMAND) provision_instance --hostname=isolated; \
$(MANAGEMENT_COMMAND) register_queue --queuename='thepentagon' --hostnames=isolated --controller=tower; \
$(MANAGEMENT_COMMAND) generate_isolated_key > /awx_devel/awx/main/expect/authorized_keys; \
$(MANAGEMENT_COMMAND) generate_isolated_key > /awx_devel/awx/main/isolated/authorized_keys; \
fi;

# Refresh development environment after pulling new code.
@@ -272,15 +269,7 @@ supervisor:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
supervisord --pidfile=/tmp/supervisor_pid

# Alternate approach to tmux to run all development tasks specified in
# Procfile.
honcho:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
honcho start -f tools/docker-compose/Procfile
supervisord --pidfile=/tmp/supervisor_pid -n

collectstatic:
@if [ "$(VENV_BASE)" ]; then \
@@ -292,7 +281,7 @@ uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1-once="exec:awx-manage run_dispatcher --reload"
uwsgi -b 32768 --socket 127.0.0.1:8050 --module=awx.wsgi:application --home=/venv/awx --chdir=/awx_devel/ --vacuum --processes=5 --harakiri=120 --master --no-orphans --py-autoreload 1 --max-requests=1000 --stats /tmp/stats.socket --lazy-apps --logformat "%(addr) %(method) %(uri) - %(proto) %(status)" --hook-accepting1="exec:supervisorctl restart tower-processes:awx-dispatcher tower-processes:awx-receiver"

daphne:
@if [ "$(VENV_BASE)" ]; then \
@@ -356,7 +345,8 @@ pylint: reports
@(set -o pipefail && $@ | reports/$@.report)

genschema: reports
$(MAKE) swagger PYTEST_ARGS="--genschema"
$(MAKE) swagger PYTEST_ARGS="--genschema --create-db "
mv swagger.json schema.json

swagger: reports
@if [ "$(VENV_BASE)" ]; then \
@@ -381,20 +371,12 @@ test:
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'

test_combined: test_ansible test

test_unit:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
py.test awx/main/tests/unit awx/conf/tests/unit awx/sso/tests/unit

test_ansible:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/ansible/bin/activate; \
fi; \
py.test awx/lib/tests -c awx/lib/tests/pytest.ini

# Run all API unit tests with coverage enabled.
test_coverage:
@if [ "$(VENV_BASE)" ]; then \
@@ -515,6 +497,10 @@ ui-devel: $(UI_DEPS_FLAG_FILE)
ui-test: $(UI_DEPS_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui run test

ui-lint: $(UI_DEPS_FLAG_FILE)
$(NPM_BIN) run --prefix awx/ui jshint
$(NPM_BIN) run --prefix awx/ui lint

# A standard go-to target for API developers to use building the frontend
ui: clean-ui ui-devel

@@ -565,22 +551,22 @@ docker-auth:
fi;

# Docker isolated rampart
docker-isolated:
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml create
docker start tools_awx_1
docker start tools_isolated_1
echo "__version__ = '`git describe --long | cut -d - -f 1-1`'" | docker exec -i tools_isolated_1 /bin/bash -c "cat > /venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.py"
docker-compose-isolated:
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-isolated-override.yml up

# Docker Compose Development environment
docker-compose: docker-auth
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx
CURRENT_UID=$(shell id -u) OS="$(shell docker info | grep 'Operating System')" TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml up --no-recreate awx

docker-compose-cluster: docker-auth
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml up

docker-compose-credential-plugins: docker-auth
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx

docker-compose-test: docker-auth
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash
cd tools && CURRENT_UID=$(shell id -u) OS="$(shell docker info | grep 'Operating System')" TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /bin/bash

docker-compose-runtest:
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
@@ -588,12 +574,7 @@ docker-compose-runtest:
docker-compose-build-swagger:
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger

docker-compose-genschema:
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh genschema
mv swagger.json schema.json

docker-compose-detect-schema-change:
$(MAKE) docker-compose-genschema
detect-schema-change: genschema
curl https://s3.amazonaws.com/awx-public-ci-files/schema.json -o reference-schema.json
# Ignore differences in whitespace with -b
diff -u -b reference-schema.json schema.json
@@ -606,12 +587,14 @@ docker-compose-build: awx-devel-build

# Base development image build
awx-devel-build:
docker build -t ansible/awx_devel -f tools/docker-compose/Dockerfile .
docker build -t ansible/awx_devel -f tools/docker-compose/Dockerfile \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:devel \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
docker tag ansible/awx_devel $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
#docker push $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)

# For use when developing on "isolated" AWX deployments
awx-isolated-build:
docker-compose-isolated-build: awx-devel-build
docker build -t ansible/awx_isolated -f tools/docker-isolated/Dockerfile .
docker tag ansible/awx_isolated $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
#docker push $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
@@ -631,6 +614,9 @@ docker-compose-elk: docker-auth
docker-compose-cluster-elk: docker-auth
TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose -f tools/docker-compose-cluster.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

prometheus:
docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml

minishift-dev:
ansible-playbook -i localhost, -e devtree_directory=$(CURDIR) tools/clusterdevel/start_minishift_dev.yml
```
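The `docker-compose` target shown above reads the `COMPOSE_TAG` and `DEV_DOCKER_TAG_BASE` variables when it brings up the development containers. A minimal invocation sketch (the registry value is a placeholder, not taken from this diff):

```bash
# Bring up the development environment defined in tools/docker-compose.yml,
# pulling/tagging images from a registry of your choosing.
make docker-compose COMPOSE_TAG=devel DEV_DOCKER_TAG_BASE=quay.io/example
```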
Changed source file (name not shown in this view)

```
@@ -37,9 +37,9 @@ if HAS_DJANGO is True:
# This line exists to make sure we don't regress on FIPS support if we
# upgrade Django; if you're upgrading Django and see this error,
# update the version check below, and confirm that FIPS still works.
if django.__version__ != '1.11.16':
raise RuntimeError("Django version other than 1.11.16 detected {}. \
Subclassing BaseDatabaseSchemaEditor is known to work for Django 1.11.16 \
if django.__version__ != '1.11.20':
raise RuntimeError("Django version other than 1.11.20 detected {}. \
Subclassing BaseDatabaseSchemaEditor is known to work for Django 1.11.20 \
and may not work in newer Django versions.".format(django.__version__))
```
Changed source file (name not shown in this view)

```
@@ -101,6 +101,10 @@ class DeprecatedCredentialField(serializers.IntegerField):
super(DeprecatedCredentialField, self).__init__(**kwargs)

def to_internal_value(self, pk):
try:
pk = int(pk)
except ValueError:
self.fail('invalid')
try:
Credential.objects.get(pk=pk)
except ObjectDoesNotExist:
```
Changed source file (name not shown in this view)

```
@@ -24,20 +24,6 @@ from rest_framework.filters import BaseFilterBackend
# AWX
from awx.main.utils import get_type_for_model, to_python_boolean
from awx.main.utils.db import get_all_field_names
from awx.main.models.credential import CredentialType


class V1CredentialFilterBackend(BaseFilterBackend):
'''
For /api/v1/ requests, filter out v2 (custom) credentials
'''

def filter_queryset(self, request, queryset, view):
# TODO: remove in 3.3
from awx.api.versioning import get_request_version
if get_request_version(request) == 1:
queryset = queryset.filter(credential_type__managed_by_tower=True)
return queryset


class TypeFilterBackend(BaseFilterBackend):
@@ -223,7 +209,7 @@ class FieldLookupBackend(BaseFilterBackend):
raise ValueError('%s is not searchable' % new_lookup[:-8])
new_lookups = []
for rm_field in related_model._meta.fields:
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description'):
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
return value, new_lookups
else:
@@ -292,39 +278,6 @@ class FieldLookupBackend(BaseFilterBackend):
key = key[5:]
q_not = True

# Make legacy v1 Job/Template fields work for backwards compatability
# TODO: remove after API v1 deprecation period
if queryset.model._meta.object_name in ('JobTemplate', 'Job') and key in (
'credential', 'vault_credential', 'cloud_credential', 'network_credential'
) or queryset.model._meta.object_name in ('InventorySource', 'InventoryUpdate') and key == 'credential':
key = 'credentials'

# Make legacy v1 Credential fields work for backwards compatability
# TODO: remove after API v1 deprecation period
#
# convert v1 `Credential.kind` queries to `Credential.credential_type__pk`
if queryset.model._meta.object_name == 'Credential' and key == 'kind':
key = key.replace('kind', 'credential_type')

if 'ssh' in values:
# In 3.2, SSH and Vault became separate credential types, but in the v1 API,
# they're both still "kind=ssh"
# under the hood, convert `/api/v1/credentials/?kind=ssh` to
# `/api/v1/credentials/?or__credential_type=<ssh_pk>&or__credential_type=<vault_pk>`
values = set(values)
values.add('vault')
values = list(values)
q_or = True

for i, kind in enumerate(values):
if kind == 'vault':
type_ = CredentialType.objects.get(kind=kind)
else:
type_ = CredentialType.from_v1_kind(kind)
if type_ is None:
raise ParseError(_('cannot filter on kind %s') % kind)
values[i] = type_.pk

# Convert value(s) to python and add to the appropriate list.
for value in values:
if q_int:
@@ -402,6 +355,8 @@ class OrderByBackend(BaseFilterBackend):
order_by = value.split(',')
else:
order_by = (value,)
if order_by is None:
order_by = self.get_default_ordering(view)
if order_by:
order_by = self._validate_ordering_fields(queryset.model, order_by)

@@ -428,6 +383,12 @@ class OrderByBackend(BaseFilterBackend):
# Return a 400 for invalid field names.
raise ParseError(*e.args)

def get_default_ordering(self, view):
ordering = getattr(view, 'ordering', None)
if isinstance(ordering, str):
return (ordering,)
return ordering

def _validate_ordering_fields(self, model, order_by):
for field_name in order_by:
# strip off the negation prefix `-` if it exists
```
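The removed compatibility shim above rewrote legacy v1 credential queries so that `kind=ssh` also matched Vault credentials. A hedged illustration of the request shapes its comments describe (the host and credentials are placeholders, and `<ssh_pk>`/`<vault_pk>` stand for credential type primary keys):

```bash
# Legacy v1 form handled by the shim:
curl -u admin:password 'https://awx.example.com/api/v1/credentials/?kind=ssh'
# What the shim translated it to internally:
curl -u admin:password 'https://awx.example.com/api/v1/credentials/?or__credential_type=<ssh_pk>&or__credential_type=<vault_pk>'
```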
Changed source file (name not shown in this view)

```
@@ -33,12 +33,20 @@ from rest_framework.negotiation import DefaultContentNegotiation

# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import * # noqa
from awx.main.models import (
UnifiedJob, UnifiedJobTemplate, User, Role, Credential
)
from awx.main.access import access_registry
from awx.main.utils import * # noqa
from awx.main.utils import (
camelcase_to_underscore,
get_search_fields,
getattrd,
get_object_or_400,
decrypt_field
)
from awx.main.utils.db import get_all_field_names
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
from awx.api.versioning import URLPathVersioning, get_request_version
from awx.api.versioning import URLPathVersioning
from awx.api.metadata import SublistAttachDetatchMetadata, Metadata

__all__ = ['APIView', 'GenericAPIView', 'ListAPIView', 'SimpleListAPIView',
@@ -92,6 +100,8 @@ class LoggedLoginView(auth_views.LoginView):

return ret
else:
if 'username' in self.request.POST:
logger.warn(smart_text(u"Login failed for user {} from {}".format(self.request.POST.get('username'),request.META.get('REMOTE_ADDR', None))))
ret.status_code = 401
return ret

@@ -109,39 +119,12 @@ class LoggedLogoutView(auth_views.LogoutView):
return ret


def get_view_name(cls, suffix=None):
'''
Wrapper around REST framework get_view_name() to support get_name() method
and view_name property on a view class.
'''
name = ''
if hasattr(cls, 'get_name') and callable(cls.get_name):
name = cls().get_name()
elif hasattr(cls, 'view_name'):
if callable(cls.view_name):
name = cls.view_name()
else:
name = cls.view_name
if name:
return ('%s %s' % (name, suffix)) if suffix else name
return views.get_view_name(cls, suffix=None)
def get_view_description(view, html=False):
'''Wrapper around REST framework get_view_description() to continue
to support our historical div.


def get_view_description(cls, request, html=False):
'''
Wrapper around REST framework get_view_description() to support
get_description() method and view_description property on a view class.
'''
if hasattr(cls, 'get_description') and callable(cls.get_description):
desc = cls().get_description(request, html=html)
cls = type(cls.__name__, (object,), {'__doc__': desc})
elif hasattr(cls, 'view_description'):
if callable(cls.view_description):
view_desc = cls.view_description()
else:
view_desc = cls.view_description
cls = type(cls.__name__, (object,), {'__doc__': view_desc})
desc = views.get_view_description(cls, html=html)
desc = views.get_view_description(view, html=html)
if html:
desc = '<div class="description">%s</div>' % desc
return mark_safe(desc)
@@ -254,14 +237,6 @@ class APIView(views.APIView):
# `curl https://user:pass@tower.example.org/api/v2/job_templates/N/launch/`
return 'Bearer realm=api authorization_url=/api/o/authorize/'

def get_view_description(self, html=False):
"""
Return some descriptive text for the view, as used in OPTIONS responses
and in the browsable API.
"""
func = self.settings.VIEW_DESCRIPTION_FUNCTION
return func(self.__class__, getattr(self, '_request', None), html)

def get_description_context(self):
return {
'view': self,
@@ -270,20 +245,14 @@ class APIView(views.APIView):
'swagger_method': getattr(self.request, 'swagger_method', None),
}

def get_description(self, request, html=False):
self.request = request
@property
def description(self):
template_list = []
for klass in inspect.getmro(type(self)):
template_basename = camelcase_to_underscore(klass.__name__)
template_list.append('api/%s.md' % template_basename)
context = self.get_description_context()

# "v2" -> 2
default_version = int(settings.REST_FRAMEWORK['DEFAULT_VERSION'].lstrip('v'))
request_version = get_request_version(self.request)
if request_version is not None and request_version < default_version:
context['deprecated'] = True

description = render_to_string(template_list, context)
if context.get('deprecated') and context.get('swagger_method') is None:
# render deprecation messages at the very top
@@ -379,12 +348,14 @@ class GenericAPIView(generics.GenericAPIView, APIView):
'model_verbose_name_plural': smart_text(self.model._meta.verbose_name_plural),
})
serializer = self.get_serializer()
metadata = self.metadata_class()
metadata.request = self.request
for method, key in [
('GET', 'serializer_fields'),
('POST', 'serializer_create_fields'),
('PUT', 'serializer_update_fields')
]:
d[key] = self.metadata_class().get_serializer_info(serializer, method=method)
d[key] = metadata.get_serializer_info(serializer, method=method)
d['settings'] = settings
return d

@@ -805,6 +776,7 @@ class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
class ResourceAccessList(ParentMixin, ListAPIView):

serializer_class = ResourceAccessListElementSerializer
ordering = ('username',)

def get_queryset(self):
obj = self.get_parent_object()
@@ -831,10 +803,6 @@ class CopyAPIView(GenericAPIView):
new_in_330 = True
new_in_api_v2 = True

def v1_not_allowed(self):
return Response({'detail': 'Action only possible starting with v2 API.'},
status=status.HTTP_404_NOT_FOUND)

def _get_copy_return_serializer(self, *args, **kwargs):
if not self.copy_return_serializer_class:
return self.get_serializer(*args, **kwargs)
@@ -848,15 +816,15 @@ class CopyAPIView(GenericAPIView):
def _decrypt_model_field_if_needed(obj, field_name, field_val):
if field_name in getattr(type(obj), 'REENCRYPTION_BLACKLIST_AT_COPY', []):
return field_val
if isinstance(field_val, dict):
if isinstance(obj, Credential) and field_name == 'inputs':
for secret in obj.credential_type.secret_fields:
if secret in field_val:
field_val[secret] = decrypt_field(obj, secret)
elif isinstance(field_val, dict):
for sub_field in field_val:
if isinstance(sub_field, str) \
and isinstance(field_val[sub_field], str):
try:
field_val[sub_field] = decrypt_field(obj, field_name, sub_field)
except AttributeError:
# Catching the corner case with v1 credential fields
field_val[sub_field] = decrypt_field(obj, sub_field)
field_val[sub_field] = decrypt_field(obj, field_name, sub_field)
elif isinstance(field_val, str):
try:
field_val = decrypt_field(obj, field_name)
@@ -941,21 +909,20 @@ class CopyAPIView(GenericAPIView):
return ret

def get(self, request, *args, **kwargs):
if get_request_version(request) < 2:
return self.v1_not_allowed()
obj = self.get_object()
if not request.user.can_access(obj.__class__, 'read', obj):
raise PermissionDenied()
create_kwargs = self._build_create_dict(obj)
for key in create_kwargs:
create_kwargs[key] = getattr(create_kwargs[key], 'pk', None) or create_kwargs[key]
can_copy = request.user.can_access(self.model, 'add', create_kwargs) and \
request.user.can_access(self.model, 'copy_related', obj)
try:
can_copy = request.user.can_access(self.model, 'add', create_kwargs) and \
request.user.can_access(self.model, 'copy_related', obj)
except PermissionDenied:
return Response({'can_copy': False})
return Response({'can_copy': can_copy})

def post(self, request, *args, **kwargs):
if get_request_version(request) < 2:
return self.v1_not_allowed()
obj = self.get_object()
create_kwargs = self._build_create_dict(obj)
create_kwargs_check = {}
@@ -972,7 +939,7 @@ class CopyAPIView(GenericAPIView):
None, None, self.model, obj, request.user, create_kwargs=create_kwargs,
copy_name=serializer.validated_data.get('name', '')
)
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role:
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
new_obj.admin_role.members.add(request.user)
if sub_objs:
permission_check_func = None
```
@@ -232,28 +232,13 @@ class RoleMetadata(Metadata):
|
||||
return metadata
|
||||
|
||||
|
||||
# TODO: Tower 3.3 remove class and all uses in views.py when API v1 is removed
|
||||
class JobTypeMetadata(Metadata):
|
||||
def get_field_info(self, field):
|
||||
res = super(JobTypeMetadata, self).get_field_info(field)
|
||||
|
||||
if field.field_name == 'job_type':
|
||||
index = 0
|
||||
for choice in res['choices']:
|
||||
if choice[0] == 'scan':
|
||||
res['choices'].pop(index)
|
||||
break
|
||||
index += 1
|
||||
return res
|
||||
|
||||
|
||||
class SublistAttachDetatchMetadata(Metadata):
|
||||
|
||||
def determine_actions(self, request, view):
|
||||
actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
|
||||
method = 'POST'
|
||||
if method in actions:
|
||||
for field in actions[method]:
|
||||
for field in list(actions[method].keys()):
|
||||
if field == 'id':
|
||||
continue
|
||||
actions[method].pop(field)
|
||||
|
||||
15
awx/api/metrics.py
Normal file
@@ -0,0 +1,15 @@
|
||||
# Copyright (c) 2017 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from django.conf.urls import url
|
||||
|
||||
from awx.api.views import (
|
||||
MetricsView
|
||||
)
|
||||
|
||||
|
||||
urls = [
|
||||
url(r'^$', MetricsView.as_view(), name='metrics_view'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
@@ -18,7 +18,7 @@ class Pagination(pagination.PageNumberPagination):
|
||||
url = self.request and self.request.get_full_path() or ''
|
||||
url = url.encode('utf-8')
|
||||
page_number = self.page.next_page_number()
|
||||
return replace_query_param(url, self.page_query_param, page_number)
|
||||
return replace_query_param(self.cap_page_size(url), self.page_query_param, page_number)
|
||||
|
||||
def get_previous_link(self):
|
||||
if not self.page.has_previous():
|
||||
@@ -26,4 +26,16 @@ class Pagination(pagination.PageNumberPagination):
|
||||
url = self.request and self.request.get_full_path() or ''
|
||||
url = url.encode('utf-8')
|
||||
page_number = self.page.previous_page_number()
|
||||
return replace_query_param(url, self.page_query_param, page_number)
|
||||
return replace_query_param(self.cap_page_size(url), self.page_query_param, page_number)
|
||||
|
||||
def cap_page_size(self, url):
|
||||
if int(self.request.query_params.get(self.page_size_query_param, 0)) > self.max_page_size:
|
||||
url = replace_query_param(url, self.page_size_query_param, self.max_page_size)
|
||||
return url
|
||||
|
||||
def get_html_context(self):
|
||||
context = super().get_html_context()
|
||||
context['page_links'] = [pl._replace(url=self.cap_page_size(pl.url))
|
||||
for pl in context['page_links']]
|
||||
|
||||
return context
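The pagination hunk above clamps an oversized `page_size` query parameter to `max_page_size` before the next/previous/page links are emitted (the real code lives on the DRF paginator and routes the URL through `replace_query_param` via `cap_page_size`). Below is a minimal standalone sketch of the same clamping idea; the parameter name `page_size` and the cap of 200 are assumptions for illustration only, not the AWX values.

```python
# Illustrative sketch of capping an oversized page_size value in a URL.
# Not the AWX implementation: it avoids DRF helpers and hard-codes a cap.
from urllib.parse import urlencode, urlparse, parse_qs, urlunparse

MAX_PAGE_SIZE = 200  # assumed cap, mirrors max_page_size on the paginator

def cap_page_size(url, param='page_size', max_size=MAX_PAGE_SIZE):
    """Rewrite `url` so an oversized page-size query value is clamped."""
    parts = urlparse(url)
    query = parse_qs(parts.query)
    requested = int(query.get(param, ['0'])[0] or 0)
    if requested > max_size:
        query[param] = [str(max_size)]
    return urlunparse(parts._replace(query=urlencode(query, doseq=True)))

# Example: a request for 10000 items per page is clamped before the
# pagination link is handed back to the client.
print(cap_page_size('/api/v2/jobs/?page=3&page_size=10000'))
# -> /api/v2/jobs/?page=3&page_size=200
```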
|
||||
|
||||
@@ -9,13 +9,13 @@ from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
|
||||
from rest_framework import permissions
|
||||
|
||||
# AWX
|
||||
from awx.main.access import * # noqa
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.access import check_user_access
|
||||
from awx.main.models import Inventory, UnifiedJob
|
||||
from awx.main.utils import get_object_or_400
|
||||
|
||||
logger = logging.getLogger('awx.api.permissions')
|
||||
|
||||
__all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission',
|
||||
__all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission', 'VariableDataPermission',
|
||||
'TaskPermission', 'ProjectUpdatePermission', 'InventoryInventorySourcesUpdatePermission',
|
||||
'UserPermission', 'IsSuperUser', 'InstanceGroupTowerPermission',]
|
||||
|
||||
@@ -74,12 +74,8 @@ class ModelAccessPermission(permissions.BasePermission):
|
||||
# FIXME: For some reason this needs to return True
|
||||
# because it is first called with obj=None?
|
||||
return True
|
||||
if getattr(view, 'is_variable_data', False):
|
||||
return check_user_access(request.user, view.model, 'change', obj,
|
||||
dict(variables=request.data))
|
||||
else:
|
||||
return check_user_access(request.user, view.model, 'change', obj,
|
||||
request.data)
|
||||
return check_user_access(request.user, view.model, 'change', obj,
|
||||
request.data)
|
||||
|
||||
def check_patch_permissions(self, request, view, obj=None):
|
||||
return self.check_put_permissions(request, view, obj)
|
||||
@@ -103,8 +99,7 @@ class ModelAccessPermission(permissions.BasePermission):
|
||||
return False
|
||||
|
||||
# Always allow superusers
|
||||
if getattr(view, 'always_allow_superuser', True) and request.user.is_superuser \
|
||||
and not hasattr(request.user, 'oauth_scopes'):
|
||||
if getattr(view, 'always_allow_superuser', True) and request.user.is_superuser:
|
||||
return True
|
||||
|
||||
# Check if view supports the request method before checking permission
|
||||
@@ -164,6 +159,15 @@ class JobTemplateCallbackPermission(ModelAccessPermission):
|
||||
return True
|
||||
|
||||
|
||||
class VariableDataPermission(ModelAccessPermission):
|
||||
|
||||
def check_put_permissions(self, request, view, obj=None):
|
||||
if not obj:
|
||||
return True
|
||||
return check_user_access(request.user, view.model, 'change', obj,
|
||||
dict(variables=request.data))
|
||||
|
||||
|
||||
class TaskPermission(ModelAccessPermission):
|
||||
'''
|
||||
Permission checks used for API callbacks from running a task.
|
||||
|
||||
File diff suppressed because it is too large
@@ -13,6 +13,17 @@ from rest_framework.views import APIView
|
||||
from rest_framework_swagger import renderers
|
||||
|
||||
|
||||
class SuperUserSchemaGenerator(SchemaGenerator):
|
||||
|
||||
def has_view_permissions(self, path, method, view):
|
||||
#
|
||||
# Generate the Swagger schema as if you were a superuser and
|
||||
# permissions didn't matter; this short-circuits the schema path
|
||||
# discovery to include _all_ potential paths in the API.
|
||||
#
|
||||
return True
|
||||
|
||||
|
||||
class AutoSchema(DRFAuthSchema):
|
||||
|
||||
def get_link(self, path, method, base_url):
|
||||
@@ -42,7 +53,6 @@ class AutoSchema(DRFAuthSchema):
|
||||
return link
|
||||
|
||||
def get_description(self, path, method):
|
||||
self.view._request = self.view.request
|
||||
setattr(self.view.request, 'swagger_method', method)
|
||||
description = super(AutoSchema, self).get_description(path, method)
|
||||
return description
|
||||
@@ -59,7 +69,7 @@ class SwaggerSchemaView(APIView):
|
||||
]
|
||||
|
||||
def get(self, request):
|
||||
generator = SchemaGenerator(
|
||||
generator = SuperUserSchemaGenerator(
|
||||
title='Ansible Tower API',
|
||||
patterns=None,
|
||||
urlconf=None
|
||||
|
||||
@@ -5,7 +5,7 @@ The following lists the expected format and details of our rrules:
|
||||
* INTERVAL is required
|
||||
* SECONDLY is not supported
|
||||
* TZID is not supported
|
||||
* RRULE must preceed the rule statements
|
||||
* RRULE must precede the rule statements
|
||||
* BYDAY is supported but not BYDAY with a numerical prefix
|
||||
* BYYEARDAY and BYWEEKNO are not supported
|
||||
* Only one rrule statement per schedule is supported
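An illustrative rrule string that satisfies the constraints listed above (this example is not taken from the AWX docs; the date is arbitrary):

```text
DTSTART:20380601T120000Z RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=MO,WE,FR
```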
|
||||
|
||||
@@ -29,17 +29,6 @@ to the redirect_uri specified in the application. The client application will th
|
||||
AWX will respond with the `access_token`, `token_type`, `refresh_token`, and `expires_in`. For more
|
||||
information on testing this flow, refer to [django-oauth-toolkit](http://django-oauth-toolkit.readthedocs.io/en/latest/tutorial/tutorial_01.html#test-your-authorization-server).
|
||||
|
||||
## Create Token for an Application using Implicit grant type
|
||||
Suppose we have an application "admin's app" of grant type `implicit`.
|
||||
In API browser, first make sure the user is logged in via session auth, then visit authorization
|
||||
endpoint with given parameters:
|
||||
```text
|
||||
http://localhost:8013/api/o/authorize/?response_type=token&client_id=L0uQQWW8pKX51hoqIRQGsuqmIdPi2AcXZ9EJRGmj&scope=read
|
||||
```
|
||||
Here the value of `client_id` should be the same as that of `client_id` field of underlying application.
|
||||
On success, an authorization page should be displayed asking the logged in user to grant/deny the access token.
|
||||
Once the user clicks on 'grant', the API browser will try POSTing to the same endpoint with the same parameters
|
||||
in POST body, on success a 302 redirect will be returned.
|
||||
|
||||
## Create Token for an Application using Password grant type
|
||||
|
||||
@@ -56,6 +45,7 @@ For example:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-d "grant_type=password&username=<username>&password=<password>&scope=read" \
|
||||
-u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569e
|
||||
IaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
|
||||
@@ -85,6 +75,7 @@ format:
|
||||
The `/api/o/token/` endpoint is used for refreshing access token:
|
||||
```bash
|
||||
curl -X POST \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-d "grant_type=refresh_token&refresh_token=AL0NK9TTpv0qp54dGbC4VUZtsZ9r8z" \
|
||||
-u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
|
||||
http://localhost:8013/api/o/token/ -i
|
||||
@@ -114,6 +105,7 @@ Revoking is done by POSTing to `/api/o/revoke_token/` with the token to revoke a
|
||||
|
||||
```bash
|
||||
curl -X POST -d "token=rQONsve372fQwuc2pn76k3IHDCYpi7" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-u "gwSPoasWSdNkMDtBN3Hu2WYQpPWCO9SwUEsKK22l:fI6ZpfocHYBGfm1tP92r0yIgCyfRdDQt0Tos9L8a4fNsJjQQMwp9569eIaUBsaVDgt2eiwOGe0bg5m5vCSstClZmtdy359RVx2rQK5YlIWyPlrolpt2LEpVeKXWaiybo" \
|
||||
http://localhost:8013/api/o/revoke_token/ -i
|
||||
```
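For comparison, a hedged Python `requests` equivalent of the revocation call shown in the curl example above; the host, client credentials, and token are the same placeholders used there, not real values.

```python
# Sketch only: revoke an OAuth2 token the same way the curl example does.
# Client id/secret go over HTTP Basic auth, mirroring `curl -u`.
import requests

response = requests.post(
    "http://localhost:8013/api/o/revoke_token/",
    data={"token": "rQONsve372fQwuc2pn76k3IHDCYpi7"},  # placeholder token
    auth=("<client_id>", "<client_secret>"),
)
# A 2xx response indicates the token was revoked.
print(response.status_code)
```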
|
||||
|
||||
@@ -8,15 +8,15 @@ job template.
|
||||
|
||||
For example, using curl:
|
||||
|
||||
curl -H "Content-Type: application/json" -d '{"host_config_key": "HOST_CONFIG_KEY"}' http://server/api/v1/job_templates/N/callback/
|
||||
curl -H "Content-Type: application/json" -d '{"host_config_key": "HOST_CONFIG_KEY"}' http://server/api/v2/job_templates/N/callback/
|
||||
|
||||
Or using wget:
|
||||
|
||||
wget -O /dev/null --post-data='{"host_config_key": "HOST_CONFIG_KEY"}' --header=Content-Type:application/json http://server/api/v1/job_templates/N/callback/
|
||||
wget -O /dev/null --post-data='{"host_config_key": "HOST_CONFIG_KEY"}' --header=Content-Type:application/json http://server/api/v2/job_templates/N/callback/
|
||||
|
||||
You may also pass `extra_vars` to the callback:
|
||||
|
||||
curl -H "Content-Type: application/json" -d '{"host_config_key": "HOST_CONFIG_KEY", "extra_vars": {"key": "value"}}' http://server/api/v1/job_templates/N/callback/
|
||||
curl -H "Content-Type: application/json" -d '{"host_config_key": "HOST_CONFIG_KEY", "extra_vars": {"key": "value"}}' http://server/api/v2/job_templates/N/callback/
|
||||
|
||||
The response will return status 202 if the request is valid, 403 for an
|
||||
invalid host config key, or 400 if the host cannot be determined from the
|
||||
@@ -30,7 +30,7 @@ A GET request may be used to verify that the correct host will be selected.
|
||||
This request must authenticate as a valid user with permission to edit the
|
||||
job template. For example:
|
||||
|
||||
curl http://user:password@server/api/v1/job_templates/N/callback/
|
||||
curl http://user:password@server/api/v2/job_templates/N/callback/
|
||||
|
||||
The response will include the host config key as well as the host name(s)
|
||||
that would match the request:
|
||||
|
||||
@@ -3,7 +3,7 @@ Launch a Job Template:
|
||||
Make a POST request to this resource to launch the system job template.
|
||||
|
||||
Variables specified inside of the parameter `extra_vars` are passed to the
|
||||
system job task as command line parameters. These tasks can be ran manually
|
||||
system job task as command line parameters. These tasks can be run manually
|
||||
on the host system via the `awx-manage` command.
|
||||
|
||||
For example on `cleanup_jobs` and `cleanup_activitystream`:
|
||||
|
||||
@@ -6,4 +6,4 @@ One result should be returned containing the following fields:
|
||||
|
||||
{% include "api/_result_fields_common.md" %}
|
||||
|
||||
Use the primary URL for the user (/api/v1/users/N/) to modify the user.
|
||||
Use the primary URL for the user (/api/v2/users/N/) to modify the user.
|
||||
|
||||
@@ -4,4 +4,7 @@
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
from .urls import urlpatterns
|
||||
|
||||
__all__ = ['urlpatterns']
|
||||
__all__ = ['urlpatterns', 'app_name']
|
||||
|
||||
|
||||
app_name = 'api'
|
||||
|
||||
@@ -12,6 +12,8 @@ from awx.api.views import (
|
||||
CredentialOwnerUsersList,
|
||||
CredentialOwnerTeamsList,
|
||||
CredentialCopy,
|
||||
CredentialInputSourceSubList,
|
||||
CredentialExternalTest,
|
||||
)
|
||||
|
||||
|
||||
@@ -24,6 +26,8 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/owner_users/$', CredentialOwnerUsersList.as_view(), name='credential_owner_users_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/owner_teams/$', CredentialOwnerTeamsList.as_view(), name='credential_owner_teams_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/copy/$', CredentialCopy.as_view(), name='credential_copy'),
|
||||
url(r'^(?P<pk>[0-9]+)/input_sources/$', CredentialInputSourceSubList.as_view(), name='credential_input_source_sublist'),
|
||||
url(r'^(?P<pk>[0-9]+)/test/$', CredentialExternalTest.as_view(), name='credential_external_test'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
|
||||
17
awx/api/urls/credential_input_source.py
Normal file
@@ -0,0 +1,17 @@
|
||||
# Copyright (c) 2019 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from django.conf.urls import url
|
||||
|
||||
from awx.api.views import (
|
||||
CredentialInputSourceDetail,
|
||||
CredentialInputSourceList,
|
||||
)
|
||||
|
||||
|
||||
urls = [
|
||||
url(r'^$', CredentialInputSourceList.as_view(), name='credential_input_source_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/$', CredentialInputSourceDetail.as_view(), name='credential_input_source_detail'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
@@ -8,6 +8,7 @@ from awx.api.views import (
|
||||
CredentialTypeDetail,
|
||||
CredentialTypeCredentialList,
|
||||
CredentialTypeActivityStreamList,
|
||||
CredentialTypeExternalTest,
|
||||
)
|
||||
|
||||
|
||||
@@ -16,6 +17,7 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/$', CredentialTypeDetail.as_view(), name='credential_type_detail'),
|
||||
url(r'^(?P<pk>[0-9]+)/credentials/$', CredentialTypeCredentialList.as_view(), name='credential_type_credential_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/activity_stream/$', CredentialTypeActivityStreamList.as_view(), name='credential_type_activity_stream_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/test/$', CredentialTypeExternalTest.as_view(), name='credential_type_external_test'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
|
||||
@@ -16,8 +16,6 @@ from awx.api.views import (
|
||||
HostSmartInventoriesList,
|
||||
HostAdHocCommandsList,
|
||||
HostAdHocCommandEventsList,
|
||||
HostFactVersionsList,
|
||||
HostFactCompareView,
|
||||
HostInsights,
|
||||
)
|
||||
|
||||
@@ -35,8 +33,6 @@ urls = [
|
||||
url(r'^(?P<pk>[0-9]+)/smart_inventories/$', HostSmartInventoriesList.as_view(), name='host_smart_inventories_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/ad_hoc_commands/$', HostAdHocCommandsList.as_view(), name='host_ad_hoc_commands_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/ad_hoc_command_events/$', HostAdHocCommandEventsList.as_view(), name='host_ad_hoc_command_events_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/fact_versions/$', HostFactVersionsList.as_view(), name='host_fact_versions_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/fact_view/$', HostFactCompareView.as_view(), name='host_fact_compare_view'),
|
||||
url(r'^(?P<pk>[0-9]+)/insights/$', HostInsights.as_view(), name='host_insights'),
|
||||
]
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ from django.conf.urls import url
|
||||
from awx.api.views import (
|
||||
JobList,
|
||||
JobDetail,
|
||||
JobStart,
|
||||
JobCancel,
|
||||
JobRelaunch,
|
||||
JobCreateSchedule,
|
||||
@@ -23,7 +22,6 @@ from awx.api.views import (
|
||||
urls = [
|
||||
url(r'^$', JobList.as_view(), name='job_list'),
|
||||
url(r'^(?P<pk>[0-9]+)/$', JobDetail.as_view(), name='job_detail'),
|
||||
url(r'^(?P<pk>[0-9]+)/start/$', JobStart.as_view(), name='job_start'), # Todo: Remove In 3.3
|
||||
url(r'^(?P<pk>[0-9]+)/cancel/$', JobCancel.as_view(), name='job_cancel'),
|
||||
url(r'^(?P<pk>[0-9]+)/relaunch/$', JobRelaunch.as_view(), name='job_relaunch'),
|
||||
url(r'^(?P<pk>[0-9]+)/create_schedule/$', JobCreateSchedule.as_view(), name='job_create_schedule'),
|
||||
|
||||
@@ -11,10 +11,9 @@ from awx.api.generics import (
|
||||
)
|
||||
from awx.api.views import (
|
||||
ApiRootView,
|
||||
ApiV1RootView,
|
||||
ApiV2RootView,
|
||||
ApiV1PingView,
|
||||
ApiV1ConfigView,
|
||||
ApiV2PingView,
|
||||
ApiV2ConfigView,
|
||||
AuthView,
|
||||
UserMeList,
|
||||
DashboardView,
|
||||
@@ -34,6 +33,8 @@ from awx.api.views import (
|
||||
OAuth2ApplicationDetail,
|
||||
)
|
||||
|
||||
from awx.api.views.metrics import MetricsView
|
||||
|
||||
from .organization import urls as organization_urls
|
||||
from .user import urls as user_urls
|
||||
from .project import urls as project_urls
|
||||
@@ -47,6 +48,7 @@ from .inventory_update import urls as inventory_update_urls
|
||||
from .inventory_script import urls as inventory_script_urls
|
||||
from .credential_type import urls as credential_type_urls
|
||||
from .credential import urls as credential_urls
|
||||
from .credential_input_source import urls as credential_input_source_urls
|
||||
from .role import urls as role_urls
|
||||
from .job_template import urls as job_template_urls
|
||||
from .job import urls as job_urls
|
||||
@@ -71,10 +73,25 @@ from .oauth2 import urls as oauth2_urls
|
||||
from .oauth2_root import urls as oauth2_root_urls
|
||||
|
||||
|
||||
v1_urls = [
|
||||
url(r'^$', ApiV1RootView.as_view(), name='api_v1_root_view'),
|
||||
url(r'^ping/$', ApiV1PingView.as_view(), name='api_v1_ping_view'),
|
||||
url(r'^config/$', ApiV1ConfigView.as_view(), name='api_v1_config_view'),
|
||||
v2_urls = [
|
||||
url(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
|
||||
url(r'^credential_types/', include(credential_type_urls)),
|
||||
url(r'^credential_input_sources/', include(credential_input_source_urls)),
|
||||
url(r'^hosts/(?P<pk>[0-9]+)/ansible_facts/$', HostAnsibleFactsDetail.as_view(), name='host_ansible_facts_detail'),
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/extra_credentials/$', JobExtraCredentialsList.as_view(), name='job_extra_credentials_list'),
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/credentials/$', JobCredentialsList.as_view(), name='job_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/extra_credentials/$', JobTemplateExtraCredentialsList.as_view(), name='job_template_extra_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/credentials/$', JobTemplateCredentialsList.as_view(), name='job_template_credentials_list'),
|
||||
url(r'^schedules/preview/$', SchedulePreview.as_view(), name='schedule_rrule'),
|
||||
url(r'^schedules/zoneinfo/$', ScheduleZoneInfo.as_view(), name='schedule_zoneinfo'),
|
||||
url(r'^applications/$', OAuth2ApplicationList.as_view(), name='o_auth2_application_list'),
|
||||
url(r'^applications/(?P<pk>[0-9]+)/$', OAuth2ApplicationDetail.as_view(), name='o_auth2_application_detail'),
|
||||
url(r'^applications/(?P<pk>[0-9]+)/tokens/$', ApplicationOAuth2TokenList.as_view(), name='application_o_auth2_token_list'),
|
||||
url(r'^tokens/$', OAuth2TokenList.as_view(), name='o_auth2_token_list'),
|
||||
url(r'^', include(oauth2_urls)),
|
||||
url(r'^metrics/$', MetricsView.as_view(), name='metrics_view'),
|
||||
url(r'^ping/$', ApiV2PingView.as_view(), name='api_v2_ping_view'),
|
||||
url(r'^config/$', ApiV2ConfigView.as_view(), name='api_v2_config_view'),
|
||||
url(r'^auth/$', AuthView.as_view()),
|
||||
url(r'^me/$', UserMeList.as_view(), name='user_me_list'),
|
||||
url(r'^dashboard/$', DashboardView.as_view(), name='dashboard_view'),
|
||||
@@ -116,28 +133,10 @@ v1_urls = [
|
||||
url(r'^activity_stream/', include(activity_stream_urls)),
|
||||
]
|
||||
|
||||
v2_urls = [
|
||||
url(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
|
||||
url(r'^credential_types/', include(credential_type_urls)),
|
||||
url(r'^hosts/(?P<pk>[0-9]+)/ansible_facts/$', HostAnsibleFactsDetail.as_view(), name='host_ansible_facts_detail'),
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/extra_credentials/$', JobExtraCredentialsList.as_view(), name='job_extra_credentials_list'),
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/credentials/$', JobCredentialsList.as_view(), name='job_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/extra_credentials/$', JobTemplateExtraCredentialsList.as_view(), name='job_template_extra_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/credentials/$', JobTemplateCredentialsList.as_view(), name='job_template_credentials_list'),
|
||||
url(r'^schedules/preview/$', SchedulePreview.as_view(), name='schedule_rrule'),
|
||||
url(r'^schedules/zoneinfo/$', ScheduleZoneInfo.as_view(), name='schedule_zoneinfo'),
|
||||
url(r'^applications/$', OAuth2ApplicationList.as_view(), name='o_auth2_application_list'),
|
||||
url(r'^applications/(?P<pk>[0-9]+)/$', OAuth2ApplicationDetail.as_view(), name='o_auth2_application_detail'),
|
||||
url(r'^applications/(?P<pk>[0-9]+)/tokens/$', ApplicationOAuth2TokenList.as_view(), name='application_o_auth2_token_list'),
|
||||
url(r'^tokens/$', OAuth2TokenList.as_view(), name='o_auth2_token_list'),
|
||||
url(r'^', include(oauth2_urls)),
|
||||
]
|
||||
|
||||
app_name = 'api'
|
||||
urlpatterns = [
|
||||
url(r'^$', ApiRootView.as_view(), name='api_root_view'),
|
||||
url(r'^(?P<version>(v2))/', include(v2_urls)),
|
||||
url(r'^(?P<version>(v1|v2))/', include(v1_urls)),
|
||||
url(r'^login/$', LoggedLoginView.as_view(
|
||||
template_name='rest_framework/login.html',
|
||||
extra_context={'inside_login_context': True}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.urlresolvers import NoReverseMatch
|
||||
from django.urls import NoReverseMatch
|
||||
|
||||
from rest_framework.reverse import _reverse
|
||||
from rest_framework.versioning import URLPathVersioning as BaseVersioning
|
||||
@@ -27,19 +27,6 @@ def drf_reverse(viewname, args=None, kwargs=None, request=None, format=None, **e
|
||||
return url
|
||||
|
||||
|
||||
def get_request_version(request):
|
||||
"""
|
||||
The API version of a request as an integer i.e., 1 or 2
|
||||
"""
|
||||
version = settings.REST_FRAMEWORK['DEFAULT_VERSION']
|
||||
if request and hasattr(request, 'version'):
|
||||
version = request.version
|
||||
if version is None:
|
||||
# For requests to /api/
|
||||
return None
|
||||
return int(version.lstrip('v'))
|
||||
|
||||
|
||||
def reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
|
||||
if request is None or getattr(request, 'version', None) is None:
|
||||
# We need the "current request" to determine the correct version to
|
||||
|
||||
File diff suppressed because it is too large
@@ -44,11 +44,9 @@ from awx.api.serializers import (
|
||||
InstanceGroupSerializer,
|
||||
InventoryUpdateEventSerializer,
|
||||
CustomInventoryScriptSerializer,
|
||||
InventoryDetailSerializer,
|
||||
JobTemplateSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import (
|
||||
ActivityStreamEnforcementMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
ControlledByScmMixin,
|
||||
)
|
||||
@@ -62,7 +60,7 @@ class InventoryUpdateEventsList(SubListAPIView):
|
||||
serializer_class = InventoryUpdateEventSerializer
|
||||
parent_model = InventoryUpdate
|
||||
relationship = 'inventory_update_events'
|
||||
view_name = _('Inventory Update Events List')
|
||||
name = _('Inventory Update Events List')
|
||||
search_fields = ('stdout',)
|
||||
|
||||
def finalize_response(self, request, response, *args, **kwargs):
|
||||
@@ -116,17 +114,11 @@ class InventoryList(ListCreateAPIView):
|
||||
model = Inventory
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = Inventory.accessible_objects(self.request.user, 'read_role')
|
||||
qs = qs.select_related('admin_role', 'read_role', 'update_role', 'use_role', 'adhoc_role')
|
||||
qs = qs.prefetch_related('created_by', 'modified_by', 'organization')
|
||||
return qs
|
||||
|
||||
|
||||
class InventoryDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
model = Inventory
|
||||
serializer_class = InventoryDetailSerializer
|
||||
serializer_class = InventorySerializer
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
@@ -149,7 +141,7 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, ControlledByScmMixin, Retri
|
||||
return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class InventoryActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
|
||||
class InventoryActivityStreamList(SubListAPIView):
|
||||
|
||||
model = ActivityStream
|
||||
serializer_class = ActivityStreamSerializer
|
||||
|
||||
40
awx/api/views/metrics.py
Normal file
@@ -0,0 +1,40 @@
|
||||
# Copyright (c) 2018 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import logging
|
||||
|
||||
# Django
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
|
||||
|
||||
# AWX
|
||||
# from awx.main.analytics import collectors
|
||||
from awx.main.analytics.metrics import metrics
|
||||
from awx.api import renderers
|
||||
|
||||
from awx.api.generics import (
|
||||
APIView,
|
||||
)
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
class MetricsView(APIView):
|
||||
|
||||
name = _('Metrics')
|
||||
swagger_topic = 'Metrics'
|
||||
|
||||
renderer_classes = [renderers.PlainTextRenderer,
|
||||
renderers.BrowsableAPIRenderer,]
|
||||
|
||||
def get(self, request, format='txt'):
|
||||
''' Show Metrics Details '''
|
||||
if (request.user.is_superuser or request.user.is_system_auditor):
|
||||
return Response(metrics().decode('UTF-8'))
|
||||
raise PermissionDenied()
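A short, illustrative way to exercise the new view, assuming it is served at `/api/v2/metrics/` as the URL changes elsewhere in this compare suggest; the hostname and credentials are placeholders, and the caller must be a superuser or system auditor or the view raises PermissionDenied.

```python
# Sketch only: fetch the plain-text metrics payload exposed by MetricsView.
import requests

resp = requests.get(
    "https://awx.example.org/api/v2/metrics/",   # placeholder host
    auth=("admin", "<password>"),                # superuser or system auditor
)
resp.raise_for_status()
print(resp.text)  # plain-text body rendered by PlainTextRenderer
```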
|
||||
@@ -31,48 +31,11 @@ from awx.main.models.organization import Team
|
||||
from awx.main.models.projects import Project
|
||||
from awx.main.models.inventory import Inventory
|
||||
from awx.main.models.jobs import JobTemplate
|
||||
from awx.conf.license import (
|
||||
feature_enabled,
|
||||
LicenseForbids,
|
||||
)
|
||||
from awx.api.exceptions import ActiveJobConflict
|
||||
|
||||
logger = logging.getLogger('awx.api.views.mixin')
|
||||
|
||||
|
||||
class ActivityStreamEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports activity streams.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
ret = super(ActivityStreamEnforcementMixin, self).check_permissions(request)
|
||||
if not feature_enabled('activity_streams'):
|
||||
raise LicenseForbids(_('Your license does not allow use of the activity stream.'))
|
||||
return ret
|
||||
|
||||
|
||||
class SystemTrackingEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports system tracking.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
ret = super(SystemTrackingEnforcementMixin, self).check_permissions(request)
|
||||
if not feature_enabled('system_tracking'):
|
||||
raise LicenseForbids(_('Your license does not permit use of system tracking.'))
|
||||
return ret
|
||||
|
||||
|
||||
class WorkflowsEnforcementMixin(object):
|
||||
'''
|
||||
Mixin to check that license supports workflows.
|
||||
'''
|
||||
def check_permissions(self, request):
|
||||
ret = super(WorkflowsEnforcementMixin, self).check_permissions(request)
|
||||
if not feature_enabled('workflows') and request.method not in ('GET', 'OPTIONS', 'DELETE'):
|
||||
raise LicenseForbids(_('Your license does not allow use of workflows.'))
|
||||
return ret
|
||||
|
||||
|
||||
class UnifiedJobDeletionMixin(object):
|
||||
'''
|
||||
Special handling when deleting a running unified job object.
|
||||
|
||||
@@ -7,13 +7,8 @@ import logging
|
||||
# Django
|
||||
from django.db.models import Count
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# AWX
|
||||
from awx.conf.license import (
|
||||
feature_enabled,
|
||||
LicenseForbids,
|
||||
)
|
||||
from awx.main.models import (
|
||||
ActivityStream,
|
||||
Inventory,
|
||||
@@ -50,7 +45,6 @@ from awx.api.serializers import (
|
||||
InstanceGroupSerializer,
|
||||
)
|
||||
from awx.api.views.mixin import (
|
||||
ActivityStreamEnforcementMixin,
|
||||
RelatedJobsPreventDeleteMixin,
|
||||
OrganizationCountsMixin,
|
||||
)
|
||||
@@ -69,24 +63,6 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||
qs = qs.prefetch_related('created_by', 'modified_by')
|
||||
return qs
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
"""Create a new organzation.
|
||||
|
||||
If there is already an organization and the license of this
|
||||
instance does not permit multiple organizations, then raise
|
||||
LicenseForbids.
|
||||
"""
|
||||
# Sanity check: If the multiple organizations feature is disallowed
|
||||
# by the license, then we are only willing to create this organization
|
||||
# if no organizations exist in the system.
|
||||
if (not feature_enabled('multiple_organizations') and
|
||||
self.model.objects.exists()):
|
||||
raise LicenseForbids(_('Your license only permits a single '
|
||||
'organization to exist.'))
|
||||
|
||||
# Okay, create the organization as usual.
|
||||
return super(OrganizationList, self).create(request, *args, **kwargs)
|
||||
|
||||
|
||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
|
||||
@@ -140,6 +116,7 @@ class OrganizationUsersList(BaseUsersList):
|
||||
serializer_class = UserSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'member_role.members'
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
class OrganizationAdminsList(BaseUsersList):
|
||||
@@ -148,6 +125,7 @@ class OrganizationAdminsList(BaseUsersList):
|
||||
serializer_class = UserSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'admin_role.members'
|
||||
ordering = ('username',)
|
||||
|
||||
|
||||
class OrganizationProjectsList(SubListCreateAttachDetachAPIView):
|
||||
@@ -177,7 +155,7 @@ class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
|
||||
parent_key = 'organization'
|
||||
|
||||
|
||||
class OrganizationActivityStreamList(ActivityStreamEnforcementMixin, SubListAPIView):
|
||||
class OrganizationActivityStreamList(SubListAPIView):
|
||||
|
||||
model = ActivityStream
|
||||
serializer_class = ActivityStreamSerializer
|
||||
@@ -244,4 +222,3 @@ class OrganizationObjectRolesList(SubListAPIView):
|
||||
po = self.get_parent_object()
|
||||
content_type = ContentType.objects.get_for_model(self.parent_model)
|
||||
return Role.objects.filter(content_type=content_type, object_id=po.pk)
|
||||
|
||||
|
||||
@@ -25,8 +25,8 @@ from awx.main.utils import (
|
||||
get_custom_venv_choices,
|
||||
to_python_boolean,
|
||||
)
|
||||
from awx.api.versioning import reverse, get_request_version, drf_reverse
|
||||
from awx.conf.license import get_license, feature_enabled
|
||||
from awx.api.versioning import reverse, drf_reverse
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
|
||||
from awx.main.models import (
|
||||
Project,
|
||||
@@ -42,7 +42,7 @@ logger = logging.getLogger('awx.api.views.root')
|
||||
class ApiRootView(APIView):
|
||||
|
||||
permission_classes = (AllowAny,)
|
||||
view_name = _('REST API')
|
||||
name = _('REST API')
|
||||
versioning_class = None
|
||||
swagger_topic = 'Versioning'
|
||||
|
||||
@@ -50,23 +50,21 @@ class ApiRootView(APIView):
|
||||
def get(self, request, format=None):
|
||||
''' List supported API versions '''
|
||||
|
||||
v1 = reverse('api:api_v1_root_view', kwargs={'version': 'v1'})
|
||||
v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
|
||||
data = OrderedDict()
|
||||
data['description'] = _('AWX REST API')
|
||||
data['current_version'] = v2
|
||||
data['available_versions'] = dict(v1 = v1, v2 = v2)
|
||||
data['available_versions'] = dict(v2 = v2)
|
||||
data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
|
||||
if feature_enabled('rebranding'):
|
||||
data['custom_logo'] = settings.CUSTOM_LOGO
|
||||
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
|
||||
data['custom_logo'] = settings.CUSTOM_LOGO
|
||||
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
|
||||
return Response(data)
|
||||
|
||||
|
||||
class ApiOAuthAuthorizationRootView(APIView):
|
||||
|
||||
permission_classes = (AllowAny,)
|
||||
view_name = _("API OAuth 2 Authorization Root")
|
||||
name = _("API OAuth 2 Authorization Root")
|
||||
versioning_class = None
|
||||
swagger_topic = 'Authentication'
|
||||
|
||||
@@ -86,10 +84,10 @@ class ApiVersionRootView(APIView):
|
||||
def get(self, request, format=None):
|
||||
''' List top level resources '''
|
||||
data = OrderedDict()
|
||||
data['ping'] = reverse('api:api_v1_ping_view', request=request)
|
||||
data['ping'] = reverse('api:api_v2_ping_view', request=request)
|
||||
data['instances'] = reverse('api:instance_list', request=request)
|
||||
data['instance_groups'] = reverse('api:instance_group_list', request=request)
|
||||
data['config'] = reverse('api:api_v1_config_view', request=request)
|
||||
data['config'] = reverse('api:api_v2_config_view', request=request)
|
||||
data['settings'] = reverse('api:setting_category_list', request=request)
|
||||
data['me'] = reverse('api:user_me_list', request=request)
|
||||
data['dashboard'] = reverse('api:dashboard_view', request=request)
|
||||
@@ -99,10 +97,11 @@ class ApiVersionRootView(APIView):
|
||||
data['project_updates'] = reverse('api:project_update_list', request=request)
|
||||
data['teams'] = reverse('api:team_list', request=request)
|
||||
data['credentials'] = reverse('api:credential_list', request=request)
|
||||
if get_request_version(request) > 1:
|
||||
data['credential_types'] = reverse('api:credential_type_list', request=request)
|
||||
data['applications'] = reverse('api:o_auth2_application_list', request=request)
|
||||
data['tokens'] = reverse('api:o_auth2_token_list', request=request)
|
||||
data['credential_types'] = reverse('api:credential_type_list', request=request)
|
||||
data['credential_input_sources'] = reverse('api:credential_input_source_list', request=request)
|
||||
data['applications'] = reverse('api:o_auth2_application_list', request=request)
|
||||
data['tokens'] = reverse('api:o_auth2_token_list', request=request)
|
||||
data['metrics'] = reverse('api:metrics_view', request=request)
|
||||
data['inventory'] = reverse('api:inventory_list', request=request)
|
||||
data['inventory_scripts'] = reverse('api:inventory_script_list', request=request)
|
||||
data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
|
||||
@@ -130,21 +129,17 @@ class ApiVersionRootView(APIView):
|
||||
return Response(data)
|
||||
|
||||
|
||||
class ApiV1RootView(ApiVersionRootView):
|
||||
view_name = _('Version 1')
|
||||
|
||||
|
||||
class ApiV2RootView(ApiVersionRootView):
|
||||
view_name = _('Version 2')
|
||||
name = _('Version 2')
|
||||
|
||||
|
||||
class ApiV1PingView(APIView):
|
||||
class ApiV2PingView(APIView):
|
||||
"""A simple view that reports very basic information about this
|
||||
instance, which is acceptable to be public information.
|
||||
"""
|
||||
permission_classes = (AllowAny,)
|
||||
authentication_classes = ()
|
||||
view_name = _('Ping')
|
||||
name = _('Ping')
|
||||
swagger_topic = 'System Configuration'
|
||||
|
||||
def get(self, request, format=None):
|
||||
@@ -157,29 +152,30 @@ class ApiV1PingView(APIView):
|
||||
'ha': is_ha_environment(),
|
||||
'version': get_awx_version(),
|
||||
'active_node': settings.CLUSTER_HOST_ID,
|
||||
'install_uuid': settings.INSTALL_UUID,
|
||||
}
|
||||
|
||||
response['instances'] = []
|
||||
for instance in Instance.objects.all():
|
||||
response['instances'].append(dict(node=instance.hostname, heartbeat=instance.modified,
|
||||
response['instances'].append(dict(node=instance.hostname, uuid=instance.uuid, heartbeat=instance.modified,
|
||||
capacity=instance.capacity, version=instance.version))
|
||||
sorted(response['instances'], key=operator.itemgetter('node'))
|
||||
response['instance_groups'] = []
|
||||
for instance_group in InstanceGroup.objects.all():
|
||||
for instance_group in InstanceGroup.objects.prefetch_related('instances'):
|
||||
response['instance_groups'].append(dict(name=instance_group.name,
|
||||
capacity=instance_group.capacity,
|
||||
instances=[x.hostname for x in instance_group.instances.all()]))
|
||||
return Response(response)
|
||||
|
||||
|
||||
class ApiV1ConfigView(APIView):
|
||||
class ApiV2ConfigView(APIView):
|
||||
|
||||
permission_classes = (IsAuthenticated,)
|
||||
view_name = _('Configuration')
|
||||
name = _('Configuration')
|
||||
swagger_topic = 'System Configuration'
|
||||
|
||||
def check_permissions(self, request):
|
||||
super(ApiV1ConfigView, self).check_permissions(request)
|
||||
super(ApiV2ConfigView, self).check_permissions(request)
|
||||
if not request.user.is_superuser and request.method.lower() not in {'options', 'head', 'get'}:
|
||||
self.permission_denied(request) # Raises PermissionDenied exception.
|
||||
|
||||
@@ -211,7 +207,7 @@ class ApiV1ConfigView(APIView):
|
||||
# If LDAP is enabled, user_ldap_fields will return a list of field
|
||||
# names that are managed by LDAP and should be read-only for users with
|
||||
# a non-empty ldap_dn attribute.
|
||||
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None) and feature_enabled('ldap'):
|
||||
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
|
||||
user_ldap_fields = ['username', 'password']
|
||||
user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
|
||||
user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
|
||||
@@ -220,7 +216,8 @@ class ApiV1ConfigView(APIView):
|
||||
if request.user.is_superuser \
|
||||
or request.user.is_system_auditor \
|
||||
or Organization.accessible_objects(request.user, 'admin_role').exists() \
|
||||
or Organization.accessible_objects(request.user, 'auditor_role').exists():
|
||||
or Organization.accessible_objects(request.user, 'auditor_role').exists() \
|
||||
or Organization.accessible_objects(request.user, 'project_admin_role').exists():
|
||||
data.update(dict(
|
||||
project_base_dir = settings.PROJECTS_ROOT,
|
||||
project_local_paths = Project.get_local_path_choices(),
|
||||
@@ -276,6 +273,3 @@ class ApiV1ConfigView(APIView):
|
||||
except Exception:
|
||||
# FIX: Log
|
||||
return Response({"error": _("Failed to remove license.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -78,9 +78,6 @@ register(
|
||||
# the other settings change, the cached value for this setting will be
|
||||
# cleared to require it to be recomputed.
|
||||
depends_on=['ANSIBLE_COW_SELECTION'],
|
||||
# Optional; licensed feature required to be able to view or modify this
|
||||
# setting.
|
||||
feature_required='rebranding',
|
||||
# Optional; field is stored encrypted in the database and only $encrypted$
|
||||
# is returned via the API.
|
||||
encrypted=True,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# Python
|
||||
import os
|
||||
import logging
|
||||
import urllib.parse as urlparse
|
||||
from collections import OrderedDict
|
||||
@@ -8,7 +9,10 @@ from django.core.validators import URLValidator
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.fields import * # noqa
|
||||
from rest_framework.fields import ( # noqa
|
||||
BooleanField, CharField, ChoiceField, DictField, EmailField, IntegerField,
|
||||
ListField, NullBooleanField
|
||||
)
|
||||
|
||||
logger = logging.getLogger('awx.conf.fields')
|
||||
|
||||
@@ -93,6 +97,26 @@ class StringListBooleanField(ListField):
|
||||
self.fail('type_error', input_type=type(data))
|
||||
|
||||
|
||||
class StringListPathField(StringListField):
|
||||
|
||||
default_error_messages = {
|
||||
'type_error': _('Expected list of strings but got {input_type} instead.'),
|
||||
'path_error': _('{path} is not a valid path choice.'),
|
||||
}
|
||||
|
||||
def to_internal_value(self, paths):
|
||||
if isinstance(paths, (list, tuple)):
|
||||
for p in paths:
|
||||
if not isinstance(p, str):
|
||||
self.fail('type_error', input_type=type(p))
|
||||
if not os.path.exists(p):
|
||||
self.fail('path_error', path=p)
|
||||
|
||||
return super(StringListPathField, self).to_internal_value(sorted({os.path.normpath(path) for path in paths}))
|
||||
else:
|
||||
self.fail('type_error', input_type=type(paths))
|
||||
|
||||
|
||||
class URLField(CharField):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
@@ -1,64 +1,19 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Django
|
||||
from django.core.signals import setting_changed
|
||||
from django.dispatch import receiver
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import APIException
|
||||
|
||||
# Tower
|
||||
from awx.main.utils.common import get_licenser
|
||||
from awx.main.utils import memoize, memoize_delete
|
||||
|
||||
__all__ = ['LicenseForbids', 'get_license', 'get_licensed_features',
|
||||
'feature_enabled', 'feature_exists']
|
||||
|
||||
|
||||
class LicenseForbids(APIException):
|
||||
status_code = 402
|
||||
default_detail = _('Your Tower license does not allow that.')
|
||||
__all__ = ['get_license']
|
||||
|
||||
|
||||
def _get_validated_license_data():
|
||||
return get_licenser().validate()
|
||||
|
||||
|
||||
@receiver(setting_changed)
|
||||
def _on_setting_changed(sender, **kwargs):
|
||||
# Clear cached result above when license changes.
|
||||
if kwargs.get('setting', None) == 'LICENSE':
|
||||
memoize_delete('feature_enabled')
|
||||
|
||||
|
||||
def get_license(show_key=False):
|
||||
"""Return a dictionary representing the active license on this Tower instance."""
|
||||
license_data = _get_validated_license_data()
|
||||
if not show_key:
|
||||
license_data.pop('license_key', None)
|
||||
return license_data
|
||||
|
||||
|
||||
def get_licensed_features():
|
||||
"""Return a set of all features enabled by the active license."""
|
||||
features = set()
|
||||
for feature, enabled in _get_validated_license_data().get('features', {}).items():
|
||||
if enabled:
|
||||
features.add(feature)
|
||||
return features
|
||||
|
||||
|
||||
@memoize(track_function=True)
|
||||
def feature_enabled(name):
|
||||
"""Return True if the requested feature is enabled, False otherwise."""
|
||||
validated_license_data = _get_validated_license_data()
|
||||
if validated_license_data.get('license_type', 'UNLICENSED') == 'open':
|
||||
return True
|
||||
return validated_license_data.get('features', {}).get(name, False)
|
||||
|
||||
|
||||
def feature_exists(name):
|
||||
"""Return True if the requested feature name exists, False otherwise."""
|
||||
return bool(name in _get_validated_license_data().get('features', {}))
|
||||
|
||||
@@ -22,7 +22,7 @@ def fill_ldap_group_type_params(apps, schema_editor):
|
||||
modified=now())
|
||||
|
||||
init_attrs = set(inspect.getargspec(group_type.__init__).args[1:])
|
||||
for k in group_type_params.keys():
|
||||
for k in list(group_type_params.keys()):
|
||||
if k not in init_attrs:
|
||||
del group_type_params[k]
|
||||
|
||||
|
||||
@@ -68,7 +68,7 @@ class SettingsRegistry(object):
|
||||
def get_dependent_settings(self, setting):
|
||||
return self._dependent_settings.get(setting, set())
|
||||
|
||||
def get_registered_categories(self, features_enabled=None):
|
||||
def get_registered_categories(self):
|
||||
categories = {
|
||||
'all': _('All'),
|
||||
'changed': _('Changed'),
|
||||
@@ -77,10 +77,6 @@ class SettingsRegistry(object):
|
||||
category_slug = kwargs.get('category_slug', None)
|
||||
if category_slug is None or category_slug in categories:
|
||||
continue
|
||||
if features_enabled is not None:
|
||||
feature_required = kwargs.get('feature_required', None)
|
||||
if feature_required and feature_required not in features_enabled:
|
||||
continue
|
||||
if category_slug == 'user':
|
||||
categories['user'] = _('User')
|
||||
categories['user-defaults'] = _('User-Defaults')
|
||||
@@ -88,7 +84,7 @@ class SettingsRegistry(object):
|
||||
categories[category_slug] = kwargs.get('category', None) or category_slug
|
||||
return categories
|
||||
|
||||
def get_registered_settings(self, category_slug=None, read_only=None, features_enabled=None, slugs_to_ignore=set()):
|
||||
def get_registered_settings(self, category_slug=None, read_only=None, slugs_to_ignore=set()):
|
||||
setting_names = []
|
||||
if category_slug == 'user-defaults':
|
||||
category_slug = 'user'
|
||||
@@ -100,14 +96,10 @@ class SettingsRegistry(object):
|
||||
if kwargs.get('category_slug', None) in slugs_to_ignore:
|
||||
continue
|
||||
if (read_only in {True, False} and kwargs.get('read_only', False) != read_only and
|
||||
setting not in ('AWX_ISOLATED_PRIVATE_KEY', 'AWX_ISOLATED_PUBLIC_KEY')):
|
||||
setting not in ('INSTALL_UUID', 'AWX_ISOLATED_PRIVATE_KEY', 'AWX_ISOLATED_PUBLIC_KEY')):
|
||||
# Note: Doesn't catch fields that set read_only via __init__;
|
||||
# read-only field kwargs should always include read_only=True.
|
||||
continue
|
||||
if features_enabled is not None:
|
||||
feature_required = kwargs.get('feature_required', None)
|
||||
if feature_required and feature_required not in features_enabled:
|
||||
continue
|
||||
setting_names.append(setting)
|
||||
return setting_names
|
||||
|
||||
@@ -135,7 +127,6 @@ class SettingsRegistry(object):
|
||||
category = field_kwargs.pop('category', None)
|
||||
depends_on = frozenset(field_kwargs.pop('depends_on', None) or [])
|
||||
placeholder = field_kwargs.pop('placeholder', empty)
|
||||
feature_required = field_kwargs.pop('feature_required', empty)
|
||||
encrypted = bool(field_kwargs.pop('encrypted', False))
|
||||
defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
|
||||
if getattr(field_kwargs.get('child', None), 'source', None) is not None:
|
||||
@@ -146,8 +137,6 @@ class SettingsRegistry(object):
|
||||
field_instance.depends_on = depends_on
|
||||
if placeholder is not empty:
|
||||
field_instance.placeholder = placeholder
|
||||
if feature_required is not empty:
|
||||
field_instance.feature_required = feature_required
|
||||
field_instance.defined_in_file = defined_in_file
|
||||
if field_instance.defined_in_file:
|
||||
field_instance.help_text = (
|
||||
|
||||
@@ -88,7 +88,7 @@ class SettingSingletonSerializer(serializers.Serializer):
|
||||
continue
|
||||
extra_kwargs = {}
|
||||
# Make LICENSE and AWX_ISOLATED_KEY_GENERATION read-only here;
|
||||
# LICENSE is only updated via /api/v1/config/
|
||||
# LICENSE is only updated via /api/v2/config/
|
||||
# AWX_ISOLATED_KEY_GENERATION is only set/unset via the setup playbook
|
||||
if key in ('LICENSE', 'AWX_ISOLATED_KEY_GENERATION'):
|
||||
extra_kwargs['read_only'] = True
|
||||
|
||||
@@ -24,7 +24,6 @@ from rest_framework.fields import empty, SkipField
|
||||
|
||||
# Tower
|
||||
from awx.main.utils import encrypt_field, decrypt_field
|
||||
from awx.main.utils.db import get_tower_migration_version
|
||||
from awx.conf import settings_registry
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.migrations._reencrypt import decrypt_field as old_decrypt_field
|
||||
@@ -90,45 +89,42 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
transaction.set_rollback(False)
|
||||
yield
|
||||
except DBError:
|
||||
if 'migrate' in sys.argv and get_tower_migration_version() < '310':
|
||||
logger.info('Using default settings until version 3.1 migration.')
|
||||
else:
|
||||
# We want the _full_ traceback with the context
|
||||
# First we get the current call stack, which constitutes the "top",
|
||||
# it has the context up to the point where the context manager is used
|
||||
top_stack = StringIO()
|
||||
traceback.print_stack(file=top_stack)
|
||||
top_lines = top_stack.getvalue().strip('\n').split('\n')
|
||||
top_stack.close()
|
||||
# Get "bottom" stack from the local error that happened
|
||||
# inside of the "with" block this wraps
|
||||
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||
bottom_stack = StringIO()
|
||||
traceback.print_tb(exc_traceback, file=bottom_stack)
|
||||
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
|
||||
# Glue together top and bottom where overlap is found
|
||||
bottom_cutoff = 0
|
||||
for i, line in enumerate(bottom_lines):
|
||||
if line in top_lines:
|
||||
# start of overlapping section, take overlap from bottom
|
||||
top_lines = top_lines[:top_lines.index(line)]
|
||||
bottom_cutoff = i
|
||||
break
|
||||
bottom_lines = bottom_lines[bottom_cutoff:]
|
||||
tb_lines = top_lines + bottom_lines
|
||||
# We want the _full_ traceback with the context
|
||||
# First we get the current call stack, which constitutes the "top",
|
||||
# it has the context up to the point where the context manager is used
|
||||
top_stack = StringIO()
|
||||
traceback.print_stack(file=top_stack)
|
||||
top_lines = top_stack.getvalue().strip('\n').split('\n')
|
||||
top_stack.close()
|
||||
# Get "bottom" stack from the local error that happened
|
||||
# inside of the "with" block this wraps
|
||||
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||
bottom_stack = StringIO()
|
||||
traceback.print_tb(exc_traceback, file=bottom_stack)
|
||||
bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')
|
||||
# Glue together top and bottom where overlap is found
|
||||
bottom_cutoff = 0
|
||||
for i, line in enumerate(bottom_lines):
|
||||
if line in top_lines:
|
||||
# start of overlapping section, take overlap from bottom
|
||||
top_lines = top_lines[:top_lines.index(line)]
|
||||
bottom_cutoff = i
|
||||
break
|
||||
bottom_lines = bottom_lines[bottom_cutoff:]
|
||||
tb_lines = top_lines + bottom_lines
|
||||
|
||||
tb_string = '\n'.join(
|
||||
['Traceback (most recent call last):'] +
|
||||
tb_lines +
|
||||
['{}: {}'.format(exc_type.__name__, str(exc_value))]
|
||||
)
|
||||
bottom_stack.close()
|
||||
# Log the combined stack
|
||||
if trans_safe:
|
||||
if 'check_migrations' not in sys.argv:
|
||||
logger.warning('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
else:
|
||||
logger.error('Error modifying something related to database settings.\n{}'.format(tb_string))
|
||||
tb_string = '\n'.join(
|
||||
['Traceback (most recent call last):'] +
|
||||
tb_lines +
|
||||
['{}: {}'.format(exc_type.__name__, str(exc_value))]
|
||||
)
|
||||
bottom_stack.close()
|
||||
# Log the combined stack
|
||||
if trans_safe:
|
||||
if 'check_migrations' not in sys.argv:
|
||||
logger.debug('Database settings are not available, using defaults, error:\n{}'.format(tb_string))
|
||||
else:
|
||||
logger.debug('Error modifying something related to database settings.\n{}'.format(tb_string))
|
||||
finally:
|
||||
if trans_safe and is_atomic and rollback_set:
|
||||
transaction.set_rollback(rollback_set)
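The rewritten block above splices the "top" call stack (everything up to where the context manager is entered) onto the "bottom" traceback taken from the caught exception, trimming the frames that appear in both. A self-contained sketch of that splice follows; the function and variable names are illustrative, not the AWX ones.

```python
# Sketch of joining the current call stack with an exception traceback,
# cutting the two lists where they overlap so frames are not duplicated.
import sys
import traceback
from io import StringIO

def spliced_traceback():
    # "Top": the call stack up to this point.
    top_stack = StringIO()
    traceback.print_stack(file=top_stack)
    top_lines = top_stack.getvalue().strip('\n').split('\n')

    # "Bottom": the traceback of the exception currently being handled.
    exc_type, exc_value, exc_tb = sys.exc_info()
    bottom_stack = StringIO()
    traceback.print_tb(exc_tb, file=bottom_stack)
    bottom_lines = bottom_stack.getvalue().strip('\n').split('\n')

    # Cut "top" at the first line that also appears in "bottom", then keep
    # "bottom" from that point onward; if no overlap is found, concatenate.
    cutoff = 0
    for i, line in enumerate(bottom_lines):
        if line in top_lines:
            top_lines = top_lines[:top_lines.index(line)]
            cutoff = i
            break
    return '\n'.join(
        ['Traceback (most recent call last):']
        + top_lines + bottom_lines[cutoff:]
        + ['{}: {}'.format(exc_type.__name__, exc_value)]
    )

try:
    {}['missing']
except KeyError:
    print(spliced_traceback())
```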
|
||||
@@ -381,8 +377,9 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
setting = None
|
||||
setting_id = None
|
||||
if not field.read_only or name in (
|
||||
# these two values are read-only - however - we *do* want
|
||||
# these values are read-only - however - we *do* want
|
||||
# to fetch their value from the database
|
||||
'INSTALL_UUID',
|
||||
'AWX_ISOLATED_PRIVATE_KEY',
|
||||
'AWX_ISOLATED_PUBLIC_KEY',
|
||||
):
|
||||
|
||||
@@ -2,7 +2,7 @@ import urllib.parse
|
||||
|
||||
import pytest
|
||||
|
||||
from django.core.urlresolvers import resolve
|
||||
from django.urls import resolve
|
||||
from django.contrib.auth.models import User
|
||||
|
||||
from rest_framework.test import (
|
||||
|
||||
@@ -65,41 +65,6 @@ def test_non_admin_user_does_not_see_categories(api_request, dummy_setting, norm
|
||||
assert not response.data['results']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch(
|
||||
'awx.conf.views.VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE',
|
||||
{
|
||||
1: set([]),
|
||||
2: set(['foobar']),
|
||||
}
|
||||
)
|
||||
def test_version_specific_category_slug_to_exclude_does_not_show_up(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
'FOO_BAR',
|
||||
field_class=fields.IntegerField,
|
||||
category='FooBar',
|
||||
category_slug='foobar'
|
||||
):
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_category_list',
|
||||
kwargs={'version': 'v2'})
|
||||
)
|
||||
for item in response.data['results']:
|
||||
assert item['slug'] != 'foobar'
|
||||
response = api_request(
|
||||
'get',
|
||||
reverse('api:setting_category_list',
|
||||
kwargs={'version': 'v1'})
|
||||
)
|
||||
contains = False
|
||||
for item in response.data['results']:
|
||||
if item['slug'] != 'foobar':
|
||||
contains = True
|
||||
break
|
||||
assert contains
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_detail_retrieve(api_request, dummy_setting):
|
||||
with dummy_setting(
|
||||
|
||||
@@ -1,7 +1,7 @@
import pytest

from rest_framework.fields import ValidationError
from awx.conf.fields import StringListBooleanField, ListTuplesField
from awx.conf.fields import StringListBooleanField, StringListPathField, ListTuplesField


class TestStringListBooleanField():
@@ -84,3 +84,49 @@ class TestListTuplesField():
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 " \
|
||||
"but got {} instead.".format(t)
|
||||
|
||||
|
||||
class TestStringListPathField():
|
||||
|
||||
FIELD_VALUES = [
|
||||
((".", "..", "/"), [".", "..", "/"]),
|
||||
(("/home",), ["/home"]),
|
||||
(("///home///",), ["/home"]),
|
||||
(("/home/././././",), ["/home"]),
|
||||
(("/home", "/home", "/home/"), ["/home"]),
|
||||
(["/home/", "/home/", "/opt/", "/opt/", "/var/"], ["/home", "/opt", "/var"])
|
||||
]
|
||||
|
||||
FIELD_VALUES_INVALID_TYPE = [
|
||||
1.245,
|
||||
{"a": "b"},
|
||||
("/home"),
|
||||
]
|
||||
|
||||
FIELD_VALUES_INVALID_PATH = [
|
||||
"",
|
||||
"~/",
|
||||
"home",
|
||||
"/invalid_path",
|
||||
"/home/invalid_path",
|
||||
]
|
||||
|
||||
@pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
|
||||
def test_to_internal_value_valid(self, value_in, value_known):
|
||||
field = StringListPathField()
|
||||
v = field.to_internal_value(value_in)
|
||||
assert v == value_known
|
||||
|
||||
@pytest.mark.parametrize("value", FIELD_VALUES_INVALID_TYPE)
|
||||
def test_to_internal_value_invalid_type(self, value):
|
||||
field = StringListPathField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected list of strings but got {} instead.".format(type(value))
|
||||
|
||||
@pytest.mark.parametrize("value", FIELD_VALUES_INVALID_PATH)
|
||||
def test_to_internal_value_invalid_path(self, value):
|
||||
field = StringListPathField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value([value])
|
||||
assert e.value.detail[0] == "{} is not a valid path choice.".format(value)
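For orientation, the new tests above imply that StringListPathField normalizes each path, requires it to exist on disk, and de-duplicates the result. The sketch below is a hypothetical approximation of that behaviour; the real field lives in awx.conf.fields and may differ in details.

# Hypothetical approximation of the behaviour exercised by TestStringListPathField.
import os

from rest_framework import fields
from rest_framework.fields import ValidationError


class PathListField(fields.Field):

    def to_internal_value(self, paths):
        if not isinstance(paths, (list, tuple)):
            raise ValidationError('Expected list of strings but got {} instead.'.format(type(paths)))
        cleaned = []
        for path in paths:
            if not isinstance(path, str):
                raise ValidationError('{} is not a valid path choice.'.format(path))
            normalized = os.path.normpath(path)
            # Only paths that actually exist on the filesystem are accepted.
            if not os.path.exists(normalized):
                raise ValidationError('{} is not a valid path choice.'.format(path))
            if normalized not in cleaned:
                cleaned.append(normalized)
        return cleaned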
@@ -119,20 +119,6 @@ def test_get_registered_read_only_settings(reg):
|
||||
]
|
||||
|
||||
|
||||
def test_get_registered_settings_with_required_features(reg):
|
||||
reg.register(
|
||||
'AWX_SOME_SETTING_ENABLED',
|
||||
field_class=fields.BooleanField,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
feature_required='superpowers',
|
||||
)
|
||||
assert reg.get_registered_settings(features_enabled=[]) == []
|
||||
assert reg.get_registered_settings(features_enabled=['superpowers']) == [
|
||||
'AWX_SOME_SETTING_ENABLED'
|
||||
]
|
||||
|
||||
|
||||
def test_get_dependent_settings(reg):
|
||||
reg.register(
|
||||
'AWX_SOME_SETTING_ENABLED',
|
||||
@@ -173,45 +159,6 @@ def test_get_registered_categories(reg):
|
||||
}
|
||||
|
||||
|
||||
def test_get_registered_categories_with_required_features(reg):
|
||||
reg.register(
|
||||
'AWX_SOME_SETTING_ENABLED',
|
||||
field_class=fields.BooleanField,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
feature_required='superpowers'
|
||||
)
|
||||
reg.register(
|
||||
'AWX_SOME_OTHER_SETTING_ENABLED',
|
||||
field_class=fields.BooleanField,
|
||||
category=_('OtherSystem'),
|
||||
category_slug='other-system',
|
||||
feature_required='sortapowers'
|
||||
)
|
||||
assert reg.get_registered_categories(features_enabled=[]) == {
|
||||
'all': _('All'),
|
||||
'changed': _('Changed'),
|
||||
}
|
||||
assert reg.get_registered_categories(features_enabled=['superpowers']) == {
|
||||
'all': _('All'),
|
||||
'changed': _('Changed'),
|
||||
'system': _('System'),
|
||||
}
|
||||
assert reg.get_registered_categories(features_enabled=['sortapowers']) == {
|
||||
'all': _('All'),
|
||||
'changed': _('Changed'),
|
||||
'other-system': _('OtherSystem'),
|
||||
}
|
||||
assert reg.get_registered_categories(
|
||||
features_enabled=['superpowers', 'sortapowers']
|
||||
) == {
|
||||
'all': _('All'),
|
||||
'changed': _('Changed'),
|
||||
'system': _('System'),
|
||||
'other-system': _('OtherSystem'),
|
||||
}
|
||||
|
||||
|
||||
def test_is_setting_encrypted(reg):
|
||||
reg.register(
|
||||
'AWX_SOME_SETTING_ENABLED',
|
||||
@@ -237,7 +184,6 @@ def test_simple_field(reg):
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
placeholder='Example Value',
|
||||
feature_required='superpowers'
|
||||
)
|
||||
|
||||
field = reg.get_setting_field('AWX_SOME_SETTING')
|
||||
@@ -246,7 +192,6 @@ def test_simple_field(reg):
|
||||
assert field.category_slug == 'system'
|
||||
assert field.default is empty
|
||||
assert field.placeholder == 'Example Value'
|
||||
assert field.feature_required == 'superpowers'
|
||||
|
||||
|
||||
def test_field_with_custom_attribute(reg):
|
||||
|
||||
@@ -1,108 +1,9 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Python
|
||||
import difflib
|
||||
import glob
|
||||
import os
|
||||
import shutil
|
||||
|
||||
# AWX
|
||||
from awx.conf.registry import settings_registry
|
||||
|
||||
__all__ = ['comment_assignments', 'conf_to_dict']
|
||||
|
||||
|
||||
def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
|
||||
if isinstance(patterns, str):
|
||||
patterns = [patterns]
|
||||
diffs = []
|
||||
for pattern in patterns:
|
||||
for filename in sorted(glob.glob(pattern)):
|
||||
filename = os.path.abspath(os.path.normpath(filename))
|
||||
if backup_suffix:
|
||||
backup_filename = '{}{}'.format(filename, backup_suffix)
|
||||
else:
|
||||
backup_filename = None
|
||||
diff = comment_assignments_in_file(filename, assignment_names, dry_run, backup_filename)
|
||||
if diff:
|
||||
diffs.append(diff)
|
||||
return diffs
|
||||
|
||||
|
||||
def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
|
||||
from redbaron import RedBaron, indent
|
||||
|
||||
if isinstance(assignment_names, str):
|
||||
assignment_names = [assignment_names]
|
||||
else:
|
||||
assignment_names = assignment_names[:]
|
||||
current_file_data = open(filename).read()
|
||||
|
||||
for assignment_name in assignment_names[:]:
|
||||
if assignment_name in current_file_data:
|
||||
continue
|
||||
if assignment_name in assignment_names:
|
||||
assignment_names.remove(assignment_name)
|
||||
if not assignment_names:
|
||||
return ''
|
||||
|
||||
replace_lines = {}
|
||||
rb = RedBaron(current_file_data)
|
||||
for assignment_node in rb.find_all('assignment'):
|
||||
for assignment_name in assignment_names:
|
||||
|
||||
# Only target direct assignments to a variable.
|
||||
name_node = assignment_node.find('name', value=assignment_name)
|
||||
if not name_node:
|
||||
continue
|
||||
if assignment_node.target.type != 'name':
|
||||
continue
|
||||
|
||||
# Build a new node that comments out the existing assignment node.
|
||||
indentation = '{}# '.format(assignment_node.indentation or '')
|
||||
new_node_content = indent(assignment_node.dumps(), indentation)
|
||||
new_node_lines = new_node_content.splitlines()
|
||||
# Add a pass statement in case the assignment block is the only
|
||||
# child in a parent code block to prevent a syntax error.
|
||||
if assignment_node.indentation:
|
||||
new_node_lines[0] = new_node_lines[0].replace(indentation, '{}pass # '.format(assignment_node.indentation or ''), 1)
|
||||
new_node_lines[0] = '{0}This setting is now configured via the Tower API.\n{1}'.format(indentation, new_node_lines[0])
|
||||
|
||||
# Store new node lines in dictionary to be replaced in file.
|
||||
start_lineno = assignment_node.absolute_bounding_box.top_left.line
|
||||
end_lineno = assignment_node.absolute_bounding_box.bottom_right.line
|
||||
for n, new_node_line in enumerate(new_node_lines):
|
||||
new_lineno = start_lineno + n
|
||||
assert new_lineno <= end_lineno
|
||||
replace_lines[new_lineno] = new_node_line
|
||||
|
||||
if not replace_lines:
|
||||
return ''
|
||||
|
||||
# Iterate through all lines in current file and replace as needed.
|
||||
current_file_lines = current_file_data.splitlines()
|
||||
new_file_lines = []
|
||||
for n, line in enumerate(current_file_lines):
|
||||
new_file_lines.append(replace_lines.get(n + 1, line))
|
||||
new_file_data = '\n'.join(new_file_lines)
|
||||
new_file_lines = new_file_data.splitlines()
|
||||
|
||||
# If changed, syntax check and write the new file; return a diff of changes.
|
||||
diff_lines = []
|
||||
if new_file_data != current_file_data:
|
||||
compile(new_file_data, filename, 'exec')
|
||||
if backup_filename:
|
||||
from_file = backup_filename
|
||||
else:
|
||||
from_file = '{}.old'.format(filename)
|
||||
to_file = filename
|
||||
diff_lines = list(difflib.unified_diff(current_file_lines, new_file_lines, fromfile=from_file, tofile=to_file, lineterm=''))
|
||||
if not dry_run:
|
||||
if backup_filename:
|
||||
shutil.copy2(filename, backup_filename)
|
||||
with open(filename, 'w') as fileobj:
|
||||
fileobj.write(new_file_data)
|
||||
return '\n'.join(diff_lines)
|
||||
__all__ = ['conf_to_dict']
|
||||
|
||||
|
||||
def conf_to_dict(obj):
|
||||
@@ -110,10 +11,3 @@ def conf_to_dict(obj):
|
||||
'category': settings_registry.get_setting_category(obj.key),
|
||||
'name': obj.key,
|
||||
}
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py')
|
||||
diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP'])
|
||||
for diff in diffs:
|
||||
print(diff)
|
||||
|
||||
@@ -17,13 +17,17 @@ from rest_framework import serializers
from rest_framework import status

# Tower
from awx.api.generics import * # noqa
from awx.api.generics import (
    APIView,
    GenericAPIView,
    ListAPIView,
    RetrieveUpdateDestroyAPIView,
)
from awx.api.permissions import IsSuperUser
from awx.api.versioning import reverse, get_request_version
from awx.main.utils import * # noqa
from awx.api.versioning import reverse
from awx.main.utils import camelcase_to_underscore
from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException
from awx.main.tasks import handle_setting_changes
from awx.conf.license import get_licensed_features
from awx.conf.models import Setting
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
from awx.conf import settings_registry
@@ -31,24 +35,17 @@ from awx.conf import settings_registry
|
||||
|
||||
SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
|
||||
|
||||
VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE = {
|
||||
1: set([
|
||||
'named-url',
|
||||
]),
|
||||
2: set([]),
|
||||
}
|
||||
|
||||
|
||||
class SettingCategoryList(ListAPIView):
|
||||
|
||||
model = Setting # Not exactly, but needed for the view.
|
||||
serializer_class = SettingCategorySerializer
|
||||
filter_backends = []
|
||||
view_name = _('Setting Categories')
|
||||
name = _('Setting Categories')
|
||||
|
||||
def get_queryset(self):
|
||||
setting_categories = []
|
||||
categories = settings_registry.get_registered_categories(features_enabled=get_licensed_features())
|
||||
categories = settings_registry.get_registered_categories()
|
||||
if self.request.user.is_superuser or self.request.user.is_system_auditor:
|
||||
pass # categories = categories
|
||||
elif 'user' in categories:
|
||||
@@ -56,8 +53,6 @@ class SettingCategoryList(ListAPIView):
|
||||
else:
|
||||
categories = {}
|
||||
for category_slug in sorted(categories.keys()):
|
||||
if category_slug in VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE[get_request_version(self.request)]:
|
||||
continue
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': category_slug}, request=self.request)
|
||||
setting_categories.append(SettingCategory(url, category_slug, categories[category_slug]))
|
||||
return setting_categories
|
||||
@@ -68,13 +63,11 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
model = Setting # Not exactly, but needed for the view.
|
||||
serializer_class = SettingSingletonSerializer
|
||||
filter_backends = []
|
||||
view_name = _('Setting Detail')
|
||||
name = _('Setting Detail')
|
||||
|
||||
def get_queryset(self):
|
||||
self.category_slug = self.kwargs.get('category_slug', 'all')
|
||||
all_category_slugs = list(settings_registry.get_registered_categories(features_enabled=get_licensed_features()).keys())
|
||||
for slug_to_delete in VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE[get_request_version(self.request)]:
|
||||
all_category_slugs.remove(slug_to_delete)
|
||||
all_category_slugs = list(settings_registry.get_registered_categories().keys())
|
||||
if self.request.user.is_superuser or getattr(self.request.user, 'is_system_auditor', False):
|
||||
category_slugs = all_category_slugs
|
||||
else:
|
||||
@@ -85,8 +78,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
raise PermissionDenied()
|
||||
|
||||
registered_settings = settings_registry.get_registered_settings(
|
||||
category_slug=self.category_slug, read_only=False, features_enabled=get_licensed_features(),
|
||||
slugs_to_ignore=VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE[get_request_version(self.request)]
|
||||
category_slug=self.category_slug, read_only=False,
|
||||
)
|
||||
if self.category_slug == 'user':
|
||||
return Setting.objects.filter(key__in=registered_settings, user=self.request.user)
|
||||
@@ -96,8 +88,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
def get_object(self):
|
||||
settings_qs = self.get_queryset()
|
||||
registered_settings = settings_registry.get_registered_settings(
|
||||
category_slug=self.category_slug, features_enabled=get_licensed_features(),
|
||||
slugs_to_ignore=VERSION_SPECIFIC_CATEGORIES_TO_EXCLUDE[get_request_version(self.request)]
|
||||
category_slug=self.category_slug,
|
||||
)
|
||||
all_settings = {}
|
||||
for setting in settings_qs:
|
||||
@@ -163,7 +154,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
|
||||
class SettingLoggingTest(GenericAPIView):
|
||||
|
||||
view_name = _('Logging Connectivity Test')
|
||||
name = _('Logging Connectivity Test')
|
||||
model = Setting
|
||||
serializer_class = SettingSingletonSerializer
|
||||
permission_classes = (IsSuperUser,)
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible by Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# AWX Display Callback
|
||||
from . import cleanup # noqa (registers control persistent cleanup)
|
||||
from . import display # noqa (wraps ansible.display.Display methods)
|
||||
from .module import AWXDefaultCallbackModule, AWXMinimalCallbackModule
|
||||
|
||||
__all__ = ['AWXDefaultCallbackModule', 'AWXMinimalCallbackModule']
|
||||
@@ -1,85 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible by Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import atexit
|
||||
import glob
|
||||
import os
|
||||
import pwd
|
||||
|
||||
# PSUtil
|
||||
try:
|
||||
import psutil
|
||||
except ImportError:
|
||||
raise ImportError('psutil is missing; {}bin/pip install psutil'.format(
|
||||
os.environ['VIRTUAL_ENV']
|
||||
))
|
||||
|
||||
__all__ = []
|
||||
|
||||
main_pid = os.getpid()
|
||||
|
||||
|
||||
@atexit.register
|
||||
def terminate_ssh_control_masters():
|
||||
# Only run this cleanup from the main process.
|
||||
if os.getpid() != main_pid:
|
||||
return
|
||||
# Determine if control persist is being used and if any open sockets
|
||||
# exist after running the playbook.
|
||||
cp_path = os.environ.get('ANSIBLE_SSH_CONTROL_PATH', '')
|
||||
if not cp_path:
|
||||
return
|
||||
cp_dir = os.path.dirname(cp_path)
|
||||
if not os.path.exists(cp_dir):
|
||||
return
|
||||
cp_pattern = os.path.join(cp_dir, 'ansible-ssh-*')
|
||||
cp_files = glob.glob(cp_pattern)
|
||||
if not cp_files:
|
||||
return
|
||||
|
||||
# Attempt to find any running control master processes.
|
||||
username = pwd.getpwuid(os.getuid())[0]
|
||||
ssh_cm_procs = []
|
||||
for proc in psutil.process_iter():
|
||||
try:
|
||||
pname = proc.name()
|
||||
pcmdline = proc.cmdline()
|
||||
pusername = proc.username()
|
||||
except psutil.NoSuchProcess:
|
||||
continue
|
||||
if pusername != username:
|
||||
continue
|
||||
if pname != 'ssh':
|
||||
continue
|
||||
for cp_file in cp_files:
|
||||
if pcmdline and cp_file in pcmdline[0]:
|
||||
ssh_cm_procs.append(proc)
|
||||
break
|
||||
|
||||
# Terminate then kill control master processes. Workaround older
|
||||
# version of psutil that may not have wait_procs implemented.
|
||||
for proc in ssh_cm_procs:
|
||||
try:
|
||||
proc.terminate()
|
||||
except psutil.NoSuchProcess:
|
||||
continue
|
||||
procs_gone, procs_alive = psutil.wait_procs(ssh_cm_procs, timeout=5)
|
||||
for proc in procs_alive:
|
||||
proc.kill()
|
||||
@@ -1,98 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible by Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import functools
|
||||
import sys
|
||||
import uuid
|
||||
|
||||
# Ansible
|
||||
from ansible.utils.display import Display
|
||||
|
||||
# Tower Display Callback
|
||||
from .events import event_context
|
||||
|
||||
__all__ = []
|
||||
|
||||
|
||||
def with_context(**context):
|
||||
global event_context
|
||||
|
||||
def wrap(f):
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
with event_context.set_local(**context):
|
||||
return f(*args, **kwargs)
|
||||
return wrapper
|
||||
return wrap
|
||||
|
||||
|
||||
for attr in dir(Display):
|
||||
if attr.startswith('_') or 'cow' in attr or 'prompt' in attr:
|
||||
continue
|
||||
if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'):
|
||||
continue
|
||||
if not callable(getattr(Display, attr)):
|
||||
continue
|
||||
setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr)))
|
||||
|
||||
|
||||
def with_verbosity(f):
|
||||
global event_context
|
||||
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
host = args[2] if len(args) >= 3 else kwargs.get('host', None)
|
||||
caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2)
|
||||
context = dict(verbose=True, verbosity=(caplevel + 1))
|
||||
if host is not None:
|
||||
context['remote_addr'] = host
|
||||
with event_context.set_local(**context):
|
||||
return f(*args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
Display.verbose = with_verbosity(Display.verbose)
|
||||
|
||||
|
||||
def display_with_context(f):
|
||||
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False)
|
||||
stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False)
|
||||
event_uuid = event_context.get().get('uuid', None)
|
||||
with event_context.display_lock:
|
||||
# If writing only to a log file or there is already an event UUID
|
||||
# set (from a callback module method), skip dumping the event data.
|
||||
if log_only or event_uuid:
|
||||
return f(*args, **kwargs)
|
||||
try:
|
||||
fileobj = sys.stderr if stderr else sys.stdout
|
||||
event_context.add_local(uuid=str(uuid.uuid4()))
|
||||
event_context.dump_begin(fileobj)
|
||||
return f(*args, **kwargs)
|
||||
finally:
|
||||
event_context.dump_end(fileobj)
|
||||
event_context.remove_local(uuid=None)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
Display.display = display_with_context(Display.display)
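Note on the wrapping above: with_context(...) decorates each Display method so that, while it runs, extra flags are recorded in the shared event context. The toy below illustrates the same decorator pattern with a stand-in context object rather than the real event_context.

# Toy illustration of the with_context() decorator pattern used above.
import functools
import threading


class ToyContext:
    # Thread-local stand-in for the real event_context.

    def __init__(self):
        self._local = threading.local()

    def add(self, **kwargs):
        ctx = getattr(self._local, 'ctx', {})
        ctx.update(kwargs)
        self._local.ctx = ctx

    def remove(self, *keys):
        ctx = getattr(self._local, 'ctx', {})
        for key in keys:
            ctx.pop(key, None)

    def get(self):
        return dict(getattr(self._local, 'ctx', {}))


ctx = ToyContext()


def with_context(**context):
    def wrap(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            ctx.add(**context)  # mark the current thread while f runs
            try:
                return f(*args, **kwargs)
            finally:
                ctx.remove(*context.keys())
        return wrapper
    return wrap


@with_context(warning=True)
def warning(msg):
    print(ctx.get(), msg)  # -> {'warning': True} disk is nearly full


warning('disk is nearly full')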
@@ -1,186 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible by Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import base64
|
||||
import contextlib
|
||||
import datetime
|
||||
import json
|
||||
import multiprocessing
|
||||
import os
|
||||
import stat
|
||||
import threading
|
||||
import uuid
|
||||
|
||||
try:
|
||||
import memcache
|
||||
except ImportError:
|
||||
raise ImportError('python-memcached is missing; {}bin/pip install python-memcached'.format(
|
||||
os.environ['VIRTUAL_ENV']
|
||||
))
|
||||
|
||||
__all__ = ['event_context']
|
||||
|
||||
|
||||
class IsolatedFileWrite:
|
||||
'''
|
||||
Stand-in class that will write partial event data to a file as a
|
||||
replacement for memcache when a job is running on an isolated host.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
self.private_data_dir = os.getenv('AWX_ISOLATED_DATA_DIR')
|
||||
|
||||
def set(self, key, value):
|
||||
# Strip off the leading memcache key identifying characters :1:ev-
|
||||
event_uuid = key[len(':1:ev-'):]
|
||||
# Write data in a staging area and then atomic move to pickup directory
|
||||
filename = '{}-partial.json'.format(event_uuid)
|
||||
dropoff_location = os.path.join(self.private_data_dir, 'artifacts', 'job_events', filename)
|
||||
write_location = '.'.join([dropoff_location, 'tmp'])
|
||||
with os.fdopen(os.open(write_location, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as f:
|
||||
f.write(value)
|
||||
os.rename(write_location, dropoff_location)
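The staging-then-rename trick in IsolatedFileWrite.set() is a general pattern; below is a minimal sketch of the same atomic drop-off idea (a hypothetical helper, not part of AWX).

# Generic sketch of the "write to a temp path, then atomically rename" pattern above.
import json
import os
import stat


def atomic_dropoff(directory, name, payload):
    final_path = os.path.join(directory, name)
    tmp_path = final_path + '.tmp'
    # Create the file readable/writable only by the owner, mirroring the code above.
    fd = os.open(tmp_path, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
    with os.fdopen(fd, 'w') as f:
        f.write(json.dumps(payload))
    # rename() is atomic on the same filesystem, so a reader polling the
    # directory never observes a half-written file.
    os.rename(tmp_path, final_path)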
class EventContext(object):
|
||||
'''
|
||||
Store global and local (per thread/process) data associated with callback
|
||||
events and other display output methods.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
self.display_lock = multiprocessing.RLock()
|
||||
cache_actual = os.getenv('CACHE', '127.0.0.1:11211')
|
||||
if os.getenv('AWX_ISOLATED_DATA_DIR', False):
|
||||
self.cache = IsolatedFileWrite()
|
||||
else:
|
||||
self.cache = memcache.Client([cache_actual], debug=0)
|
||||
|
||||
def add_local(self, **kwargs):
|
||||
if not hasattr(self, '_local'):
|
||||
self._local = threading.local()
|
||||
self._local._ctx = {}
|
||||
self._local._ctx.update(kwargs)
|
||||
|
||||
def remove_local(self, **kwargs):
|
||||
if hasattr(self, '_local'):
|
||||
for key in kwargs.keys():
|
||||
self._local._ctx.pop(key, None)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def set_local(self, **kwargs):
|
||||
try:
|
||||
self.add_local(**kwargs)
|
||||
yield
|
||||
finally:
|
||||
self.remove_local(**kwargs)
|
||||
|
||||
def get_local(self):
|
||||
return getattr(getattr(self, '_local', None), '_ctx', {})
|
||||
|
||||
def add_global(self, **kwargs):
|
||||
if not hasattr(self, '_global_ctx'):
|
||||
self._global_ctx = {}
|
||||
self._global_ctx.update(kwargs)
|
||||
|
||||
def remove_global(self, **kwargs):
|
||||
if hasattr(self, '_global_ctx'):
|
||||
for key in kwargs.keys():
|
||||
self._global_ctx.pop(key, None)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def set_global(self, **kwargs):
|
||||
try:
|
||||
self.add_global(**kwargs)
|
||||
yield
|
||||
finally:
|
||||
self.remove_global(**kwargs)
|
||||
|
||||
def get_global(self):
|
||||
return getattr(self, '_global_ctx', {})
|
||||
|
||||
def get(self):
|
||||
ctx = {}
|
||||
ctx.update(self.get_global())
|
||||
ctx.update(self.get_local())
|
||||
return ctx
|
||||
|
||||
def get_begin_dict(self):
|
||||
event_data = self.get()
|
||||
if os.getenv('JOB_ID', ''):
|
||||
event_data['job_id'] = int(os.getenv('JOB_ID', '0'))
|
||||
if os.getenv('AD_HOC_COMMAND_ID', ''):
|
||||
event_data['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
|
||||
if os.getenv('PROJECT_UPDATE_ID', ''):
|
||||
event_data['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
|
||||
event_data.setdefault('pid', os.getpid())
|
||||
event_data.setdefault('uuid', str(uuid.uuid4()))
|
||||
event_data.setdefault('created', datetime.datetime.utcnow().isoformat())
|
||||
if not event_data.get('parent_uuid', None) and event_data.get('job_id', None):
|
||||
for key in ('task_uuid', 'play_uuid', 'playbook_uuid'):
|
||||
parent_uuid = event_data.get(key, None)
|
||||
if parent_uuid and parent_uuid != event_data.get('uuid', None):
|
||||
event_data['parent_uuid'] = parent_uuid
|
||||
break
|
||||
|
||||
event = event_data.pop('event', None)
|
||||
if not event:
|
||||
event = 'verbose'
|
||||
for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'):
|
||||
if event_data.get(key, False):
|
||||
event = key
|
||||
break
|
||||
max_res = int(os.getenv("MAX_EVENT_RES", 700000))
|
||||
if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res:
|
||||
event_data['res'] = {}
|
||||
event_dict = dict(event=event, event_data=event_data)
|
||||
for key in list(event_data.keys()):
|
||||
if key in ('job_id', 'ad_hoc_command_id', 'project_update_id', 'uuid', 'parent_uuid', 'created',):
|
||||
event_dict[key] = event_data.pop(key)
|
||||
elif key in ('verbosity', 'pid'):
|
||||
event_dict[key] = event_data[key]
|
||||
return event_dict
|
||||
|
||||
def get_end_dict(self):
|
||||
return {}
|
||||
|
||||
def dump(self, fileobj, data, max_width=78, flush=False):
|
||||
b64data = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
|
||||
with self.display_lock:
|
||||
# pattern corresponding to OutputEventFilter expectation
|
||||
fileobj.write(u'\x1b[K')
|
||||
for offset in range(0, len(b64data), max_width):
|
||||
chunk = b64data[offset:offset + max_width]
|
||||
escaped_chunk = u'{}\x1b[{}D'.format(chunk, len(chunk))
|
||||
fileobj.write(escaped_chunk)
|
||||
fileobj.write(u'\x1b[K')
|
||||
if flush:
|
||||
fileobj.flush()
|
||||
|
||||
def dump_begin(self, fileobj):
|
||||
begin_dict = self.get_begin_dict()
|
||||
self.cache.set(":1:ev-{}".format(begin_dict['uuid']), json.dumps(begin_dict))
|
||||
self.dump(fileobj, {'uuid': begin_dict['uuid']})
|
||||
|
||||
def dump_end(self, fileobj):
|
||||
self.dump(fileobj, self.get_end_dict(), flush=True)
|
||||
|
||||
|
||||
event_context = EventContext()
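dump() frames each base64-encoded event between "erase line" and "cursor left" escape sequences so a downstream consumer can recover structured data from the ansible stdout stream. A hedged sketch of decoding that framing follows; the real consumer is AWX's OutputEventFilter, which may differ in details.

# Hypothetical decoder for the escape-sequence framing produced by dump() above.
import base64
import json
import re


def decode_event_blob(raw):
    # Strip the erase-line markers and the "move cursor N columns left"
    # separators, leaving only the base64 payload.
    payload = raw.replace(u'\x1b[K', '')
    payload = re.sub(r'\x1b\[\d+D', '', payload)
    return json.loads(base64.b64decode(payload).decode('utf-8'))


# Round trip with a tiny fake event, framed the same way dump() frames it.
encoded = base64.b64encode(json.dumps({'uuid': 'abc'}).encode('utf-8')).decode()
framed = u'\x1b[K{0}\x1b[{1}D\x1b[K'.format(encoded, len(encoded))
assert decode_event_blob(framed) == {'uuid': 'abc'}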
@@ -1,29 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible by Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import os
|
||||
|
||||
# Ansible
|
||||
import ansible
|
||||
|
||||
# Because of the way Ansible loads plugins, it's not possible to import
|
||||
# ansible.plugins.callback.minimal when being loaded as the minimal plugin. Ugh.
|
||||
with open(os.path.join(os.path.dirname(ansible.__file__), 'plugins', 'callback', 'minimal.py')) as in_file:
|
||||
exec(in_file.read())
|
||||
@@ -1,501 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible by Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import codecs
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
import stat
|
||||
import sys
|
||||
import uuid
|
||||
from copy import copy
|
||||
|
||||
# Ansible
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.plugins.callback.default import CallbackModule as DefaultCallbackModule
|
||||
|
||||
# AWX Display Callback
|
||||
from .events import event_context
|
||||
from .minimal import CallbackModule as MinimalCallbackModule
|
||||
|
||||
CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result" # noqa
|
||||
|
||||
|
||||
class BaseCallbackModule(CallbackBase):
|
||||
'''
|
||||
Callback module for logging ansible/ansible-playbook events.
|
||||
'''
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'stdout'
|
||||
|
||||
# These events should never have an associated play.
|
||||
EVENTS_WITHOUT_PLAY = [
|
||||
'playbook_on_start',
|
||||
'playbook_on_stats',
|
||||
]
|
||||
|
||||
# These events should never have an associated task.
|
||||
EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
|
||||
'playbook_on_setup',
|
||||
'playbook_on_notify',
|
||||
'playbook_on_import_for_host',
|
||||
'playbook_on_not_import_for_host',
|
||||
'playbook_on_no_hosts_matched',
|
||||
'playbook_on_no_hosts_remaining',
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
super(BaseCallbackModule, self).__init__()
|
||||
self.task_uuids = set()
|
||||
|
||||
@contextlib.contextmanager
|
||||
def capture_event_data(self, event, **event_data):
|
||||
event_data.setdefault('uuid', str(uuid.uuid4()))
|
||||
|
||||
if event not in self.EVENTS_WITHOUT_TASK:
|
||||
task = event_data.pop('task', None)
|
||||
else:
|
||||
task = None
|
||||
|
||||
if event_data.get('res'):
|
||||
if event_data['res'].get('_ansible_no_log', False):
|
||||
event_data['res'] = {'censored': CENSORED}
|
||||
if event_data['res'].get('results', []):
|
||||
event_data['res']['results'] = copy(event_data['res']['results'])
|
||||
for i, item in enumerate(event_data['res'].get('results', [])):
|
||||
if isinstance(item, dict) and item.get('_ansible_no_log', False):
|
||||
event_data['res']['results'][i] = {'censored': CENSORED}
|
||||
|
||||
with event_context.display_lock:
|
||||
try:
|
||||
event_context.add_local(event=event, **event_data)
|
||||
if task:
|
||||
self.set_task(task, local=True)
|
||||
event_context.dump_begin(sys.stdout)
|
||||
yield
|
||||
finally:
|
||||
event_context.dump_end(sys.stdout)
|
||||
if task:
|
||||
self.clear_task(local=True)
|
||||
event_context.remove_local(event=None, **event_data)
|
||||
|
||||
def set_playbook(self, playbook):
|
||||
# NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them.
|
||||
self.playbook_uuid = str(uuid.uuid4())
|
||||
file_name = getattr(playbook, '_file_name', '???')
|
||||
event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
|
||||
self.clear_play()
|
||||
|
||||
def set_play(self, play):
|
||||
if hasattr(play, 'hosts'):
|
||||
if isinstance(play.hosts, list):
|
||||
pattern = ','.join(play.hosts)
|
||||
else:
|
||||
pattern = play.hosts
|
||||
else:
|
||||
pattern = ''
|
||||
name = play.get_name().strip() or pattern
|
||||
event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
|
||||
self.clear_task()
|
||||
|
||||
def clear_play(self):
|
||||
event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
|
||||
self.clear_task()
|
||||
|
||||
def set_task(self, task, local=False):
|
||||
# FIXME: Task is "global" unless using free strategy!
|
||||
task_ctx = dict(
|
||||
task=(task.name or task.action),
|
||||
task_uuid=str(task._uuid),
|
||||
task_action=task.action,
|
||||
task_args='',
|
||||
)
|
||||
try:
|
||||
task_ctx['task_path'] = task.get_path()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if C.DISPLAY_ARGS_TO_STDOUT:
|
||||
if task.no_log:
|
||||
task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
|
||||
else:
|
||||
task_args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
||||
task_ctx['task_args'] = task_args
|
||||
if getattr(task, '_role', None):
|
||||
task_role = task._role._role_name
|
||||
else:
|
||||
task_role = getattr(task, 'role_name', '')
|
||||
if task_role:
|
||||
task_ctx['role'] = task_role
|
||||
if local:
|
||||
event_context.add_local(**task_ctx)
|
||||
else:
|
||||
event_context.add_global(**task_ctx)
|
||||
|
||||
def clear_task(self, local=False):
|
||||
task_ctx = dict(task=None, task_path=None, task_uuid=None, task_action=None, task_args=None, role=None)
|
||||
if local:
|
||||
event_context.remove_local(**task_ctx)
|
||||
else:
|
||||
event_context.remove_global(**task_ctx)
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.set_playbook(playbook)
|
||||
event_data = dict(
|
||||
uuid=self.playbook_uuid,
|
||||
)
|
||||
with self.capture_event_data('playbook_on_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_start(playbook)
|
||||
|
||||
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
|
||||
encrypt=None, confirm=False, salt_size=None,
|
||||
salt=None, default=None):
|
||||
event_data = dict(
|
||||
varname=varname,
|
||||
private=private,
|
||||
prompt=prompt,
|
||||
encrypt=encrypt,
|
||||
confirm=confirm,
|
||||
salt_size=salt_size,
|
||||
salt=salt,
|
||||
default=default,
|
||||
)
|
||||
with self.capture_event_data('playbook_on_vars_prompt', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_vars_prompt(
|
||||
varname, private, prompt, encrypt, confirm, salt_size, salt,
|
||||
default,
|
||||
)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
event_data = dict(
|
||||
included_file=included_file._filename if included_file is not None else None,
|
||||
)
|
||||
with self.capture_event_data('playbook_on_include', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_include(included_file)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
self.set_play(play)
|
||||
if hasattr(play, 'hosts'):
|
||||
if isinstance(play.hosts, list):
|
||||
pattern = ','.join(play.hosts)
|
||||
else:
|
||||
pattern = play.hosts
|
||||
else:
|
||||
pattern = ''
|
||||
name = play.get_name().strip() or pattern
|
||||
event_data = dict(
|
||||
name=name,
|
||||
pattern=pattern,
|
||||
uuid=str(play._uuid),
|
||||
)
|
||||
with self.capture_event_data('playbook_on_play_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_play_start(play)
|
||||
|
||||
def v2_playbook_on_import_for_host(self, result, imported_file):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
with self.capture_event_data('playbook_on_import_for_host'):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_import_for_host(result, imported_file)
|
||||
|
||||
def v2_playbook_on_not_import_for_host(self, result, missing_file):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
with self.capture_event_data('playbook_on_not_import_for_host'):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_not_import_for_host(result, missing_file)
|
||||
|
||||
def v2_playbook_on_setup(self):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
with self.capture_event_data('playbook_on_setup'):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_setup()
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
# FIXME: Flag task path output as vv.
|
||||
task_uuid = str(task._uuid)
|
||||
if task_uuid in self.task_uuids:
|
||||
# FIXME: When this task UUID repeats, it means the play is using the
|
||||
# free strategy, so different hosts may be running different tasks
|
||||
# within a play.
|
||||
return
|
||||
self.task_uuids.add(task_uuid)
|
||||
self.set_task(task)
|
||||
event_data = dict(
|
||||
task=task,
|
||||
name=task.get_name(),
|
||||
is_conditional=is_conditional,
|
||||
uuid=task_uuid,
|
||||
)
|
||||
with self.capture_event_data('playbook_on_task_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
self.set_task(task)
|
||||
event_data = dict(
|
||||
task=task,
|
||||
name=task.get_name(),
|
||||
uuid=str(task._uuid),
|
||||
is_conditional=True,
|
||||
)
|
||||
with self.capture_event_data('playbook_on_task_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_cleanup_task_start(task)
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
# NOTE: Re-using playbook_on_task_start event for this v2-specific
|
||||
# event, but setting is_conditional=True, which is how v1 identified a
|
||||
# task run as a handler.
|
||||
self.set_task(task)
|
||||
event_data = dict(
|
||||
task=task,
|
||||
name=task.get_name(),
|
||||
uuid=str(task._uuid),
|
||||
is_conditional=True,
|
||||
)
|
||||
with self.capture_event_data('playbook_on_task_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_handler_task_start(task)
|
||||
|
||||
def v2_playbook_on_no_hosts_matched(self):
|
||||
with self.capture_event_data('playbook_on_no_hosts_matched'):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_no_hosts_matched()
|
||||
|
||||
def v2_playbook_on_no_hosts_remaining(self):
|
||||
with self.capture_event_data('playbook_on_no_hosts_remaining'):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_no_hosts_remaining()
|
||||
|
||||
def v2_playbook_on_notify(self, handler, host):
|
||||
# NOTE: Not used by Ansible < 2.5.
|
||||
event_data = dict(
|
||||
host=host.get_name(),
|
||||
handler=handler.get_name(),
|
||||
)
|
||||
with self.capture_event_data('playbook_on_notify', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_notify(handler, host)
|
||||
|
||||
'''
ansible_stats was retroactively added in 2.2
'''
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
self.clear_play()
|
||||
# FIXME: Add count of plays/tasks.
|
||||
event_data = dict(
|
||||
changed=stats.changed,
|
||||
dark=stats.dark,
|
||||
failures=stats.failures,
|
||||
ok=stats.ok,
|
||||
processed=stats.processed,
|
||||
skipped=stats.skipped
|
||||
)
|
||||
|
||||
# write custom set_stat artifact data to the local disk so that it can
|
||||
# be persisted by awx after the process exits
|
||||
custom_artifact_data = stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
|
||||
if custom_artifact_data:
|
||||
# create the directory for custom stats artifacts to live in (if it doesn't exist)
|
||||
custom_artifacts_dir = os.path.join(os.getenv('AWX_PRIVATE_DATA_DIR'), 'artifacts')
|
||||
if not os.path.isdir(custom_artifacts_dir):
|
||||
os.makedirs(custom_artifacts_dir, mode=stat.S_IXUSR + stat.S_IWUSR + stat.S_IRUSR)
|
||||
|
||||
custom_artifacts_path = os.path.join(custom_artifacts_dir, 'custom')
|
||||
with codecs.open(custom_artifacts_path, 'w', encoding='utf-8') as f:
|
||||
os.chmod(custom_artifacts_path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
json.dump(custom_artifact_data, f)
|
||||
|
||||
with self.capture_event_data('playbook_on_stats', **event_data):
|
||||
super(BaseCallbackModule, self).v2_playbook_on_stats(stats)
|
||||
|
||||
@staticmethod
|
||||
def _get_event_loop(task):
|
||||
if hasattr(task, 'loop_with'): # Ansible >=2.5
|
||||
return task.loop_with
|
||||
elif hasattr(task, 'loop'): # Ansible <2.4
|
||||
return task.loop
|
||||
return None
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
# FIXME: Display detailed results or not based on verbosity.
|
||||
|
||||
# strip environment vars from the job event; it already exists on the
|
||||
# job and sensitive values are filtered there
|
||||
if result._task.action in ('setup', 'gather_facts'):
|
||||
result._result.get('ansible_facts', {}).pop('ansible_env', None)
|
||||
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
remote_addr=result._host.address,
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
event_loop=self._get_event_loop(result._task),
|
||||
)
|
||||
with self.capture_event_data('runner_on_ok', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_ok(result)
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
# FIXME: Add verbosity for exception/results output.
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
remote_addr=result._host.address,
|
||||
res=result._result,
|
||||
task=result._task,
|
||||
ignore_errors=ignore_errors,
|
||||
event_loop=self._get_event_loop(result._task),
|
||||
)
|
||||
with self.capture_event_data('runner_on_failed', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_failed(result, ignore_errors)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
remote_addr=result._host.address,
|
||||
task=result._task,
|
||||
event_loop=self._get_event_loop(result._task),
|
||||
)
|
||||
with self.capture_event_data('runner_on_skipped', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_skipped(result)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
remote_addr=result._host.address,
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
)
|
||||
with self.capture_event_data('runner_on_unreachable', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_unreachable(result)
|
||||
|
||||
def v2_runner_on_no_hosts(self, task):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
event_data = dict(
|
||||
task=task,
|
||||
)
|
||||
with self.capture_event_data('runner_on_no_hosts', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_no_hosts(task)
|
||||
|
||||
def v2_runner_on_async_poll(self, result):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
jid=result._result.get('ansible_job_id'),
|
||||
)
|
||||
with self.capture_event_data('runner_on_async_poll', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_async_poll(result)
|
||||
|
||||
def v2_runner_on_async_ok(self, result):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
jid=result._result.get('ansible_job_id'),
|
||||
)
|
||||
with self.capture_event_data('runner_on_async_ok', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_async_ok(result)
|
||||
|
||||
def v2_runner_on_async_failed(self, result):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
jid=result._result.get('ansible_job_id'),
|
||||
)
|
||||
with self.capture_event_data('runner_on_async_failed', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_async_failed(result)
|
||||
|
||||
def v2_runner_on_file_diff(self, result, diff):
|
||||
# NOTE: Not used by Ansible 2.x.
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
diff=diff,
|
||||
)
|
||||
with self.capture_event_data('runner_on_file_diff', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_file_diff(result, diff)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
# NOTE: Logged as runner_on_file_diff.
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
diff=result._result.get('diff'),
|
||||
)
|
||||
with self.capture_event_data('runner_on_file_diff', **event_data):
|
||||
super(BaseCallbackModule, self).v2_on_file_diff(result)
|
||||
|
||||
def v2_runner_item_on_ok(self, result):
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
)
|
||||
with self.capture_event_data('runner_item_on_ok', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_item_on_ok(result)
|
||||
|
||||
def v2_runner_item_on_failed(self, result):
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
)
|
||||
with self.capture_event_data('runner_item_on_failed', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_item_on_failed(result)
|
||||
|
||||
def v2_runner_item_on_skipped(self, result):
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
)
|
||||
with self.capture_event_data('runner_item_on_skipped', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_item_on_skipped(result)
|
||||
|
||||
def v2_runner_retry(self, result):
|
||||
event_data = dict(
|
||||
host=result._host.get_name(),
|
||||
task=result._task,
|
||||
res=result._result,
|
||||
)
|
||||
with self.capture_event_data('runner_retry', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_retry(result)
|
||||
|
||||
def v2_runner_on_start(self, host, task):
|
||||
event_data = dict(
|
||||
host=host.get_name(),
|
||||
task=task
|
||||
)
|
||||
with self.capture_event_data('runner_on_start', **event_data):
|
||||
super(BaseCallbackModule, self).v2_runner_on_start(host, task)
|
||||
|
||||
|
||||
|
||||
class AWXDefaultCallbackModule(BaseCallbackModule, DefaultCallbackModule):
|
||||
|
||||
CALLBACK_NAME = 'awx_display'
|
||||
|
||||
|
||||
class AWXMinimalCallbackModule(BaseCallbackModule, MinimalCallbackModule):
|
||||
|
||||
CALLBACK_NAME = 'minimal'
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
pass
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.set_task(task)
|
||||
@@ -1,30 +0,0 @@
|
||||
# Copyright (c) 2017 Ansible by Red Hat
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Add awx/lib to sys.path.
|
||||
awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
|
||||
if awx_lib_path not in sys.path:
|
||||
sys.path.insert(0, awx_lib_path)
|
||||
|
||||
# Tower Display Callback
|
||||
from awx_display_callback import AWXDefaultCallbackModule as CallbackModule # noqa
|
||||
@@ -1,30 +0,0 @@
|
||||
# Copyright (c) 2017 Ansible by Red Hat
|
||||
#
|
||||
# This file is part of Ansible Tower, but depends on code imported from Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
# Python
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Add awx/lib to sys.path.
|
||||
awx_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
|
||||
if awx_lib_path not in sys.path:
|
||||
sys.path.insert(0, awx_lib_path)
|
||||
|
||||
# Tower Display Callback
|
||||
from awx_display_callback import AWXMinimalCallbackModule as CallbackModule # noqa
|
||||
@@ -1,26 +0,0 @@
|
||||
# Python
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Based on http://stackoverflow.com/a/6879344/131141 -- Initialize awx display
|
||||
# callback as early as possible to wrap ansible.display.Display methods.
|
||||
|
||||
|
||||
def argv_ready(argv):
|
||||
if argv and os.path.basename(argv[0]) in {'ansible', 'ansible-playbook'}:
|
||||
import awx_display_callback # noqa
|
||||
|
||||
|
||||
class argv_placeholder(object):
|
||||
|
||||
def __del__(self):
|
||||
try:
|
||||
argv_ready(sys.argv)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
if hasattr(sys, 'argv'):
|
||||
argv_ready(sys.argv)
|
||||
else:
|
||||
sys.argv = argv_placeholder()
|
||||
@@ -1,2 +0,0 @@
|
||||
[pytest]
|
||||
addopts = -v
|
||||
@@ -1,353 +0,0 @@
|
||||
# Copyright (c) 2017 Ansible by Red Hat
|
||||
# All Rights Reserved
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from collections import OrderedDict
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
# ansible uses `ANSIBLE_CALLBACK_PLUGINS` and `ANSIBLE_STDOUT_CALLBACK` to
|
||||
# discover callback plugins; `ANSIBLE_CALLBACK_PLUGINS` is a list of paths to
|
||||
# search for a plugin implementation (which should be named `CallbackModule`)
|
||||
#
|
||||
# this code modifies the Python path to make our
|
||||
# `awx.lib.awx_display_callback` callback importable (because `awx.lib`
|
||||
# itself is not a package)
|
||||
#
|
||||
# we use the `awx_display_callback` imports below within this file, but
|
||||
# Ansible also uses them when it discovers this file in
|
||||
# `ANSIBLE_CALLBACK_PLUGINS`
|
||||
CALLBACK = os.path.splitext(os.path.basename(__file__))[0]
|
||||
PLUGINS = os.path.dirname(__file__)
|
||||
with mock.patch.dict(os.environ, {'ANSIBLE_STDOUT_CALLBACK': CALLBACK,
|
||||
'ANSIBLE_CALLBACK_PLUGINS': PLUGINS}):
|
||||
from ansible import __version__ as ANSIBLE_VERSION
|
||||
from ansible.cli.playbook import PlaybookCLI
|
||||
from ansible.executor.playbook_executor import PlaybookExecutor
|
||||
from ansible.inventory.manager import InventoryManager
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from ansible.vars.manager import VariableManager
|
||||
|
||||
# Add awx/lib to sys.path so we can use the plugin
|
||||
path = os.path.abspath(os.path.join(PLUGINS, '..', '..', 'lib'))
|
||||
if path not in sys.path:
|
||||
sys.path.insert(0, path)
|
||||
|
||||
from awx_display_callback import AWXDefaultCallbackModule as CallbackModule # noqa
|
||||
from awx_display_callback.events import event_context # noqa
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def cache(request):
|
||||
class Cache(OrderedDict):
|
||||
def set(self, key, value):
|
||||
self[key] = value
|
||||
local_cache = Cache()
|
||||
patch = mock.patch.object(event_context, 'cache', local_cache)
|
||||
patch.start()
|
||||
request.addfinalizer(patch.stop)
|
||||
return local_cache
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def executor(tmpdir_factory, request):
|
||||
playbooks = request.node.callspec.params.get('playbook')
|
||||
playbook_files = []
|
||||
for name, playbook in playbooks.items():
|
||||
filename = str(tmpdir_factory.mktemp('data').join(name))
|
||||
with open(filename, 'w') as f:
|
||||
f.write(playbook)
|
||||
playbook_files.append(filename)
|
||||
|
||||
cli = PlaybookCLI(['', 'playbook.yml'])
|
||||
cli.parse()
|
||||
options = cli.parser.parse_args(['-v'])[0]
|
||||
loader = DataLoader()
|
||||
variable_manager = VariableManager(loader=loader)
|
||||
inventory = InventoryManager(loader=loader, sources='localhost,')
|
||||
variable_manager.set_inventory(inventory)
|
||||
|
||||
return PlaybookExecutor(playbooks=playbook_files, inventory=inventory,
|
||||
variable_manager=variable_manager, loader=loader,
|
||||
options=options, passwords={})
|
||||
|
||||
|
||||
@pytest.mark.parametrize('event', {'playbook_on_start',
|
||||
'playbook_on_play_start',
|
||||
'playbook_on_task_start', 'runner_on_ok',
|
||||
'playbook_on_stats'})
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'helloworld.yml': '''
|
||||
- name: Hello World Sample
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- name: Hello Message
|
||||
debug:
|
||||
msg: "Hello World!"
|
||||
'''}, # noqa
|
||||
{'results_included.yml': '''
|
||||
- name: Run module which generates results list
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
vars:
|
||||
results: ['foo', 'bar']
|
||||
tasks:
|
||||
- name: Generate results list
|
||||
debug:
|
||||
var: results
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_callback_plugin_receives_events(executor, cache, event, playbook):
|
||||
executor.run()
|
||||
assert len(cache)
|
||||
assert event in [task['event'] for task in cache.values()]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'no_log_on_ok.yml': '''
|
||||
- name: args should not be logged when task-level no_log is set
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- shell: echo "SENSITIVE"
|
||||
no_log: true
|
||||
'''}, # noqa
|
||||
{'no_log_on_fail.yml': '''
|
||||
- name: failed args should not be logged when task-level no_log is set
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- shell: echo "SENSITIVE"
|
||||
no_log: true
|
||||
failed_when: true
|
||||
ignore_errors: true
|
||||
'''}, # noqa
|
||||
{'no_log_on_skip.yml': '''
|
||||
- name: skipped task args should be suppressed with no_log
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- shell: echo "SENSITIVE"
|
||||
no_log: true
|
||||
when: false
|
||||
'''}, # noqa
|
||||
{'no_log_on_play.yml': '''
|
||||
- name: args should not be logged when play-level no_log set
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
no_log: true
|
||||
tasks:
|
||||
- shell: echo "SENSITIVE"
|
||||
'''}, # noqa
|
||||
{'async_no_log.yml': '''
|
||||
- name: async task args should be suppressed with no_log
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
no_log: true
|
||||
tasks:
|
||||
- async: 10
|
||||
poll: 1
|
||||
shell: echo "SENSITIVE"
|
||||
no_log: true
|
||||
'''}, # noqa
|
||||
{'with_items.yml': '''
|
||||
- name: with_items tasks should be suppressed with no_log
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- shell: echo {{ item }}
|
||||
no_log: true
|
||||
with_items: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
|
||||
when: item != "SENSITIVE-SKIPPED"
|
||||
failed_when: item == "SENSITIVE-FAILED"
|
||||
ignore_errors: yes
|
||||
'''}, # noqa, NOTE: with_items will be deprecated in 2.9
|
||||
{'loop.yml': '''
|
||||
- name: loop tasks should be suppressed with no_log
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- shell: echo {{ item }}
|
||||
no_log: true
|
||||
loop: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
|
||||
when: item != "SENSITIVE-SKIPPED"
|
||||
failed_when: item == "SENSITIVE-FAILED"
|
||||
ignore_errors: yes
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_callback_plugin_no_log_filters(executor, cache, playbook):
|
||||
executor.run()
|
||||
assert len(cache)
|
||||
assert 'SENSITIVE' not in json.dumps(cache.items())
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'no_log_on_ok.yml': '''
|
||||
- name: args should not be logged when no_log is set at the task or module level
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- shell: echo "PUBLIC"
|
||||
- shell: echo "PRIVATE"
|
||||
no_log: true
|
||||
- uri: url=https://example.org username="PUBLIC" password="PRIVATE"
|
||||
- copy: content="PRIVATE" dest="/tmp/tmp_no_log"
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_callback_plugin_task_args_leak(executor, cache, playbook):
|
||||
executor.run()
|
||||
events = cache.values()
|
||||
assert events[0]['event'] == 'playbook_on_start'
|
||||
assert events[1]['event'] == 'playbook_on_play_start'
|
||||
|
||||
# task 1
|
||||
assert events[2]['event'] == 'playbook_on_task_start'
|
||||
assert events[3]['event'] == 'runner_on_ok'
|
||||
|
||||
# task 2 no_log=True
|
||||
assert events[4]['event'] == 'playbook_on_task_start'
|
||||
assert events[5]['event'] == 'runner_on_ok'
|
||||
assert 'PUBLIC' in json.dumps(cache.items())
|
||||
assert 'PRIVATE' not in json.dumps(cache.items())
|
||||
# make sure playbook was successful, so all tasks were hit
|
||||
assert not events[-1]['event_data']['failures'], 'Unexpected playbook execution failure'
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'loop_with_no_log.yml': '''
|
||||
- name: playbook variable should not be overwritten when using no log
|
||||
connection: local
|
||||
hosts: all
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- command: "{{ item }}"
|
||||
register: command_register
|
||||
no_log: True
|
||||
with_items:
|
||||
- "echo helloworld!"
|
||||
- debug: msg="{{ command_register.results|map(attribute='stdout')|list }}"
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_callback_plugin_censoring_does_not_overwrite(executor, cache, playbook):
|
||||
executor.run()
|
||||
events = cache.values()
|
||||
assert events[0]['event'] == 'playbook_on_start'
|
||||
assert events[1]['event'] == 'playbook_on_play_start'
|
||||
|
||||
# task 1
|
||||
assert events[2]['event'] == 'playbook_on_task_start'
|
||||
# Ordering of task and item events may differ randomly
|
||||
assert set(['runner_on_ok', 'runner_item_on_ok']) == set([data['event'] for data in events[3:5]])
|
||||
|
||||
# task 2 no_log=True
|
||||
assert events[5]['event'] == 'playbook_on_task_start'
|
||||
assert events[6]['event'] == 'runner_on_ok'
|
||||
assert 'helloworld!' in events[6]['event_data']['res']['msg']
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'strip_env_vars.yml': '''
|
||||
- name: sensitive environment variables should be stripped from events
|
||||
connection: local
|
||||
hosts: all
|
||||
tasks:
|
||||
- shell: echo "Hello, World!"
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_callback_plugin_strips_task_environ_variables(executor, cache, playbook):
|
||||
executor.run()
|
||||
assert len(cache)
|
||||
for event in cache.values():
|
||||
assert os.environ['PATH'] not in json.dumps(event)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'custom_set_stat.yml': '''
|
||||
- name: custom set_stat calls should persist to the local disk so awx can save them
|
||||
connection: local
|
||||
hosts: all
|
||||
tasks:
|
||||
- set_stats:
|
||||
data:
|
||||
foo: "bar"
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_callback_plugin_saves_custom_stats(executor, cache, playbook):
|
||||
try:
|
||||
private_data_dir = tempfile.mkdtemp()
|
||||
with mock.patch.dict(os.environ, {'AWX_PRIVATE_DATA_DIR': private_data_dir}):
|
||||
executor.run()
|
||||
artifacts_path = os.path.join(private_data_dir, 'artifacts', 'custom')
|
||||
with open(artifacts_path, 'r') as f:
|
||||
assert json.load(f) == {'foo': 'bar'}
|
||||
finally:
|
||||
shutil.rmtree(os.path.join(private_data_dir))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'handle_playbook_on_notify.yml': '''
|
||||
- name: handle playbook_on_notify events properly
|
||||
connection: local
|
||||
hosts: all
|
||||
handlers:
|
||||
- name: my_handler
|
||||
debug: msg="My Handler"
|
||||
tasks:
|
||||
- debug: msg="My Task"
|
||||
changed_when: true
|
||||
notify:
|
||||
- my_handler
|
||||
'''}, # noqa
|
||||
])
|
||||
@pytest.mark.skipif(ANSIBLE_VERSION < '2.5', reason="v2_playbook_on_notify doesn't work before ansible 2.5")
|
||||
def test_callback_plugin_records_notify_events(executor, cache, playbook):
|
||||
executor.run()
|
||||
assert len(cache)
|
||||
notify_events = [x[1] for x in cache.items() if x[1]['event'] == 'playbook_on_notify']
|
||||
assert len(notify_events) == 1
|
||||
assert notify_events[0]['event_data']['handler'] == 'my_handler'
|
||||
assert notify_events[0]['event_data']['host'] == 'localhost'
|
||||
assert notify_events[0]['event_data']['task'] == 'debug'
|
||||
|
||||
|
||||
@pytest.mark.parametrize('playbook', [
|
||||
{'no_log_module_with_var.yml': '''
|
||||
- name: ensure that module-level secrets are redacted
|
||||
connection: local
|
||||
hosts: all
|
||||
vars:
|
||||
- pw: SENSITIVE
|
||||
tasks:
|
||||
- uri:
|
||||
url: https://example.org
|
||||
user: john-jacob-jingleheimer-schmidt
|
||||
password: "{{ pw }}"
|
||||
'''}, # noqa
|
||||
])
|
||||
def test_module_level_no_log(executor, cache, playbook):
|
||||
# https://github.com/ansible/tower/issues/1101
|
||||
# It's possible for `no_log=True` to be defined at the _module_ level,
|
||||
# e.g., for the URI module password parameter
|
||||
# This test ensures that we properly redact those
|
||||
executor.run()
|
||||
assert len(cache)
|
||||
assert 'john-jacob-jingleheimer-schmidt' in json.dumps(cache.items())
|
||||
assert 'SENSITIVE' not in json.dumps(cache.items())
|
||||
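The comment block at the top of the removed test module describes how Ansible discovers stdout callbacks: ANSIBLE_CALLBACK_PLUGINS lists directories searched for a CallbackModule class, and ANSIBLE_STDOUT_CALLBACK names the plugin to use for stdout. A minimal sketch of exercising that mechanism outside the test suite, assuming a hypothetical ./plugins directory containing awx_display.py with a CallbackModule class:

import os
import subprocess

# Hypothetical paths; point these at wherever the callback plugin actually lives.
env = dict(os.environ,
           ANSIBLE_STDOUT_CALLBACK='awx_display',
           ANSIBLE_CALLBACK_PLUGINS=os.path.abspath('./plugins'))

# Ansible reads both variables at startup and routes stdout events
# through the named plugin for this run only.
subprocess.run(['ansible-playbook', '-i', 'localhost,', '-c', 'local',
                'helloworld.yml'], env=env, check=True)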
@@ -28,11 +28,19 @@ from awx.main.utils import (
|
||||
to_python_boolean,
|
||||
get_licenser,
|
||||
)
|
||||
from awx.main.models import * # noqa
|
||||
from awx.main.models import (
|
||||
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialType,
|
||||
CredentialInputSource, CustomInventoryScript, Group, Host, Instance, InstanceGroup,
|
||||
Inventory, InventorySource, InventoryUpdate, InventoryUpdateEvent, Job, JobEvent,
|
||||
JobHostSummary, JobLaunchConfig, JobTemplate, Label, Notification,
|
||||
NotificationTemplate, Organization, Project, ProjectUpdate,
|
||||
ProjectUpdateEvent, Role, Schedule, SystemJob, SystemJobEvent,
|
||||
SystemJobTemplate, Team, UnifiedJob, UnifiedJobTemplate, WorkflowJob,
|
||||
WorkflowJobNode, WorkflowJobTemplate, WorkflowJobTemplateNode,
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
|
||||
)
|
||||
from awx.main.models.mixins import ResourceMixin
|
||||
|
||||
from awx.conf.license import LicenseForbids, feature_enabled
|
||||
|
||||
__all__ = ['get_user_queryset', 'check_user_access', 'check_user_access_with_errors',
|
||||
'user_accessible_objects', 'consumer_access',]
|
||||
|
||||
@@ -74,6 +82,17 @@ def get_object_from_data(field, Model, data, obj=None):
|
||||
raise ParseError(_("Bad data found in related field %s." % field))
|
||||
|
||||
|
||||
def vars_are_encrypted(vars):
|
||||
'''Returns True if any of the values in the dictionary vars contains
|
||||
content which is encrypted by the AWX encryption algorithm
|
||||
'''
|
||||
for value in vars.values():
|
||||
if isinstance(value, str):
|
||||
if value.startswith('$encrypted$'):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
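A quick illustration of the helper above, using made-up extra_vars; any string value carrying the $encrypted$ prefix marks the dictionary as containing AWX-encrypted content:

# Hypothetical survey answers as they might appear on a saved launch config.
plain = {'region': 'us-east-1', 'retries': 3}
secret = {'region': 'us-east-1', 'vault_password': '$encrypted$...'}

assert vars_are_encrypted(plain) is False
assert vars_are_encrypted(secret) is True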
def register_access(model_class, access_class):
|
||||
access_registry[model_class] = access_class
|
||||
|
||||
@@ -314,11 +333,35 @@ class BaseAccess(object):
|
||||
elif not add_host_name and free_instances < 0:
|
||||
raise PermissionDenied(_("Host count exceeds available instances."))
|
||||
|
||||
if feature is not None:
|
||||
if "features" in validation_info and not validation_info["features"].get(feature, False):
|
||||
raise LicenseForbids(_("Feature %s is not enabled in the active license.") % feature)
|
||||
elif "features" not in validation_info:
|
||||
raise LicenseForbids(_("Features not found in active license."))
|
||||
def check_org_host_limit(self, data, add_host_name=None):
|
||||
validation_info = get_licenser().validate()
|
||||
if validation_info.get('license_type', 'UNLICENSED') == 'open':
|
||||
return
|
||||
|
||||
inventory = get_object_from_data('inventory', Inventory, data)
|
||||
if inventory is None: # In this case a missing inventory error is launched
|
||||
return # further down the line, so just ignore it.
|
||||
|
||||
org = inventory.organization
|
||||
if org is None or org.max_hosts == 0:
|
||||
return
|
||||
|
||||
active_count = Host.objects.org_active_count(org.id)
|
||||
if active_count > org.max_hosts:
|
||||
raise PermissionDenied(
|
||||
_("You have already reached the maximum number of %s hosts"
|
||||
" allowed for your organization. Contact your System Administrator"
|
||||
" for assistance." % org.max_hosts)
|
||||
)
|
||||
|
||||
if add_host_name:
|
||||
host_exists = Host.objects.filter(inventory__organization=org.id, name=add_host_name).exists()
|
||||
if not host_exists and active_count == org.max_hosts:
|
||||
raise PermissionDenied(
|
||||
_("You have already reached the maximum number of %s hosts"
|
||||
" allowed for your organization. Contact your System Administrator"
|
||||
" for assistance." % org.max_hosts)
|
||||
)
|
||||
|
||||
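A short worked example of the limit check above, with hypothetical numbers: for an organization at its max_hosts ceiling, re-saving an existing host name is still allowed, but a name not yet present in the organization is rejected.

# Hypothetical numbers mirroring the checks above.
org_max_hosts = 10
active_count = 10

# Over the limit outright -> PermissionDenied
assert not active_count > org_max_hosts

# At the limit: an existing host may be re-saved, but a brand-new
# host name would push the organization over and is rejected.
host_exists = False
would_be_rejected = (not host_exists) and active_count == org_max_hosts
assert would_be_rejected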
def get_user_capabilities(self, obj, method_list=[], parent_obj=None, capabilities_cache={}):
|
||||
if obj is None:
|
||||
@@ -343,14 +386,11 @@ class BaseAccess(object):
|
||||
if obj.validation_errors:
|
||||
user_capabilities[display_method] = False
|
||||
continue
|
||||
elif isinstance(obj, (WorkflowJobTemplate, WorkflowJob)) and (not feature_enabled('workflows')):
|
||||
user_capabilities[display_method] = (display_method == 'delete')
|
||||
continue
|
||||
elif display_method == 'copy' and isinstance(obj, WorkflowJobTemplate) and obj.organization_id is None:
|
||||
user_capabilities[display_method] = self.user.is_superuser
|
||||
continue
|
||||
elif display_method == 'copy' and isinstance(obj, Project) and obj.scm_type == '':
|
||||
# Connot copy manual project without errors
|
||||
# Cannot copy manual project without errors
|
||||
user_capabilities[display_method] = False
|
||||
continue
|
||||
elif display_method in ['start', 'schedule'] and isinstance(obj, Group): # TODO: remove in 3.3
|
||||
@@ -386,7 +426,7 @@ class BaseAccess(object):
|
||||
if display_method == 'schedule':
|
||||
user_capabilities['schedule'] = user_capabilities['start']
|
||||
continue
|
||||
elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CustomInventoryScript)):
|
||||
elif display_method == 'delete' and not isinstance(obj, (User, UnifiedJob, CustomInventoryScript, CredentialInputSource)):
|
||||
user_capabilities['delete'] = user_capabilities['edit']
|
||||
continue
|
||||
elif display_method == 'copy' and isinstance(obj, (Group, Host)):
|
||||
@@ -420,6 +460,42 @@ class BaseAccess(object):
|
||||
return False
|
||||
|
||||
|
||||
class NotificationAttachMixin(BaseAccess):
|
||||
'''For models that can have notifications attached
|
||||
|
||||
I can attach a notification template when
|
||||
- I have notification_admin_role to organization of the NT
|
||||
- I can read the object I am attaching it to
|
||||
|
||||
I can unattach when those same criteria are met
|
||||
'''
|
||||
notification_attach_roles = None
|
||||
|
||||
def _can_attach(self, notification_template, resource_obj):
|
||||
if not NotificationTemplateAccess(self.user).can_change(notification_template, {}):
|
||||
return False
|
||||
if self.notification_attach_roles is None:
|
||||
return self.can_read(resource_obj)
|
||||
return any(self.user in getattr(resource_obj, role) for role in self.notification_attach_roles)
|
||||
|
||||
@check_superuser
|
||||
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
|
||||
if isinstance(sub_obj, NotificationTemplate):
|
||||
# reverse obj and sub_obj
|
||||
return self._can_attach(notification_template=sub_obj, resource_obj=obj)
|
||||
return super(NotificationAttachMixin, self).can_attach(
|
||||
obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
|
||||
|
||||
@check_superuser
|
||||
def can_unattach(self, obj, sub_obj, relationship, data=None):
|
||||
if isinstance(sub_obj, NotificationTemplate):
|
||||
# due to this special case, we use symmetrical logic with attach permission
|
||||
return self._can_attach(notification_template=sub_obj, resource_obj=obj)
|
||||
return super(NotificationAttachMixin, self).can_unattach(
|
||||
obj, sub_obj, relationship, relationship, data=data
|
||||
)
|
||||
|
||||
|
||||
class InstanceAccess(BaseAccess):
|
||||
|
||||
model = Instance
|
||||
@@ -434,12 +510,16 @@ class InstanceAccess(BaseAccess):
|
||||
skip_sub_obj_read_check=False):
|
||||
if relationship == 'rampart_groups' and isinstance(sub_obj, InstanceGroup):
|
||||
return self.user.is_superuser
|
||||
return super(InstanceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
return super(InstanceAccess, self).can_attach(
|
||||
obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check
|
||||
)
|
||||
|
||||
def can_unattach(self, obj, sub_obj, relationship, data=None):
|
||||
if relationship == 'rampart_groups' and isinstance(sub_obj, InstanceGroup):
|
||||
return self.user.is_superuser
|
||||
return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
return super(InstanceAccess, self).can_unattach(
|
||||
obj, sub_obj, relationship, relationship, data=data
|
||||
)
|
||||
|
||||
def can_add(self, data):
|
||||
return False
|
||||
@@ -576,23 +656,22 @@ class UserAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
# Reverse obj and sub_obj, defer to RoleAccess if this is a role assignment.
|
||||
# The only thing that a User should ever have attached is a Role
|
||||
if relationship == 'roles':
|
||||
role_access = RoleAccess(self.user)
|
||||
return role_access.can_attach(sub_obj, obj, 'members', *args, **kwargs)
|
||||
return super(UserAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
|
||||
logger.error('Unexpected attempt to associate {} with a user.'.format(sub_obj))
|
||||
return False
|
||||
|
||||
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
# The only thing that a User should ever have to be unattached is a Role
|
||||
if relationship == 'roles':
|
||||
role_access = RoleAccess(self.user)
|
||||
return role_access.can_unattach(sub_obj, obj, 'members', *args, **kwargs)
|
||||
return super(UserAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
|
||||
logger.error('Unexpected attempt to de-associate {} from a user.'.format(sub_obj))
|
||||
return False
|
||||
|
||||
|
||||
class OAuth2ApplicationAccess(BaseAccess):
|
||||
@@ -608,13 +687,14 @@ class OAuth2ApplicationAccess(BaseAccess):
|
||||
|
||||
model = OAuth2Application
|
||||
select_related = ('user',)
|
||||
prefetch_related = ('organization', 'oauth2accesstoken_set')
|
||||
|
||||
def filtered_queryset(self):
|
||||
org_access_qs = Organization.accessible_objects(self.user, 'member_role')
|
||||
return self.model.objects.filter(organization__in=org_access_qs)
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return self.user.is_superuser or self.check_related('organization', Organization, data, obj=obj,
|
||||
return self.user.is_superuser or self.check_related('organization', Organization, data, obj=obj,
|
||||
role_field='admin_role', mandatory=True)
|
||||
|
||||
def can_delete(self, obj):
|
||||
@@ -622,7 +702,7 @@ class OAuth2ApplicationAccess(BaseAccess):
|
||||
|
||||
def can_add(self, data):
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
return True
|
||||
if not data:
|
||||
return Organization.accessible_objects(self.user, 'admin_role').exists()
|
||||
return self.check_related('organization', Organization, data, role_field='admin_role', mandatory=True)
|
||||
@@ -636,29 +716,30 @@ class OAuth2TokenAccess(BaseAccess):
|
||||
- I am the user of the token.
|
||||
I can create an OAuth2 app token when:
|
||||
- I have the read permission of the related application.
|
||||
I can read, change or delete a personal token when:
|
||||
I can read, change or delete a personal token when:
|
||||
- I am the user of the token
|
||||
- I am the superuser
|
||||
I can create an OAuth2 Personal Access Token when:
|
||||
- I am a user. But I can only create a PAT for myself.
|
||||
- I am a user. But I can only create a PAT for myself.
|
||||
'''
|
||||
|
||||
model = OAuth2AccessToken
|
||||
|
||||
|
||||
select_related = ('user', 'application')
|
||||
|
||||
def filtered_queryset(self):
|
||||
prefetch_related = ('refresh_token',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
org_access_qs = Organization.objects.filter(
|
||||
Q(admin_role__members=self.user) | Q(auditor_role__members=self.user))
|
||||
return self.model.objects.filter(application__organization__in=org_access_qs) | self.model.objects.filter(user__id=self.user.pk)
|
||||
|
||||
|
||||
def can_delete(self, obj):
|
||||
if (self.user.is_superuser) | (obj.user == self.user):
|
||||
return True
|
||||
elif not obj.application:
|
||||
return False
|
||||
return self.user in obj.application.organization.admin_role
|
||||
|
||||
|
||||
def can_change(self, obj, data):
|
||||
return self.can_delete(obj)
|
||||
|
||||
@@ -671,7 +752,7 @@ class OAuth2TokenAccess(BaseAccess):
|
||||
return True
|
||||
|
||||
|
||||
class OrganizationAccess(BaseAccess):
|
||||
class OrganizationAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can see organizations when:
|
||||
- I am a superuser.
|
||||
@@ -685,6 +766,8 @@ class OrganizationAccess(BaseAccess):
|
||||
|
||||
model = Organization
|
||||
prefetch_related = ('created_by', 'modified_by',)
|
||||
# organization admin_role is not a parent of organization auditor_role
|
||||
notification_attach_roles = ['admin_role', 'auditor_role']
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
@@ -694,13 +777,18 @@ class OrganizationAccess(BaseAccess):
|
||||
return self.user in obj.admin_role
|
||||
|
||||
def can_delete(self, obj):
|
||||
self.check_license(feature='multiple_organizations', check_expiration=False)
|
||||
self.check_license(check_expiration=False)
|
||||
is_change_possible = self.can_change(obj, None)
|
||||
if not is_change_possible:
|
||||
return False
|
||||
return True
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
# If the request is updating the membership, check the membership role permissions instead
|
||||
if relationship in ('member_role.members', 'admin_role.members'):
|
||||
rel_role = getattr(obj, relationship.split('.')[0])
|
||||
return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)
|
||||
|
||||
if relationship == "instance_groups":
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
@@ -708,6 +796,11 @@ class OrganizationAccess(BaseAccess):
|
||||
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
|
||||
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
# If the request is updating the membership, check the membership role permissions instead
|
||||
if relationship in ('member_role.members', 'admin_role.members'):
|
||||
rel_role = getattr(obj, relationship.split('.')[0])
|
||||
return RoleAccess(self.user).can_unattach(rel_role, sub_obj, 'members', *args, **kwargs)
|
||||
|
||||
if relationship == "instance_groups":
|
||||
return self.can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
@@ -735,7 +828,7 @@ class InventoryAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = Inventory
|
||||
select_related = ('created_by', 'modified_by', 'organization',)
|
||||
prefetch_related = ('created_by', 'modified_by', 'organization')
|
||||
|
||||
def filtered_queryset(self, allowed=None, ad_hoc=None):
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
@@ -826,6 +919,10 @@ class HostAccess(BaseAccess):
|
||||
|
||||
# Check to see if we have enough licenses
|
||||
self.check_license(add_host_name=data.get('name', None))
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit(data, add_host_name=data.get('name', None))
|
||||
|
||||
return True
|
||||
|
||||
def can_change(self, obj, data):
|
||||
@@ -838,6 +935,10 @@ class HostAccess(BaseAccess):
|
||||
if data and 'name' in data:
|
||||
self.check_license(add_host_name=data['name'])
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit({'inventory': obj.inventory},
|
||||
add_host_name=data['name'])
|
||||
|
||||
# Checks for admin or change permission on inventory, controls whether
|
||||
# the user can edit variable data.
|
||||
return obj and self.user in obj.inventory.admin_role
|
||||
@@ -900,21 +1001,8 @@ class GroupAccess(BaseAccess):
|
||||
def can_delete(self, obj):
|
||||
return bool(obj and self.user in obj.inventory.admin_role)
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
# TODO: Delete for 3.3, only used by v1 serializer
|
||||
# Used as another alias to inventory_source start access for user_capabilities
|
||||
if obj:
|
||||
try:
|
||||
return self.user.can_access(
|
||||
InventorySource, 'start', obj.deprecated_inventory_source,
|
||||
validate_license=validate_license)
|
||||
obj.deprecated_inventory_source
|
||||
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
|
||||
return False
|
||||
return False
|
||||
|
||||
|
||||
class InventorySourceAccess(BaseAccess):
|
||||
class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can see inventory sources whenever I can see their inventory.
|
||||
I can change inventory sources whenever I can change their inventory.
|
||||
@@ -991,8 +1079,8 @@ class InventoryUpdateAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = InventoryUpdate
|
||||
select_related = ('created_by', 'modified_by', 'inventory_source__inventory',)
|
||||
prefetch_related = ('unified_job_template', 'instance_group', 'credentials',)
|
||||
select_related = ('created_by', 'modified_by', 'inventory_source',)
|
||||
prefetch_related = ('unified_job_template', 'instance_group', 'credentials__credential_type', 'inventory', 'source_script')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(inventory_source__inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))
|
||||
@@ -1006,11 +1094,7 @@ class InventoryUpdateAccess(BaseAccess):
|
||||
return self.user in obj.inventory_source.inventory.admin_role
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
# For relaunching
|
||||
if obj and obj.inventory_source:
|
||||
access = InventorySourceAccess(self.user)
|
||||
return access.can_start(obj.inventory_source, validate_license=validate_license)
|
||||
return False
|
||||
return InventorySourceAccess(self.user).can_start(obj, validate_license=validate_license)
|
||||
|
||||
@check_superuser
|
||||
def can_delete(self, obj):
|
||||
@@ -1028,6 +1112,7 @@ class CredentialTypeAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = CredentialType
|
||||
prefetch_related = ('created_by', 'modified_by',)
|
||||
|
||||
def can_read(self, obj):
|
||||
return True
|
||||
@@ -1110,6 +1195,55 @@ class CredentialAccess(BaseAccess):
|
||||
# return True
|
||||
return self.can_change(obj, None)
|
||||
|
||||
def get_user_capabilities(self, obj, **kwargs):
|
||||
user_capabilities = super(CredentialAccess, self).get_user_capabilities(obj, **kwargs)
|
||||
user_capabilities['use'] = self.can_use(obj)
|
||||
return user_capabilities
|
||||
|
||||
|
||||
class CredentialInputSourceAccess(BaseAccess):
|
||||
'''
|
||||
I can see a CredentialInputSource when:
|
||||
- I can see the associated target_credential
|
||||
I can create/change a CredentialInputSource when:
|
||||
- I'm an admin of the associated target_credential
|
||||
- I have use access to the associated source credential
|
||||
I can delete a CredentialInputSource when:
|
||||
- I'm an admin of the associated target_credential
|
||||
'''
|
||||
|
||||
model = CredentialInputSource
|
||||
select_related = ('target_credential', 'source_credential')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return CredentialInputSource.objects.filter(
|
||||
target_credential__in=Credential.accessible_pk_qs(self.user, 'read_role'))
|
||||
|
||||
@check_superuser
|
||||
def can_read(self, obj):
|
||||
return self.user in obj.target_credential.read_role
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
return (
|
||||
self.check_related('target_credential', Credential, data, role_field='admin_role') and
|
||||
self.check_related('source_credential', Credential, data, role_field='use_role')
|
||||
)
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
if self.can_add(data) is False:
|
||||
return False
|
||||
|
||||
return (
|
||||
self.user in obj.target_credential.admin_role and
|
||||
self.user in obj.source_credential.use_role
|
||||
)
|
||||
|
||||
@check_superuser
|
||||
def can_delete(self, obj):
|
||||
return self.user in obj.target_credential.admin_role
|
||||
|
||||
|
||||
class TeamAccess(BaseAccess):
|
||||
'''
|
||||
@@ -1117,6 +1251,7 @@ class TeamAccess(BaseAccess):
|
||||
- I'm a superuser.
|
||||
- I'm an admin of the team
|
||||
- I'm a member of that team.
|
||||
- I'm a member of the team's organization
|
||||
I can create/change a team when:
|
||||
- I'm a superuser.
|
||||
- I'm an admin for the team
|
||||
@@ -1129,7 +1264,10 @@ class TeamAccess(BaseAccess):
|
||||
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and \
|
||||
(self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
|
||||
return self.model.objects.all()
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
return self.model.objects.filter(
|
||||
Q(organization=Organization.accessible_pk_qs(self.user, 'member_role')) |
|
||||
Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role'))
|
||||
)
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
@@ -1167,6 +1305,12 @@ class TeamAccess(BaseAccess):
|
||||
*args, **kwargs)
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
# If the request is updating the membership, check the membership role permissions instead
|
||||
if relationship in ('member_role.members', 'admin_role.members'):
|
||||
rel_role = getattr(obj, relationship.split('.')[0])
|
||||
return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)
|
||||
|
||||
return super(TeamAccess, self).can_attach(obj, sub_obj, relationship,
|
||||
*args, **kwargs)
|
||||
|
||||
@@ -1177,11 +1321,17 @@ class TeamAccess(BaseAccess):
|
||||
role_access = RoleAccess(self.user)
|
||||
return role_access.can_unattach(sub_obj, obj, 'member_role.parents',
|
||||
*args, **kwargs)
|
||||
|
||||
# If the request is updating the membership, check the membership role permissions instead
|
||||
if relationship in ('member_role.members', 'admin_role.members'):
|
||||
rel_role = getattr(obj, relationship.split('.')[0])
|
||||
return RoleAccess(self.user).can_unattach(rel_role, sub_obj, 'members', *args, **kwargs)
|
||||
|
||||
return super(TeamAccess, self).can_unattach(obj, sub_obj, relationship,
|
||||
*args, **kwargs)
|
||||
|
||||
|
||||
class ProjectAccess(BaseAccess):
|
||||
class ProjectAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can see projects when:
|
||||
- I am a superuser.
|
||||
@@ -1199,7 +1349,9 @@ class ProjectAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = Project
|
||||
select_related = ('modified_by', 'credential', 'current_job', 'last_job',)
|
||||
select_related = ('credential',)
|
||||
prefetch_related = ('modified_by', 'created_by', 'organization', 'last_job', 'current_job')
|
||||
notification_attach_roles = ['admin_role']
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
@@ -1262,7 +1414,7 @@ class ProjectUpdateAccess(BaseAccess):
|
||||
return obj and self.user in obj.project.admin_role
|
||||
|
||||
|
||||
class JobTemplateAccess(BaseAccess):
|
||||
class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can see job templates when:
|
||||
- I have read role for the job template.
|
||||
@@ -1332,7 +1484,7 @@ class JobTemplateAccess(BaseAccess):
|
||||
return self.user in project.use_role
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
@check_superuser
|
||||
def can_copy_related(self, obj):
|
||||
'''
|
||||
@@ -1341,17 +1493,19 @@ class JobTemplateAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
# obj.credentials.all() is accessible ONLY when object is saved (has valid id)
|
||||
credential_manager = getattr(obj, 'credentials', None) if getattr(obj, 'id', False) else Credentials.objects.none()
|
||||
return reduce(lambda prev, cred: prev and self.user in cred.use_role, credential_manager.all(), True)
|
||||
credential_manager = getattr(obj, 'credentials', None) if getattr(obj, 'id', False) else Credential.objects.none()
|
||||
user_can_copy = reduce(lambda prev, cred: prev and self.user in cred.use_role, credential_manager.all(), True)
|
||||
if not user_can_copy:
|
||||
raise PermissionDenied(_('Insufficient access to Job Template credentials.'))
|
||||
return user_can_copy
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
# Check license.
|
||||
if validate_license:
|
||||
self.check_license()
|
||||
if obj.survey_enabled:
|
||||
self.check_license(feature='surveys')
|
||||
if Instance.objects.active_count() > 1:
|
||||
self.check_license(feature='ha')
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit({'inventory': obj.inventory})
|
||||
|
||||
# Super users can start any job
|
||||
if self.user.is_superuser:
|
||||
@@ -1409,8 +1563,6 @@ class JobTemplateAccess(BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
|
||||
if isinstance(sub_obj, NotificationTemplate):
|
||||
return self.check_related('organization', Organization, {}, obj=sub_obj, mandatory=True)
|
||||
if relationship == "instance_groups":
|
||||
if not obj.project.organization:
|
||||
return False
|
||||
@@ -1506,6 +1658,9 @@ class JobAccess(BaseAccess):
|
||||
if validate_license:
|
||||
self.check_license()
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit({'inventory': obj.inventory})
|
||||
|
||||
# A super user can relaunch a job
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
@@ -1523,10 +1678,10 @@ class JobAccess(BaseAccess):
|
||||
prompts_access = False
|
||||
elif not config.has_user_prompts(obj.job_template):
|
||||
prompts_access = True
|
||||
elif obj.created_by_id != self.user.pk:
|
||||
elif obj.created_by_id != self.user.pk and vars_are_encrypted(config.extra_data):
|
||||
prompts_access = False
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts provided by another user.')
|
||||
self.messages['detail'] = _('Job was launched with secret prompts provided by another user.')
|
||||
else:
|
||||
prompts_access = (
|
||||
JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}) and
|
||||
@@ -1688,7 +1843,7 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
|
||||
'''
|
||||
model = WorkflowJobTemplateNode
|
||||
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'unified_job_template', 'credentials',)
|
||||
'unified_job_template', 'credentials', 'workflow_job_template')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
@@ -1780,9 +1935,8 @@ class WorkflowJobNodeAccess(BaseAccess):
|
||||
Deletion must happen as a cascade delete from the workflow job.
|
||||
'''
|
||||
model = WorkflowJobNode
|
||||
select_related = ('unified_job_template', 'job',)
|
||||
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes',
|
||||
'credentials',)
|
||||
prefetch_related = ('unified_job_template', 'job', 'workflow_job', 'credentials',
|
||||
'success_nodes', 'failure_nodes', 'always_nodes',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
@@ -1805,7 +1959,7 @@ class WorkflowJobNodeAccess(BaseAccess):
|
||||
|
||||
|
||||
# TODO: notification attachments?
|
||||
class WorkflowJobTemplateAccess(BaseAccess):
|
||||
class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
|
||||
'''
|
||||
I can only see/manage Workflow Job Templates if I'm a super user
|
||||
'''
|
||||
@@ -1850,7 +2004,6 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
qs = obj.workflow_job_template_nodes
|
||||
qs = qs.prefetch_related('unified_job_template', 'inventory__use_role', 'credentials__use_role')
|
||||
for node in qs.all():
|
||||
node_errors = {}
|
||||
if node.inventory and self.user not in node.inventory.use_role:
|
||||
missing_inventories.append(node.inventory.name)
|
||||
for cred in node.credentials.all():
|
||||
@@ -1859,8 +2012,6 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
ujt = node.unified_job_template
|
||||
if ujt and not self.user.can_access(UnifiedJobTemplate, 'start', ujt, validate_license=False):
|
||||
missing_ujt.append(ujt.name)
|
||||
if node_errors:
|
||||
wfjt_errors[node.id] = node_errors
|
||||
if missing_ujt:
|
||||
self.messages['templates_unable_to_copy'] = missing_ujt
|
||||
if missing_credentials:
|
||||
@@ -1875,9 +2026,9 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
if validate_license:
|
||||
# check basic license, node count
|
||||
self.check_license()
|
||||
# if surveys are added to WFJTs, check license here
|
||||
if obj.survey_enabled:
|
||||
self.check_license(feature='surveys')
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit({'inventory': obj.inventory})
|
||||
|
||||
# Super users can start any job
|
||||
if self.user.is_superuser:
|
||||
@@ -1886,11 +2037,6 @@ class WorkflowJobTemplateAccess(BaseAccess):
|
||||
return self.user in obj.execute_role
|
||||
|
||||
def can_change(self, obj, data):
|
||||
# Check survey license if surveys are added to WFJTs
|
||||
if (data and 'survey_enabled' in data and
|
||||
obj.survey_enabled != data['survey_enabled'] and data['survey_enabled']):
|
||||
self.check_license(feature='surveys')
|
||||
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
@@ -1946,6 +2092,9 @@ class WorkflowJobAccess(BaseAccess):
|
||||
if validate_license:
|
||||
self.check_license()
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit({'inventory': obj.inventory})
|
||||
|
||||
if self.user.is_superuser:
|
||||
return True
|
||||
|
||||
@@ -1967,9 +2116,9 @@ class WorkflowJobAccess(BaseAccess):
|
||||
|
||||
# Check if access to prompts to prevent relaunch
|
||||
if config.prompts_dict():
|
||||
if obj.created_by_id != self.user.pk:
|
||||
if obj.created_by_id != self.user.pk and vars_are_encrypted(config.extra_data):
|
||||
if self.save_messages:
|
||||
self.messages['detail'] = _('Job was launched with prompts provided by another user.')
|
||||
self.messages['detail'] = _('Job was launched with secret prompts provided by another user.')
|
||||
return False
|
||||
if not JobLaunchConfigAccess(self.user).can_add({'reference_obj': config}):
|
||||
if self.save_messages:
|
||||
@@ -2022,6 +2171,9 @@ class AdHocCommandAccess(BaseAccess):
|
||||
if validate_license:
|
||||
self.check_license()
|
||||
|
||||
# Check the per-org limit
|
||||
self.check_org_host_limit(data)
|
||||
|
||||
# If a credential is provided, the user should have use access to it.
|
||||
if not self.check_related('credential', Credential, data, role_field='use_role'):
|
||||
return False
|
||||
@@ -2112,7 +2264,7 @@ class JobEventAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = JobEvent
|
||||
prefetch_related = ('hosts', 'children', 'job__job_template', 'host',)
|
||||
prefetch_related = ('hosts', 'job__job_template', 'host',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
@@ -2222,11 +2374,6 @@ class UnifiedJobTemplateAccess(BaseAccess):
|
||||
Q(inventorysource__inventory__id__in=Inventory._accessible_pk_qs(
|
||||
Inventory, self.user, 'read_role')))
|
||||
|
||||
def get_queryset(self):
|
||||
# TODO: remove after the depreciation of v1 API
|
||||
qs = super(UnifiedJobTemplateAccess, self).get_queryset()
|
||||
return qs.exclude(inventorysource__source="")
|
||||
|
||||
def can_start(self, obj, validate_license=True):
|
||||
access_class = access_registry[obj.__class__]
|
||||
access_instance = access_class(self.user)
|
||||
@@ -2331,6 +2478,7 @@ class NotificationTemplateAccess(BaseAccess):
|
||||
I can see/use a notification_template if I have permission to
|
||||
'''
|
||||
model = NotificationTemplate
|
||||
prefetch_related = ('created_by', 'modified_by', 'organization')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(
|
||||
@@ -2431,7 +2579,7 @@ class ActivityStreamAccess(BaseAccess):
|
||||
model = ActivityStream
|
||||
prefetch_related = ('organization', 'user', 'inventory', 'host', 'group',
|
||||
'inventory_update', 'credential', 'credential_type', 'team',
|
||||
'ad_hoc_command', 'o_auth2_application', 'o_auth2_access_token',
|
||||
'ad_hoc_command', 'o_auth2_application', 'o_auth2_access_token',
|
||||
'notification_template', 'notification', 'label', 'role', 'actor',
|
||||
'schedule', 'custom_inventory_script', 'unified_job_template',
|
||||
'workflow_job_template_node',)
|
||||
@@ -2517,6 +2665,7 @@ class ActivityStreamAccess(BaseAccess):
|
||||
class CustomInventoryScriptAccess(BaseAccess):
|
||||
|
||||
model = CustomInventoryScript
|
||||
prefetch_related = ('created_by', 'modified_by', 'organization')
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.accessible_objects(self.user, 'read_role').all()
|
||||
@@ -2550,6 +2699,17 @@ class RoleAccess(BaseAccess):
|
||||
'''
|
||||
|
||||
model = Role
|
||||
prefetch_related = ('content_type',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
result = Role.visible_roles(self.user)
|
||||
# Sanity check: is the requesting user an orphaned non-admin/auditor?
|
||||
# if yes, make system admin/auditor mandatorily visible.
|
||||
if not self.user.is_superuser and not self.user.is_system_auditor and not self.user.organizations.exists():
|
||||
mandatories = ('system_administrator', 'system_auditor')
|
||||
super_qs = Role.objects.filter(singleton_name__in=mandatories)
|
||||
result = result | super_qs
|
||||
return result
|
||||
|
||||
def can_read(self, obj):
|
||||
if not obj:
|
||||
@@ -2569,10 +2729,6 @@ class RoleAccess(BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_unattach(self, obj, sub_obj, relationship, data=None, skip_sub_obj_read_check=False):
|
||||
if isinstance(obj.content_object, Team):
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
if not skip_sub_obj_read_check and relationship in ['members', 'member_role.parents', 'parents']:
|
||||
# If we are unattaching a team Role, check the Team read access
|
||||
if relationship == 'parents':
|
||||
@@ -2584,18 +2740,22 @@ class RoleAccess(BaseAccess):
|
||||
|
||||
# Being a user in the member_role or admin_role of an organization grants
|
||||
# administrators of that Organization the ability to edit that user. To prevent
|
||||
# unwanted escalations lets ensure that the Organization administartor has the abilty
|
||||
# unwanted escalations let's ensure that the Organization administrator has the ability
|
||||
# to admin the user being added to the role.
|
||||
if (isinstance(obj.content_object, Organization) and
|
||||
obj.role_field in (Organization.member_role.field.parent_role + ['member_role'])):
|
||||
if isinstance(obj.content_object, Organization) and obj.role_field in ['admin_role', 'member_role']:
|
||||
if not isinstance(sub_obj, User):
|
||||
logger.error('Unexpected attempt to associate {} with organization role.'.format(sub_obj))
|
||||
return False
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
if not UserAccess(self.user).can_admin(sub_obj, None, allow_orphans=True):
|
||||
return False
|
||||
|
||||
if isinstance(obj.content_object, ResourceMixin) and \
|
||||
self.user in obj.content_object.admin_role:
|
||||
if isinstance(obj.content_object, Team) and obj.role_field in ['admin_role', 'member_role']:
|
||||
if not settings.MANAGE_ORGANIZATION_AUTH and not self.user.is_superuser:
|
||||
return False
|
||||
|
||||
if isinstance(obj.content_object, ResourceMixin) and self.user in obj.content_object.admin_role:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
1
awx/main/analytics/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from .core import register, gather, ship # noqa
|
||||
276
awx/main/analytics/collectors.py
Normal file
@@ -0,0 +1,276 @@
|
||||
import os
|
||||
import os.path
|
||||
import platform
|
||||
|
||||
from django.db import connection
|
||||
from django.db.models import Count
|
||||
from django.conf import settings
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils import (get_awx_version, get_ansible_version,
|
||||
get_custom_venv_choices, camelcase_to_underscore)
|
||||
from awx.main import models
|
||||
from django.contrib.sessions.models import Session
|
||||
from awx.main.analytics import register
|
||||
|
||||
'''
|
||||
This module is used to define metrics collected by awx.main.analytics.gather()
|
||||
Each function is decorated with a key name, and should return a data
|
||||
structure that can be serialized to JSON
|
||||
|
||||
@register('something')
|
||||
def something(since):
|
||||
# the generated archive will contain a `something.json` w/ this JSON
|
||||
return {'some': 'json'}
|
||||
|
||||
All functions - when called - will be passed a datetime.datetime object,
|
||||
`since`, which represents the last time analytics were gathered (some metrics
|
||||
functions - like those that return metadata about playbook runs - may return
|
||||
data _since_ the last report date - i.e., new data in the last 24 hours)
|
||||
'''
|
||||
|
||||
|
||||
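Beyond the static example in the docstring above, collectors can make use of the `since` timestamp they are handed; a sketch of a hypothetical collector (not part of the shipped set) that only reports jobs created after the previous gather run:

# Hypothetical collector; 'recent_job_ids' is not a key AWX actually ships.
@register('recent_job_ids')
def recent_job_ids(since):
    # `since` is the datetime of the last analytics run, so this returns
    # only work that happened in the window being reported on.
    return list(models.UnifiedJob.objects.filter(created__gt=since)
                .exclude(launch_type='sync')
                .values_list('id', flat=True))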
@register('config')
|
||||
def config(since):
|
||||
license_info = get_license(show_key=False)
|
||||
install_type = 'traditional'
|
||||
if os.environ.get('container') == 'oci':
|
||||
install_type = 'openshift'
|
||||
elif 'KUBERNETES_SERVICE_PORT' in os.environ:
|
||||
install_type = 'k8s'
|
||||
return {
|
||||
'platform': {
|
||||
'system': platform.system(),
|
||||
'dist': platform.dist(),
|
||||
'release': platform.release(),
|
||||
'type': install_type,
|
||||
},
|
||||
'install_uuid': settings.INSTALL_UUID,
|
||||
'instance_uuid': settings.SYSTEM_UUID,
|
||||
'tower_url_base': settings.TOWER_URL_BASE,
|
||||
'tower_version': get_awx_version(),
|
||||
'ansible_version': get_ansible_version(),
|
||||
'license_type': license_info.get('license_type', 'UNLICENSED'),
|
||||
'free_instances': license_info.get('free instances', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
'pendo_tracking': settings.PENDO_TRACKING_STATE,
|
||||
'authentication_backends': settings.AUTHENTICATION_BACKENDS,
|
||||
'logging_aggregators': settings.LOG_AGGREGATOR_LOGGERS,
|
||||
'external_logger_enabled': settings.LOG_AGGREGATOR_ENABLED,
|
||||
'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', None),
|
||||
}
|
||||
|
||||
|
||||
@register('counts')
|
||||
def counts(since):
|
||||
counts = {}
|
||||
for cls in (models.Organization, models.Team, models.User,
|
||||
models.Inventory, models.Credential, models.Project,
|
||||
models.JobTemplate, models.WorkflowJobTemplate,
|
||||
models.Host, models.Schedule, models.CustomInventoryScript,
|
||||
models.NotificationTemplate):
|
||||
counts[camelcase_to_underscore(cls.__name__)] = cls.objects.count()
|
||||
|
||||
venvs = get_custom_venv_choices()
|
||||
counts['custom_virtualenvs'] = len([
|
||||
v for v in venvs
|
||||
if os.path.basename(v.rstrip('/')) != 'ansible'
|
||||
])
|
||||
|
||||
inv_counts = dict(models.Inventory.objects.order_by().values_list('kind').annotate(Count('kind')))
|
||||
inv_counts['normal'] = inv_counts.get('', 0)
|
||||
inv_counts.pop('', None)
|
||||
inv_counts['smart'] = inv_counts.get('smart', 0)
|
||||
counts['inventories'] = inv_counts
|
||||
|
||||
counts['unified_job'] = models.UnifiedJob.objects.exclude(launch_type='sync').count() # excludes implicit project_updates
|
||||
counts['active_host_count'] = models.Host.objects.active_count()
|
||||
active_sessions = Session.objects.filter(expire_date__gte=now()).count()
|
||||
active_user_sessions = models.UserSessionMembership.objects.select_related('session').filter(session__expire_date__gte=now()).count()
|
||||
active_anonymous_sessions = active_sessions - active_user_sessions
|
||||
counts['active_sessions'] = active_sessions
|
||||
counts['active_user_sessions'] = active_user_sessions
|
||||
counts['active_anonymous_sessions'] = active_anonymous_sessions
|
||||
counts['running_jobs'] = models.UnifiedJob.objects.exclude(launch_type='sync').filter(status__in=('running', 'waiting',)).count()
|
||||
return counts
|
||||
|
||||
|
||||
@register('org_counts')
|
||||
def org_counts(since):
|
||||
counts = {}
|
||||
for org in models.Organization.objects.annotate(num_users=Count('member_role__members', distinct=True),
|
||||
num_teams=Count('teams', distinct=True)).values('name', 'id', 'num_users', 'num_teams'):
|
||||
counts[org['id']] = {'name': org['name'],
|
||||
'users': org['num_users'],
|
||||
'teams': org['num_teams']
|
||||
}
|
||||
return counts
|
||||
|
||||
|
||||
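For reference, the collector above produces a mapping keyed by organization id; a hypothetical payload for a two-organization install might look like:

# Hypothetical org_counts payload.
example = {
    1: {'name': 'Default', 'users': 12, 'teams': 3},
    2: {'name': 'Engineering', 'users': 40, 'teams': 7},
}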
@register('cred_type_counts')
|
||||
def cred_type_counts(since):
|
||||
counts = {}
|
||||
for cred_type in models.CredentialType.objects.annotate(num_credentials=Count(
|
||||
'credentials', distinct=True)).values('name', 'id', 'managed_by_tower', 'num_credentials'):
|
||||
counts[cred_type['id']] = {'name': cred_type['name'],
|
||||
'credential_count': cred_type['num_credentials'],
|
||||
'managed_by_tower': cred_type['managed_by_tower']
|
||||
}
|
||||
return counts
|
||||
|
||||
|
||||
@register('inventory_counts')
|
||||
def inventory_counts(since):
|
||||
counts = {}
|
||||
for inv in models.Inventory.objects.filter(kind='').annotate(num_sources=Count('inventory_sources', distinct=True),
|
||||
num_hosts=Count('hosts', distinct=True)).only('id', 'name', 'kind'):
|
||||
counts[inv.id] = {'name': inv.name,
|
||||
'kind': inv.kind,
|
||||
'hosts': inv.num_hosts,
|
||||
'sources': inv.num_sources
|
||||
}
|
||||
|
||||
for smart_inv in models.Inventory.objects.filter(kind='smart'):
|
||||
counts[smart_inv.id] = {'name': smart_inv.name,
|
||||
'kind': smart_inv.kind,
|
||||
'num_hosts': smart_inv.hosts.count(),
|
||||
'num_sources': smart_inv.inventory_sources.count()
|
||||
}
|
||||
return counts
|
||||
|
||||
|
||||
@register('projects_by_scm_type')
|
||||
def projects_by_scm_type(since):
|
||||
counts = dict(
|
||||
(t[0] or 'manual', 0)
|
||||
for t in models.Project.SCM_TYPE_CHOICES
|
||||
)
|
||||
for result in models.Project.objects.values('scm_type').annotate(
|
||||
count=Count('scm_type')
|
||||
).order_by('scm_type'):
|
||||
counts[result['scm_type'] or 'manual'] = result['count']
|
||||
return counts
|
||||
|
||||
|
||||
@register('instance_info')
|
||||
def instance_info(since):
|
||||
info = {}
|
||||
instances = models.Instance.objects.values_list('hostname').annotate().values(
|
||||
'uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'hostname', 'last_isolated_check', 'enabled')
|
||||
for instance in instances:
|
||||
instance_info = {
|
||||
'uuid': instance['uuid'],
|
||||
'version': instance['version'],
|
||||
'capacity': instance['capacity'],
|
||||
'cpu': instance['cpu'],
|
||||
'memory': instance['memory'],
|
||||
'managed_by_policy': instance['managed_by_policy'],
|
||||
'last_isolated_check': instance['last_isolated_check'],
|
||||
'enabled': instance['enabled']
|
||||
}
|
||||
info[instance['uuid']] = instance_info
|
||||
return info
|
||||
|
||||
|
||||
@register('job_counts')
|
||||
def job_counts(since):
|
||||
counts = {}
|
||||
counts['total_jobs'] = models.UnifiedJob.objects.exclude(launch_type='sync').count()
|
||||
counts['status'] = dict(models.UnifiedJob.objects.exclude(launch_type='sync').values_list('status').annotate(Count('status')).order_by())
|
||||
counts['launch_type'] = dict(models.UnifiedJob.objects.exclude(launch_type='sync').values_list(
|
||||
'launch_type').annotate(Count('launch_type')).order_by())
|
||||
return counts
|
||||
|
||||
|
||||
@register('job_instance_counts')
|
||||
def job_instance_counts(since):
|
||||
counts = {}
|
||||
job_types = models.UnifiedJob.objects.exclude(launch_type='sync').values_list(
|
||||
'execution_node', 'launch_type').annotate(job_launch_type=Count('launch_type'))
|
||||
for job in job_types:
|
||||
counts.setdefault(job[0], {}).setdefault('launch_type', {})[job[1]] = job[2]
|
||||
|
||||
job_statuses = models.UnifiedJob.objects.exclude(launch_type='sync').values_list(
|
||||
'execution_node', 'status').annotate(job_status=Count('status'))
|
||||
for job in job_statuses:
|
||||
counts.setdefault(job[0], {}).setdefault('status', {})[job[1]] = job[2]
|
||||
return counts
|
||||
|
||||
|
||||
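The setdefault chains above build a two-level mapping per execution node; a hypothetical result for a single node:

# Hypothetical job_instance_counts payload for one execution node.
example = {
    'awx-1.example.com': {
        'launch_type': {'manual': 14, 'scheduled': 6},
        'status': {'successful': 17, 'failed': 3},
    },
}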
# Copies Job Events from db to a .csv to be shipped
|
||||
def copy_tables(since, full_path):
|
||||
def _copy_table(table, query, path):
|
||||
file_path = os.path.join(path, table + '_table.csv')
|
||||
file = open(file_path, 'w', encoding='utf-8')
|
||||
with connection.cursor() as cursor:
|
||||
cursor.copy_expert(query, file)
|
||||
file.close()
|
||||
return file_path
|
||||
|
||||
events_query = '''COPY (SELECT main_jobevent.id,
|
||||
main_jobevent.created,
|
||||
main_jobevent.uuid,
|
||||
main_jobevent.parent_uuid,
|
||||
main_jobevent.event,
|
||||
main_jobevent.event_data::json->'task_action' AS task_action,
|
||||
main_jobevent.failed,
|
||||
main_jobevent.changed,
|
||||
main_jobevent.playbook,
|
||||
main_jobevent.play,
|
||||
main_jobevent.task,
|
||||
main_jobevent.role,
|
||||
main_jobevent.job_id,
|
||||
main_jobevent.host_id,
|
||||
main_jobevent.host_name
|
||||
FROM main_jobevent
|
||||
WHERE main_jobevent.created > {}
|
||||
ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
|
||||
_copy_table(table='events', query=events_query, path=full_path)
|
||||
|
||||
unified_job_query = '''COPY (SELECT main_unifiedjob.id,
|
||||
main_unifiedjob.polymorphic_ctype_id,
|
||||
django_content_type.model,
|
||||
main_unifiedjob.created,
|
||||
main_unifiedjob.name,
|
||||
main_unifiedjob.unified_job_template_id,
|
||||
main_unifiedjob.launch_type,
|
||||
main_unifiedjob.schedule_id,
|
||||
main_unifiedjob.execution_node,
|
||||
main_unifiedjob.controller_node,
|
||||
main_unifiedjob.cancel_flag,
|
||||
main_unifiedjob.status,
|
||||
main_unifiedjob.failed,
|
||||
main_unifiedjob.started,
|
||||
main_unifiedjob.finished,
|
||||
main_unifiedjob.elapsed,
|
||||
main_unifiedjob.job_explanation,
|
||||
main_unifiedjob.instance_group_id
|
||||
FROM main_unifiedjob, django_content_type
|
||||
WHERE main_unifiedjob.created > {} AND
|
||||
main_unifiedjob.polymorphic_ctype_id = django_content_type.id AND
|
||||
main_unifiedjob.launch_type != 'sync'
|
||||
ORDER BY main_unifiedjob.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
|
||||
_copy_table(table='unified_jobs', query=unified_job_query, path=full_path)
|
||||
|
||||
unified_job_template_query = '''COPY (SELECT main_unifiedjobtemplate.id,
|
||||
main_unifiedjobtemplate.polymorphic_ctype_id,
|
||||
django_content_type.model,
|
||||
main_unifiedjobtemplate.created,
|
||||
main_unifiedjobtemplate.modified,
|
||||
main_unifiedjobtemplate.created_by_id,
|
||||
main_unifiedjobtemplate.modified_by_id,
|
||||
main_unifiedjobtemplate.name,
|
||||
main_unifiedjobtemplate.current_job_id,
|
||||
main_unifiedjobtemplate.last_job_id,
|
||||
main_unifiedjobtemplate.last_job_failed,
|
||||
main_unifiedjobtemplate.last_job_run,
|
||||
main_unifiedjobtemplate.next_job_run,
|
||||
main_unifiedjobtemplate.next_schedule_id,
|
||||
main_unifiedjobtemplate.status
|
||||
FROM main_unifiedjobtemplate, django_content_type
|
||||
WHERE main_unifiedjobtemplate.polymorphic_ctype_id = django_content_type.id
|
||||
ORDER BY main_unifiedjobtemplate.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
|
||||
_copy_table(table='unified_job_template', query=unified_job_template_query, path=full_path)
|
||||
return
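A minimal sketch of driving copy_tables(), assuming a datetime lower bound and a writable scratch directory; the directory prefix below is illustrative.

import tempfile
from datetime import timedelta

from django.utils.timezone import now

# Export roughly the last day of job data into a temporary directory.
scratch = tempfile.mkdtemp(prefix='awx_analytics_tables')
copy_tables(since=now() - timedelta(days=1), full_path=scratch)
# scratch now contains events_table.csv, unified_jobs_table.csv and
# unified_job_template_table.csv, ready to be bundled into the tarball.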
|
||||
|
||||
142  awx/main/analytics/core.py  Normal file
@@ -0,0 +1,142 @@
|
||||
import inspect
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import os.path
|
||||
import tempfile
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now, timedelta
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.models import Job
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.models.ha import TowerAnalyticsState
|
||||
|
||||
|
||||
__all__ = ['register', 'gather', 'ship']
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
def _valid_license():
|
||||
try:
|
||||
if get_license(show_key=False).get('license_type', 'UNLICENSED') == 'open':
|
||||
return False
|
||||
access_registry[Job](None).check_license()
|
||||
except PermissionDenied:
|
||||
logger.exception("A valid license was not found:")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def register(key):
|
||||
"""
|
||||
A decorator used to register a function as a metric collector.
|
||||
|
||||
Decorated functions should return JSON-serializable objects.
|
||||
|
||||
@register('projects_by_scm_type')
|
||||
def projects_by_scm_type():
|
||||
return {'git': 5, 'svn': 1, 'hg': 0}
|
||||
"""
|
||||
|
||||
def decorate(f):
|
||||
f.__awx_analytics_key__ = key
|
||||
return f
|
||||
|
||||
return decorate
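As a hedged illustration of the contract gather() relies on below, a registered collector receives the last-run timestamp and must return something JSON-serializable; the key name and queryset here are hypothetical.

from awx.main import models

@register('jobs_since_last_run')
def jobs_since_last_run(since):
    # `since` is the datetime gather() passes in; only count newer jobs.
    return {'count': models.UnifiedJob.objects.filter(created__gt=since).count()}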
|
||||
|
||||
|
||||
def gather(dest=None, module=None):
|
||||
"""
|
||||
Gather all defined metrics and write them as JSON files in a .tgz
|
||||
|
||||
:param dest: the (optional) absolute path to write a compressed tarball
|
||||
:param module: the module to search for registered analytic collector
|
||||
functions; defaults to awx.main.analytics.collectors
|
||||
"""
|
||||
|
||||
run_now = now()
|
||||
state = TowerAnalyticsState.get_solo()
|
||||
last_run = state.last_run
|
||||
logger.debug("Last analytics run was: {}".format(last_run))
|
||||
|
||||
max_interval = now() - timedelta(days=7)
|
||||
if not last_run or last_run < max_interval:
|
||||
last_run = max_interval
|
||||
|
||||
|
||||
if _valid_license() is False:
|
||||
logger.exception("Invalid License provided, or No License Provided")
|
||||
return "Error: Invalid License provided, or No License Provided"
|
||||
|
||||
if not settings.INSIGHTS_TRACKING_STATE:
|
||||
logger.error("Insights analytics not enabled")
|
||||
return
|
||||
|
||||
if module is None:
|
||||
from awx.main.analytics import collectors
|
||||
module = collectors
|
||||
|
||||
dest = dest or tempfile.mkdtemp(prefix='awx_analytics')
|
||||
for name, func in inspect.getmembers(module):
|
||||
if inspect.isfunction(func) and hasattr(func, '__awx_analytics_key__'):
|
||||
key = func.__awx_analytics_key__
|
||||
path = '{}.json'.format(os.path.join(dest, key))
|
||||
with open(path, 'w', encoding='utf-8') as f:
|
||||
try:
|
||||
json.dump(func(last_run), f)
|
||||
except Exception:
|
||||
logger.exception("Could not generate metric {}.json".format(key))
|
||||
f.close()
|
||||
os.remove(f.name)
|
||||
try:
|
||||
collectors.copy_tables(since=last_run, full_path=dest)
|
||||
except Exception:
|
||||
logger.exception("Could not copy tables")
|
||||
|
||||
# can't use isoformat() since it has colons, which GNU tar doesn't like
|
||||
tarname = '_'.join([
|
||||
settings.SYSTEM_UUID,
|
||||
run_now.strftime('%Y-%m-%d-%H%M%S%z')
|
||||
])
|
||||
tgz = shutil.make_archive(
|
||||
os.path.join(os.path.dirname(dest), tarname),
|
||||
'gztar',
|
||||
dest
|
||||
)
|
||||
shutil.rmtree(dest)
|
||||
return tgz
|
||||
|
||||
|
||||
def ship(path):
|
||||
"""
|
||||
Ship gathered metrics via the Insights agent
|
||||
"""
|
||||
agent = 'insights-client'
|
||||
if shutil.which(agent) is None:
|
||||
logger.error('could not find {} on PATH'.format(agent))
|
||||
return
|
||||
logger.debug('shipping analytics file: {}'.format(path))
|
||||
try:
|
||||
cmd = [
|
||||
agent, '--payload', path, '--content-type', settings.INSIGHTS_AGENT_MIME
|
||||
]
|
||||
output = smart_str(subprocess.check_output(cmd, timeout=60 * 5))
|
||||
logger.debug(output)
|
||||
# reset the `last_run` when data is shipped
|
||||
run_now = now()
|
||||
state = TowerAnalyticsState.get_solo()
|
||||
state.last_run = run_now
|
||||
state.save()
|
||||
|
||||
except subprocess.CalledProcessError:
|
||||
logger.exception('{} failure:'.format(cmd))
|
||||
except subprocess.TimeoutExpired:
|
||||
logger.exception('{} timeout:'.format(cmd))
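Taken together, a periodic task could wire the two functions up roughly as sketched below; this is an assumption about how callers combine them, not a copy of the actual task code.

def gather_and_ship():
    # gather() returns the path to a .tar.gz on success, or an error
    # string / None otherwise; ship() hands the file to insights-client
    # and resets last_run on the TowerAnalyticsState singleton.
    tgz = gather()
    if tgz and tgz.endswith('.tar.gz'):
        ship(tgz)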
|
||||
121  awx/main/analytics/metrics.py  Normal file
@@ -0,0 +1,121 @@
|
||||
from django.conf import settings
|
||||
from prometheus_client import (
|
||||
REGISTRY,
|
||||
PROCESS_COLLECTOR,
|
||||
PLATFORM_COLLECTOR,
|
||||
GC_COLLECTOR,
|
||||
Gauge,
|
||||
Info,
|
||||
generate_latest
|
||||
)
|
||||
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils import (get_awx_version, get_ansible_version)
|
||||
from awx.main.analytics.collectors import (
|
||||
counts,
|
||||
instance_info,
|
||||
job_instance_counts,
|
||||
)
|
||||
|
||||
|
||||
REGISTRY.unregister(PROCESS_COLLECTOR)
|
||||
REGISTRY.unregister(PLATFORM_COLLECTOR)
|
||||
REGISTRY.unregister(GC_COLLECTOR)
|
||||
|
||||
SYSTEM_INFO = Info('awx_system', 'AWX System Information')
|
||||
ORG_COUNT = Gauge('awx_organizations_total', 'Number of organizations')
|
||||
USER_COUNT = Gauge('awx_users_total', 'Number of users')
|
||||
TEAM_COUNT = Gauge('awx_teams_total', 'Number of teams')
|
||||
INV_COUNT = Gauge('awx_inventories_total', 'Number of inventories')
|
||||
PROJ_COUNT = Gauge('awx_projects_total', 'Number of projects')
|
||||
JT_COUNT = Gauge('awx_job_templates_total', 'Number of job templates')
|
||||
WFJT_COUNT = Gauge('awx_workflow_job_templates_total', 'Number of workflow job templates')
|
||||
HOST_COUNT = Gauge('awx_hosts_total', 'Number of hosts', ['type',])
|
||||
SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules')
|
||||
INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total', 'Number of inventory scripts')
|
||||
USER_SESSIONS = Gauge('awx_sessions_total', 'Number of sessions', ['type',])
|
||||
CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs')
|
||||
RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system')
|
||||
|
||||
INSTANCE_CAPACITY = Gauge('awx_instance_capacity', 'Capacity of each node in a Tower system', ['instance_uuid',])
|
||||
INSTANCE_CPU = Gauge('awx_instance_cpu', 'CPU cores on each node in a Tower system', ['instance_uuid',])
|
||||
INSTANCE_MEMORY = Gauge('awx_instance_memory', 'RAM (Kb) on each node in a Tower system', ['instance_uuid',])
|
||||
INSTANCE_INFO = Info('awx_instance', 'Info about each node in a Tower system', ['instance_uuid',])
|
||||
INSTANCE_LAUNCH_TYPE = Gauge('awx_instance_launch_type_total', 'Type of Job launched', ['node', 'launch_type',])
|
||||
INSTANCE_STATUS = Gauge('awx_instance_status_total', 'Status of Job launched', ['node', 'status',])
|
||||
|
||||
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license')
|
||||
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license')
|
||||
|
||||
|
||||
def metrics():
|
||||
license_info = get_license(show_key=False)
|
||||
SYSTEM_INFO.info({
|
||||
'install_uuid': settings.INSTALL_UUID,
|
||||
'insights_analytics': str(settings.INSIGHTS_TRACKING_STATE),
|
||||
'tower_url_base': settings.TOWER_URL_BASE,
|
||||
'tower_version': get_awx_version(),
|
||||
'ansible_version': get_ansible_version(),
|
||||
'license_type': license_info.get('license_type', 'UNLICENSED'),
|
||||
'license_expiry': str(license_info.get('time_remaining', 0)),
|
||||
'pendo_tracking': settings.PENDO_TRACKING_STATE,
|
||||
'external_logger_enabled': str(settings.LOG_AGGREGATOR_ENABLED),
|
||||
'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', 'None')
|
||||
})
|
||||
|
||||
LICENSE_INSTANCE_TOTAL.set(str(license_info.get('available_instances', 0)))
|
||||
LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))
|
||||
|
||||
current_counts = counts(None)
|
||||
|
||||
ORG_COUNT.set(current_counts['organization'])
|
||||
USER_COUNT.set(current_counts['user'])
|
||||
TEAM_COUNT.set(current_counts['team'])
|
||||
INV_COUNT.set(current_counts['inventory'])
|
||||
PROJ_COUNT.set(current_counts['project'])
|
||||
JT_COUNT.set(current_counts['job_template'])
|
||||
WFJT_COUNT.set(current_counts['workflow_job_template'])
|
||||
|
||||
HOST_COUNT.labels(type='all').set(current_counts['host'])
|
||||
HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])
|
||||
|
||||
SCHEDULE_COUNT.set(current_counts['schedule'])
|
||||
INV_SCRIPT_COUNT.set(current_counts['custom_inventory_script'])
|
||||
CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])
|
||||
|
||||
USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
|
||||
USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
|
||||
USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])
|
||||
|
||||
RUNNING_JOBS.set(current_counts['running_jobs'])
|
||||
|
||||
|
||||
instance_data = instance_info(None)
|
||||
for uuid in instance_data:
|
||||
INSTANCE_CAPACITY.labels(instance_uuid=uuid).set(instance_data[uuid]['capacity'])
|
||||
INSTANCE_CPU.labels(instance_uuid=uuid).set(instance_data[uuid]['cpu'])
|
||||
INSTANCE_MEMORY.labels(instance_uuid=uuid).set(instance_data[uuid]['memory'])
|
||||
INSTANCE_INFO.labels(instance_uuid=uuid).info({
|
||||
'enabled': str(instance_data[uuid]['enabled']),
|
||||
'last_isolated_check': instance_data[uuid].get('last_isolated_check', 'None'),
|
||||
'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
|
||||
'version': instance_data[uuid]['version']
|
||||
})
|
||||
|
||||
instance_data = job_instance_counts(None)
|
||||
for node in instance_data:
|
||||
# skipping internal execution node (for system jobs)
|
||||
if node == '':
|
||||
continue
|
||||
types = instance_data[node].get('launch_type', {})
|
||||
for launch_type, value in types.items():
|
||||
INSTANCE_LAUNCH_TYPE.labels(node=node, launch_type=launch_type).set(value)
|
||||
statuses = instance_data[node].get('status', {})
|
||||
for status, value in statuses.items():
|
||||
INSTANCE_STATUS.labels(node=node, status=status).set(value)
|
||||
|
||||
|
||||
return generate_latest()
|
||||
|
||||
|
||||
__all__ = ['metrics']
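Because metrics() returns the Prometheus text exposition produced by generate_latest(), a minimal sketch of serving it from a Django view could look like this; the view name is hypothetical, while CONTENT_TYPE_LATEST is the standard prometheus_client constant.

from django.http import HttpResponse
from prometheus_client import CONTENT_TYPE_LATEST

from awx.main.analytics.metrics import metrics

def metrics_view(request):
    # metrics() refreshes every Gauge/Info above and renders the registry.
    return HttpResponse(metrics(), content_type=CONTENT_TYPE_LATEST)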
|
||||
@@ -21,7 +21,6 @@ register(
|
||||
help_text=_('Enable capturing activity for the activity stream.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
feature_required='activity_streams',
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -31,7 +30,6 @@ register(
|
||||
help_text=_('Enable capturing activity for the activity stream when running inventory sync.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
feature_required='activity_streams',
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -120,12 +118,32 @@ register(
|
||||
default=_load_default_license_from_file,
|
||||
label=_('License'),
|
||||
help_text=_('The license controls which features and functionality are '
|
||||
'enabled. Use /api/v1/config/ to update or change '
|
||||
'enabled. Use /api/v2/config/ to update or change '
|
||||
'the license.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'INSTALL_UUID',
|
||||
field_class=fields.CharField,
|
||||
label=_('Unique identifier for an AWX/Tower installation'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
read_only=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'CUSTOM_VENV_PATHS',
|
||||
field_class=fields.StringListPathField,
|
||||
label=_('Custom virtual environment paths'),
|
||||
help_text=_('Paths where Tower will look for custom virtual environments '
|
||||
'(in addition to /var/lib/awx/venv/). Enter one path per line.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
default=[],
|
||||
)
|
||||
|
||||
register(
|
||||
'AD_HOC_COMMANDS',
|
||||
field_class=fields.StringListField,
|
||||
@@ -290,6 +308,16 @@ register(
|
||||
placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
|
||||
)
|
||||
|
||||
register(
|
||||
'INSIGHTS_TRACKING_STATE',
|
||||
field_class=fields.BooleanField,
|
||||
default=False,
|
||||
label=_('Gather data for Automation Insights'),
|
||||
help_text=_('Enables Tower to gather data on automation and send it to Red Hat Insights.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_ROLES_ENABLED',
|
||||
field_class=fields.BooleanField,
|
||||
@@ -542,6 +570,16 @@ register(
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'BROKER_DURABILITY',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Message Durability'),
|
||||
help_text=_('When set (the default), underlying queues will be persisted to disk. Disable this to enable higher message bus throughput.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or \
|
||||
not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or \
|
||||
|
||||
@@ -16,7 +16,8 @@ SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
|
||||
PRIVILEGE_ESCALATION_METHODS = [
|
||||
('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')),
|
||||
('dzdo', _('DZDO')), ('pmrun', _('Pmrun')), ('runas', _('Runas')),
|
||||
('enable', _('Enable')), ('doas', _('Doas')),
|
||||
('enable', _('Enable')), ('doas', _('Doas')), ('ksu', _('Ksu')),
|
||||
('machinectl', _('Machinectl')), ('sesu', _('Sesu')),
|
||||
]
|
||||
CHOICES_PRIVILEGE_ESCALATION_METHODS = [('', _('None'))] + PRIVILEGE_ESCALATION_METHODS
|
||||
ANSI_SGR_PATTERN = re.compile(r'\x1b\[[0-9;]*m')
|
||||
@@ -24,7 +25,9 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
||||
# Failure to parse inventory should always be fatal
|
||||
'ANSIBLE_INVENTORY_UNPARSED_FAILED': 'True',
|
||||
# Always use the --export option for ansible-inventory
|
||||
'ANSIBLE_INVENTORY_EXPORT': 'True'
|
||||
'ANSIBLE_INVENTORY_EXPORT': 'True',
|
||||
# Redirecting output to stderr allows JSON parsing to still work with -vvv
|
||||
'ANSIBLE_VERBOSE_TO_STDERR': 'True'
|
||||
}
|
||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||
ACTIVE_STATES = CAN_CANCEL
|
||||
@@ -34,6 +37,17 @@ ENV_BLACKLIST = frozenset((
|
||||
'INVENTORY_ID', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID',
|
||||
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
|
||||
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS', 'FACT_QUEUE',
|
||||
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS',
|
||||
'AWX_HOST', 'PROJECT_REVISION'
|
||||
))
|
||||
|
||||
# loggers that may be called in process of emitting a log
|
||||
LOGGER_BLACKLIST = (
|
||||
'awx.main.utils.handlers',
|
||||
'awx.main.utils.formatters',
|
||||
'awx.main.utils.filters',
|
||||
'awx.main.utils.encryption',
|
||||
'awx.main.utils.log',
|
||||
# loggers that may be called getting logging settings
|
||||
'awx.conf'
|
||||
)
|
||||
|
||||
125  awx/main/credential_plugins/aim.py  Normal file
@@ -0,0 +1,125 @@
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
import os
|
||||
import stat
|
||||
import tempfile
|
||||
import threading
|
||||
from urllib.parse import quote, urlencode, urljoin
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import requests
|
||||
|
||||
|
||||
aim_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
'label': _('CyberArk AIM URL'),
|
||||
'type': 'string',
|
||||
'format': 'url',
|
||||
}, {
|
||||
'id': 'app_id',
|
||||
'label': _('Application ID'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}, {
|
||||
'id': 'client_key',
|
||||
'label': _('Client Key'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
'multiline': True,
|
||||
}, {
|
||||
'id': 'client_cert',
|
||||
'label': _('Client Certificate'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
'multiline': True,
|
||||
}, {
|
||||
'id': 'verify',
|
||||
'label': _('Verify SSL Certificates'),
|
||||
'type': 'boolean',
|
||||
'default': True,
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'object_query',
|
||||
'label': _('Object Query'),
|
||||
'type': 'string',
|
||||
'help_text': _('Lookup query for the object. Ex: "Safe=TestSafe;Object=testAccountName123"'),
|
||||
}, {
|
||||
'id': 'object_query_format',
|
||||
'label': _('Object Query Format'),
|
||||
'type': 'string',
|
||||
'default': 'Exact',
|
||||
'choices': ['Exact', 'Regexp']
|
||||
}, {
|
||||
'id': 'reason',
|
||||
'label': _('Reason'),
|
||||
'type': 'string',
|
||||
'help_text': _('Object request reason. This is only needed if it is required by the object\'s policy.')
|
||||
}],
|
||||
'required': ['url', 'app_id', 'object_query'],
|
||||
}
|
||||
|
||||
|
||||
def create_temporary_fifo(data):
|
||||
"""Open fifo named pipe in a new thread using a temporary file path. The
|
||||
thread blocks until data is read from the pipe.
|
||||
|
||||
Returns the path to the fifo.
|
||||
|
||||
:param data(bytes): Data to write to the pipe.
|
||||
"""
|
||||
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
|
||||
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
|
||||
threading.Thread(
|
||||
target=lambda p, d: open(p, 'wb').write(d),
|
||||
args=(path, data)
|
||||
).start()
|
||||
return path
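A small usage sketch, assuming the consumer (here a plain open()) reads the pipe exactly once, which is what lets the writer thread above block until the data is picked up.

# Illustrative only: write secret bytes to a fifo and read them back once.
path = create_temporary_fifo(b'-----BEGIN CERTIFICATE-----\n...')
with open(path, 'rb') as f:
    data = f.read()  # unblocks the writer thread started above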
|
||||
|
||||
|
||||
def aim_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
client_cert = kwargs.get('client_cert', None)
|
||||
client_key = kwargs.get('client_key', None)
|
||||
verify = kwargs['verify']
|
||||
app_id = kwargs['app_id']
|
||||
object_query = kwargs['object_query']
|
||||
object_query_format = kwargs['object_query_format']
|
||||
reason = kwargs.get('reason', None)
|
||||
|
||||
query_params = {
|
||||
'AppId': app_id,
|
||||
'Query': object_query,
|
||||
'QueryFormat': object_query_format,
|
||||
}
|
||||
if reason:
|
||||
query_params['reason'] = reason
|
||||
|
||||
request_qs = '?' + urlencode(query_params, quote_via=quote)
|
||||
request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
|
||||
|
||||
cert = None
|
||||
if client_cert and client_key:
|
||||
cert = (
|
||||
create_temporary_fifo(client_cert.encode()),
|
||||
create_temporary_fifo(client_key.encode())
|
||||
)
|
||||
elif client_cert:
|
||||
cert = create_temporary_fifo(client_cert.encode())
|
||||
|
||||
res = requests.get(
|
||||
request_url + request_qs,
|
||||
timeout=30,
|
||||
cert=cert,
|
||||
verify=verify,
|
||||
)
|
||||
res.raise_for_status()
|
||||
return res.json()['Content']
|
||||
|
||||
|
||||
aim_plugin = CredentialPlugin(
|
||||
'CyberArk AIM Secret Lookup',
|
||||
inputs=aim_inputs,
|
||||
backend=aim_backend
|
||||
)
|
||||
65  awx/main/credential_plugins/azure_kv.py  Normal file
@@ -0,0 +1,65 @@
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
|
||||
|
||||
azure_keyvault_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
'label': _('Vault URL (DNS Name)'),
|
||||
'type': 'string',
|
||||
'format': 'url',
|
||||
}, {
|
||||
'id': 'client',
|
||||
'label': _('Client ID'),
|
||||
'type': 'string'
|
||||
}, {
|
||||
'id': 'secret',
|
||||
'label': _('Client Secret'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}, {
|
||||
'id': 'tenant',
|
||||
'label': _('Tenant ID'),
|
||||
'type': 'string'
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'secret_field',
|
||||
'label': _('Secret Name'),
|
||||
'type': 'string',
|
||||
'help_text': _('The name of the secret to look up.'),
|
||||
}, {
|
||||
'id': 'secret_version',
|
||||
'label': _('Secret Version'),
|
||||
'type': 'string',
|
||||
'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
|
||||
}],
|
||||
'required': ['url', 'client', 'secret', 'tenant', 'secret_field'],
|
||||
}
|
||||
|
||||
|
||||
def azure_keyvault_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
|
||||
def auth_callback(server, resource, scope):
|
||||
credentials = ServicePrincipalCredentials(
|
||||
url = url,
|
||||
client_id = kwargs['client'],
|
||||
secret = kwargs['secret'],
|
||||
tenant = kwargs['tenant'],
|
||||
resource = "https://vault.azure.net",
|
||||
)
|
||||
token = credentials.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
kv = KeyVaultClient(KeyVaultAuthentication(auth_callback))
|
||||
return kv.get_secret(url, kwargs['secret_field'], kwargs.get('secret_version', '')).value
|
||||
|
||||
|
||||
azure_keyvault_plugin = CredentialPlugin(
|
||||
'Microsoft Azure Key Vault',
|
||||
inputs=azure_keyvault_inputs,
|
||||
backend=azure_keyvault_backend
|
||||
)
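For orientation, the backend above is called with the credential's inputs and the per-field metadata merged into keyword arguments; the values below are placeholders, not working credentials, and running this requires a real service principal.

secret = azure_keyvault_backend(
    url='https://example-vault.vault.azure.net/',  # hypothetical vault URL
    client='client-id',
    secret='client-secret',
    tenant='tenant-id',
    secret_field='db-password',   # name of the secret to look up
    secret_version='',            # empty string means "latest"
)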
|
||||
121  awx/main/credential_plugins/conjur.py  Normal file
@@ -0,0 +1,121 @@
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
import base64
|
||||
import os
|
||||
import stat
|
||||
import tempfile
|
||||
import threading
|
||||
from urllib.parse import urljoin, quote_plus
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import requests
|
||||
|
||||
|
||||
conjur_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
'label': _('Conjur URL'),
|
||||
'type': 'string',
|
||||
'format': 'url',
|
||||
}, {
|
||||
'id': 'api_key',
|
||||
'label': _('API Key'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
}, {
|
||||
'id': 'account',
|
||||
'label': _('Account'),
|
||||
'type': 'string',
|
||||
}, {
|
||||
'id': 'username',
|
||||
'label': _('Username'),
|
||||
'type': 'string',
|
||||
}, {
|
||||
'id': 'cacert',
|
||||
'label': _('Public Key Certificate'),
|
||||
'type': 'string',
|
||||
'multiline': True
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'secret_path',
|
||||
'label': _('Secret Identifier'),
|
||||
'type': 'string',
|
||||
'help_text': _('The identifier for the secret e.g., /some/identifier'),
|
||||
}, {
|
||||
'id': 'secret_version',
|
||||
'label': _('Secret Version'),
|
||||
'type': 'string',
|
||||
'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
|
||||
}],
|
||||
'required': ['url', 'api_key', 'account', 'username'],
|
||||
}
|
||||
|
||||
|
||||
def create_temporary_fifo(data):
|
||||
"""Open fifo named pipe in a new thread using a temporary file path. The
|
||||
thread blocks until data is read from the pipe.
|
||||
|
||||
Returns the path to the fifo.
|
||||
|
||||
:param data(bytes): Data to write to the pipe.
|
||||
"""
|
||||
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
|
||||
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
|
||||
threading.Thread(
|
||||
target=lambda p, d: open(p, 'wb').write(d),
|
||||
args=(path, data)
|
||||
).start()
|
||||
return path
|
||||
|
||||
|
||||
def conjur_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
api_key = kwargs['api_key']
|
||||
account = quote_plus(kwargs['account'])
|
||||
username = quote_plus(kwargs['username'])
|
||||
secret_path = quote_plus(kwargs['secret_path'])
|
||||
version = kwargs.get('secret_version')
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
auth_kwargs = {
|
||||
'headers': {'Content-Type': 'text/plain'},
|
||||
'data': api_key
|
||||
}
|
||||
if cacert:
|
||||
auth_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# https://www.conjur.org/api.html#authentication-authenticate-post
|
||||
resp = requests.post(
|
||||
urljoin(url, '/'.join(['authn', account, username, 'authenticate'])),
|
||||
**auth_kwargs
|
||||
)
|
||||
resp.raise_for_status()
|
||||
token = base64.b64encode(resp.content).decode('utf-8')
|
||||
|
||||
lookup_kwargs = {
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token)},
|
||||
}
|
||||
if cacert:
|
||||
lookup_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
|
||||
path = urljoin(url, '/'.join([
|
||||
'secrets',
|
||||
account,
|
||||
'variable',
|
||||
secret_path
|
||||
]))
|
||||
if version:
|
||||
path = '?'.join([path, version])
|
||||
|
||||
resp = requests.get(path, timeout=30, **lookup_kwargs)
|
||||
resp.raise_for_status()
|
||||
return resp.text
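The flow above is authenticate-then-retrieve: the API key is traded for a short-lived token, and that token authorizes the variable read. A hedged call sketch with placeholder values follows (a real Conjur endpoint is required for this to do anything).

secret = conjur_backend(
    url='https://conjur.example.com',   # hypothetical Conjur URL
    api_key='example-api-key',
    account='myorg',
    username='host/awx',
    secret_path='prod/db/password',
)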
|
||||
|
||||
|
||||
conjur_plugin = CredentialPlugin(
|
||||
'CyberArk Conjur Secret Lookup',
|
||||
inputs=conjur_inputs,
|
||||
backend=conjur_backend
|
||||
)
|
||||
152  awx/main/credential_plugins/hashivault.py  Normal file
@@ -0,0 +1,152 @@
|
||||
import copy
|
||||
import os
|
||||
import pathlib
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
import requests
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
|
||||
base_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
'label': _('Server URL'),
|
||||
'type': 'string',
|
||||
'format': 'url',
|
||||
'help_text': _('The URL to the HashiCorp Vault'),
|
||||
}, {
|
||||
'id': 'token',
|
||||
'label': _('Token'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
'help_text': _('The access token used to authenticate to the Vault server'),
|
||||
}],
|
||||
'metadata': [{
|
||||
'id': 'secret_path',
|
||||
'label': _('Path to Secret'),
|
||||
'type': 'string',
|
||||
'help_text': _('The path to the secret e.g., /some-engine/some-secret/'),
|
||||
}],
|
||||
'required': ['url', 'token', 'secret_path'],
|
||||
}
|
||||
|
||||
hashi_kv_inputs = copy.deepcopy(base_inputs)
|
||||
hashi_kv_inputs['fields'].append({
|
||||
'id': 'api_version',
|
||||
'label': _('API Version'),
|
||||
'choices': ['v1', 'v2'],
|
||||
'help_text': _('API v1 is for static key/value lookups. API v2 is for versioned key/value lookups.'),
|
||||
'default': 'v1',
|
||||
})
|
||||
hashi_kv_inputs['metadata'].extend([{
|
||||
'id': 'secret_key',
|
||||
'label': _('Key Name'),
|
||||
'type': 'string',
|
||||
'help_text': _('The name of the key to look up in the secret.'),
|
||||
}, {
|
||||
'id': 'secret_version',
|
||||
'label': _('Secret Version (v2 only)'),
|
||||
'type': 'string',
|
||||
'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
|
||||
}])
|
||||
hashi_kv_inputs['required'].extend(['api_version', 'secret_key'])
|
||||
|
||||
hashi_ssh_inputs = copy.deepcopy(base_inputs)
|
||||
hashi_ssh_inputs['metadata'] = [{
|
||||
'id': 'public_key',
|
||||
'label': _('Unsigned Public Key'),
|
||||
'type': 'string',
|
||||
'multiline': True,
|
||||
}] + hashi_ssh_inputs['metadata'] + [{
|
||||
'id': 'role',
|
||||
'label': _('Role Name'),
|
||||
'type': 'string',
|
||||
'help_text': _('The name of the role used to sign.')
|
||||
}, {
|
||||
'id': 'valid_principals',
|
||||
'label': _('Valid Principals'),
|
||||
'type': 'string',
|
||||
'help_text': _('Valid principals (either usernames or hostnames) that the certificate should be signed for.'),
|
||||
}]
|
||||
hashi_ssh_inputs['required'].extend(['public_key', 'role'])
|
||||
|
||||
|
||||
def kv_backend(**kwargs):
|
||||
token = kwargs['token']
|
||||
url = urljoin(kwargs['url'], 'v1')
|
||||
secret_path = kwargs['secret_path']
|
||||
secret_key = kwargs.get('secret_key', None)
|
||||
|
||||
api_version = kwargs['api_version']
|
||||
|
||||
sess = requests.Session()
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
if api_version == 'v2':
|
||||
params = {}
|
||||
if kwargs.get('secret_version'):
|
||||
params['version'] = kwargs['secret_version']
|
||||
try:
|
||||
mount_point, *path = pathlib.Path(secret_path.lstrip(os.sep)).parts
|
||||
'/'.join(path)
|
||||
except Exception:
|
||||
mount_point, path = secret_path, []
|
||||
# https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
|
||||
response = sess.get(
|
||||
'/'.join([url, mount_point, 'data'] + path).rstrip('/'),
|
||||
params=params,
|
||||
timeout=30
|
||||
)
|
||||
response.raise_for_status()
|
||||
json = response.json()['data']
|
||||
else:
|
||||
# https://www.vaultproject.io/api/secret/kv/kv-v1.html#read-secret
|
||||
response = sess.get('/'.join([url, secret_path]).rstrip('/'), timeout=30)
|
||||
response.raise_for_status()
|
||||
json = response.json()
|
||||
|
||||
if secret_key:
|
||||
try:
|
||||
return json['data'][secret_key]
|
||||
except KeyError:
|
||||
raise RuntimeError(
|
||||
'{} is not present at {}'.format(secret_key, secret_path)
|
||||
)
|
||||
return json['data']
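To make the v2 branch concrete: the secret path is split into a mount point plus remaining segments, and the kv-v2 'data' component is inserted after the mount point. The values below are illustrative.

import os
import pathlib

secret_path = '/kv/app/db'  # hypothetical kv-v2 path
mount_point, *path = pathlib.Path(secret_path.lstrip(os.sep)).parts
# mount_point == 'kv' and path == ['app', 'db'], so the read is issued
# against <server>/v1/kv/data/app/db, optionally with ?version=N.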
|
||||
|
||||
|
||||
def ssh_backend(**kwargs):
|
||||
token = kwargs['token']
|
||||
url = urljoin(kwargs['url'], 'v1')
|
||||
secret_path = kwargs['secret_path']
|
||||
role = kwargs['role']
|
||||
|
||||
sess = requests.Session()
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
json = {
|
||||
'public_key': kwargs['public_key']
|
||||
}
|
||||
if kwargs.get('valid_principals'):
|
||||
json['valid_principals'] = kwargs['valid_principals']
|
||||
# https://www.vaultproject.io/api/secret/ssh/index.html#sign-ssh-key
|
||||
resp = sess.post(
|
||||
'/'.join([url, secret_path, 'sign', role]).rstrip('/'),
|
||||
json=json,
|
||||
timeout=30
|
||||
)
|
||||
resp.raise_for_status()
|
||||
return resp.json()['data']['signed_key']
|
||||
|
||||
|
||||
hashivault_kv_plugin = CredentialPlugin(
|
||||
'HashiCorp Vault Secret Lookup',
|
||||
inputs=hashi_kv_inputs,
|
||||
backend=kv_backend
|
||||
)
|
||||
|
||||
hashivault_ssh_plugin = CredentialPlugin(
|
||||
'HashiCorp Vault Signed SSH',
|
||||
inputs=hashi_ssh_inputs,
|
||||
backend=ssh_backend
|
||||
)
|
||||
3  awx/main/credential_plugins/plugin.py  Normal file
@@ -0,0 +1,3 @@
|
||||
from collections import namedtuple
|
||||
|
||||
CredentialPlugin = namedtuple('CredentialPlugin', ['name', 'inputs', 'backend'])
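A hedged sketch of the contract this namedtuple captures: each plugin module builds one instance, and AWX resolves a secret by calling its backend with the credential's inputs and metadata as keyword arguments. The demo plugin below is illustrative only.

# Illustrative only: a trivial plugin whose backend returns a static value.
demo_plugin = CredentialPlugin(
    'Demo Lookup',
    inputs={'fields': [], 'metadata': [], 'required': []},
    backend=lambda **kwargs: 'example-secret',
)
assert demo_plugin.backend() == 'example-secret'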
|
||||
155  awx/main/db/profiled_pg/base.py  Normal file
@@ -0,0 +1,155 @@
|
||||
import os
|
||||
import pkg_resources
|
||||
import sqlite3
|
||||
import sys
|
||||
import traceback
|
||||
import uuid
|
||||
|
||||
from django.core.cache import cache
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.db.backends.postgresql.base import DatabaseWrapper as BaseDatabaseWrapper
|
||||
|
||||
from awx.main.utils import memoize
|
||||
|
||||
__loc__ = LocMemCache(str(uuid.uuid4()), {})
|
||||
__all__ = ['DatabaseWrapper']
|
||||
|
||||
|
||||
class RecordedQueryLog(object):
|
||||
|
||||
def __init__(self, log, db, dest='/var/log/tower/profile'):
|
||||
self.log = log
|
||||
self.db = db
|
||||
self.dest = dest
|
||||
try:
|
||||
self.threshold = cache.get('awx-profile-sql-threshold')
|
||||
except Exception:
|
||||
# if we can't reach memcached, just assume profiling's off
|
||||
self.threshold = None
|
||||
|
||||
def append(self, query):
|
||||
ret = self.log.append(query)
|
||||
try:
|
||||
self.write(query)
|
||||
except Exception:
|
||||
# not sure what else to do here - we can't really safely
|
||||
# *use* our loggers because it'll just generate more DB queries
|
||||
# and potentially recurse into this state again
|
||||
_, _, tb = sys.exc_info()
|
||||
traceback.print_tb(tb)
|
||||
return ret
|
||||
|
||||
def write(self, query):
|
||||
if self.threshold is None:
|
||||
return
|
||||
seconds = float(query['time'])
|
||||
|
||||
# if the query is slow enough...
|
||||
if seconds >= self.threshold:
|
||||
sql = query['sql']
|
||||
if sql.startswith('EXPLAIN'):
|
||||
return
|
||||
|
||||
# build a printable Python stack
|
||||
bt = ' '.join(traceback.format_stack())
|
||||
|
||||
# and re-run the same query w/ EXPLAIN
|
||||
explain = ''
|
||||
cursor = self.db.cursor()
|
||||
cursor.execute('EXPLAIN VERBOSE {}'.format(sql))
|
||||
for line in cursor.fetchall():
|
||||
explain += line[0] + '\n'
|
||||
|
||||
# write a row of data into a per-PID sqlite database
|
||||
if not os.path.isdir(self.dest):
|
||||
os.makedirs(self.dest)
|
||||
progname = ' '.join(sys.argv)
|
||||
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'runworker'):
|
||||
if match in progname:
|
||||
progname = match
|
||||
break
|
||||
else:
|
||||
progname = os.path.basename(sys.argv[0])
|
||||
filepath = os.path.join(
|
||||
self.dest,
|
||||
'{}.sqlite'.format(progname)
|
||||
)
|
||||
version = pkg_resources.get_distribution('awx').version
|
||||
log = sqlite3.connect(filepath, timeout=3)
|
||||
log.execute(
|
||||
'CREATE TABLE IF NOT EXISTS queries ('
|
||||
' id INTEGER PRIMARY KEY,'
|
||||
' version TEXT,'
|
||||
' pid INTEGER,'
|
||||
' stamp DATETIME DEFAULT CURRENT_TIMESTAMP,'
|
||||
' argv REAL,'
|
||||
' time REAL,'
|
||||
' sql TEXT,'
|
||||
' explain TEXT,'
|
||||
' bt TEXT'
|
||||
');'
|
||||
)
|
||||
log.commit()
|
||||
log.execute(
|
||||
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) '
|
||||
'VALUES (?, ?, ?, ?, ?, ?, ?);',
|
||||
(os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt)
|
||||
)
|
||||
log.commit()
|
||||
|
||||
def __len__(self):
|
||||
return len(self.log)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.log)
|
||||
|
||||
def __getattr__(self, attr):
|
||||
return getattr(self.log, attr)
|
||||
|
||||
|
||||
class DatabaseWrapper(BaseDatabaseWrapper):
|
||||
"""
|
||||
This is a special subclass of Django's postgres DB backend which - based on
|
||||
the value of a special flag in memcached - captures slow queries and
|
||||
writes profile and Python stack metadata to the disk.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DatabaseWrapper, self).__init__(*args, **kwargs)
|
||||
# Django's default base wrapper implementation has `queries_log`
|
||||
# which is a `collections.deque` that every query is appended to
|
||||
#
|
||||
# this line wraps the deque with a proxy that can capture each query
|
||||
# and - if it's slow enough - record profiling metadata to the file
|
||||
# system for debugging purposes
|
||||
self.queries_log = RecordedQueryLog(self.queries_log, self)
|
||||
|
||||
@property
|
||||
@memoize(ttl=1, cache=__loc__)
|
||||
def force_debug_cursor(self):
|
||||
# in Django's base DB implementation, `self.force_debug_cursor` is just
|
||||
# a simple boolean, and this value is used to signal to Django that it
|
||||
# should record queries into `self.queries_log` as they're executed (this
|
||||
# is the same mechanism used by libraries like the django-debug-toolbar)
|
||||
#
|
||||
# in _this_ implementation, we represent it as a property which will
|
||||
# check memcache for a special flag to be set (when the flag is set, it
|
||||
# means we should start recording queries because somebody called
|
||||
# `awx-manage profile_sql`)
|
||||
#
|
||||
# it's worth noting that this property is wrapped w/ @memoize because
|
||||
# Django references this attribute _constantly_ (in particular, once
|
||||
# per executed query); doing a memcached.get() _at most_ once per
|
||||
# second is a good enough window to detect when profiling is turned
|
||||
# on/off by a system administrator
|
||||
try:
|
||||
threshold = cache.get('awx-profile-sql-threshold')
|
||||
except Exception:
|
||||
# if we can't reach memcached, just assume profiling's off
|
||||
threshold = None
|
||||
self.queries_log.threshold = threshold
|
||||
return threshold is not None
|
||||
|
||||
@force_debug_cursor.setter
|
||||
def force_debug_cursor(self, v):
|
||||
return
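The property above only turns query recording on while a threshold is present in memcached. A minimal sketch of flipping that flag directly follows (the awx-manage profile_sql command mentioned in the comments is the intended entry point; the direct cache calls here are just for illustration).

from django.core.cache import cache

# Record any query slower than 2 seconds for the next 5 minutes.
cache.set('awx-profile-sql-threshold', 2.0, timeout=300)

# Turn profiling back off.
cache.delete('awx-profile-sql-threshold')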
|
||||
@@ -4,7 +4,8 @@ import socket
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from kombu import Connection, Queue, Exchange, Producer, Consumer
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
from kombu import Queue, Exchange, Producer, Consumer
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
42  awx/main/dispatch/kombu.py  Normal file
@@ -0,0 +1,42 @@
|
||||
from amqp.exceptions import PreconditionFailed
|
||||
from django.conf import settings
|
||||
from kombu.connection import Connection as KombuConnection
|
||||
from kombu.transport import pyamqp
|
||||
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
__all__ = ['Connection']
|
||||
|
||||
|
||||
class Connection(KombuConnection):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Connection, self).__init__(*args, **kwargs)
|
||||
class _Channel(pyamqp.Channel):
|
||||
|
||||
def queue_declare(self, queue, *args, **kwargs):
|
||||
kwargs['durable'] = settings.BROKER_DURABILITY
|
||||
try:
|
||||
return super(_Channel, self).queue_declare(queue, *args, **kwargs)
|
||||
except PreconditionFailed as e:
|
||||
if "inequivalent arg 'durable'" in getattr(e, 'reply_text', None):
|
||||
logger.error(
|
||||
'queue {} durability is not {}, deleting and recreating'.format(
|
||||
|
||||
queue,
|
||||
kwargs['durable']
|
||||
)
|
||||
)
|
||||
self.queue_delete(queue)
|
||||
return super(_Channel, self).queue_declare(queue, *args, **kwargs)
|
||||
|
||||
class _Connection(pyamqp.Connection):
|
||||
Channel = _Channel
|
||||
|
||||
class _Transport(pyamqp.Transport):
|
||||
Connection = _Connection
|
||||
|
||||
self.transport_cls = _Transport
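A minimal usage sketch, assuming the broker URL is available as settings.BROKER_URL and using standard kombu idioms; the queue name is illustrative. Queues declared through this connection pick up settings.BROKER_DURABILITY and are deleted and recreated if an existing queue disagrees.

from django.conf import settings
from kombu import Queue

from awx.main.dispatch.kombu import Connection

with Connection(settings.BROKER_URL) as conn:
    channel = conn.channel()
    # Declaration goes through the patched _Channel.queue_declare above.
    Queue('example_queue')(channel).declare()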
|
||||
@@ -1,6 +1,7 @@
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
@@ -18,7 +19,10 @@ import psutil
|
||||
from awx.main.models import UnifiedJob
|
||||
from awx.main.dispatch import reaper
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
else:
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
class PoolWorker(object):
|
||||
|
||||
@@ -4,7 +4,9 @@ import sys
|
||||
from uuid import uuid4
|
||||
|
||||
from django.conf import settings
|
||||
from kombu import Connection, Exchange, Producer
|
||||
from kombu import Exchange, Producer
|
||||
|
||||
from awx.main.dispatch.kombu import Connection
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
import os
|
||||
import logging
|
||||
import signal
|
||||
import sys
|
||||
from uuid import UUID
|
||||
from queue import Empty as QueueEmpty
|
||||
|
||||
@@ -13,7 +14,10 @@ from kombu.mixins import ConsumerMixin
|
||||
|
||||
from awx.main.dispatch.pool import WorkerPool
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
else:
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
def signame(sig):
|
||||
@@ -108,7 +112,7 @@ class AWXConsumer(ConsumerMixin):
|
||||
|
||||
def stop(self, signum, frame):
|
||||
self.should_stop = True # this makes the kombu mixin stop consuming
|
||||
logger.debug('received {}, stopping'.format(signame(signum)))
|
||||
logger.warn('received {}, stopping'.format(signame(signum)))
|
||||
self.worker.on_stop()
|
||||
raise SystemExit()
|
||||
|
||||
|
||||
@@ -20,8 +20,8 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
A worker implementation that deserializes callback event data and persists
|
||||
it into the database.
|
||||
|
||||
The code that *builds* these types of messages is found in the AWX display
|
||||
callback (`awx.lib.awx_display_callback`).
|
||||
The code that *generates* these types of messages is found in the
|
||||
ansible-runner display callback plugin.
|
||||
'''
|
||||
|
||||
MAX_RETRIES = 2
|
||||
|
||||
@@ -1,507 +0,0 @@
|
||||
import base64
|
||||
import codecs
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import stat
|
||||
import tempfile
|
||||
import time
|
||||
import logging
|
||||
from distutils.version import LooseVersion as Version
|
||||
from io import StringIO
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import smart_bytes, smart_str
|
||||
|
||||
import awx
|
||||
from awx.main.expect import run
|
||||
from awx.main.utils import OutputEventFilter, get_system_task_capacity
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
|
||||
logger = logging.getLogger('awx.isolated.manager')
|
||||
playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')
|
||||
|
||||
|
||||
class IsolatedManager(object):
|
||||
|
||||
def __init__(self, args, cwd, env, stdout_handle, ssh_key_path,
|
||||
expect_passwords={}, cancelled_callback=None, job_timeout=0,
|
||||
idle_timeout=None, extra_update_fields=None,
|
||||
pexpect_timeout=5, proot_cmd='bwrap'):
|
||||
"""
|
||||
:param args: a list of `subprocess.call`-style arguments
|
||||
representing a subprocess e.g.,
|
||||
['ansible-playbook', '...']
|
||||
:param cwd: the directory where the subprocess should run,
|
||||
generally the directory where playbooks exist
|
||||
:param env: a dict containing environment variables for the
|
||||
subprocess, ala `os.environ`
|
||||
:param stdout_handle: a file-like object for capturing stdout
|
||||
:param ssh_key_path: a filepath where SSH key data can be read
|
||||
:param expect_passwords: a dict of regular expression password prompts
|
||||
to input values, i.e., {r'Password:*?$':
|
||||
'some_password'}
|
||||
:param cancelled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
cancelled
|
||||
:param job_timeout a timeout (in seconds); if the total job runtime
|
||||
exceeds this, the process will be killed
|
||||
:param idle_timeout a timeout (in seconds); if new output is not
|
||||
sent to stdout in this interval, the process
|
||||
will be terminated
|
||||
:param extra_update_fields: a dict used to specify DB fields which should
|
||||
be updated on the underlying model
|
||||
object after execution completes
|
||||
:param pexpect_timeout a timeout (in seconds) to wait on
|
||||
`pexpect.spawn().expect()` calls
|
||||
:param proot_cmd the command used to isolate processes, `bwrap`
|
||||
"""
|
||||
self.args = args
|
||||
self.cwd = cwd
|
||||
self.isolated_env = self._redact_isolated_env(env.copy())
|
||||
self.management_env = self._base_management_env()
|
||||
self.stdout_handle = stdout_handle
|
||||
self.ssh_key_path = ssh_key_path
|
||||
self.expect_passwords = {k.pattern: v for k, v in expect_passwords.items()}
|
||||
self.cancelled_callback = cancelled_callback
|
||||
self.job_timeout = job_timeout
|
||||
self.idle_timeout = idle_timeout
|
||||
self.extra_update_fields = extra_update_fields
|
||||
self.pexpect_timeout = pexpect_timeout
|
||||
self.proot_cmd = proot_cmd
|
||||
self.started_at = None
|
||||
|
||||
@staticmethod
|
||||
def _base_management_env():
|
||||
'''
|
||||
Returns environment variables to use when running a playbook
|
||||
that manages the isolated instance.
|
||||
Use of normal job callback and other such configurations are avoided.
|
||||
'''
|
||||
env = dict(os.environ.items())
|
||||
env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
|
||||
env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
|
||||
env['ANSIBLE_LIBRARY'] = os.path.join(os.path.dirname(awx.__file__), 'plugins', 'isolated')
|
||||
return env
|
||||
|
||||
@staticmethod
|
||||
def _build_args(playbook, hosts, extra_vars=None):
|
||||
'''
|
||||
Returns list of Ansible CLI command arguments for a management task
|
||||
|
||||
:param playbook: name of the playbook to run
|
||||
:param hosts: host pattern to operate on, ex. "localhost,"
|
||||
:param extra_vars: optional dictionary of extra_vars to apply
|
||||
'''
|
||||
args = [
|
||||
'ansible-playbook',
|
||||
playbook,
|
||||
'-u', settings.AWX_ISOLATED_USERNAME,
|
||||
'-T', str(settings.AWX_ISOLATED_CONNECTION_TIMEOUT),
|
||||
'-i', hosts
|
||||
]
|
||||
if extra_vars:
|
||||
args.extend(['-e', json.dumps(extra_vars)])
|
||||
if settings.AWX_ISOLATED_VERBOSITY:
|
||||
args.append('-%s' % ('v' * min(5, settings.AWX_ISOLATED_VERBOSITY)))
|
||||
return args
|
||||
|
||||
@staticmethod
|
||||
def _redact_isolated_env(env):
|
||||
'''
|
||||
strips some environment variables that aren't applicable to
|
||||
job execution within the isolated instance
|
||||
'''
|
||||
for var in (
|
||||
'HOME', 'RABBITMQ_HOST', 'RABBITMQ_PASS', 'RABBITMQ_USER', 'CACHE',
|
||||
'DJANGO_PROJECT_DIR', 'DJANGO_SETTINGS_MODULE', 'RABBITMQ_VHOST'):
|
||||
env.pop(var, None)
|
||||
return env
|
||||
|
||||
@classmethod
|
||||
def awx_playbook_path(cls):
|
||||
return os.path.abspath(os.path.join(
|
||||
os.path.dirname(awx.__file__),
|
||||
'playbooks'
|
||||
))
|
||||
|
||||
def path_to(self, *args):
|
||||
return os.path.join(self.private_data_dir, *args)
|
||||
|
||||
def dispatch(self):
|
||||
'''
|
||||
Compile the playbook, its environment, and metadata into a series
|
||||
of files, and ship to a remote host for isolated execution.
|
||||
'''
|
||||
self.started_at = time.time()
|
||||
secrets = {
|
||||
'env': self.isolated_env,
|
||||
'passwords': self.expect_passwords,
|
||||
'ssh_key_data': None,
|
||||
'idle_timeout': self.idle_timeout,
|
||||
'job_timeout': self.job_timeout,
|
||||
'pexpect_timeout': self.pexpect_timeout
|
||||
}
|
||||
|
||||
# if an ssh private key fifo exists, read its contents and delete it
|
||||
if self.ssh_key_path:
|
||||
buff = StringIO()
|
||||
with open(self.ssh_key_path, 'r') as fifo:
|
||||
for line in fifo:
|
||||
buff.write(line)
|
||||
secrets['ssh_key_data'] = buff.getvalue()
|
||||
os.remove(self.ssh_key_path)
|
||||
|
||||
# write the entire secret payload to a named pipe
|
||||
# the run_isolated.yml playbook will use a lookup to read this data
|
||||
# into a variable, and will replicate the data into a named pipe on the
|
||||
# isolated instance
|
||||
secrets_path = os.path.join(self.private_data_dir, 'env')
|
||||
run.open_fifo_write(
|
||||
secrets_path,
|
||||
smart_str(base64.b64encode(smart_bytes(json.dumps(secrets))))
|
||||
)
|
||||
|
||||
self.build_isolated_job_data()
|
||||
|
||||
extra_vars = {
|
||||
'src': self.private_data_dir,
|
||||
'dest': settings.AWX_PROOT_BASE_PATH,
|
||||
}
|
||||
if self.proot_temp_dir:
|
||||
extra_vars['proot_temp_dir'] = self.proot_temp_dir
|
||||
|
||||
# Run ansible-playbook to launch a job on the isolated host. This:
|
||||
#
|
||||
# - sets up a temporary directory for proot/bwrap (if necessary)
|
||||
# - copies encrypted job data from the controlling host to the isolated host (with rsync)
|
||||
# - writes the encryption secret to a named pipe on the isolated host
|
||||
# - launches the isolated playbook runner via `awx-expect start <job-id>`
|
||||
args = self._build_args('run_isolated.yml', '%s,' % self.host, extra_vars)
|
||||
if self.instance.verbosity:
|
||||
args.append('-%s' % ('v' * min(5, self.instance.verbosity)))
|
||||
buff = StringIO()
|
||||
logger.debug('Starting job {} on isolated host with `run_isolated.yml` playbook.'.format(self.instance.id))
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, self.awx_playbook_path(), self.management_env, buff,
|
||||
idle_timeout=self.idle_timeout,
|
||||
job_timeout=settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
|
||||
pexpect_timeout=5
|
||||
)
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
playbook_logger.info('Isolated job {} dispatch:\n{}'.format(self.instance.id, output))
|
||||
if status != 'successful':
|
||||
self.stdout_handle.write(output)
|
||||
return status, rc
|
||||
|
||||
@classmethod
|
||||
def run_pexpect(cls, pexpect_args, *args, **kw):
|
||||
isolated_ssh_path = None
|
||||
try:
|
||||
if all([
|
||||
getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
|
||||
getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)
|
||||
]):
|
||||
isolated_ssh_path = tempfile.mkdtemp(prefix='awx_isolated', dir=settings.AWX_PROOT_BASE_PATH)
|
||||
os.chmod(isolated_ssh_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
isolated_key = os.path.join(isolated_ssh_path, '.isolated')
|
||||
ssh_sock = os.path.join(isolated_ssh_path, '.isolated_ssh_auth.sock')
|
||||
run.open_fifo_write(isolated_key, settings.AWX_ISOLATED_PRIVATE_KEY)
|
||||
pexpect_args = run.wrap_args_with_ssh_agent(pexpect_args, isolated_key, ssh_sock, silence_ssh_add=True)
|
||||
return run.run_pexpect(pexpect_args, *args, **kw)
|
||||
finally:
|
||||
if isolated_ssh_path:
|
||||
shutil.rmtree(isolated_ssh_path)
|
||||
|
||||
def build_isolated_job_data(self):
|
||||
'''
|
||||
Write the playbook and metadata into a collection of files on the local
|
||||
file system.
|
||||
|
||||
This function is intended to be used to compile job data so that it
|
||||
can be shipped to a remote, isolated host (via ssh).
|
||||
'''
|
||||
|
||||
rsync_exclude = [
|
||||
# don't rsync source control metadata (it can be huge!)
|
||||
'- /project/.git',
|
||||
'- /project/.svn',
|
||||
'- /project/.hg',
|
||||
# don't rsync job events that are in the process of being written
|
||||
'- /artifacts/job_events/*-partial.json.tmp',
|
||||
# rsync can't copy named pipe data - we're replicating this manually ourselves in the playbook
|
||||
'- /env'
|
||||
]
|
||||
|
||||
for filename, data in (
|
||||
['.rsync-filter', '\n'.join(rsync_exclude)],
|
||||
['args', json.dumps(self.args)]
|
||||
):
|
||||
path = self.path_to(filename)
|
||||
with open(path, 'w') as f:
|
||||
f.write(data)
|
||||
os.chmod(path, stat.S_IRUSR)
|
||||
|
||||
# symlink the scm checkout (if there is one) so that it's rsync'ed over, too
|
||||
if 'AD_HOC_COMMAND_ID' not in self.isolated_env:
|
||||
os.symlink(self.cwd, self.path_to('project'))
|
||||
|
||||
# create directories for build artifacts to live in
|
||||
os.makedirs(self.path_to('artifacts', 'job_events'), mode=stat.S_IXUSR + stat.S_IWUSR + stat.S_IRUSR)
|
||||
|
||||
def _missing_artifacts(self, path_list, output):
|
||||
missing_artifacts = list(filter(lambda path: not os.path.exists(path), path_list))
|
||||
for path in missing_artifacts:
|
||||
self.stdout_handle.write('ansible did not exit cleanly, missing `{}`.\n'.format(path))
|
||||
if missing_artifacts:
|
||||
daemon_path = self.path_to('artifacts', 'daemon.log')
|
||||
if os.path.exists(daemon_path):
|
||||
# If available, show log files from the run.py call
|
||||
with codecs.open(daemon_path, 'r', encoding='utf-8') as f:
|
||||
self.stdout_handle.write(f.read())
|
||||
else:
|
||||
# Provide the management playbook standard out if not available
|
||||
self.stdout_handle.write(output)
|
||||
return True
|
||||
return False
|
||||
|
||||
def check(self, interval=None):
|
||||
"""
|
||||
Repeatedly poll the isolated node to determine if the job has run.
|
||||
|
||||
On success, copy job artifacts to the controlling node.
|
||||
On failure, continue to poll the isolated node (until the job timeout
|
||||
is exceeded).
|
||||
|
||||
For a completed job run, this function returns (status, rc),
|
||||
representing the status and return code of the isolated
|
||||
`ansible-playbook` run.
|
||||
|
||||
:param interval: an interval (in seconds) to wait between status polls
|
||||
"""
|
||||
interval = interval if interval is not None else settings.AWX_ISOLATED_CHECK_INTERVAL
|
||||
extra_vars = {'src': self.private_data_dir}
|
||||
args = self._build_args('check_isolated.yml', '%s,' % self.host, extra_vars)
|
||||
if self.instance.verbosity:
|
||||
args.append('-%s' % ('v' * min(5, self.instance.verbosity)))
|
||||
|
||||
status = 'failed'
|
||||
output = ''
|
||||
rc = None
|
||||
buff = StringIO()
|
||||
last_check = time.time()
|
||||
seek = 0
|
||||
job_timeout = remaining = self.job_timeout
|
||||
while status == 'failed':
|
||||
if job_timeout != 0:
|
||||
remaining = max(0, job_timeout - (time.time() - self.started_at))
|
||||
if remaining == 0:
|
||||
# if it takes longer than $REMAINING_JOB_TIMEOUT to retrieve
|
||||
# job artifacts from the host, consider the job failed
|
||||
if isinstance(self.extra_update_fields, dict):
|
||||
self.extra_update_fields['job_explanation'] = "Job terminated due to timeout"
|
||||
status = 'failed'
|
||||
break
|
||||
|
||||
canceled = self.cancelled_callback() if self.cancelled_callback else False
|
||||
if not canceled and time.time() - last_check < interval:
|
||||
# If the job isn't cancelled, but we haven't waited `interval` seconds, wait longer
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
buff = StringIO()
|
||||
logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, self.awx_playbook_path(), self.management_env, buff,
|
||||
cancelled_callback=self.cancelled_callback,
|
||||
idle_timeout=remaining,
|
||||
job_timeout=remaining,
|
||||
pexpect_timeout=5,
|
||||
proot_cmd=self.proot_cmd
|
||||
)
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
playbook_logger.info('Isolated job {} check:\n{}'.format(self.instance.id, output))
|
||||
|
||||
path = self.path_to('artifacts', 'stdout')
|
||||
if os.path.exists(path):
|
||||
with codecs.open(path, 'r', encoding='utf-8') as f:
|
||||
f.seek(seek)
|
||||
for line in f:
|
||||
self.stdout_handle.write(line)
|
||||
seek += len(line)
|
||||
|
||||
last_check = time.time()
|
||||
|
||||
if status == 'successful':
|
||||
status_path = self.path_to('artifacts', 'status')
|
||||
rc_path = self.path_to('artifacts', 'rc')
|
||||
if self._missing_artifacts([status_path, rc_path], output):
|
||||
status = 'failed'
|
||||
rc = 1
|
||||
else:
|
||||
with open(status_path, 'r') as f:
|
||||
status = f.readline()
|
||||
with open(rc_path, 'r') as f:
|
||||
rc = int(f.readline())
|
||||
elif status == 'failed':
|
||||
# if we were unable to retrieve job results from the isolated host,
|
||||
# print stdout of the `check_isolated.yml` playbook for clues
|
||||
self.stdout_handle.write(smart_str(output))
|
||||
|
||||
return status, rc
|
||||
|
||||
def cleanup(self):
|
||||
# If the job failed for any reason, make a last-ditch effort at cleanup
|
||||
extra_vars = {
|
||||
'private_data_dir': self.private_data_dir,
|
||||
'cleanup_dirs': [
|
||||
self.private_data_dir,
|
||||
self.proot_temp_dir,
|
||||
],
|
||||
}
|
||||
args = self._build_args('clean_isolated.yml', '%s,' % self.host, extra_vars)
|
||||
logger.debug('Cleaning up job {} on isolated host with `clean_isolated.yml` playbook.'.format(self.instance.id))
|
||||
buff = StringIO()
|
||||
timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, self.awx_playbook_path(), self.management_env, buff,
|
||||
idle_timeout=timeout, job_timeout=timeout,
|
||||
pexpect_timeout=5
|
||||
)
|
||||
output = buff.getvalue().encode('utf-8')
|
||||
playbook_logger.info('Isolated job {} cleanup:\n{}'.format(self.instance.id, output))
|
||||
|
||||
if status != 'successful':
|
||||
# stdout_handle is closed by this point so writing output to logs is our only option
|
||||
logger.warning('Isolated job {} cleanup error, output:\n{}'.format(self.instance.id, output))
|
||||
|
||||
@classmethod
|
||||
def update_capacity(cls, instance, task_result, awx_application_version):
|
||||
instance.version = task_result['version']
|
||||
|
||||
isolated_version = instance.version.split("-", 1)[0]
|
||||
cluster_version = awx_application_version.split("-", 1)[0]
|
||||
|
||||
if Version(cluster_version) > Version(isolated_version):
|
||||
err_template = "Isolated instance {} reports version {}, cluster node is at {}, setting capacity to zero."
|
||||
logger.error(err_template.format(instance.hostname, instance.version, awx_application_version))
|
||||
instance.capacity = 0
|
||||
else:
|
||||
if instance.capacity == 0 and task_result['capacity_cpu']:
|
||||
logger.warning('Isolated instance {} has re-joined.'.format(instance.hostname))
|
||||
instance.cpu_capacity = int(task_result['capacity_cpu'])
|
||||
instance.mem_capacity = int(task_result['capacity_mem'])
|
||||
instance.capacity = get_system_task_capacity(scale=instance.capacity_adjustment,
|
||||
cpu_capacity=int(task_result['capacity_cpu']),
|
||||
mem_capacity=int(task_result['capacity_mem']))
|
||||
instance.save(update_fields=['cpu_capacity', 'mem_capacity', 'capacity', 'version', 'modified'])
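# A small illustration of the version guard above; it assumes `Version` is a
# loose version parser (e.g. distutils' LooseVersion) as imported elsewhere in
# this module. Only the part before the first '-' is compared, so build
# suffixes such as '-devel' are ignored:
#
#   "3.3.1-devel".split("-", 1)[0]        # -> "3.3.1"
#   Version("3.4.0") > Version("3.3.1")   # -> True: the isolated node is older,
#                                         #    so its capacity is forced to zero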
|
||||
|
||||
@classmethod
|
||||
def health_check(cls, instance_qs, awx_application_version):
|
||||
'''
|
||||
:param instance_qs: List of Django objects representing the
|
||||
isolated instances to manage
|
||||
Runs playbook that will
|
||||
- determine if instance is reachable
|
||||
- find the instance capacity
|
||||
- clean up orphaned private files
|
||||
Performs save on each instance to update its capacity.
|
||||
'''
|
||||
hostname_string = ''
|
||||
for instance in instance_qs:
|
||||
hostname_string += '{},'.format(instance.hostname)
|
||||
args = cls._build_args('heartbeat_isolated.yml', hostname_string)
|
||||
args.extend(['--forks', str(len(instance_qs))])
|
||||
env = cls._base_management_env()
|
||||
|
||||
try:
|
||||
facts_path = tempfile.mkdtemp()
|
||||
env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
|
||||
env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = facts_path
|
||||
|
||||
buff = StringIO()
|
||||
timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
|
||||
status, rc = IsolatedManager.run_pexpect(
|
||||
args, cls.awx_playbook_path(), env, buff,
|
||||
idle_timeout=timeout, job_timeout=timeout,
|
||||
pexpect_timeout=5
|
||||
)
|
||||
heartbeat_stdout = buff.getvalue().encode('utf-8')
|
||||
buff.close()
|
||||
|
||||
for instance in instance_qs:
|
||||
output = heartbeat_stdout
|
||||
task_result = {}
|
||||
try:
|
||||
with open(os.path.join(facts_path, instance.hostname), 'r') as facts_data:
|
||||
output = facts_data.read()
|
||||
task_result = json.loads(output)
|
||||
except Exception:
|
||||
logger.exception('Failed to read status from isolated instances, output:\n {}'.format(output))
|
||||
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
|
||||
task_result = {
|
||||
'capacity_cpu': task_result['awx_capacity_cpu'],
|
||||
'capacity_mem': task_result['awx_capacity_mem'],
|
||||
'version': task_result['awx_capacity_version']
|
||||
}
|
||||
cls.update_capacity(instance, task_result, awx_application_version)
|
||||
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
|
||||
elif instance.capacity == 0:
|
||||
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
|
||||
instance.hostname))
|
||||
else:
|
||||
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
|
||||
if instance.is_lost(isolated=True):
|
||||
instance.capacity = 0
|
||||
instance.save(update_fields=['capacity'])
|
||||
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
|
||||
instance.hostname, instance.modified))
|
||||
finally:
|
||||
if os.path.exists(facts_path):
|
||||
shutil.rmtree(facts_path)
|
||||
|
||||
@staticmethod
|
||||
def get_stdout_handle(instance, private_data_dir, event_data_key='job_id'):
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
|
||||
def job_event_callback(event_data):
|
||||
event_data.setdefault(event_data_key, instance.id)
|
||||
if 'uuid' in event_data:
|
||||
filename = '{}-partial.json'.format(event_data['uuid'])
|
||||
partial_filename = os.path.join(private_data_dir, 'artifacts', 'job_events', filename)
|
||||
try:
|
||||
with codecs.open(partial_filename, 'r', encoding='utf-8') as f:
|
||||
partial_event_data = json.load(f)
|
||||
event_data.update(partial_event_data)
|
||||
except IOError:
|
||||
if event_data.get('event', '') != 'verbose':
|
||||
logger.error('Missing callback data for event type `{}`, uuid {}, job {}.\nevent_data: {}'.format(
|
||||
event_data.get('event', ''), event_data['uuid'], instance.id, event_data))
|
||||
dispatcher.dispatch(event_data)
|
||||
|
||||
return OutputEventFilter(job_event_callback)
|
||||
|
||||
def run(self, instance, private_data_dir, proot_temp_dir):
|
||||
"""
|
||||
Run a job on an isolated host.
|
||||
|
||||
:param instance: a `model.Job` instance
|
||||
:param private_data_dir: an absolute path on the local file system
|
||||
where job-specific data should be written
|
||||
(i.e., `/tmp/ansible_awx_xyz/`)
|
||||
:param proot_temp_dir: a temporary directory which bwrap maps
|
||||
restricted paths to
|
||||
|
||||
For a completed job run, this function returns (status, rc),
|
||||
representing the status and return code of the isolated
|
||||
`ansible-playbook` run.
|
||||
"""
|
||||
self.instance = instance
|
||||
self.host = instance.execution_node
|
||||
self.private_data_dir = private_data_dir
|
||||
self.proot_temp_dir = proot_temp_dir
|
||||
status, rc = self.dispatch()
|
||||
if status == 'successful':
|
||||
status, rc = self.check()
|
||||
self.cleanup()
|
||||
return status, rc
|
||||
@@ -1,333 +0,0 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
import argparse
|
||||
import base64
|
||||
import codecs
|
||||
import collections
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import stat
|
||||
import pipes
|
||||
import re
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
try:
|
||||
from io import StringIO
|
||||
except ImportError:
|
||||
from StringIO import StringIO
|
||||
|
||||
import pexpect
|
||||
import psutil
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.utils.expect')
|
||||
|
||||
|
||||
def args2cmdline(*args):
|
||||
return ' '.join([pipes.quote(a) for a in args])
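# Illustrative usage (the key path is a made-up example): each argument is
# shell-quoted individually, so values containing spaces survive being joined
# into one command string.
#
#   args2cmdline('ssh-add', '/tmp/key with spaces')
#   # -> "ssh-add '/tmp/key with spaces'"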
|
||||
|
||||
|
||||
def wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
|
||||
if ssh_key_path:
|
||||
ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
|
||||
if silence_ssh_add:
|
||||
ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
|
||||
cmd = ' && '.join([ssh_add_command,
|
||||
args2cmdline('rm', '-f', ssh_key_path),
|
||||
args2cmdline(*args)])
|
||||
args = ['ssh-agent']
|
||||
if ssh_auth_sock:
|
||||
args.extend(['-a', ssh_auth_sock])
|
||||
args.extend(['sh', '-c', cmd])
|
||||
return args
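# A minimal sketch of what wrap_args_with_ssh_agent produces; the paths and
# playbook name are hypothetical. The original argv is rewritten so that it
# runs inside an ssh-agent session which loads, then deletes, the key file
# before invoking the real command:
#
#   wrap_args_with_ssh_agent(['ansible-playbook', 'site.yml'],
#                            '/tmp/pdd/ssh_key_data', '/tmp/pdd/ssh_auth.sock')
#   # -> ['ssh-agent', '-a', '/tmp/pdd/ssh_auth.sock', 'sh', '-c',
#   #     "ssh-add /tmp/pdd/ssh_key_data && rm -f /tmp/pdd/ssh_key_data && ansible-playbook site.yml"]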
|
||||
|
||||
|
||||
def open_fifo_write(path, data):
|
||||
'''open_fifo_write opens the fifo named pipe in a new thread.
|
||||
This blocks the thread until an external process (such as ssh-agent)
|
||||
reads data from the pipe.
|
||||
'''
|
||||
os.mkfifo(path, 0o600)
|
||||
threading.Thread(
|
||||
target=lambda p, d: open(p, 'w').write(d),
|
||||
args=(path, data)
|
||||
).start()
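# Sketch of the intended usage (the path and key material are placeholders).
# The write happens on a background thread because opening a FIFO for writing
# blocks until a reader -- here, `ssh-add` inside the ssh-agent wrapper --
# opens the other end:
#
#   open_fifo_write('/tmp/pdd/ssh_key_data', '-----BEGIN RSA PRIVATE KEY-----\n...')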
|
||||
|
||||
|
||||
def run_pexpect(args, cwd, env, logfile,
|
||||
cancelled_callback=None, expect_passwords={},
|
||||
extra_update_fields=None, idle_timeout=None, job_timeout=0,
|
||||
pexpect_timeout=5, proot_cmd='bwrap'):
|
||||
'''
|
||||
Run the given command using pexpect to capture output and provide
|
||||
passwords when requested.
|
||||
|
||||
:param args: a list of `subprocess.call`-style arguments
|
||||
representing a subprocess e.g., ['ls', '-la']
|
||||
:param cwd: the directory in which the subprocess should
|
||||
run
|
||||
:param env: a dict containing environment variables for the
|
||||
subprocess, ala `os.environ`
|
||||
:param logfile: a file-like object for capturing stdout
|
||||
:param cancelled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
cancelled
|
||||
:param expect_passwords: a dict of regular expression password prompts
|
||||
to input values, i.e., {r'Password:*?$':
|
||||
'some_password'}
|
||||
:param extra_update_fields: a dict used to specify DB fields which should
|
||||
be updated on the underlying model
|
||||
object after execution completes
|
||||
:param idle_timeout: a timeout (in seconds); if new output is not
|
||||
sent to stdout in this interval, the process
|
||||
will be terminated
|
||||
:param job_timeout: a timeout (in seconds); if the total job runtime
|
||||
exceeds this, the process will be killed
|
||||
:param pexpect_timeout: a timeout (in seconds) to wait on
|
||||
`pexpect.spawn().expect()` calls
|
||||
:param proot_cmd: the command used to isolate processes, `bwrap`
|
||||
|
||||
Returns a tuple (status, return_code) i.e., `('successful', 0)`
|
||||
'''
|
||||
expect_passwords[pexpect.TIMEOUT] = None
|
||||
expect_passwords[pexpect.EOF] = None
|
||||
|
||||
if not isinstance(expect_passwords, collections.OrderedDict):
|
||||
# We iterate over `expect_passwords.keys()` and
|
||||
# `expect_passwords.values()` separately to map matched inputs to
|
||||
# patterns and choose the proper string to send to the subprocess;
|
||||
# enforce usage of an OrderedDict so that the ordering of elements in
|
||||
# `keys()` matches `values()`.
|
||||
expect_passwords = collections.OrderedDict(expect_passwords)
|
||||
password_patterns = list(expect_passwords.keys())
|
||||
password_values = list(expect_passwords.values())
|
||||
|
||||
child = pexpect.spawn(
|
||||
args[0], args[1:], cwd=cwd, env=env, ignore_sighup=True,
|
||||
encoding='utf-8', echo=False, use_poll=True
|
||||
)
|
||||
child.logfile_read = logfile
|
||||
canceled = False
|
||||
timed_out = False
|
||||
errored = False
|
||||
last_stdout_update = time.time()
|
||||
|
||||
job_start = time.time()
|
||||
while child.isalive():
|
||||
result_id = child.expect(password_patterns, timeout=pexpect_timeout, searchwindowsize=100)
|
||||
password = password_values[result_id]
|
||||
if password is not None:
|
||||
child.sendline(password)
|
||||
last_stdout_update = time.time()
|
||||
if cancelled_callback:
|
||||
try:
|
||||
canceled = cancelled_callback()
|
||||
except Exception:
|
||||
logger.exception('Could not check cancel callback - canceling immediately')
|
||||
if isinstance(extra_update_fields, dict):
|
||||
extra_update_fields['job_explanation'] = "System error during job execution, check system logs"
|
||||
errored = True
|
||||
else:
|
||||
canceled = False
|
||||
if not canceled and job_timeout != 0 and (time.time() - job_start) > job_timeout:
|
||||
timed_out = True
|
||||
if isinstance(extra_update_fields, dict):
|
||||
extra_update_fields['job_explanation'] = "Job terminated due to timeout"
|
||||
if canceled or timed_out or errored:
|
||||
handle_termination(child.pid, child.args, proot_cmd, is_cancel=canceled)
|
||||
if idle_timeout and (time.time() - last_stdout_update) > idle_timeout:
|
||||
child.close(True)
|
||||
canceled = True
|
||||
if errored:
|
||||
return 'error', child.exitstatus
|
||||
elif canceled:
|
||||
return 'canceled', child.exitstatus
|
||||
elif child.exitstatus == 0 and not timed_out:
|
||||
return 'successful', child.exitstatus
|
||||
else:
|
||||
return 'failed', child.exitstatus
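# A hedged sketch of calling run_pexpect directly; the command, prompt pattern,
# and password are invented examples, not values AWX itself uses:
#
#   import collections
#   import re
#   from io import StringIO
#
#   prompts = collections.OrderedDict()
#   prompts[re.compile(r'Password:\s*?$', re.M)] = 'example-password'
#   log = StringIO()
#   status, rc = run_pexpect(['ansible-playbook', '-i', 'localhost,', 'demo.yml'],
#                            '/tmp/example_private_data_dir', dict(os.environ), log,
#                            expect_passwords=prompts,
#                            idle_timeout=60, job_timeout=600)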
|
||||
|
||||
|
||||
def run_isolated_job(private_data_dir, secrets, logfile=sys.stdout):
|
||||
'''
|
||||
Launch `ansible-playbook`, executing a job packaged by
|
||||
`build_isolated_job_data`.
|
||||
|
||||
:param private_data_dir: an absolute path on the local file system where
|
||||
job metadata exists (i.e.,
|
||||
`/tmp/ansible_awx_xyz/`)
|
||||
:param secrets: a dict containing sensitive job metadata, {
|
||||
'env': { ... } # environment variables,
|
||||
'passwords': { ... } # pexpect password prompts
|
||||
'ssh_key_data': 'RSA KEY DATA',
|
||||
}
|
||||
:param logfile: a file-like object for capturing stdout
|
||||
|
||||
Returns a tuple (status, return_code) i.e., `('successful', 0)`
|
||||
'''
|
||||
with open(os.path.join(private_data_dir, 'args'), 'r') as args:
|
||||
args = json.load(args)
|
||||
|
||||
env = secrets.get('env', {})
|
||||
expect_passwords = {
|
||||
re.compile(pattern, re.M): password
|
||||
for pattern, password in secrets.get('passwords', {}).items()
|
||||
}
|
||||
|
||||
if 'AD_HOC_COMMAND_ID' in env:
|
||||
cwd = private_data_dir
|
||||
else:
|
||||
cwd = os.path.join(private_data_dir, 'project')
|
||||
|
||||
# write the SSH key data into a fifo read by ssh-agent
|
||||
ssh_key_data = secrets.get('ssh_key_data')
|
||||
if ssh_key_data:
|
||||
ssh_key_path = os.path.join(private_data_dir, 'ssh_key_data')
|
||||
ssh_auth_sock = os.path.join(private_data_dir, 'ssh_auth.sock')
|
||||
open_fifo_write(ssh_key_path, ssh_key_data)
|
||||
args = wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock)
|
||||
|
||||
idle_timeout = secrets.get('idle_timeout', 10)
|
||||
job_timeout = secrets.get('job_timeout', 10)
|
||||
pexpect_timeout = secrets.get('pexpect_timeout', 5)
|
||||
|
||||
# Use local callback directory
|
||||
callback_dir = os.getenv('AWX_LIB_DIRECTORY')
|
||||
if callback_dir is None:
|
||||
raise RuntimeError('Location for callbacks must be specified '
|
||||
'by environment variable AWX_LIB_DIRECTORY.')
|
||||
env['ANSIBLE_CALLBACK_PLUGINS'] = os.path.join(callback_dir, 'isolated_callbacks')
|
||||
if 'AD_HOC_COMMAND_ID' in env:
|
||||
env['ANSIBLE_STDOUT_CALLBACK'] = 'minimal'
|
||||
else:
|
||||
env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
|
||||
env['AWX_ISOLATED_DATA_DIR'] = private_data_dir
|
||||
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + callback_dir + ':'
|
||||
|
||||
venv_path = env.get('VIRTUAL_ENV')
|
||||
if venv_path and not os.path.exists(venv_path):
|
||||
raise RuntimeError(
|
||||
'a valid Python virtualenv does not exist at {}'.format(venv_path)
|
||||
)
|
||||
|
||||
return run_pexpect(args, cwd, env, logfile,
|
||||
expect_passwords=expect_passwords,
|
||||
idle_timeout=idle_timeout,
|
||||
job_timeout=job_timeout,
|
||||
pexpect_timeout=pexpect_timeout)
|
||||
|
||||
|
||||
def handle_termination(pid, args, proot_cmd, is_cancel=True):
|
||||
'''
|
||||
Terminate a subprocess spawned by `pexpect`.
|
||||
|
||||
:param pid: the process id of the running job.
|
||||
:param args: the args for the job, i.e., ['ansible-playbook', 'abc.yml']
|
||||
:param proot_cmd: the command used to isolate processes, i.e., `bwrap`
|
||||
:param is_cancel: flag showing whether this termination is caused by
|
||||
instance's cancel_flag.
|
||||
'''
|
||||
try:
|
||||
if sys.version_info > (3, 0):
|
||||
used_proot = proot_cmd.encode('utf-8') in args
|
||||
else:
|
||||
used_proot = proot_cmd in ' '.join(args)
|
||||
if used_proot:
|
||||
if not psutil:
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
else:
|
||||
try:
|
||||
main_proc = psutil.Process(pid=pid)
|
||||
child_procs = main_proc.children(recursive=True)
|
||||
for child_proc in child_procs:
|
||||
os.kill(child_proc.pid, signal.SIGKILL)
|
||||
os.kill(main_proc.pid, signal.SIGKILL)
|
||||
except (TypeError, psutil.Error):
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
else:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
time.sleep(3)
|
||||
except OSError:
|
||||
keyword = 'cancel' if is_cancel else 'timeout'
|
||||
logger.warn("Attempted to %s already finished job, ignoring" % keyword)
|
||||
|
||||
|
||||
def __run__(private_data_dir):
|
||||
buff = StringIO()
|
||||
with codecs.open(os.path.join(private_data_dir, 'env'), 'r', encoding='utf-8') as f:
|
||||
for line in f:
|
||||
buff.write(line)
|
||||
|
||||
artifacts_dir = os.path.join(private_data_dir, 'artifacts')
|
||||
|
||||
# Standard out directed to pickup location without event filtering applied
|
||||
stdout_filename = os.path.join(artifacts_dir, 'stdout')
|
||||
os.mknod(stdout_filename, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
|
||||
stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')
|
||||
|
||||
status, rc = run_isolated_job(
|
||||
private_data_dir,
|
||||
json.loads(base64.b64decode(buff.getvalue())),
|
||||
stdout_handle
|
||||
)
|
||||
for filename, data in [
|
||||
('status', status),
|
||||
('rc', rc),
|
||||
]:
|
||||
artifact_path = os.path.join(private_data_dir, 'artifacts', filename)
|
||||
os.mknod(artifact_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
|
||||
with open(artifact_path, 'w') as f:
|
||||
f.write(str(data))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import awx
|
||||
__version__ = awx.__version__
|
||||
parser = argparse.ArgumentParser(description='manage a daemonized, isolated ansible playbook')
|
||||
parser.add_argument('--version', action='version', version=__version__ + '-isolated')
|
||||
parser.add_argument('command', choices=['start', 'stop', 'is-alive'])
|
||||
parser.add_argument('private_data_dir')
|
||||
args = parser.parse_args()
|
||||
|
||||
private_data_dir = args.private_data_dir
|
||||
pidfile = os.path.join(private_data_dir, 'pid')
|
||||
|
||||
if args.command == 'start':
|
||||
# create a file to log stderr in case the daemonized process throws
|
||||
# an exception before it gets to `pexpect.spawn`
|
||||
stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
|
||||
if not os.path.exists(stderr_path):
|
||||
os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
|
||||
stderr = open(stderr_path, 'w+')
|
||||
|
||||
import daemon
|
||||
from daemon.pidfile import TimeoutPIDLockFile
|
||||
context = daemon.DaemonContext(
|
||||
pidfile=TimeoutPIDLockFile(pidfile),
|
||||
stderr=stderr
|
||||
)
|
||||
with context:
|
||||
__run__(private_data_dir)
|
||||
sys.exit(0)
|
||||
|
||||
try:
|
||||
with open(pidfile, 'r') as f:
|
||||
pid = int(f.readline())
|
||||
except IOError:
|
||||
sys.exit(1)
|
||||
|
||||
if args.command == 'stop':
|
||||
try:
|
||||
with open(os.path.join(private_data_dir, 'args'), 'r') as args:
|
||||
handle_termination(pid, json.load(args), 'bwrap')
|
||||
except IOError:
|
||||
handle_termination(pid, [], 'bwrap')
|
||||
elif args.command == 'is-alive':
|
||||
try:
|
||||
os.kill(pid, signal.SIG_DFL)
|
||||
sys.exit(0)
|
||||
except OSError:
|
||||
sys.exit(1)
|
||||
@@ -11,6 +11,7 @@ from jinja2 import Environment, StrictUndefined
|
||||
from jinja2.exceptions import UndefinedError, TemplateSyntaxError
|
||||
|
||||
# Django
|
||||
import django
|
||||
from django.core import exceptions as django_exceptions
|
||||
from django.db.models.signals import (
|
||||
post_save,
|
||||
@@ -18,14 +19,16 @@ from django.db.models.signals import (
|
||||
)
|
||||
from django.db.models.signals import m2m_changed
|
||||
from django.db import models
|
||||
from django.db.models.fields.related import add_lazy_relation
|
||||
from django.db.models.fields.related import lazy_related_operation
|
||||
from django.db.models.fields.related_descriptors import (
|
||||
ReverseOneToOneDescriptor,
|
||||
ForwardManyToOneDescriptor,
|
||||
ManyToManyDescriptor,
|
||||
ReverseManyToOneDescriptor,
|
||||
create_forward_many_to_many_manager
|
||||
)
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# jsonschema
|
||||
@@ -43,14 +46,17 @@ from rest_framework import serializers
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.encryption import encrypt_value, decrypt_value, get_encryption_key
|
||||
from awx.main.validators import validate_ssh_private_key
|
||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
|
||||
from awx.main.models.rbac import (
|
||||
batch_role_ancestor_rebuilding, Role,
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
|
||||
)
|
||||
from awx.main.constants import ENV_BLACKLIST
|
||||
from awx.main import utils
|
||||
|
||||
|
||||
__all__ = ['AutoOneToOneField', 'ImplicitRoleField', 'JSONField',
|
||||
'SmartFilterField', 'update_role_parentage_for_instance',
|
||||
'is_implicit_parent']
|
||||
'SmartFilterField', 'OrderedManyToManyField',
|
||||
'update_role_parentage_for_instance', 'is_implicit_parent']
|
||||
|
||||
|
||||
# Provide a (better) custom error message for enum jsonschema validation
|
||||
@@ -159,6 +165,13 @@ def is_implicit_parent(parent_role, child_role):
|
||||
the model definition. This does not include any role parents that
|
||||
might have been set by the user.
|
||||
'''
|
||||
if child_role.content_object is None:
|
||||
# The only singleton implicit parent is the system admin being
|
||||
# a parent of the system auditor role
|
||||
return bool(
|
||||
child_role.singleton_name == ROLE_SINGLETON_SYSTEM_AUDITOR and
|
||||
parent_role.singleton_name == ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
|
||||
)
|
||||
# Get the list of implicit parents that were defined at the class level.
|
||||
implicit_parents = getattr(
|
||||
child_role.content_object.__class__, child_role.role_field
|
||||
@@ -217,6 +230,7 @@ class ImplicitRoleField(models.ForeignKey):
|
||||
kwargs.setdefault('related_name', '+')
|
||||
kwargs.setdefault('null', 'True')
|
||||
kwargs.setdefault('editable', False)
|
||||
kwargs.setdefault('on_delete', models.CASCADE)
|
||||
super(ImplicitRoleField, self).__init__(*args, **kwargs)
|
||||
|
||||
def deconstruct(self):
|
||||
@@ -234,7 +248,9 @@ class ImplicitRoleField(models.ForeignKey):
|
||||
|
||||
post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save')
|
||||
post_delete.connect(self._post_delete, cls, True, dispatch_uid='implicit-role-post-delete')
|
||||
add_lazy_relation(cls, self, "self", self.bind_m2m_changed)
|
||||
|
||||
function = lambda local, related, field: self.bind_m2m_changed(field, related, local)
|
||||
lazy_related_operation(function, cls, "self", field=self)
|
||||
|
||||
def bind_m2m_changed(self, _self, _role_class, cls):
|
||||
if not self.parent_role:
|
||||
@@ -480,6 +496,86 @@ def format_ssh_private_key(value):
|
||||
return True
|
||||
|
||||
|
||||
@JSONSchemaField.format_checker.checks('url')
|
||||
def format_url(value):
|
||||
try:
|
||||
parsed = urllib.parse.urlparse(value)
|
||||
except Exception as e:
|
||||
raise jsonschema.exceptions.FormatError(str(e))
|
||||
if parsed.scheme == '':
|
||||
raise jsonschema.exceptions.FormatError(
|
||||
'Invalid URL: Missing url scheme (http, https, etc.)'
|
||||
)
|
||||
if parsed.netloc == '':
|
||||
raise jsonschema.exceptions.FormatError(
|
||||
'Invalid URL: {}'.format(value)
|
||||
)
|
||||
return True
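# Hedged examples of how this format checker behaves (the URLs are arbitrary):
#
#   format_url('https://vault.example.com:8200')  # -> True
#   format_url('vault.example.com')               # FormatError: missing url scheme
#   format_url('https://')                        # FormatError: invalid URL (no host)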
|
||||
|
||||
|
||||
class DynamicCredentialInputField(JSONSchemaField):
|
||||
"""
|
||||
Used to validate JSON for
|
||||
`awx.main.models.credential:CredentialInputSource().metadata`.
|
||||
|
||||
Metadata for input sources is represented as a dictionary e.g.,
|
||||
{'secret_path': '/kv/somebody', 'secret_key': 'password'}
|
||||
|
||||
For the data to be valid, the keys of this dictionary should correspond
|
||||
with the metadata field (and datatypes) defined in the associated
|
||||
target CredentialType e.g.,
|
||||
"""
|
||||
|
||||
def schema(self, credential_type):
|
||||
# determine the defined fields for the associated credential type
|
||||
properties = {}
|
||||
for field in credential_type.inputs.get('metadata', []):
|
||||
field = field.copy()
|
||||
properties[field['id']] = field
|
||||
if field.get('choices', []):
|
||||
field['enum'] = list(field['choices'])[:]
|
||||
return {
|
||||
'type': 'object',
|
||||
'properties': properties,
|
||||
'additionalProperties': False,
|
||||
}
|
||||
|
||||
def validate(self, value, model_instance):
|
||||
if not isinstance(value, dict):
|
||||
return super(DynamicCredentialInputField, self).validate(value, model_instance)
|
||||
|
||||
super(JSONSchemaField, self).validate(value, model_instance)
|
||||
credential_type = model_instance.source_credential.credential_type
|
||||
errors = {}
|
||||
for error in Draft4Validator(
|
||||
self.schema(credential_type),
|
||||
format_checker=self.format_checker
|
||||
).iter_errors(value):
|
||||
if error.validator == 'pattern' and 'error' in error.schema:
|
||||
error.message = error.schema['error'].format(instance=error.instance)
|
||||
if 'id' not in error.schema:
|
||||
# If the error is not for a specific field, it's specific to
|
||||
# `inputs` in general
|
||||
raise django_exceptions.ValidationError(
|
||||
error.message,
|
||||
code='invalid',
|
||||
params={'value': value},
|
||||
)
|
||||
errors[error.schema['id']] = [error.message]
|
||||
|
||||
defined_metadata = [field.get('id') for field in credential_type.inputs.get('metadata', [])]
|
||||
for field in credential_type.inputs.get('required', []):
|
||||
if field in defined_metadata and not value.get(field, None):
|
||||
errors[field] = [_('required for %s') % (
|
||||
credential_type.name
|
||||
)]
|
||||
|
||||
if errors:
|
||||
raise serializers.ValidationError({
|
||||
'metadata': errors
|
||||
})
|
||||
|
||||
|
||||
class CredentialInputField(JSONSchemaField):
|
||||
"""
|
||||
Used to validate JSON for
|
||||
@@ -542,7 +638,7 @@ class CredentialInputField(JSONSchemaField):
|
||||
v != '$encrypted$',
|
||||
model_instance.pk
|
||||
]):
|
||||
if not isinstance(getattr(model_instance, k), str):
|
||||
if not isinstance(model_instance.inputs.get(k), str):
|
||||
raise django_exceptions.ValidationError(
|
||||
_('secret values must be of type string, not {}').format(type(v).__name__),
|
||||
code='invalid',
|
||||
@@ -592,18 +688,13 @@ class CredentialInputField(JSONSchemaField):
|
||||
)
|
||||
errors[error.schema['id']] = [error.message]
|
||||
|
||||
inputs = model_instance.credential_type.inputs
|
||||
for field in inputs.get('required', []):
|
||||
if not value.get(field, None):
|
||||
errors[field] = [_('required for %s') % (
|
||||
model_instance.credential_type.name
|
||||
)]
|
||||
defined_fields = model_instance.credential_type.defined_fields
|
||||
|
||||
# `ssh_key_unlock` requirements are very specific and can't be
|
||||
# represented without complicated JSON schema
|
||||
if (
|
||||
model_instance.credential_type.managed_by_tower is True and
|
||||
'ssh_key_unlock' in model_instance.credential_type.defined_fields
|
||||
'ssh_key_unlock' in defined_fields
|
||||
):
|
||||
|
||||
# in order to properly test the necessity of `ssh_key_unlock`, we
|
||||
@@ -613,15 +704,15 @@ class CredentialInputField(JSONSchemaField):
|
||||
# 'ssh_key_unlock': 'do-you-need-me?',
|
||||
# }
|
||||
# ...we have to fetch the actual key value from the database
|
||||
if model_instance.pk and model_instance.ssh_key_data == '$encrypted$':
|
||||
model_instance.ssh_key_data = model_instance.__class__.objects.get(
|
||||
if model_instance.pk and model_instance.inputs.get('ssh_key_data') == '$encrypted$':
|
||||
model_instance.inputs['ssh_key_data'] = model_instance.__class__.objects.get(
|
||||
pk=model_instance.pk
|
||||
).ssh_key_data
|
||||
).inputs.get('ssh_key_data')
|
||||
|
||||
if model_instance.has_encrypted_ssh_key_data and not value.get('ssh_key_unlock'):
|
||||
errors['ssh_key_unlock'] = [_('must be set when SSH key is encrypted.')]
|
||||
if all([
|
||||
model_instance.ssh_key_data,
|
||||
model_instance.inputs.get('ssh_key_data'),
|
||||
value.get('ssh_key_unlock'),
|
||||
not model_instance.has_encrypted_ssh_key_data
|
||||
]):
|
||||
@@ -654,7 +745,7 @@ class CredentialTypeInputField(JSONSchemaField):
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'type': {'enum': ['string', 'boolean']},
|
||||
'format': {'enum': ['ssh_private_key']},
|
||||
'format': {'enum': ['ssh_private_key', 'url']},
|
||||
'choices': {
|
||||
'type': 'array',
|
||||
'minItems': 1,
|
||||
@@ -671,6 +762,7 @@ class CredentialTypeInputField(JSONSchemaField):
|
||||
'multiline': {'type': 'boolean'},
|
||||
'secret': {'type': 'boolean'},
|
||||
'ask_at_runtime': {'type': 'boolean'},
|
||||
'default': {},
|
||||
},
|
||||
'additionalProperties': False,
|
||||
'required': ['id', 'label'],
|
||||
@@ -714,6 +806,14 @@ class CredentialTypeInputField(JSONSchemaField):
|
||||
# If no type is specified, default to string
|
||||
field['type'] = 'string'
|
||||
|
||||
if 'default' in field:
|
||||
default = field['default']
|
||||
_type = {'string': str, 'boolean': bool}[field['type']]
|
||||
if type(default) != _type:
|
||||
raise django_exceptions.ValidationError(
|
||||
_('{} is not a {}').format(default, field['type'])
|
||||
)
|
||||
|
||||
for key in ('choices', 'multiline', 'format', 'secret',):
|
||||
if key in field and field['type'] != 'string':
|
||||
raise django_exceptions.ValidationError(
|
||||
@@ -890,3 +990,115 @@ class OAuth2ClientSecretField(models.CharField):
|
||||
if value and value.startswith('$encrypted$'):
|
||||
return decrypt_value(get_encryption_key('value', pk=None), value)
|
||||
return value
|
||||
|
||||
|
||||
class OrderedManyToManyDescriptor(ManyToManyDescriptor):
|
||||
"""
|
||||
Django doesn't seem to support:
|
||||
|
||||
class Meta:
|
||||
ordering = [...]
|
||||
|
||||
...on custom through= relations for ManyToMany fields.
|
||||
|
||||
Meaning, queries made _through_ the intermediary table will _not_ apply an
|
||||
ORDER_BY clause based on the `Meta.ordering` of the intermediary M2M class
|
||||
(which is the behavior we want for "ordered" many to many relations):
|
||||
|
||||
https://github.com/django/django/blob/stable/1.11.x/django/db/models/fields/related_descriptors.py#L593
|
||||
|
||||
This descriptor automatically sorts all queries through this relation
|
||||
using the `position` column on the M2M table.
|
||||
"""
|
||||
|
||||
@cached_property
|
||||
def related_manager_cls(self):
|
||||
model = self.rel.related_model if self.reverse else self.rel.model
|
||||
|
||||
def add_custom_queryset_to_many_related_manager(many_related_manage_cls):
|
||||
class OrderedManyRelatedManager(many_related_manage_cls):
|
||||
def get_queryset(self):
|
||||
return super(OrderedManyRelatedManager, self).get_queryset().order_by(
|
||||
'%s__position' % self.through._meta.model_name
|
||||
)
|
||||
|
||||
def add(self, *objs):
|
||||
# Django < 2 doesn't support this method on
|
||||
# ManyToManyFields w/ an intermediary model
|
||||
# We should be able to remove this code snippet when we
|
||||
# upgrade Django.
|
||||
# see: https://github.com/django/django/blob/stable/1.11.x/django/db/models/fields/related_descriptors.py#L926
|
||||
if not django.__version__.startswith('1.'):
|
||||
raise RuntimeError(
|
||||
'This method is no longer necessary in Django>=2'
|
||||
)
|
||||
try:
|
||||
self.through._meta.auto_created = True
|
||||
super(OrderedManyRelatedManager, self).add(*objs)
|
||||
finally:
|
||||
self.through._meta.auto_created = False
|
||||
|
||||
def remove(self, *objs):
|
||||
# Django < 2 doesn't support this method on
|
||||
# ManyToManyFields w/ an intermediary model
|
||||
# We should be able to remove this code snippet when we
|
||||
# upgrade Django.
|
||||
# see: https://github.com/django/django/blob/stable/1.11.x/django/db/models/fields/related_descriptors.py#L944
|
||||
if not django.__version__.startswith('1.'):
|
||||
raise RuntimeError(
|
||||
'This method is no longer necessary in Django>=2'
|
||||
)
|
||||
try:
|
||||
self.through._meta.auto_created = True
|
||||
super(OrderedManyRelatedManager, self).remove(*objs)
|
||||
finally:
|
||||
self.through._meta.auto_created = False
|
||||
|
||||
return OrderedManyRelatedManager
|
||||
|
||||
return add_custom_queryset_to_many_related_manager(
|
||||
create_forward_many_to_many_manager(
|
||||
model._default_manager.__class__,
|
||||
self.rel,
|
||||
reverse=self.reverse,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class OrderedManyToManyField(models.ManyToManyField):
|
||||
"""
|
||||
A ManyToManyField that automatically sorts all querysets
|
||||
by a special `position` column on the M2M table
|
||||
"""
|
||||
|
||||
def _update_m2m_position(self, sender, **kwargs):
|
||||
if kwargs.get('action') in ('post_add', 'post_remove'):
|
||||
order_with_respect_to = None
|
||||
for field in sender._meta.local_fields:
|
||||
if (
|
||||
isinstance(field, models.ForeignKey) and
|
||||
isinstance(kwargs['instance'], field.related_model)
|
||||
):
|
||||
order_with_respect_to = field.name
|
||||
for i, ig in enumerate(sender.objects.filter(**{
|
||||
order_with_respect_to: kwargs['instance'].pk}
|
||||
)):
|
||||
if ig.position != i:
|
||||
ig.position = i
|
||||
ig.save()
|
||||
|
||||
def contribute_to_class(self, cls, name, **kwargs):
|
||||
super(OrderedManyToManyField, self).contribute_to_class(cls, name, **kwargs)
|
||||
setattr(
|
||||
cls, name,
|
||||
OrderedManyToManyDescriptor(self.remote_field, reverse=False)
|
||||
)
|
||||
|
||||
through = getattr(cls, name).through
|
||||
if isinstance(through, str) and "." not in through:
|
||||
# support lazy loading of string model names
|
||||
through = '.'.join([cls._meta.app_label, through])
|
||||
m2m_changed.connect(
|
||||
self._update_m2m_position,
|
||||
sender=through
|
||||
)
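# Hypothetical sketch of how OrderedManyToManyField is meant to be used; the
# model names are invented, but the through-model must carry a `position`
# integer column, which the descriptor orders by and which
# _update_m2m_position renumbers after post_add/post_remove:
#
#   class PlaylistSong(models.Model):
#       playlist = models.ForeignKey('Playlist', on_delete=models.CASCADE)
#       song = models.ForeignKey('Song', on_delete=models.CASCADE)
#       position = models.PositiveIntegerField(default=0)
#
#   class Playlist(models.Model):
#       songs = OrderedManyToManyField('Song', through='PlaylistSong')
#
#   # playlist.songs.all() is then ordered by PlaylistSong.position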
|
||||
|
||||
0
awx/main/isolated/__init__.py
Normal file
404
awx/main/isolated/manager.py
Normal file
@@ -0,0 +1,404 @@
|
||||
import fnmatch
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import stat
|
||||
import tempfile
|
||||
import time
|
||||
import logging
|
||||
|
||||
from django.conf import settings
|
||||
import ansible_runner
|
||||
|
||||
import awx
|
||||
from awx.main.utils import get_system_task_capacity
|
||||
from awx.main.queue import CallbackQueueDispatcher
|
||||
|
||||
logger = logging.getLogger('awx.isolated.manager')
|
||||
playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')
|
||||
|
||||
|
||||
def set_pythonpath(venv_libdir, env):
|
||||
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
|
||||
for version in os.listdir(venv_libdir):
|
||||
if fnmatch.fnmatch(version, 'python[23].*'):
|
||||
if os.path.isdir(os.path.join(venv_libdir, version)):
|
||||
env['PYTHONPATH'] = os.path.join(venv_libdir, version, "site-packages") + ":"
|
||||
break
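# Illustrative call with a hypothetical virtualenv layout: if
# /var/lib/awx/venv/ansible/lib contains a 'python3.6' directory, then after
#
#   set_pythonpath('/var/lib/awx/venv/ansible/lib', env)
#
# env['PYTHONPATH'] is '/var/lib/awx/venv/ansible/lib/python3.6/site-packages:'
# and any pre-existing PYTHONPATH entry has been dropped.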
|
||||
|
||||
|
||||
class IsolatedManager(object):
|
||||
|
||||
def __init__(self, cancelled_callback=None, check_callback=None):
|
||||
"""
|
||||
:param cancelled_callback: a callable - which returns `True` or `False`
|
||||
- signifying if the job has been prematurely
|
||||
cancelled
|
||||
"""
|
||||
self.cancelled_callback = cancelled_callback
|
||||
self.check_callback = check_callback
|
||||
self.idle_timeout = max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT)
|
||||
self.started_at = None
|
||||
self.captured_command_artifact = False
|
||||
|
||||
def build_runner_params(self, hosts, verbosity=1):
|
||||
env = dict(os.environ.items())
|
||||
env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
|
||||
env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
|
||||
env['ANSIBLE_LIBRARY'] = os.path.join(os.path.dirname(awx.__file__), 'plugins', 'isolated')
|
||||
set_pythonpath(os.path.join(settings.ANSIBLE_VENV_PATH, 'lib'), env)
|
||||
|
||||
def finished_callback(runner_obj):
|
||||
if runner_obj.status == 'failed' and runner_obj.config.playbook != 'check_isolated.yml':
|
||||
# failed for clean_isolated.yml just means the playbook hasn't
|
||||
# exited on the isolated host
|
||||
stdout = runner_obj.stdout.read()
|
||||
playbook_logger.error(stdout)
|
||||
elif runner_obj.status == 'timeout':
|
||||
# this means that the default idle timeout of
|
||||
# (2 * AWX_ISOLATED_CONNECTION_TIMEOUT) was exceeded
|
||||
# (meaning, we tried to sync with an isolated node, and we got
|
||||
# no new output for 2 * AWX_ISOLATED_CONNECTION_TIMEOUT seconds)
|
||||
# this _usually_ means SSH key auth from the controller ->
|
||||
# isolated didn't work, and ssh is hung waiting on interactive
|
||||
# input e.g.,
|
||||
#
|
||||
# awx@isolated's password:
|
||||
stdout = runner_obj.stdout.read()
|
||||
playbook_logger.error(stdout)
|
||||
else:
|
||||
playbook_logger.info(runner_obj.stdout.read())
|
||||
|
||||
inventory = '\n'.join([
|
||||
'{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME)
|
||||
for host in hosts
|
||||
])
|
||||
|
||||
return {
|
||||
'project_dir': os.path.abspath(os.path.join(
|
||||
os.path.dirname(awx.__file__),
|
||||
'playbooks'
|
||||
)),
|
||||
'inventory': inventory,
|
||||
'envvars': env,
|
||||
'finished_callback': finished_callback,
|
||||
'verbosity': verbosity,
|
||||
'cancel_callback': self.cancelled_callback,
|
||||
'settings': {
|
||||
'idle_timeout': self.idle_timeout,
|
||||
'job_timeout': settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
|
||||
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
'suppress_ansible_output': True,
|
||||
},
|
||||
}
|
||||
|
||||
def path_to(self, *args):
|
||||
return os.path.join(self.private_data_dir, *args)
|
||||
|
||||
def run_management_playbook(self, playbook, private_data_dir, **kw):
|
||||
iso_dir = tempfile.mkdtemp(
|
||||
prefix=playbook,
|
||||
dir=private_data_dir
|
||||
)
|
||||
params = self.runner_params.copy()
|
||||
params['playbook'] = playbook
|
||||
params['private_data_dir'] = iso_dir
|
||||
params.update(**kw)
|
||||
if all([
|
||||
getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
|
||||
getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)
|
||||
]):
|
||||
params['ssh_key'] = settings.AWX_ISOLATED_PRIVATE_KEY
|
||||
return ansible_runner.interface.run(**params)
|
||||
|
||||
def dispatch(self, playbook=None, module=None, module_args=None):
|
||||
'''
|
||||
Ship the runner payload to a remote host for isolated execution.
|
||||
'''
|
||||
self.handled_events = set()
|
||||
self.started_at = time.time()
|
||||
|
||||
# exclude certain files from the rsync
|
||||
rsync_exclude = [
|
||||
# don't rsync source control metadata (it can be huge!)
|
||||
'- /project/.git',
|
||||
'- /project/.svn',
|
||||
'- /project/.hg',
|
||||
# don't rsync job events that are in the process of being written
|
||||
'- /artifacts/job_events/*-partial.json.tmp',
|
||||
# don't rsync the ssh_key FIFO
|
||||
'- /env/ssh_key',
|
||||
]
|
||||
|
||||
for filename, data in (
|
||||
['.rsync-filter', '\n'.join(rsync_exclude)],
|
||||
):
|
||||
path = self.path_to(filename)
|
||||
with open(path, 'w') as f:
|
||||
f.write(data)
|
||||
os.chmod(path, stat.S_IRUSR)
|
||||
|
||||
extravars = {
|
||||
'src': self.private_data_dir,
|
||||
'dest': settings.AWX_PROOT_BASE_PATH,
|
||||
'ident': self.ident
|
||||
}
|
||||
if playbook:
|
||||
extravars['playbook'] = playbook
|
||||
if module and module_args:
|
||||
extravars['module'] = module
|
||||
extravars['module_args'] = module_args
|
||||
|
||||
logger.debug('Starting job {} on isolated host with `run_isolated.yml` playbook.'.format(self.instance.id))
|
||||
runner_obj = self.run_management_playbook('run_isolated.yml',
|
||||
self.private_data_dir,
|
||||
extravars=extravars)
|
||||
return runner_obj.status, runner_obj.rc
|
||||
|
||||
def check(self, interval=None):
|
||||
"""
|
||||
Repeatedly poll the isolated node to determine if the job has run.
|
||||
|
||||
On success, copy job artifacts to the controlling node.
|
||||
On failure, continue to poll the isolated node (until the job timeout
|
||||
is exceeded).
|
||||
|
||||
For a completed job run, this function returns (status, rc),
|
||||
representing the status and return code of the isolated
|
||||
`ansible-playbook` run.
|
||||
|
||||
:param interval: an interval (in seconds) to wait between status polls
|
||||
"""
|
||||
interval = interval if interval is not None else settings.AWX_ISOLATED_CHECK_INTERVAL
|
||||
extravars = {'src': self.private_data_dir}
|
||||
status = 'failed'
|
||||
rc = None
|
||||
last_check = time.time()
|
||||
dispatcher = CallbackQueueDispatcher()
|
||||
while status == 'failed':
|
||||
canceled = self.cancelled_callback() if self.cancelled_callback else False
|
||||
if not canceled and time.time() - last_check < interval:
|
||||
# If the job isn't cancelled, but we haven't waited `interval` seconds, wait longer
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
if canceled:
|
||||
logger.warning('Isolated job {} was manually cancelled.'.format(self.instance.id))
|
||||
|
||||
logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
|
||||
runner_obj = self.run_management_playbook('check_isolated.yml',
|
||||
self.private_data_dir,
|
||||
extravars=extravars)
|
||||
status, rc = runner_obj.status, runner_obj.rc
|
||||
|
||||
if self.check_callback is not None and not self.captured_command_artifact:
|
||||
command_path = self.path_to('artifacts', self.ident, 'command')
|
||||
# If the configuration artifact has been synced back, update the model
|
||||
if os.path.exists(command_path):
|
||||
try:
|
||||
with open(command_path, 'r') as f:
|
||||
data = json.load(f)
|
||||
self.check_callback(data)
|
||||
self.captured_command_artifact = True
|
||||
except json.decoder.JSONDecodeError: # Just in case it's not fully here yet.
|
||||
pass
|
||||
|
||||
self.consume_events(dispatcher)
|
||||
|
||||
last_check = time.time()
|
||||
|
||||
if status == 'successful':
|
||||
status_path = self.path_to('artifacts', self.ident, 'status')
|
||||
rc_path = self.path_to('artifacts', self.ident, 'rc')
|
||||
if os.path.exists(status_path):
|
||||
with open(status_path, 'r') as f:
|
||||
status = f.readline()
|
||||
with open(rc_path, 'r') as f:
|
||||
rc = int(f.readline())
|
||||
else:
|
||||
# if there's no status file, it means that runner _probably_
|
||||
# exited with a traceback (which should be logged to
|
||||
# daemon.log). Record it so we can see how runner failed.
|
||||
daemon_path = self.path_to('daemon.log')
|
||||
if os.path.exists(daemon_path):
|
||||
with open(daemon_path, 'r') as f:
|
||||
self.instance.result_traceback = f.read()
|
||||
self.instance.save(update_fields=['result_traceback'])
|
||||
else:
|
||||
logger.error('Failed to rsync daemon.log (is ansible-runner installed on the isolated host?)')
|
||||
status = 'failed'
|
||||
rc = 1
|
||||
|
||||
# consume events one last time just to be sure we didn't miss anything
|
||||
# in the final sync
|
||||
self.consume_events(dispatcher)
|
||||
|
||||
# emit an EOF event
|
||||
event_data = {
|
||||
'event': 'EOF',
|
||||
'final_counter': len(self.handled_events)
|
||||
}
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
dispatcher.dispatch(event_data)
|
||||
|
||||
return status, rc
|
||||
|
||||
def consume_events(self, dispatcher):
|
||||
# discover new events and ingest them
|
||||
events_path = self.path_to('artifacts', self.ident, 'job_events')
|
||||
|
||||
# it's possible that `events_path` doesn't exist *yet*, because runner
|
||||
# hasn't actually written any events yet (if you ran e.g., a sleep 30)
|
||||
# only attempt to consume events if any were rsynced back
|
||||
if os.path.exists(events_path):
|
||||
for event in set(os.listdir(events_path)) - self.handled_events:
|
||||
path = os.path.join(events_path, event)
|
||||
if os.path.exists(path):
|
||||
try:
|
||||
event_data = json.load(
|
||||
open(os.path.join(events_path, event), 'r')
|
||||
)
|
||||
except json.decoder.JSONDecodeError:
|
||||
# This means the event we got back isn't valid JSON
|
||||
# that can happen if runner is still partially
|
||||
# writing an event file while it's rsyncing
|
||||
# these event writes are _supposed_ to be atomic
|
||||
# but it doesn't look like they actually are in
|
||||
# practice
|
||||
# in this scenario, just ignore this event and try it
|
||||
# again on the next sync
|
||||
continue
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
dispatcher.dispatch(event_data)
|
||||
self.handled_events.add(event)
|
||||
|
||||
# handle artifacts
|
||||
if event_data.get('event_data', {}).get('artifact_data', {}):
|
||||
self.instance.artifacts = event_data['event_data']['artifact_data']
|
||||
self.instance.save(update_fields=['artifacts'])
|
||||
|
||||
|
||||
def cleanup(self):
|
||||
# If the job failed for any reason, make a last-ditch effort at cleanup
|
||||
extravars = {
|
||||
'private_data_dir': self.private_data_dir,
|
||||
'cleanup_dirs': [
|
||||
self.private_data_dir,
|
||||
],
|
||||
}
|
||||
logger.debug('Cleaning up job {} on isolated host with `clean_isolated.yml` playbook.'.format(self.instance.id))
|
||||
self.run_management_playbook(
|
||||
'clean_isolated.yml',
|
||||
self.private_data_dir,
|
||||
extravars=extravars
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def update_capacity(cls, instance, task_result):
|
||||
instance.version = 'ansible-runner-{}'.format(task_result['version'])
|
||||
|
||||
if instance.capacity == 0 and task_result['capacity_cpu']:
|
||||
logger.warning('Isolated instance {} has re-joined.'.format(instance.hostname))
|
||||
instance.cpu = int(task_result['cpu'])
|
||||
instance.memory = int(task_result['mem'])
|
||||
instance.cpu_capacity = int(task_result['capacity_cpu'])
|
||||
instance.mem_capacity = int(task_result['capacity_mem'])
|
||||
instance.capacity = get_system_task_capacity(scale=instance.capacity_adjustment,
|
||||
cpu_capacity=int(task_result['capacity_cpu']),
|
||||
mem_capacity=int(task_result['capacity_mem']))
|
||||
instance.save(update_fields=['cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity', 'version', 'modified'])
|
||||
|
||||
def health_check(self, instance_qs):
|
||||
'''
|
||||
:param instance_qs: List of Django objects representing the
|
||||
isolated instances to manage
|
||||
Runs playbook that will
|
||||
- determine if instance is reachable
|
||||
- find the instance capacity
|
||||
- clean up orphaned private files
|
||||
Performs save on each instance to update its capacity.
|
||||
'''
|
||||
instance_qs = [i for i in instance_qs if i.enabled]
|
||||
if not len(instance_qs):
|
||||
return
|
||||
try:
|
||||
private_data_dir = tempfile.mkdtemp(
|
||||
prefix='awx_iso_heartbeat_',
|
||||
dir=settings.AWX_PROOT_BASE_PATH
|
||||
)
|
||||
self.runner_params = self.build_runner_params([
|
||||
instance.hostname for instance in instance_qs
|
||||
])
|
||||
self.runner_params['private_data_dir'] = private_data_dir
|
||||
self.runner_params['forks'] = len(instance_qs)
|
||||
runner_obj = self.run_management_playbook(
|
||||
'heartbeat_isolated.yml',
|
||||
private_data_dir
|
||||
)
|
||||
|
||||
if runner_obj.status == 'successful':
|
||||
for instance in instance_qs:
|
||||
task_result = {}
|
||||
try:
|
||||
task_result = runner_obj.get_fact_cache(instance.hostname)
|
||||
except Exception:
|
||||
logger.exception('Failed to read status from isolated instances')
|
||||
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
|
||||
task_result = {
|
||||
'cpu': task_result['awx_cpu'],
|
||||
'mem': task_result['awx_mem'],
|
||||
'capacity_cpu': task_result['awx_capacity_cpu'],
|
||||
'capacity_mem': task_result['awx_capacity_mem'],
|
||||
'version': task_result['awx_capacity_version']
|
||||
}
|
||||
IsolatedManager.update_capacity(instance, task_result)
|
||||
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
|
||||
elif instance.capacity == 0:
|
||||
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
|
||||
instance.hostname))
|
||||
else:
|
||||
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
|
||||
if instance.is_lost(isolated=True):
|
||||
instance.capacity = 0
|
||||
instance.save(update_fields=['capacity'])
|
||||
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
|
||||
instance.hostname, instance.modified))
|
||||
finally:
|
||||
if os.path.exists(private_data_dir):
|
||||
shutil.rmtree(private_data_dir)
|
||||
|
||||
def run(self, instance, private_data_dir, playbook, module, module_args,
|
||||
event_data_key, ident=None):
|
||||
"""
|
||||
Run a job on an isolated host.
|
||||
|
||||
:param instance: a `model.Job` instance
|
||||
:param private_data_dir: an absolute path on the local file system
|
||||
where job-specific data should be written
|
||||
(i.e., `/tmp/awx_N_xyz/`)
|
||||
:param playbook: the playbook to run
|
||||
:param module: the module to run
|
||||
:param module_args: the module args to use
|
||||
:param event_data_key: e.g., job_id, inventory_id, ...
|
||||
|
||||
For a completed job run, this function returns (status, rc),
|
||||
representing the status and return code of the isolated
|
||||
`ansible-playbook` run.
|
||||
"""
|
||||
self.ident = ident
|
||||
self.event_data_key = event_data_key
|
||||
self.instance = instance
|
||||
self.private_data_dir = private_data_dir
|
||||
self.runner_params = self.build_runner_params(
|
||||
[instance.execution_node],
|
||||
verbosity=min(5, self.instance.verbosity)
|
||||
)
|
||||
status, rc = self.dispatch(playbook, module, module_args)
|
||||
if status == 'successful':
|
||||
status, rc = self.check()
|
||||
else:
|
||||
# emit an EOF event
|
||||
event_data = {'event': 'EOF', 'final_counter': 0}
|
||||
event_data.setdefault(self.event_data_key, self.instance.id)
|
||||
CallbackQueueDispatcher().dispatch(event_data)
|
||||
return status, rc
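# Hedged sketch of driving the manager end-to-end; the job object, paths, and
# ident are placeholders (AWX task code normally supplies these):
#
#   mgr = IsolatedManager(cancelled_callback=lambda: False)
#   status, rc = mgr.run(job, '/tmp/awx_42_abc123',
#                        playbook='run.yml', module=None, module_args=None,
#                        event_data_key='job_id', ident='42')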
|
||||
@@ -59,7 +59,7 @@ class Command(BaseCommand):
|
||||
if len(pks_to_delete):
|
||||
ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
|
||||
n_deleted_items += len(pks_to_delete)
|
||||
self.logger.log(99, "Removed %d items", n_deleted_items)
|
||||
self.logger.info("Removed {} items".format(n_deleted_items))
|
||||
|
||||
def handle(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
|
||||
@@ -1,148 +0,0 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
# Python
|
||||
import re
|
||||
import sys
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
||||
# Django
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction
|
||||
from django.utils.timezone import now
|
||||
|
||||
# AWX
|
||||
from awx.main.models.fact import Fact
|
||||
from awx.conf.license import feature_enabled
|
||||
|
||||
OLDER_THAN = 'older_than'
|
||||
GRANULARITY = 'granularity'
|
||||
|
||||
|
||||
class CleanupFacts(object):
|
||||
def __init__(self):
|
||||
self.timestamp = None
|
||||
|
||||
# Find all with timestamp < older_than
|
||||
# Start search at < older_than, stop search at oldest entry
|
||||
# Find all factVersion < pivot && > (pivot - granularity) grouped by host sorted by time descending (because it's indexed this way)
|
||||
# foreach group
|
||||
# Delete all except LAST entry (or Delete all except the FIRST entry, it's an arbitrary decision)
|
||||
#
|
||||
# pivot -= granularity
|
||||
# group by host
|
||||
def cleanup(self, older_than_abs, granularity, module=None):
|
||||
fact_oldest = Fact.objects.all().order_by('timestamp').first()
|
||||
if not fact_oldest:
|
||||
return 0
|
||||
|
||||
kv = {
|
||||
'timestamp__lte': older_than_abs
|
||||
}
|
||||
if module:
|
||||
kv['module'] = module
|
||||
|
||||
# Special case, granularity=0x where x is d, w, or y
|
||||
# The intent is to delete all facts < older_than_abs
|
||||
if granularity == relativedelta():
|
||||
qs = Fact.objects.filter(**kv)
|
||||
count = qs.count()
|
||||
qs.delete()
|
||||
return count
|
||||
|
||||
total = 0
|
||||
|
||||
date_pivot = older_than_abs
|
||||
while date_pivot > fact_oldest.timestamp:
|
||||
date_pivot_next = date_pivot - granularity
|
||||
|
||||
# For the current time window.
|
||||
# Delete all facts except the fact that matches the largest timestamp.
|
||||
kv = {
|
||||
'timestamp__lte': date_pivot
|
||||
}
|
||||
if module:
|
||||
kv['module'] = module
|
||||
|
||||
|
||||
fact_version_obj = Fact.objects.filter(**kv).order_by('-timestamp').first()
|
||||
if fact_version_obj:
|
||||
kv = {
|
||||
'timestamp__lt': fact_version_obj.timestamp,
|
||||
'timestamp__gt': date_pivot_next
|
||||
}
|
||||
if module:
|
||||
kv['module'] = module
|
||||
qs = Fact.objects.filter(**kv)
|
||||
count = qs.count()
|
||||
qs.delete()
|
||||
total += count
|
||||
|
||||
date_pivot = date_pivot_next
|
||||
|
||||
return total
|
||||
|
||||
'''
|
||||
older_than and granularity are of type relativedelta
|
||||
'''
|
||||
def run(self, older_than, granularity, module=None):
|
||||
t = now()
|
||||
deleted_count = self.cleanup(t - older_than, granularity, module=module)
|
||||
print("Deleted %d facts." % deleted_count)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Cleanup facts. For each host older than the value specified, keep one fact scan for each time window (granularity).'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--older_than',
|
||||
dest='older_than',
|
||||
default='30d',
|
||||
help='Specify the relative time to consider facts older than (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y). Defaults to 30d.')
|
||||
parser.add_argument('--granularity',
|
||||
dest='granularity',
|
||||
default='1w',
|
||||
help='Window duration to group same hosts by for deletion (w)eek (d)ay or (y)ear (i.e. 5d, 2w, 1y). Defaults to 1w.')
|
||||
parser.add_argument('--module',
|
||||
dest='module',
|
||||
default=None,
|
||||
help='Limit cleanup to a particular module.')
|
||||
|
||||
def __init__(self):
|
||||
super(Command, self).__init__()
|
||||
|
||||
def string_time_to_timestamp(self, time_string):
|
||||
units = {
|
||||
'y': 'years',
|
||||
'd': 'days',
|
||||
'w': 'weeks',
|
||||
'm': 'months'
|
||||
}
|
||||
try:
|
||||
match = re.match(r'(?P<value>[0-9]+)(?P<unit>.*)', time_string)
|
||||
group = match.groupdict()
|
||||
kv = {}
|
||||
units_verbose = units[group['unit']]
|
||||
kv[units_verbose] = int(group['value'])
|
||||
return relativedelta(**kv)
|
||||
except (KeyError, TypeError, AttributeError):
|
||||
return None
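# Illustrative conversions: the helper maps a '<number><unit>' string onto a
# dateutil relativedelta, returning None for anything it cannot parse.
#
#   self.string_time_to_timestamp('30d')  # -> relativedelta(days=+30)
#   self.string_time_to_timestamp('2w')   # -> relativedelta(weeks=+2)
#   self.string_time_to_timestamp('1y')   # -> relativedelta(years=+1)
#   self.string_time_to_timestamp('5x')   # -> None (unrecognized unit)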
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
sys.stderr.write("This command has been deprecated and will be removed in a future release.\n")
|
||||
if not feature_enabled('system_tracking'):
|
||||
raise CommandError("The System Tracking feature is not enabled for your instance")
|
||||
cleanup_facts = CleanupFacts()
|
||||
if not all([options[GRANULARITY], options[OLDER_THAN]]):
|
||||
raise CommandError('Both --granularity and --older_than are required.')
|
||||
|
||||
older_than = self.string_time_to_timestamp(options[OLDER_THAN])
|
||||
granularity = self.string_time_to_timestamp(options[GRANULARITY])
|
||||
|
||||
if older_than is None:
|
||||
raise CommandError('--older_than invalid value "%s"' % options[OLDER_THAN])
|
||||
if granularity is None:
|
||||
raise CommandError('--granularity invalid value "%s"' % options[GRANULARITY])
|
||||
|
||||
cleanup_facts.run(older_than, granularity, module=options['module'])
|
||||
25
awx/main/management/commands/cleanup_sessions.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import logging
|
||||
from django.core import management
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from django.contrib.sessions.models import Session
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
def init_logging(self):
|
||||
log_levels = dict(enumerate([logging.ERROR, logging.INFO,
|
||||
logging.DEBUG, 0]))
|
||||
self.logger = logging.getLogger('awx.main.commands.cleanup_sessions')
|
||||
self.logger.setLevel(log_levels.get(self.verbosity, 0))
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(logging.Formatter('%(message)s'))
|
||||
self.logger.addHandler(handler)
|
||||
self.logger.propagate = False
|
||||
|
||||
def execute(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
self.init_logging()
|
||||
total_sessions = Session.objects.all().count()
|
||||
management.call_command('clearsessions')
|
||||
self.logger.info("Expired Sessions deleted {}".format(total_sessions - Session.objects.all().count()))
|
||||
28
awx/main/management/commands/cleanup_tokens.py
Normal file
@@ -0,0 +1,28 @@
|
||||
import logging
|
||||
from django.core import management
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from awx.main.models import OAuth2AccessToken
|
||||
from oauth2_provider.models import RefreshToken
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
def init_logging(self):
|
||||
log_levels = dict(enumerate([logging.ERROR, logging.INFO,
|
||||
logging.DEBUG, 0]))
|
||||
self.logger = logging.getLogger('awx.main.commands.cleanup_tokens')
|
||||
self.logger.setLevel(log_levels.get(self.verbosity, 0))
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(logging.Formatter('%(message)s'))
|
||||
self.logger.addHandler(handler)
|
||||
self.logger.propagate = False
|
||||
|
||||
def execute(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
self.init_logging()
|
||||
total_accesstokens = OAuth2AccessToken.objects.all().count()
|
||||
total_refreshtokens = RefreshToken.objects.all().count()
|
||||
management.call_command('cleartokens')
|
||||
self.logger.info("Expired OAuth 2 Access Tokens deleted: {}".format(total_accesstokens - OAuth2AccessToken.objects.all().count()))
|
||||
self.logger.info("Expired OAuth 2 Refresh Tokens deleted: {}".format(total_refreshtokens - RefreshToken.objects.all().count()))
|
||||
@@ -34,7 +34,7 @@ class Command(BaseCommand):
|
||||
scm_update_cache_timeout=0,
|
||||
organization=o)
|
||||
p.save(skip_update=True)
|
||||
ssh_type = CredentialType.from_v1_kind('ssh')
|
||||
ssh_type = CredentialType.objects.filter(namespace='ssh').first()
|
||||
c = Credential.objects.create(credential_type=ssh_type,
|
||||
name='Demo Credential',
|
||||
inputs={
|
||||
@@ -47,7 +47,7 @@ class Command(BaseCommand):
|
||||
created_by=superuser)
|
||||
Host.objects.create(name='localhost',
|
||||
inventory=i,
|
||||
variables="ansible_connection: local",
|
||||
variables="ansible_connection: local\nansible_python_interpreter: '{{ ansible_playbook_python }}'",
|
||||
created_by=superuser)
|
||||
jt = JobTemplate.objects.create(name='Demo Job Template',
|
||||
playbook='hello_world.yml',
|
||||
|
||||
@@ -4,8 +4,6 @@ from importlib import import_module
|
||||
# Django
|
||||
from django.utils import timezone
|
||||
from django.conf import settings
|
||||
from django.contrib.auth import logout
|
||||
from django.http import HttpRequest
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.contrib.auth.models import User
|
||||
from django.contrib.sessions.models import Session
|
||||
@@ -29,9 +27,9 @@ class Command(BaseCommand):
|
||||
# with consideration for timezones.
|
||||
start = timezone.now()
|
||||
sessions = Session.objects.filter(expire_date__gte=start).iterator()
|
||||
request = HttpRequest()
|
||||
for session in sessions:
|
||||
user_id = session.get_decoded().get('_auth_user_id')
|
||||
if (user is None) or (user_id and user.id == int(user_id)):
|
||||
request.session = import_module(settings.SESSION_ENGINE).SessionStore(session.session_key)
|
||||
logout(request)
|
||||
session = import_module(settings.SESSION_ENGINE).SessionStore(session.session_key)
|
||||
# Log out the session, but without the need for a request object.
|
||||
session.flush()
|
||||
|
||||
31
awx/main/management/commands/gather_analytics.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import logging
|
||||
from awx.main.analytics import gather, ship
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
'''
|
||||
Gather AWX analytics data
|
||||
'''
|
||||
|
||||
help = 'Gather AWX analytics data'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--ship', dest='ship', action='store_true',
|
||||
help='Enable to ship metrics via insights-client')
|
||||
|
||||
def init_logging(self):
|
||||
self.logger = logging.getLogger('awx.main.analytics')
|
||||
handler = logging.StreamHandler()
|
||||
handler.setLevel(logging.DEBUG)
|
||||
handler.setFormatter(logging.Formatter('%(message)s'))
|
||||
self.logger.addHandler(handler)
|
||||
self.logger.propagate = False
|
||||
|
||||
def handle(self, *args, **options):
|
||||
tgz = gather()
|
||||
self.init_logging()
|
||||
if tgz:
|
||||
self.logger.debug(tgz)
|
||||
if options.get('ship'):
|
||||
ship(tgz)
|
||||
Some files were not shown because too many files have changed in this diff.