Mirror of https://github.com/ansible/awx.git (synced 2026-02-06 03:54:44 -03:30)
Compare commits
545 Commits
[Commit table not recoverable from the mirror: 545 commit SHAs were captured (72de660ea1 through c4e697879d), but the Author, Date, and message columns are empty.]
.gitignore (vendored): 1 change
```diff
@@ -31,6 +31,7 @@ awx/ui/templates/ui/installing.html
 awx/ui_next/node_modules/
 awx/ui_next/coverage/
 awx/ui_next/build/locales/_build
 rsyslog.pid
 /tower-license
 /tower-license/**
+tools/prometheus/data
```
CHANGELOG.md: 49 changes
```diff
@@ -2,6 +2,53 @@
 
 This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.
 
+## 11.2.0 (Apr 29, 2020)
+
+- Inventory updates now use collection-based plugins by default (in Ansible 2.9+):
+  - amazon.aws.aws_ec2
+  - community.vmware.vmware_vm_inventory
+  - azure.azcollection.azure_rm
+  - google.cloud.gcp_compute
+  - theforeman.foreman.foreman
+  - openstack.cloud.openstack
+  - ovirt.ovirt_collection.ovirt
+  - awx.awx.tower
+- Added support for Approle and LDAP/AD mechanisms to the Hashicorp Vault credential plugin (https://github.com/ansible/awx/issues/5076)
+- Added Project (Domain Name) support for the OpenStack Keystone v3 API (https://github.com/ansible/awx/issues/6831)
+- Added a new setting for raising log verbosity for rsyslogd (https://github.com/ansible/awx/pull/6818)
+- Added the ability to monitor stdout in the CLI for running jobs and workflow jobs (https://github.com/ansible/awx/issues/6165)
+- Fixed a bug which prevented the AWX CLI from properly installing with newer versions of pip (https://github.com/ansible/awx/issues/6870)
+- Fixed a bug which broke AWX's external logging support when configured with HTTPS endpoints that utilize self-signed certificates (https://github.com/ansible/awx/issues/6851)
+- Fixed a local docker installer bug that mistakenly attempted to upgrade PostgreSQL when an external pg_hostname is specified (https://github.com/ansible/awx/pull/5398)
+- Fixed a race condition that caused task container crashes when pods are quickly brought down and back up (https://github.com/ansible/awx/issues/6750)
+- Fixed a bug that caused 404 errors when attempting to view the second page of the workflow approvals view (https://github.com/ansible/awx/issues/6803)
+- Fixed a bug that prevented the use of ANSIBLE_SSH_ARGS for ad-hoc-commands (https://github.com/ansible/awx/pull/6811)
+- Fixed a bug that broke AWX installs/upgrades on Red Hat OpenShift (https://github.com/ansible/awx/issues/6791)
+
+## 11.1.0 (Apr 22, 2020)
+- Changed rsyslogd to persist queued events to disk (to prevent a risk of out-of-memory errors) (https://github.com/ansible/awx/issues/6746)
+- Added the ability to configure the destination and maximum disk size of rsyslogd spool (in the event of a log aggregator outage) (https://github.com/ansible/awx/pull/6763)
+- Added the ability to discover playbooks in project clones from symlinked directories (https://github.com/ansible/awx/pull/6773)
+- Fixed a bug that caused certain log aggregator settings to break logging integration (https://github.com/ansible/awx/issues/6760)
+- Fixed a bug that caused playbook execution in container groups to sometimes unexpectedly deadlock (https://github.com/ansible/awx/issues/6692)
+- Improved stability of the new redis clustering implementation (https://github.com/ansible/awx/pull/6739 https://github.com/ansible/awx/pull/6720)
+- Improved stability of the new rsyslogd-based logging implementation (https://github.com/ansible/awx/pull/6796)
+
+## 11.0.0 (Apr 16, 2020)
+- As of AWX 11.0.0, Kubernetes-based deployments use a Deployment rather than a StatefulSet.
+- Reimplemented external logging support using rsyslogd to improve reliability and address a number of issues (https://github.com/ansible/awx/issues/5155)
+- Changed activity stream logs to include summary fields for related objects (https://github.com/ansible/awx/issues/1761)
+- Added code to more gracefully attempt to reconnect to redis if it restarts/becomes unavailable (https://github.com/ansible/awx/pull/6670)
+- Fixed a bug that caused REFRESH_TOKEN_EXPIRE_SECONDS to not properly be respected for OAuth2.0 refresh tokens generated by AWX (https://github.com/ansible/awx/issues/6630)
+- Fixed a bug that broke schedules containing RRULES with very old DTSTART dates (https://github.com/ansible/awx/pull/6550)
+- Fixed a bug that broke installs on older versions of Ansible packaged with certain Linux distributions (https://github.com/ansible/awx/issues/5501)
+- Fixed a bug that caused the activity stream to sometimes report the incorrect actor when associating user membership on SAML login (https://github.com/ansible/awx/pull/6525)
+- Fixed a bug in AWX's Grafana notification support when annotation tags are omitted (https://github.com/ansible/awx/issues/6580)
+- Fixed a bug that prevented some users from searching for Source Control credentials in the AWX user interface (https://github.com/ansible/awx/issues/6600)
+- Fixed a bug that prevented disassociating orphaned users from credentials (https://github.com/ansible/awx/pull/6554)
+- Updated Twisted to address CVE-2020-10108 and CVE-2020-10109.
+
 ## 10.0.0 (Mar 30, 2020)
 - As of AWX 10.0.0, the official AWX CLI no longer supports Python 2 (it requires at least Python 3.6) (https://github.com/ansible/awx/pull/6327)
 - AWX no longer relies on RabbitMQ; Redis is added as a new dependency (https://github.com/ansible/awx/issues/5443)
@@ -95,7 +142,7 @@ This is a list of high-level changes for each release of AWX. A full list of com
 - Fixed a bug in the CLI which incorrectly parsed launch time arguments for `awx job_templates launch` and `awx workflow_job_templates launch` (https://github.com/ansible/awx/issues/5093).
 - Fixed a bug that caused inventory updates using "sourced from a project" to stop working (https://github.com/ansible/awx/issues/4750).
 - Fixed a bug that caused Slack notifications to sometimes show the wrong bot avatar (https://github.com/ansible/awx/pull/5125).
-- Fixed a bug that prevented the use of digits in Tower's URL settings (https://github.com/ansible/awx/issues/5081).
+- Fixed a bug that prevented the use of digits in AWX's URL settings (https://github.com/ansible/awx/issues/5081).
 
 ## 8.0.0 (Oct 21, 2019)
```
````diff
@@ -215,18 +215,23 @@ Using `docker exec`, this will create a session in the running *awx* container,
 If you want to start and use the development environment, you'll first need to bootstrap it by running the following command:
 
 ```bash
-(container)# /bootstrap_development.sh
+(container)# /usr/bin/bootstrap_development.sh
 ```
 
-The above will do all the setup tasks, including running database migrations, so it may take a couple minutes.
+The above will do all the setup tasks, including running database migrations, so it may take a couple minutes. Once it's done it
+will drop you back to the shell.
 
-Now you can start each service individually, or start all services in a pre-configured tmux session like so:
+In order to launch all developer services:
 
 ```bash
-(container)# cd /awx_devel
-(container)# make server
+(container)# /usr/bin/launch_awx.sh
 ```
 
+`launch_awx.sh` also calls `bootstrap_development.sh` so if all you are doing is launching the supervisor to start all services, you don't
+need to call `bootstrap_development.sh` first.
 
 ### Post Build Steps
 
 Before you can log in and use the system, you will need to create an admin user. Optionally, you may also want to load some demo data.
````
INSTALL.md: 24 changes
```diff
@@ -10,7 +10,6 @@ This document provides a guide for installing AWX.
   + [AWX branding](#awx-branding)
   + [Prerequisites](#prerequisites)
   + [System Requirements](#system-requirements)
-  + [AWX Tunables](#awx-tunables)
   + [Choose a deployment platform](#choose-a-deployment-platform)
   + [Official vs Building Images](#official-vs-building-images)
 * [Upgrading from previous versions](#upgrading-from-previous-versions)
@@ -49,7 +48,17 @@ This document provides a guide for installing AWX.
 
 ### Clone the repo
 
-If you have not already done so, you will need to clone, or create a local copy, of the [AWX repo](https://github.com/ansible/awx). For more on how to clone the repo, view [git clone help](https://git-scm.com/docs/git-clone).
+If you have not already done so, you will need to clone, or create a local copy, of the [AWX repo](https://github.com/ansible/awx). We generally recommend that you view the releases page:
+
+https://github.com/ansible/awx/releases
+
+...and clone the latest stable release, e.g.,
+
+`git clone -b x.y.z https://github.com/ansible/awx.git`
+
+Please note that deploying from `HEAD` (or the latest commit) is **not** stable, and that if you want to do this, you should proceed at your own risk (also, see the section #official-vs-building-images for building your own image).
+
+For more on how to clone the repo, view [git clone help](https://git-scm.com/docs/git-clone).
 
 Once you have a local copy, run commands within the root of the project tree.
 
@@ -73,6 +82,7 @@ Before you can run a deployment, you'll need the following installed in your loc
 - [Git](https://git-scm.com/) Requires Version 1.8.4+
 - [Node 10.x LTS version](https://nodejs.org/en/download/)
 - [NPM 6.x LTS](https://docs.npmjs.com/)
+- Python 3.6+
 
 ### System Requirements
 
@@ -82,11 +92,7 @@ The system that runs the AWX service will need to satisfy the following requirem
 - At least 2 cpu cores
 - At least 20GB of space
 - Running Docker, Openshift, or Kubernetes
-- If you choose to use an external PostgreSQL database, please note that the minimum version is 9.6+.
-
-### AWX Tunables
-
-**TODO** add tunable bits
+- If you choose to use an external PostgreSQL database, please note that the minimum version is 10+.
 
 ### Choose a deployment platform
 
@@ -143,7 +149,7 @@ $ ansible-playbook -i inventory install.yml -e @vars.yml
 
 ### Prerequisites
 
-To complete a deployment to OpenShift, you will obviously need access to an OpenShift cluster. For demo and testing purposes, you can use [Minishift](https://github.com/minishift/minishift) to create a single node cluster running inside a virtual machine.
+To complete a deployment to OpenShift, you will need access to an OpenShift cluster. For demo and testing purposes, you can use [Minishift](https://github.com/minishift/minishift) to create a single node cluster running inside a virtual machine.
 
 When using OpenShift for deploying AWX make sure you have correct privileges to add the security context 'privileged', otherwise the installation will fail. The privileged context is needed because of the use of [the bubblewrap tool](https://github.com/containers/bubblewrap) to add an additional layer of security when using containers.
 
@@ -477,7 +483,7 @@ Before starting the install process, review the [inventory](./installer/inventor
 
 *ssl_certificate*
 
-> Optionally, provide the path to a file that contains a certificate and its private key.
+> Optionally, provide the path to a file that contains a certificate and its private key. This needs to be a .pem-file
 
 *docker_compose_dir*
```
```diff
@@ -10,6 +10,7 @@ recursive-include awx/playbooks *.yml
 recursive-include awx/lib/site-packages *
 recursive-include awx/plugins *.ps1
 recursive-include requirements *.txt
+recursive-include requirements *.yml
 recursive-include config *
 recursive-include docs/licenses *
 recursive-exclude awx devonly.py*
```
Makefile: 27 changes
```diff
@@ -18,7 +18,6 @@ COMPOSE_TAG ?= $(GIT_BRANCH)
 COMPOSE_HOST ?= $(shell hostname)
 
 VENV_BASE ?= /venv
-COLLECTION_VENV ?= /awx_devel/awx_collection_test_venv
 SCL_PREFIX ?=
 CELERY_SCHEDULE_FILE ?= /var/lib/awx/beat.db
 
@@ -210,7 +209,11 @@ requirements_awx: virtualenv_awx
 requirements_awx_dev:
     $(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt
 
-requirements: requirements_ansible requirements_awx
+requirements_collections:
+    mkdir -p $(COLLECTION_BASE)
+    ansible-galaxy collection install -r requirements/collections_requirements.yml -p $(COLLECTION_BASE)
+
+requirements: requirements_ansible requirements_awx requirements_collections
 
 requirements_dev: requirements_awx requirements_ansible_py3 requirements_awx_dev requirements_ansible_dev
 
@@ -365,11 +368,6 @@ test:
     cd awxkit && $(VENV_BASE)/awx/bin/tox -re py2,py3
     awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'
 
-prepare_collection_venv:
-    rm -rf $(COLLECTION_VENV)
-    mkdir $(COLLECTION_VENV)
-    $(VENV_BASE)/awx/bin/pip install --target=$(COLLECTION_VENV) git+https://github.com/ansible/tower-cli.git
-
 COLLECTION_TEST_DIRS ?= awx_collection/test/awx
 COLLECTION_TEST_TARGET ?=
 COLLECTION_PACKAGE ?= awx
@@ -380,12 +378,12 @@ test_collection:
     @if [ "$(VENV_BASE)" ]; then \
         . $(VENV_BASE)/awx/bin/activate; \
     fi; \
-    PYTHONPATH=$(COLLECTION_VENV):$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
+    PYTHONPATH=$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
 
 flake8_collection:
     flake8 awx_collection/ # Different settings, in main exclude list
 
-test_collection_all: prepare_collection_venv test_collection flake8_collection
+test_collection_all: test_collection flake8_collection
 
 # WARNING: symlinking a collection is fundamentally unstable
 # this is for rapid development iteration with playbooks, do not use with other test targets
@@ -650,7 +648,6 @@ detect-schema-change: genschema
     diff -u -b reference-schema.json schema.json
 
 docker-compose-clean: awx/projects
-    cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm -w /awx_devel --service-ports awx make clean
     cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
 
 docker-compose-build: awx-devel-build
@@ -668,11 +665,12 @@ docker-compose-isolated-build: awx-devel-build
     docker tag ansible/awx_isolated $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
     #docker push $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
 
 MACHINE?=default
 docker-clean:
     eval $$(docker-machine env $(MACHINE))
     $(foreach container_id,$(shell docker ps -f name=tools_awx -aq),docker stop $(container_id); docker rm -f $(container_id);)
-    -docker images | grep "awx_devel" | awk '{print $$1 ":" $$2}' | xargs docker rmi
+    docker images | grep "awx_devel" | awk '{print $$1 ":" $$2}' | xargs docker rmi
 
 docker-clean-volumes: docker-compose-clean
     docker volume rm tools_awx_db
 
 docker-refresh: docker-clean docker-compose
 
@@ -686,9 +684,6 @@ docker-compose-cluster-elk: docker-auth awx/projects
 prometheus:
     docker run -u0 --net=tools_default --link=`docker ps | egrep -o "tools_awx(_run)?_([^ ]+)?"`:awxweb --volume `pwd`/tools/prometheus:/prometheus --name prometheus -d -p 0.0.0.0:9090:9090 prom/prometheus --web.enable-lifecycle --config.file=/prometheus/prometheus.yml
 
-minishift-dev:
-    ansible-playbook -i localhost, -e devtree_directory=$(CURDIR) tools/clusterdevel/start_minishift_dev.yml
-
 clean-elk:
     docker stop tools_kibana_1
    docker stop tools_logstash_1
```
```diff
@@ -45,7 +45,10 @@ from awx.main.utils import (
     get_search_fields,
     getattrd,
     get_object_or_400,
-    decrypt_field
+    decrypt_field,
+    get_awx_version,
+    get_licenser,
+    StubLicense
 )
 from awx.main.utils.db import get_all_field_names
 from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer, UserSerializer
@@ -197,6 +200,8 @@ class APIView(views.APIView):
             logger.warning(status_msg)
         response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
         time_started = getattr(self, 'time_started', None)
+        response['X-API-Product-Version'] = get_awx_version()
+        response['X-API-Product-Name'] = 'AWX' if isinstance(get_licenser(), StubLicense) else 'Red Hat Ansible Tower'
         response['X-API-Node'] = settings.CLUSTER_HOST_ID
         if time_started:
             time_elapsed = time.time() - self.time_started
```
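The hunk above stamps every API response with an `X-API-Product-Version` header and an `X-API-Product-Name` header whose value depends on a license check. A minimal, hedged sketch of that pattern follows, with stand-in helpers in place of AWX's real `get_awx_version`/`get_licenser` utilities (the stand-ins and their return values are assumptions for illustration only):

```python
# Hypothetical stand-ins for awx.main.utils helpers; the real ones live in the AWX source tree.
class StubLicense:
    """Placeholder licenser used when no commercial license is installed."""


def get_awx_version():
    return "11.2.0"  # assumed value for illustration


def get_licenser():
    return StubLicense()


def add_product_headers(response_headers, cluster_host_id):
    """Mirror of the pattern in the hunk above: annotate every API response
    with the product version, product name, and the node that served it."""
    response_headers['X-API-Product-Version'] = get_awx_version()
    response_headers['X-API-Product-Name'] = (
        'AWX' if isinstance(get_licenser(), StubLicense) else 'Red Hat Ansible Tower'
    )
    response_headers['X-API-Node'] = cluster_host_id
    return response_headers


if __name__ == '__main__':
    print(add_product_headers({}, cluster_host_id='awx-1'))
```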
```diff
@@ -2,6 +2,7 @@
 # All Rights Reserved.
 
 from collections import OrderedDict
+from uuid import UUID
 
 # Django
 from django.core.exceptions import PermissionDenied
@@ -86,6 +87,8 @@ class Metadata(metadata.SimpleMetadata):
         # FIXME: Still isn't showing all default values?
         try:
             default = field.get_default()
+            if type(default) is UUID:
+                default = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
             if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
                 default = '{}://{}'.format(self.request.scheme, self.request.get_host())
             field_info['default'] = default
```
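The metadata change above masks UUID-typed defaults so the OPTIONS output never advertises a freshly generated UUID as if it were a stable default. A tiny self-contained sketch of that check (the placeholder string is taken from the hunk; everything else is illustrative):

```python
import uuid

UUID_PLACEHOLDER = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'


def displayed_default(default):
    """Return the value the OPTIONS metadata should advertise: ordinary
    defaults pass through, UUID defaults are replaced with a fixed
    placeholder so a random per-request UUID is never shown as a default."""
    if type(default) is uuid.UUID:
        return UUID_PLACEHOLDER
    return default


assert displayed_default(42) == 42
assert displayed_default(uuid.uuid4()) == UUID_PLACEHOLDER
```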
```diff
@@ -2034,11 +2034,6 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
         res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
         return res
 
-    def get_group(self, obj):  # TODO: remove in 3.3
-        if obj.deprecated_group:
-            return obj.deprecated_group.id
-        return None
-
     def build_relational_field(self, field_name, relation_info):
         field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
         # SCM Project and inventory are read-only unless creating a new inventory.
@@ -3616,9 +3611,11 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
         elif self.instance:
             ujt = self.instance.unified_job_template
         if ujt is None:
-            if 'workflow_job_template' in attrs:
-                return {'workflow_job_template': attrs['workflow_job_template']}
-            return {}
+            ret = {}
+            for fd in ('workflow_job_template', 'identifier'):
+                if fd in attrs:
+                    ret[fd] = attrs[fd]
+            return ret
 
         # build additional field survey_passwords to track redacted variables
         password_dict = {}
@@ -3671,7 +3668,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
                     attrs.get('survey_passwords', {}).pop(key, None)
                 else:
                     errors.setdefault('extra_vars', []).append(
-                        _('"$encrypted$ is a reserved keyword, may not be used for {var_name}."'.format(key))
+                        _('"$encrypted$ is a reserved keyword, may not be used for {}."'.format(key))
                     )
 
         # Launch configs call extra_vars extra_data for historical reasons
@@ -4539,6 +4536,8 @@ class SchedulePreviewSerializer(BaseSerializer):
         try:
             Schedule.rrulestr(rrule_value)
         except Exception as e:
+            import traceback
+            logger.error(traceback.format_exc())
             raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
         return value
```
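The last hunk above logs the full traceback when rrule parsing fails and then raises a short validation error. A hedged sketch of the same pattern, using `python-dateutil`'s `rrulestr` as a stand-in for AWX's `Schedule.rrulestr` wrapper (the `ValidationError` class here is a local stand-in, not DRF's):

```python
import logging
import traceback

from dateutil.rrule import rrulestr  # requires python-dateutil; stand-in for Schedule.rrulestr

logger = logging.getLogger(__name__)


class ValidationError(Exception):
    """Minimal stand-in for rest_framework.serializers.ValidationError."""


def validate_rrule(value):
    """Same shape as the change above: keep the full traceback in the logs
    for operators, but surface only a short parse error to the API caller."""
    try:
        rrulestr(value)
    except Exception as e:
        logger.error(traceback.format_exc())
        raise ValidationError("rrule parsing failed validation: {}".format(e))
    return value


validate_rrule('DTSTART:20200101T000000Z\nRRULE:FREQ=DAILY;COUNT=3')
```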
```diff
@@ -1 +1,2 @@
 # Test Logging Configuration
```
```diff
@@ -1,11 +1,15 @@
 # Copyright (c) 2017 Ansible, Inc.
 # All Rights Reserved.
+from datetime import timedelta
 
+from django.utils.timezone import now
+from django.conf import settings
 from django.conf.urls import url
 
 from oauthlib import oauth2
 from oauth2_provider import views
 
+from awx.main.models import RefreshToken
 from awx.api.views import (
     ApiOAuthAuthorizationRootView,
 )
@@ -14,6 +18,21 @@ from awx.api.views import (
 class TokenView(views.TokenView):
 
     def create_token_response(self, request):
+        # Django OAuth2 Toolkit has a bug whereby refresh tokens are *never*
+        # properly expired (ugh):
+        #
+        # https://github.com/jazzband/django-oauth-toolkit/issues/746
+        #
+        # This code detects and auto-expires them on refresh grant
+        # requests.
+        if request.POST.get('grant_type') == 'refresh_token' and 'refresh_token' in request.POST:
+            refresh_token = RefreshToken.objects.filter(
+                token=request.POST['refresh_token']
+            ).first()
+            if refresh_token:
+                expire_seconds = settings.OAUTH2_PROVIDER.get('REFRESH_TOKEN_EXPIRE_SECONDS', 0)
+                if refresh_token.created + timedelta(seconds=expire_seconds) < now():
+                    return request.build_absolute_uri(), {}, 'The refresh token has expired.', '403'
         try:
             return super(TokenView, self).create_token_response(request)
         except oauth2.AccessDeniedError as e:
```
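The `TokenView` override above treats a refresh token as expired once its creation time plus `REFRESH_TOKEN_EXPIRE_SECONDS` has passed. A standalone sketch of just that rule, with a plain dataclass instead of the django-oauth-toolkit model (the default lifetime below is an assumed value for illustration):

```python
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone

# Assumed default; in AWX this comes from settings.OAUTH2_PROVIDER['REFRESH_TOKEN_EXPIRE_SECONDS'].
REFRESH_TOKEN_EXPIRE_SECONDS = 2628000


@dataclass
class RefreshToken:
    token: str
    created: datetime  # timezone-aware creation timestamp


def refresh_token_expired(refresh_token, expire_seconds=REFRESH_TOKEN_EXPIRE_SECONDS, now=None):
    """Same rule as the hunk above: the token is dead once its creation time
    plus the configured lifetime is earlier than 'now'."""
    now = now or datetime.now(timezone.utc)
    return refresh_token.created + timedelta(seconds=expire_seconds) < now


stale = RefreshToken('abc123', datetime.now(timezone.utc) - timedelta(days=60))
fresh = RefreshToken('def456', datetime.now(timezone.utc) - timedelta(hours=1))
assert refresh_token_expired(stale) is True
assert refresh_token_expired(fresh) is False
```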
```diff
@@ -1092,7 +1092,7 @@ class UserRolesList(SubListAttachDetachAPIView):
 
         credential_content_type = ContentType.objects.get_for_model(models.Credential)
         if role.content_type == credential_content_type:
-            if role.content_object.organization and user not in role.content_object.organization.member_role:
+            if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
                 data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
                 return Response(data, status=status.HTTP_400_BAD_REQUEST)
 
@@ -4415,7 +4415,7 @@ class RoleUsersList(SubListAttachDetachAPIView):
 
         credential_content_type = ContentType.objects.get_for_model(models.Credential)
         if role.content_type == credential_content_type:
-            if role.content_object.organization and user not in role.content_object.organization.member_role:
+            if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
                 data = dict(msg=_("You cannot grant credential access to a user not in the credentials' organization"))
                 return Response(data, status=status.HTTP_400_BAD_REQUEST)
```
```diff
@@ -172,9 +172,9 @@ class URLField(CharField):
                 netloc = '{}:{}'.format(netloc, url_parts.port)
             if url_parts.username:
                 if url_parts.password:
-                    netloc = '{}:{}@{}' % (url_parts.username, url_parts.password, netloc)
+                    netloc = '{}:{}@{}'.format(url_parts.username, url_parts.password, netloc)
                 else:
-                    netloc = '{}@{}' % (url_parts.username, netloc)
+                    netloc = '{}@{}'.format(url_parts.username, netloc)
             value = urlparse.urlunsplit([url_parts.scheme, netloc, url_parts.path, url_parts.query, url_parts.fragment])
         except Exception:
             raise  # If something fails here, just fall through and let the validators check it.
```
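The `URLField` fix above swaps `'{}' % (...)`, which raises `TypeError` because %-formatting does not understand `{}` placeholders, for `str.format`. A small runnable sketch of the same netloc rebuild outside Django:

```python
from urllib.parse import urlsplit, urlunsplit


def rebuild_netloc(url):
    """Rebuild the netloc with str.format, as in the fixed lines above.
    The old code used "'{}:{}@{}' % (...)", which raises TypeError at
    runtime because %-formatting expects %s-style placeholders."""
    parts = urlsplit(url)
    netloc = parts.hostname or ''
    if parts.port:
        netloc = '{}:{}'.format(netloc, parts.port)
    if parts.username:
        if parts.password:
            netloc = '{}:{}@{}'.format(parts.username, parts.password, netloc)
        else:
            netloc = '{}@{}'.format(parts.username, netloc)
    return urlunsplit([parts.scheme, netloc, parts.path, parts.query, parts.fragment])


assert rebuild_netloc('https://bob:secret@example.com:8443/api') == \
    'https://bob:secret@example.com:8443/api'
```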
```diff
@@ -410,7 +410,7 @@ class SettingsWrapper(UserSettingsHolder):
         field = self.registry.get_setting_field(name)
         if field.read_only:
             logger.warning('Attempt to set read only setting "%s".', name)
-            raise ImproperlyConfigured('Setting "%s" is read only.'.format(name))
+            raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
 
         try:
             data = field.to_representation(value)
@@ -441,7 +441,7 @@ class SettingsWrapper(UserSettingsHolder):
         field = self.registry.get_setting_field(name)
         if field.read_only:
             logger.warning('Attempt to delete read only setting "%s".', name)
-            raise ImproperlyConfigured('Setting "%s" is read only.'.format(name))
+            raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
         for setting in Setting.objects.filter(key=name, user__isnull=True):
             setting.delete()
             # pre_delete handler will delete from cache.
```
```diff
@@ -325,17 +325,3 @@ def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting
     )
     assert response.data['FOO_BAR'] == 23
 
-
-@pytest.mark.django_db
-def test_setting_logging_test(api_request):
-    with mock.patch('awx.conf.views.AWXProxyHandler.perform_test') as mock_func:
-        api_request(
-            'post',
-            reverse('api:setting_logging_test'),
-            data={'LOG_AGGREGATOR_HOST': 'http://foobar', 'LOG_AGGREGATOR_TYPE': 'logstash'}
-        )
-        call = mock_func.call_args_list[0]
-        args, kwargs = call
-        given_settings = kwargs['custom_settings']
-        assert given_settings.LOG_AGGREGATOR_HOST == 'http://foobar'
-        assert given_settings.LOG_AGGREGATOR_TYPE == 'logstash'
```
```diff
@@ -3,7 +3,11 @@
 
 # Python
 import collections
+import logging
+import subprocess
 import sys
+import socket
+from socket import SHUT_RDWR
 
 # Django
 from django.conf import settings
@@ -11,7 +15,7 @@ from django.http import Http404
 from django.utils.translation import ugettext_lazy as _
 
 # Django REST Framework
-from rest_framework.exceptions import PermissionDenied, ValidationError
+from rest_framework.exceptions import PermissionDenied
 from rest_framework.response import Response
 from rest_framework import serializers
 from rest_framework import status
@@ -26,7 +30,6 @@ from awx.api.generics import (
 from awx.api.permissions import IsSuperUser
 from awx.api.versioning import reverse
 from awx.main.utils import camelcase_to_underscore
-from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException
 from awx.main.tasks import handle_setting_changes
 from awx.conf.models import Setting
 from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
@@ -161,40 +164,53 @@ class SettingLoggingTest(GenericAPIView):
     filter_backends = []
 
     def post(self, request, *args, **kwargs):
         defaults = dict()
         for key in settings_registry.get_registered_settings(category_slug='logging'):
             try:
                 defaults[key] = settings_registry.get_setting_field(key).get_default()
             except serializers.SkipField:
                 defaults[key] = None
         obj = type('Settings', (object,), defaults)()
         serializer = self.get_serializer(obj, data=request.data)
         serializer.is_valid(raise_exception=True)
         # Special validation specific to logging test.
         errors = {}
         for key in ['LOG_AGGREGATOR_TYPE', 'LOG_AGGREGATOR_HOST']:
             if not request.data.get(key, ''):
                 errors[key] = 'This field is required.'
         if errors:
             raise ValidationError(errors)
 
         if request.data.get('LOG_AGGREGATOR_PASSWORD', '').startswith('$encrypted$'):
             serializer.validated_data['LOG_AGGREGATOR_PASSWORD'] = getattr(
                 settings, 'LOG_AGGREGATOR_PASSWORD', ''
             )
         # Error if logging is not enabled
         enabled = getattr(settings, 'LOG_AGGREGATOR_ENABLED', False)
         if not enabled:
             return Response({'error': 'Logging not enabled'}, status=status.HTTP_409_CONFLICT)
 
         # Send test message to configured logger based on db settings
         try:
             default_logger = settings.LOG_AGGREGATOR_LOGGERS[0]
             if default_logger != 'awx':
                 default_logger = f'awx.analytics.{default_logger}'
         except IndexError:
             default_logger = 'awx'
         logging.getLogger(default_logger).error('AWX Connection Test Message')
 
         hostname = getattr(settings, 'LOG_AGGREGATOR_HOST', None)
         protocol = getattr(settings, 'LOG_AGGREGATOR_PROTOCOL', None)
 
         try:
             class MockSettings:
                 pass
             mock_settings = MockSettings()
             for k, v in serializer.validated_data.items():
                 setattr(mock_settings, k, v)
             AWXProxyHandler().perform_test(custom_settings=mock_settings)
             if mock_settings.LOG_AGGREGATOR_PROTOCOL.upper() == 'UDP':
                 return Response(status=status.HTTP_201_CREATED)
         except LoggingConnectivityException as e:
             return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
         return Response(status=status.HTTP_200_OK)
             subprocess.check_output(
                 ['rsyslogd', '-N1', '-f', '/var/lib/awx/rsyslog/rsyslog.conf'],
                 stderr=subprocess.STDOUT
             )
         except subprocess.CalledProcessError as exc:
             return Response({'error': exc.output}, status=status.HTTP_400_BAD_REQUEST)
 
         # Check to ensure port is open at host
         if protocol in ['udp', 'tcp']:
             port = getattr(settings, 'LOG_AGGREGATOR_PORT', None)
             # Error if port is not set when using UDP/TCP
             if not port:
                 return Response({'error': 'Port required for ' + protocol}, status=status.HTTP_400_BAD_REQUEST)
         else:
             # if http/https by this point, domain is reacheable
             return Response(status=status.HTTP_202_ACCEPTED)
 
         if protocol == 'udp':
             s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         else:
             s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         try:
             s.settimeout(.5)
             s.connect((hostname, int(port)))
             s.shutdown(SHUT_RDWR)
             s.close()
             return Response(status=status.HTTP_202_ACCEPTED)
         except Exception as e:
             return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
 
 
 # Create view functions for all of the class-based views to simplify inclusion
```
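The rewritten logging test above validates the rendered rsyslog configuration with `rsyslogd -N1` and then checks that the aggregator's TCP or UDP port answers. Below is a standalone, hedged sketch of only the socket step; the host and port are made up, and for UDP the check is best-effort since UDP is connectionless:

```python
import socket
from socket import SHUT_RDWR


def aggregator_port_reachable(hostname, port, protocol='tcp', timeout=0.5):
    """Standalone sketch of the reachability check in the view above: open a
    UDP or TCP socket, try to connect with a short timeout, then shut the
    connection down cleanly. Returns (ok, error_message)."""
    sock_type = socket.SOCK_DGRAM if protocol == 'udp' else socket.SOCK_STREAM
    s = socket.socket(socket.AF_INET, sock_type)
    try:
        s.settimeout(timeout)
        s.connect((hostname, int(port)))
        s.shutdown(SHUT_RDWR)
        return True, None
    except Exception as e:  # connection refused, timeout, DNS failure, ...
        return False, str(e)
    finally:
        s.close()


# Example: a hypothetical logstash listener on localhost:5140.
ok, err = aggregator_port_reachable('127.0.0.1', 5140, protocol='tcp')
print('reachable' if ok else 'unreachable: {}'.format(err))
```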
```diff
@@ -11,7 +11,6 @@ from functools import reduce
 from django.conf import settings
 from django.db.models import Q, Prefetch
 from django.contrib.auth.models import User
 from django.contrib.contenttypes.models import ContentType
 from django.utils.translation import ugettext_lazy as _
 from django.core.exceptions import ObjectDoesNotExist
@@ -405,14 +404,6 @@ class BaseAccess(object):
                 # Cannot copy manual project without errors
                 user_capabilities[display_method] = False
                 continue
-            elif display_method in ['start', 'schedule'] and isinstance(obj, Group):  # TODO: remove in 3.3
-                try:
-                    if obj.deprecated_inventory_source and not obj.deprecated_inventory_source._can_update():
-                        user_capabilities[display_method] = False
-                        continue
-                except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
-                    user_capabilities[display_method] = False
-                    continue
             elif display_method in ['start', 'schedule'] and isinstance(obj, (Project)):
                 if obj.scm_type == '':
                     user_capabilities[display_method] = False
@@ -650,8 +641,8 @@ class UserAccess(BaseAccess):
                 # in these cases only superusers can modify orphan users
                 return False
             return not obj.roles.all().exclude(
-                content_type=ContentType.objects.get_for_model(User)
-            ).filter(ancestors__in=self.user.roles.all()).exists()
+                ancestors__in=self.user.roles.all()
+            ).exists()
         else:
             return self.is_all_org_admin(obj)
 
@@ -1434,7 +1425,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
         Users who are able to create deploy jobs can also run normal and check (dry run) jobs.
         '''
         if not data:  # So the browseable API will work
-            return Organization.accessible_objects(self.user, 'job_template_admin_role').exists()
+            return Project.accessible_objects(self.user, 'use_role').exists()
 
         # if reference_obj is provided, determine if it can be copied
         reference_obj = data.get('reference_obj', None)
@@ -1503,11 +1494,6 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
         if data is None:
             return True
 
-        # standard type of check for organization - cannot change the value
-        # unless posessing the respective job_template_admin_role, otherwise non-blocking
-        if not self.check_related('organization', Organization, data, obj=obj, role_field='job_template_admin_role'):
-            return False
-
         data = dict(data)
 
         if self.changes_are_non_sensitive(obj, data):
```
```diff
@@ -11,6 +11,7 @@ from prometheus_client import (
     Counter,
     Enum,
     CollectorRegistry,
+    parser,
 )
 
 from django.conf import settings
@@ -30,6 +31,11 @@ def now_seconds():
     return dt_to_seconds(datetime.datetime.now())
 
 
+def safe_name(s):
+    # Replace all non alpha-numeric characters with _
+    return re.sub('[^0-9a-zA-Z]+', '_', s)
+
+
 # Second granularity; Per-minute
 class FixedSlidingWindow():
     def __init__(self, start_time=None):
@@ -38,8 +44,8 @@ class FixedSlidingWindow():
 
     def cleanup(self, now_bucket=None):
         now_bucket = now_bucket or now_seconds()
-        if self.start_time + 60 <= now_bucket:
-            self.start_time = now_bucket + 60 + 1
+        if self.start_time + 60 < now_bucket:
+            self.start_time = now_bucket - 60
 
         # Delete old entries
         for k in list(self.buckets.keys()):
@@ -47,16 +53,15 @@ class FixedSlidingWindow():
                 del self.buckets[k]
 
     def record(self, ts=None):
-        ts = ts or datetime.datetime.now()
-        now_bucket = int((ts - datetime.datetime(1970,1,1)).total_seconds())
+        now_bucket = ts or dt_to_seconds(datetime.datetime.now())
 
         val = self.buckets.get(now_bucket, 0)
        self.buckets[now_bucket] = val + 1
 
         self.cleanup(now_bucket)
 
-    def render(self):
-        self.cleanup()
+    def render(self, ts=None):
+        self.cleanup(now_bucket=ts)
         return sum(self.buckets.values()) or 0
 
 
@@ -99,7 +104,8 @@ class BroadcastWebsocketStatsManager():
         Stringified verion of all the stats
         '''
         redis_conn = redis.Redis.from_url(settings.BROKER_URL)
-        return redis_conn.get(BROADCAST_WEBSOCKET_REDIS_KEY_NAME)
+        stats_str = redis_conn.get(BROADCAST_WEBSOCKET_REDIS_KEY_NAME) or b''
+        return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
 
 
 class BroadcastWebsocketStats():
@@ -109,8 +115,8 @@ class BroadcastWebsocketStats():
         self._registry = CollectorRegistry()
 
         # TODO: More robust replacement
-        self.name = self.safe_name(self._local_hostname)
-        self.remote_name = self.safe_name(self._remote_hostname)
+        self.name = safe_name(self._local_hostname)
+        self.remote_name = safe_name(self._remote_hostname)
 
         self._messages_received_total = Counter(f'awx_{self.remote_name}_messages_received_total',
                                                 'Number of messages received, to be forwarded, by the broadcast websocket system',
@@ -122,6 +128,7 @@ class BroadcastWebsocketStats():
                                 'Websocket broadcast connection',
                                 states=['disconnected', 'connected'],
                                 registry=self._registry)
+        self._connection.state('disconnected')
         self._connection_start = Gauge(f'awx_{self.remote_name}_connection_start',
                                        'Time the connection was established',
                                        registry=self._registry)
@@ -131,10 +138,6 @@ class BroadcastWebsocketStats():
                                registry=self._registry)
         self._internal_messages_received_per_minute = FixedSlidingWindow()
 
-    def safe_name(self, s):
-        # Replace all non alpha-numeric characters with _
-        return re.sub('[^0-9a-zA-Z]+', '_', s)
-
     def unregister(self):
         self._registry.unregister(f'awx_{self.remote_name}_messages_received')
         self._registry.unregister(f'awx_{self.remote_name}_connection')
```
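`FixedSlidingWindow` above keeps one integer bucket per second and discards anything older than 60 seconds, so rendering the window yields a per-minute count. A self-contained re-implementation of that idea (not AWX's class, just the same bookkeeping):

```python
import time


class PerMinuteCounter:
    """Sketch of the FixedSlidingWindow idea above: one bucket per second,
    anything older than 60 seconds is dropped, and render() returns the
    number of events recorded in the trailing minute."""

    def __init__(self):
        self.buckets = {}

    def _cleanup(self, now_bucket):
        cutoff = now_bucket - 60
        for k in list(self.buckets):
            if k <= cutoff:
                del self.buckets[k]

    def record(self, ts=None):
        now_bucket = int(ts if ts is not None else time.time())
        self.buckets[now_bucket] = self.buckets.get(now_bucket, 0) + 1
        self._cleanup(now_bucket)

    def render(self, ts=None):
        self._cleanup(int(ts if ts is not None else time.time()))
        return sum(self.buckets.values())


c = PerMinuteCounter()
for offset in (0, 30, 70, 100, 119):
    c.record(ts=1000 + offset)          # five events spread over two minutes
assert c.render(ts=1000 + 119) == 3     # only events from the last 60 seconds remain
```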
```diff
@@ -122,22 +122,27 @@ def cred_type_counts(since):
     return counts
 
 
-@register('inventory_counts', '1.0')
+@register('inventory_counts', '1.2')
 def inventory_counts(since):
     counts = {}
     for inv in models.Inventory.objects.filter(kind='').annotate(num_sources=Count('inventory_sources', distinct=True),
                                                                  num_hosts=Count('hosts', distinct=True)).only('id', 'name', 'kind'):
+        source_list = []
+        for source in inv.inventory_sources.filter().annotate(num_hosts=Count('hosts', distinct=True)).values('name','source', 'num_hosts'):
+            source_list.append(source)
         counts[inv.id] = {'name': inv.name,
                           'kind': inv.kind,
                           'hosts': inv.num_hosts,
-                          'sources': inv.num_sources
+                          'sources': inv.num_sources,
+                          'source_list': source_list
                           }
 
     for smart_inv in models.Inventory.objects.filter(kind='smart'):
         counts[smart_inv.id] = {'name': smart_inv.name,
                                 'kind': smart_inv.kind,
-                                'num_hosts': smart_inv.hosts.count(),
-                                'num_sources': smart_inv.inventory_sources.count()
+                                'hosts': smart_inv.hosts.count(),
+                                'sources': 0,
+                                'source_list': []
                                 }
     return counts
 
@@ -222,10 +227,12 @@ def query_info(since, collection_type):
 
 
 # Copies Job Events from db to a .csv to be shipped
-@table_version('events_table.csv', '1.0')
+@table_version('events_table.csv', '1.1')
 @table_version('unified_jobs_table.csv', '1.0')
 @table_version('unified_job_template_table.csv', '1.0')
-def copy_tables(since, full_path):
+@table_version('workflow_job_node_table.csv', '1.0')
+@table_version('workflow_job_template_node_table.csv', '1.0')
+def copy_tables(since, full_path, subset=None):
     def _copy_table(table, query, path):
         file_path = os.path.join(path, table + '_table.csv')
         file = open(file_path, 'w', encoding='utf-8')
@@ -249,10 +256,16 @@ def copy_tables(since, full_path):
                      main_jobevent.job_id,
                      main_jobevent.host_id,
                      main_jobevent.host_name
                     , CAST(main_jobevent.event_data::json->>'start' AS TIMESTAMP WITH TIME ZONE) AS start,
                      CAST(main_jobevent.event_data::json->>'end' AS TIMESTAMP WITH TIME ZONE) AS end,
                      main_jobevent.event_data::json->'duration' AS duration,
                      main_jobevent.event_data::json->'res'->'warnings' AS warnings,
                      main_jobevent.event_data::json->'res'->'deprecations' AS deprecations
                      FROM main_jobevent
                      WHERE main_jobevent.created > {}
                      ORDER BY main_jobevent.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
-    _copy_table(table='events', query=events_query, path=full_path)
+    if not subset or 'events' in subset:
+        _copy_table(table='events', query=events_query, path=full_path)
 
     unified_job_query = '''COPY (SELECT main_unifiedjob.id,
                            main_unifiedjob.polymorphic_ctype_id,
@@ -276,11 +289,12 @@ def copy_tables(since, full_path):
                            main_unifiedjob.instance_group_id
                            FROM main_unifiedjob
                            JOIN django_content_type ON main_unifiedjob.polymorphic_ctype_id = django_content_type.id
-                           JOIN main_organization ON main_organization.id = main_unifiedjob.organization_id
-                           WHERE main_unifiedjob.created > {}
-                           AND main_unifiedjob.launch_type != 'sync'
+                           LEFT JOIN main_organization ON main_organization.id = main_unifiedjob.organization_id
+                           WHERE (main_unifiedjob.created > {0} OR main_unifiedjob.finished > {0})
+                           AND main_unifiedjob.launch_type != 'sync'
                            ORDER BY main_unifiedjob.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
-    _copy_table(table='unified_jobs', query=unified_job_query, path=full_path)
+    if not subset or 'unified_jobs' in subset:
+        _copy_table(table='unified_jobs', query=unified_job_query, path=full_path)
 
     unified_job_template_query = '''COPY (SELECT main_unifiedjobtemplate.id,
                                     main_unifiedjobtemplate.polymorphic_ctype_id,
@@ -299,6 +313,71 @@ def copy_tables(since, full_path):
                                    main_unifiedjobtemplate.status
                                    FROM main_unifiedjobtemplate, django_content_type
                                    WHERE main_unifiedjobtemplate.polymorphic_ctype_id = django_content_type.id
-                                   ORDER BY main_unifiedjobtemplate.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
-    _copy_table(table='unified_job_template', query=unified_job_template_query, path=full_path)
+                                   ORDER BY main_unifiedjobtemplate.id ASC) TO STDOUT WITH CSV HEADER'''
+    if not subset or 'unified_job_template' in subset:
+        _copy_table(table='unified_job_template', query=unified_job_template_query, path=full_path)
+
+    workflow_job_node_query = '''COPY (SELECT main_workflowjobnode.id,
+                                 main_workflowjobnode.created,
+                                 main_workflowjobnode.modified,
+                                 main_workflowjobnode.job_id,
+                                 main_workflowjobnode.unified_job_template_id,
+                                 main_workflowjobnode.workflow_job_id,
+                                 main_workflowjobnode.inventory_id,
+                                 success_nodes.nodes AS success_nodes,
+                                 failure_nodes.nodes AS failure_nodes,
+                                 always_nodes.nodes AS always_nodes,
+                                 main_workflowjobnode.do_not_run,
+                                 main_workflowjobnode.all_parents_must_converge
+                                 FROM main_workflowjobnode
+                                 LEFT JOIN (
+                                     SELECT from_workflowjobnode_id, ARRAY_AGG(to_workflowjobnode_id) AS nodes
+                                     FROM main_workflowjobnode_success_nodes
+                                     GROUP BY from_workflowjobnode_id
+                                 ) success_nodes ON main_workflowjobnode.id = success_nodes.from_workflowjobnode_id
+                                 LEFT JOIN (
+                                     SELECT from_workflowjobnode_id, ARRAY_AGG(to_workflowjobnode_id) AS nodes
+                                     FROM main_workflowjobnode_failure_nodes
+                                     GROUP BY from_workflowjobnode_id
+                                 ) failure_nodes ON main_workflowjobnode.id = failure_nodes.from_workflowjobnode_id
+                                 LEFT JOIN (
+                                     SELECT from_workflowjobnode_id, ARRAY_AGG(to_workflowjobnode_id) AS nodes
+                                     FROM main_workflowjobnode_always_nodes
+                                     GROUP BY from_workflowjobnode_id
+                                 ) always_nodes ON main_workflowjobnode.id = always_nodes.from_workflowjobnode_id
+                                 WHERE main_workflowjobnode.modified > {}
+                                 ORDER BY main_workflowjobnode.id ASC) TO STDOUT WITH CSV HEADER'''.format(since.strftime("'%Y-%m-%d %H:%M:%S'"))
+    if not subset or 'workflow_job_node' in subset:
+        _copy_table(table='workflow_job_node', query=workflow_job_node_query, path=full_path)
+
+    workflow_job_template_node_query = '''COPY (SELECT main_workflowjobtemplatenode.id,
+                                          main_workflowjobtemplatenode.created,
+                                          main_workflowjobtemplatenode.modified,
+                                          main_workflowjobtemplatenode.unified_job_template_id,
+                                          main_workflowjobtemplatenode.workflow_job_template_id,
+                                          main_workflowjobtemplatenode.inventory_id,
+                                          success_nodes.nodes AS success_nodes,
+                                          failure_nodes.nodes AS failure_nodes,
+                                          always_nodes.nodes AS always_nodes,
+                                          main_workflowjobtemplatenode.all_parents_must_converge
+                                          FROM main_workflowjobtemplatenode
+                                          LEFT JOIN (
+                                              SELECT from_workflowjobtemplatenode_id, ARRAY_AGG(to_workflowjobtemplatenode_id) AS nodes
+                                              FROM main_workflowjobtemplatenode_success_nodes
+                                              GROUP BY from_workflowjobtemplatenode_id
+                                          ) success_nodes ON main_workflowjobtemplatenode.id = success_nodes.from_workflowjobtemplatenode_id
+                                          LEFT JOIN (
+                                              SELECT from_workflowjobtemplatenode_id, ARRAY_AGG(to_workflowjobtemplatenode_id) AS nodes
+                                              FROM main_workflowjobtemplatenode_failure_nodes
+                                              GROUP BY from_workflowjobtemplatenode_id
+                                          ) failure_nodes ON main_workflowjobtemplatenode.id = failure_nodes.from_workflowjobtemplatenode_id
+                                          LEFT JOIN (
+                                              SELECT from_workflowjobtemplatenode_id, ARRAY_AGG(to_workflowjobtemplatenode_id) AS nodes
+                                              FROM main_workflowjobtemplatenode_always_nodes
+                                              GROUP BY from_workflowjobtemplatenode_id
+                                          ) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
+                                          ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
+    if not subset or 'workflow_job_template_node' in subset:
+        _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
 
     return
```
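`copy_tables()` above gains a `subset` argument so callers can export only some of the analytics CSVs. The gating rule is small enough to show on its own; the sketch below applies it to a plain dict of table names instead of the PostgreSQL `COPY ... TO STDOUT` statements:

```python
def export_tables(queries, subset=None):
    """Same gating rule as the copy_tables() change above: when subset is
    None every table is exported, otherwise only the named tables are.
    'queries' maps table name -> SQL text; here we only collect the names
    that would be exported instead of streaming CSV output."""
    exported = []
    for table, query in queries.items():
        if not subset or table in subset:
            exported.append(table)   # real code would stream the query to <table>_table.csv
    return exported


queries = {
    'events': 'SELECT 1',                      # placeholder SQL
    'unified_jobs': 'SELECT 1',
    'workflow_job_node': 'SELECT 1',
    'workflow_job_template_node': 'SELECT 1',
}
assert export_tables(queries) == list(queries)                     # no subset: everything
assert export_tables(queries, subset={'events'}) == ['events']     # subset: only what was asked for
```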
@@ -667,7 +667,7 @@ register(
allow_blank=True,
default='',
label=_('Logging Aggregator Username'),
help_text=_('Username for external log aggregator (if required).'),
help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
category=_('Logging'),
category_slug='logging',
required=False,
@@ -679,7 +679,7 @@ register(
default='',
encrypted=True,
label=_('Logging Aggregator Password/Token'),
help_text=_('Password or authentication token for external log aggregator (if required).'),
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
category=_('Logging'),
category_slug='logging',
required=False,
@@ -787,6 +787,39 @@ register(
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
field_class=fields.IntegerField,
default=1,
min_value=1,
label=_('Maximum disk persistance for external log aggregation (in GB)'),
help_text=_('Amount of data to store (in gigabytes) during an outage of '
'the external log aggregator (defaults to 1). '
'Equivalent to the rsyslogd queue.maxdiskspace setting.'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_MAX_DISK_USAGE_PATH',
field_class=fields.CharField,
default='/var/lib/awx',
label=_('File system location for rsyslogd disk persistence'),
help_text=_('Location to persist logs that should be retried after an outage '
'of the external log aggregator (defaults to /var/lib/awx). '
'Equivalent to the rsyslogd queue.spoolDirectory setting.'),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_RSYSLOGD_DEBUG',
field_class=fields.BooleanField,
default=False,
label=_('Enable rsyslogd debugging'),
help_text=_('Enabled high verbosity debugging for rsyslogd. '
'Useful for debugging connection issues for external log aggregation.'),
category=_('Logging'),
category_slug='logging',
)


register(

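The three settings registered above feed the rsyslog configuration that AWX regenerates via reconfigure_rsyslog() (see the tasks.py hunk later in this diff). A hedged sketch of how the disk-persistence values might be rendered into rsyslog action queue parameters follows; the exact template AWX emits is not part of this hunk, so the parameter layout below is an assumption:

def render_rsyslog_queue_params(max_disk_usage_gb=1, spool_directory='/var/lib/awx'):
    # queue.maxDiskSpace and queue.spoolDirectory are the rsyslogd options the
    # help_text above refers to; the values come from the new LOG_AGGREGATOR_* settings.
    return (
        'queue.type="LinkedList" '
        'queue.saveOnShutdown="on" '
        f'queue.spoolDirectory="{spool_directory}" '
        f'queue.maxDiskSpace="{max_disk_usage_gb}g"'
    )

print(render_rsyslog_queue_params(2, '/var/lib/awx'))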
@@ -38,7 +38,7 @@ ENV_BLACKLIST = frozenset((
'AD_HOC_COMMAND_ID', 'REST_API_URL', 'REST_API_TOKEN', 'MAX_EVENT_RES',
'CALLBACK_QUEUE', 'CALLBACK_CONNECTION', 'CACHE',
'JOB_CALLBACK_DEBUG', 'INVENTORY_HOSTVARS',
'AWX_HOST', 'PROJECT_REVISION'
'AWX_HOST', 'PROJECT_REVISION', 'SUPERVISOR_WEB_CONFIG_PATH'
))

# loggers that may be called in process of emitting a log

@@ -1,6 +1,6 @@
import json
import logging
import datetime
import time
import hmac
import asyncio

@@ -29,7 +29,7 @@ class WebsocketSecretAuthHelper:

@classmethod
def construct_secret(cls):
nonce_serialized = "{}".format(int((datetime.datetime.utcnow() - datetime.datetime.fromtimestamp(0)).total_seconds()))
nonce_serialized = f"{int(time.time())}"
payload_dict = {
'secret': settings.BROADCAST_WEBSOCKET_SECRET,
'nonce': nonce_serialized
@@ -70,10 +70,12 @@ class WebsocketSecretAuthHelper:
raise ValueError("Invalid secret")

# Avoid timing attack and check the nonce after all the heavy lifting
now = datetime.datetime.utcnow()
nonce_parsed = datetime.datetime.fromtimestamp(int(nonce_parsed))
if (now - nonce_parsed).total_seconds() > nonce_tolerance:
raise ValueError("Potential replay attack or machine(s) time out of sync.")
now = int(time.time())
nonce_parsed = int(nonce_parsed)
nonce_diff = now - nonce_parsed
if abs(nonce_diff) > nonce_tolerance:
logger.warn(f"Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")
raise ValueError("Potential replay attack or machine(s) time out of sync by {nonce_diff} seconds.")

return True

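To make the new time.time()-based nonce flow above easier to follow, here is a small self-contained sketch; the payload framing, signature format and tolerance value are simplified assumptions and do not claim to match AWX's exact wire format:

import hmac
import json
import time
from hashlib import sha256

TOLERANCE = 300  # seconds of allowed clock skew; assumed value

def construct(secret: bytes) -> str:
    # The nonce is simply the integer epoch time, as in construct_secret() above.
    payload = json.dumps({'nonce': int(time.time())})
    signature = hmac.new(secret, payload.encode(), sha256).hexdigest()
    return f'{payload}:{signature}'

def verify(secret: bytes, message: str) -> bool:
    payload, signature = message.rsplit(':', 1)
    expected = hmac.new(secret, payload.encode(), sha256).hexdigest()
    if not hmac.compare_digest(expected, signature):
        raise ValueError('Invalid secret')
    # Check the nonce last; compare absolute skew against the tolerance.
    nonce_diff = int(time.time()) - json.loads(payload)['nonce']
    if abs(nonce_diff) > TOLERANCE:
        raise ValueError(f'Potential replay attack or clock skew of {nonce_diff} seconds.')
    return True

print(verify(b'shared-secret', construct(b'shared-secret')))  # True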
@@ -93,19 +95,17 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
try:
WebsocketSecretAuthHelper.is_authorized(self.scope)
except Exception:
# TODO: log ip of connected client
logger.warn("Broadcast client failed to authorize.")
logger.warn(f"client '{self.channel_name}' failed to authorize against the broadcast endpoint.")
await self.close()
return

# TODO: log ip of connected client
logger.info(f"Broadcast client connected.")
await self.accept()
await self.channel_layer.group_add(settings.BROADCAST_WEBSOCKET_GROUP_NAME, self.channel_name)
logger.info(f"client '{self.channel_name}' joined the broadcast group.")

async def disconnect(self, code):
# TODO: log ip of disconnected client
logger.info("Client disconnected")
logger.info("client '{self.channel_name}' disconnected from the broadcast group.")
await self.channel_layer.group_discard(settings.BROADCAST_WEBSOCKET_GROUP_NAME, self.channel_name)

async def internal_message(self, event):
await self.send(event['text'])
@@ -130,6 +130,14 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
|
||||
await self.send_json({"close": True})
|
||||
await self.close()
|
||||
|
||||
async def disconnect(self, code):
|
||||
current_groups = set(self.scope['session'].pop('groups') if 'groups' in self.scope['session'] else [])
|
||||
for group_name in current_groups:
|
||||
await self.channel_layer.group_discard(
|
||||
group_name,
|
||||
self.channel_name,
|
||||
)
|
||||
|
||||
@database_sync_to_async
|
||||
def user_can_see_object_id(self, user_access, oid):
|
||||
# At this point user is a channels.auth.UserLazyObject object
|
||||
@@ -187,7 +195,6 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
|
||||
group_name,
|
||||
self.channel_name
|
||||
)
|
||||
logger.debug(f"Channel {self.channel_name} left groups {old_groups} and joined {new_groups_exclusive}")
|
||||
self.scope['session']['groups'] = new_groups
|
||||
await self.send_json({
|
||||
"groups_current": list(new_groups),
|
||||
@@ -213,31 +220,6 @@ def _dump_payload(payload):
|
||||
return None
|
||||
|
||||
|
||||
async def emit_channel_notification_async(group, payload):
|
||||
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa
|
||||
|
||||
payload_dumped = _dump_payload(payload)
|
||||
if payload_dumped is None:
|
||||
return
|
||||
|
||||
channel_layer = get_channel_layer()
|
||||
await channel_layer.group_send(
|
||||
group,
|
||||
{
|
||||
"type": "internal.message",
|
||||
"text": payload_dumped
|
||||
},
|
||||
)
|
||||
|
||||
await channel_layer.group_send(
|
||||
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
|
||||
{
|
||||
"type": "internal.message",
|
||||
"text": wrap_broadcast_msg(group, payload_dumped),
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def emit_channel_notification(group, payload):
|
||||
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa
|
||||
|
||||
|
||||
@@ -32,14 +32,33 @@ base_inputs = {
'type': 'string',
'multiline': True,
'help_text': _('The CA certificate used to verify the SSL certificate of the Vault server')
}],
}, {
'id': 'role_id',
'label': _('AppRole role_id'),
'type': 'string',
'multiline': False,
'help_text': _('The Role ID for AppRole Authentication')
}, {
'id': 'secret_id',
'label': _('AppRole secret_id'),
'type': 'string',
'multiline': False,
'secret': True,
'help_text': _('The Secret ID for AppRole Authentication')
}
],
'metadata': [{
'id': 'secret_path',
'label': _('Path to Secret'),
'type': 'string',
'help_text': _('The path to the secret stored in the secret backend e.g, /some/secret/')
},{
'id': 'auth_path',
'label': _('Path to Auth'),
'type': 'string',
'help_text': _('The path where the Authentication method is mounted e.g, approle')
}],
'required': ['url', 'token', 'secret_path'],
'required': ['url', 'secret_path'],
}

hashi_kv_inputs = copy.deepcopy(base_inputs)
@@ -88,8 +107,43 @@ hashi_ssh_inputs['metadata'] = [{
hashi_ssh_inputs['required'].extend(['public_key', 'role'])


def handle_auth(**kwargs):
token = None

if kwargs.get('token'):
token = kwargs['token']
elif kwargs.get('role_id') and kwargs.get('secret_id'):
token = approle_auth(**kwargs)
else:
raise Exception('Either token or AppRole parameters must be set')

return token


def approle_auth(**kwargs):
role_id = kwargs['role_id']
secret_id = kwargs['secret_id']
auth_path = kwargs.get('auth_path') or 'approle'

url = urljoin(kwargs['url'], 'v1')
cacert = kwargs.get('cacert', None)

request_kwargs = {'timeout': 30}
if cacert:
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())

# AppRole Login
request_kwargs['json'] = {'role_id': role_id, 'secret_id': secret_id}
sess = requests.Session()
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
resp = sess.post(request_url, **request_kwargs)
resp.raise_for_status()
token = resp.json()['auth']['client_token']
return token


def kv_backend(**kwargs):
token = kwargs['token']
token = handle_auth(**kwargs)
url = kwargs['url']
secret_path = kwargs['secret_path']
secret_backend = kwargs.get('secret_backend', None)
@@ -144,7 +198,7 @@ def kv_backend(**kwargs):


def ssh_backend(**kwargs):
token = kwargs['token']
token = handle_auth(**kwargs)
url = urljoin(kwargs['url'], 'v1')
secret_path = kwargs['secret_path']
role = kwargs['role']

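Illustrative call patterns for the handle_auth() helper introduced above. The keyword names mirror the credential inputs defined earlier in this file (url, token, role_id, secret_id, auth_path); the URLs and IDs below are placeholders:

# Token auth still works as before:
token = handle_auth(url='https://vault.example.com', token='s.xxxxxxxx')

# AppRole auth exchanges role_id/secret_id for a client token via
# POST <url>/v1/auth/<auth_path>/login:
token = handle_auth(
    url='https://vault.example.com',
    role_id='my-role-id',
    secret_id='my-secret-id',
    auth_path='approle',  # optional; falls back to 'approle' when omitted
)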
@@ -22,7 +22,7 @@ class Scheduler(Scheduler):

def run():
ppid = os.getppid()
logger.warn(f'periodic beat started')
logger.warn('periodic beat started')
while True:
if os.getppid() != ppid:
# if the parent PID changes, this process has been orphaned

@@ -118,9 +118,14 @@ class AWXConsumerRedis(AWXConsumerBase):

queue = redis.Redis.from_url(settings.BROKER_URL)
while True:
res = queue.blpop(self.queues)
res = json.loads(res[1])
self.process_task(res)
try:
res = queue.blpop(self.queues)
res = json.loads(res[1])
self.process_task(res)
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis")
if self.should_stop:
return


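For context on the try/except added above: redis-py's blpop() blocks until a message is available and returns a (queue_name, payload) tuple, which is why only res[1] is JSON-decoded. A tiny standalone sketch (the local redis URL, queue name and task body are placeholders):

import json
import redis

queue = redis.Redis.from_url('redis://localhost:6379/0')
queue.rpush('tower_broadcast_all', json.dumps({'task': 'noop'}))

# blpop blocks, then hands back (queue_name, payload); decode only the payload.
queue_name, payload = queue.blpop(['tower_broadcast_all'])
task = json.loads(payload)
print(queue_name, task)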
@@ -91,7 +91,7 @@ class CallbackBrokerWorker(BaseWorker):
for e in events:
try:
if (
isinstance(exc, IntegrityError),
isinstance(exc, IntegrityError) and
getattr(e, 'host_id', '')
):
# this is one potential IntegrityError we can

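Why the one-character change above is a real fix: two comma-separated expressions inside parentheses build a tuple, and a non-empty tuple is always truthy, so the branch ran even when exc was not an IntegrityError. A stand-in exception class is used below so the snippet runs without Django installed:

class IntegrityError(Exception):
    pass

class Event:
    host_id = 42

exc, e = ValueError("boom"), Event()

broken = (isinstance(exc, IntegrityError), getattr(e, 'host_id', ''))  # 2-tuple, always truthy
fixed = isinstance(exc, IntegrityError) and getattr(e, 'host_id', '')  # short-circuits as intended

print(bool(broken), bool(fixed))  # prints: True False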
@@ -268,13 +268,6 @@ class IsolatedManager(object):
|
||||
# in the final sync
|
||||
self.consume_events()
|
||||
|
||||
# emit an EOF event
|
||||
event_data = {
|
||||
'event': 'EOF',
|
||||
'final_counter': len(self.handled_events)
|
||||
}
|
||||
self.event_handler(event_data)
|
||||
|
||||
return status, rc
|
||||
|
||||
def consume_events(self):
|
||||
@@ -287,7 +280,7 @@ class IsolatedManager(object):
|
||||
if os.path.exists(events_path):
|
||||
for event in set(os.listdir(events_path)) - self.handled_events:
|
||||
path = os.path.join(events_path, event)
|
||||
if os.path.exists(path):
|
||||
if os.path.exists(path) and os.path.isfile(path):
|
||||
try:
|
||||
event_data = json.load(
|
||||
open(os.path.join(events_path, event), 'r')
|
||||
@@ -420,8 +413,4 @@ class IsolatedManager(object):
|
||||
status, rc = self.dispatch(playbook, module, module_args)
|
||||
if status == 'successful':
|
||||
status, rc = self.check()
|
||||
else:
|
||||
# emit an EOF event
|
||||
event_data = {'event': 'EOF', 'final_counter': 0}
|
||||
self.event_handler(event_data)
|
||||
return status, rc
|
||||
|
||||
@@ -169,7 +169,7 @@ class AnsibleInventoryLoader(object):
|
||||
self.tmp_private_dir = build_proot_temp_dir()
|
||||
logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
|
||||
kwargs['proot_temp_dir'] = self.tmp_private_dir
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source)]
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.INVENTORY_COLLECTIONS_ROOT]
|
||||
logger.debug("Running from `{}` working directory.".format(cwd))
|
||||
|
||||
if self.venv_path != settings.ANSIBLE_VENV_PATH:
|
||||
@@ -496,12 +496,6 @@ class Command(BaseCommand):
|
||||
group_names = all_group_names[offset:(offset + self._batch_size)]
|
||||
for group_pk in groups_qs.filter(name__in=group_names).values_list('pk', flat=True):
|
||||
del_group_pks.discard(group_pk)
|
||||
if self.inventory_source.deprecated_group_id in del_group_pks: # TODO: remove in 3.3
|
||||
logger.warning(
|
||||
'Group "%s" from v1 API is not deleted by overwrite',
|
||||
self.inventory_source.deprecated_group.name
|
||||
)
|
||||
del_group_pks.discard(self.inventory_source.deprecated_group_id)
|
||||
# Now delete all remaining groups in batches.
|
||||
all_del_pks = sorted(list(del_group_pks))
|
||||
for offset in range(0, len(all_del_pks), self._batch_size):
|
||||
@@ -534,12 +528,6 @@ class Command(BaseCommand):
|
||||
# Set of all host pks managed by this inventory source
|
||||
all_source_host_pks = self._existing_host_pks()
|
||||
for db_group in db_groups.all():
|
||||
if self.inventory_source.deprecated_group_id == db_group.id: # TODO: remove in 3.3
|
||||
logger.debug(
|
||||
'Group "%s" from v1 API child group/host connections preserved',
|
||||
db_group.name
|
||||
)
|
||||
continue
|
||||
# Delete child group relationships not present in imported data.
|
||||
db_children = db_group.children
|
||||
db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
|
||||
|
||||
@@ -7,7 +7,6 @@ from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection
|
||||
|
||||
from awx.main.utils.handlers import AWXProxyHandler
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
@@ -56,11 +55,6 @@ class Command(BaseCommand):
|
||||
reaper.reap()
|
||||
consumer = None
|
||||
|
||||
# don't ship external logs inside the dispatcher's parent process
|
||||
# this exists to work around a race condition + deadlock bug on fork
|
||||
# in cpython itself:
|
||||
# https://bugs.python.org/issue37429
|
||||
AWXProxyHandler.disable()
|
||||
try:
|
||||
queues = ['tower_broadcast_all', get_local_queuename()]
|
||||
consumer = AWXConsumerPG(
|
||||
|
||||
@@ -2,10 +2,20 @@
|
||||
# All Rights Reserved.
|
||||
import logging
|
||||
import asyncio
|
||||
import datetime
|
||||
import re
|
||||
import redis
|
||||
from datetime import datetime as dt
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db.models import Q
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
BroadcastWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.wsbroadcast import BroadcastWebsocketManager
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
@@ -14,7 +24,106 @@ logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
class Command(BaseCommand):
|
||||
help = 'Launch the websocket broadcaster'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--status', dest='status', action='store_true',
|
||||
help='print the internal state of any running broadcast websocket')
|
||||
|
||||
@classmethod
|
||||
def display_len(cls, s):
|
||||
return len(re.sub('\x1b.*?m', '', s))
|
||||
|
||||
@classmethod
|
||||
def _format_lines(cls, host_stats, padding=5):
|
||||
widths = [0 for i in host_stats[0]]
|
||||
for entry in host_stats:
|
||||
for i, e in enumerate(entry):
|
||||
if Command.display_len(e) > widths[i]:
|
||||
widths[i] = Command.display_len(e)
|
||||
paddings = [padding for i in widths]
|
||||
|
||||
lines = []
|
||||
for entry in host_stats:
|
||||
line = ""
|
||||
for pad, width, value in zip(paddings, widths, entry):
|
||||
if len(value) > Command.display_len(value):
|
||||
width += len(value) - Command.display_len(value)
|
||||
total_width = width + pad
|
||||
line += f'{value:{total_width}}'
|
||||
lines.append(line)
|
||||
return lines
|
||||
|
||||
@classmethod
|
||||
def get_connection_status(cls, me, hostnames, data):
|
||||
host_stats = [('hostname', 'state', 'start time', 'duration (sec)')]
|
||||
for h in hostnames:
|
||||
connection_color = '91' # red
|
||||
h_safe = safe_name(h)
|
||||
prefix = f'awx_{h_safe}'
|
||||
connection_state = data.get(f'{prefix}_connection', 'N/A')
|
||||
connection_started = 'N/A'
|
||||
connection_duration = 'N/A'
|
||||
if connection_state is None:
|
||||
connection_state = 'unknown'
|
||||
if connection_state == 'connected':
|
||||
connection_color = '92' # green
|
||||
connection_started = data.get(f'{prefix}_connection_start', 'Error')
|
||||
if connection_started != 'Error':
|
||||
connection_started = datetime.datetime.fromtimestamp(connection_started)
|
||||
connection_duration = int((dt.now() - connection_started).total_seconds())
|
||||
|
||||
connection_state = f'\033[{connection_color}m{connection_state}\033[0m'
|
||||
|
||||
host_stats.append((h, connection_state, str(connection_started), str(connection_duration)))
|
||||
|
||||
return host_stats
|
||||
|
||||
@classmethod
|
||||
def get_connection_stats(cls, me, hostnames, data):
|
||||
host_stats = [('hostname', 'total', 'per minute')]
|
||||
for h in hostnames:
|
||||
h_safe = safe_name(h)
|
||||
prefix = f'awx_{h_safe}'
|
||||
messages_total = data.get(f'{prefix}_messages_received', '0')
|
||||
messages_per_minute = data.get(f'{prefix}_messages_received_per_minute', '0')
|
||||
|
||||
host_stats.append((h, str(int(messages_total)), str(int(messages_per_minute))))
|
||||
|
||||
return host_stats
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
try:
|
||||
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
|
||||
except redis.exceptions.ConnectionError as e:
|
||||
print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
|
||||
return
|
||||
|
||||
data = {}
|
||||
for family in stats_all:
|
||||
if family.type == 'gauge' and len(family.samples) > 1:
|
||||
for sample in family.samples:
|
||||
if sample.value >= 1:
|
||||
data[family.name] = sample.labels[family.name]
|
||||
break
|
||||
else:
|
||||
data[family.name] = family.samples[0].value
|
||||
me = Instance.objects.me()
|
||||
hostnames = [i.hostname for i in Instance.objects.exclude(Q(hostname=me.hostname) | Q(rampart_groups__controller__isnull=False))]
|
||||
|
||||
host_stats = Command.get_connection_status(me, hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'Broadcast websocket connection status from "{me.hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
host_stats = Command.get_connection_stats(me, hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'\nBroadcast websocket connection stats from "{me.hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
broadcast_websocket_mgr = BroadcastWebsocketManager()
|
||||
task = broadcast_websocket_mgr.start()
|
||||
|
||||
@@ -121,6 +121,17 @@ class InstanceManager(models.Manager):
|
||||
if not hostname:
|
||||
hostname = settings.CLUSTER_HOST_ID
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
# detect any instances with the same IP address.
|
||||
# if one exists, set it to None
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = None
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
|
||||
instance = self.filter(hostname=hostname)
|
||||
if instance.exists():
|
||||
instance = instance.get()
|
||||
|
||||
@@ -12,23 +12,19 @@ import urllib.parse
|
||||
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User
|
||||
from django.db.models.signals import post_save
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
from django.db import IntegrityError, connection
|
||||
from django.utils.functional import curry
|
||||
from django.db import connection
|
||||
from django.shortcuts import get_object_or_404, redirect
|
||||
from django.apps import apps
|
||||
from django.utils.deprecation import MiddlewareMixin
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.urls import reverse, resolve
|
||||
|
||||
from awx.main.models import ActivityStream
|
||||
from awx.main.utils.named_url_graph import generate_graph, GraphNode
|
||||
from awx.conf import fields, register
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.middleware')
|
||||
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
|
||||
perf_logger = logging.getLogger('awx.analytics.performance')
|
||||
|
||||
|
||||
@@ -76,61 +72,6 @@ class TimingMiddleware(threading.local, MiddlewareMixin):
|
||||
return filepath
|
||||
|
||||
|
||||
class ActivityStreamMiddleware(threading.local, MiddlewareMixin):
|
||||
|
||||
def __init__(self, get_response=None):
|
||||
self.disp_uid = None
|
||||
self.instance_ids = []
|
||||
super().__init__(get_response)
|
||||
|
||||
def process_request(self, request):
|
||||
if hasattr(request, 'user') and request.user.is_authenticated:
|
||||
user = request.user
|
||||
else:
|
||||
user = None
|
||||
|
||||
set_actor = curry(self.set_actor, user)
|
||||
self.disp_uid = str(uuid.uuid1())
|
||||
self.instance_ids = []
|
||||
post_save.connect(set_actor, sender=ActivityStream, dispatch_uid=self.disp_uid, weak=False)
|
||||
|
||||
def process_response(self, request, response):
|
||||
drf_request = getattr(request, 'drf_request', None)
|
||||
drf_user = getattr(drf_request, 'user', None)
|
||||
if self.disp_uid is not None:
|
||||
post_save.disconnect(dispatch_uid=self.disp_uid)
|
||||
|
||||
for instance in ActivityStream.objects.filter(id__in=self.instance_ids):
|
||||
if drf_user and drf_user.id:
|
||||
instance.actor = drf_user
|
||||
try:
|
||||
instance.save(update_fields=['actor'])
|
||||
analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
|
||||
extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
|
||||
actor=drf_user.username, operation=instance.operation,
|
||||
object1=instance.object1, object2=instance.object2))
|
||||
except IntegrityError:
|
||||
logger.debug("Integrity Error saving Activity Stream instance for id : " + str(instance.id))
|
||||
# else:
|
||||
# obj1_type_actual = instance.object1_type.split(".")[-1]
|
||||
# if obj1_type_actual in ("InventoryUpdate", "ProjectUpdate", "Job") and instance.id is not None:
|
||||
# instance.delete()
|
||||
|
||||
self.instance_ids = []
|
||||
return response
|
||||
|
||||
def set_actor(self, user, sender, instance, **kwargs):
|
||||
if sender == ActivityStream:
|
||||
if isinstance(user, User) and instance.actor is None:
|
||||
user = User.objects.filter(id=user.id)
|
||||
if user.exists():
|
||||
user = user[0]
|
||||
instance.actor = user
|
||||
else:
|
||||
if instance.id not in self.instance_ids:
|
||||
self.instance_ids.append(instance.id)
|
||||
|
||||
|
||||
class SessionTimeoutMiddleware(MiddlewareMixin):
|
||||
"""
|
||||
Resets the session timeout for both the UI and the actual session for the API
|
||||
|
||||
@@ -464,7 +464,7 @@ class Migration(migrations.Migration):
|
||||
migrations.AddField(
|
||||
model_name='unifiedjob',
|
||||
name='instance_group',
|
||||
field=models.ForeignKey(on_delete=models.SET_NULL, default=None, blank=True, to='main.InstanceGroup', help_text='The Rampart/Instance group the job was run under', null=True),
|
||||
field=models.ForeignKey(on_delete=models.SET_NULL, default=None, blank=True, to='main.InstanceGroup', help_text='The Instance group the job was run under', null=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjobtemplate',
|
||||
|
||||
@@ -16,6 +16,6 @@ class Migration(migrations.Migration):
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='instance_group',
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The Rampart/Instance group the job was run under', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, to='main.InstanceGroup'),
|
||||
field=models.ForeignKey(blank=True, default=None, help_text='The Instance group the job was run under', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, to='main.InstanceGroup'),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -0,0 +1,39 @@
|
||||
# Generated by Django 2.2.11 on 2020-04-03 00:11
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def remove_manual_inventory_sources(apps, schema_editor):
|
||||
'''Previously we would automatically create inventory sources after
|
||||
Group creation and we would use the parent Group as our interface for the user.
|
||||
During that process we would create InventorySource that had a source of "manual".
|
||||
'''
|
||||
InventoryUpdate = apps.get_model('main', 'InventoryUpdate')
|
||||
InventoryUpdate.objects.filter(source='').delete()
|
||||
InventorySource = apps.get_model('main', 'InventorySource')
|
||||
InventorySource.objects.filter(source='').delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0113_v370_event_bigint'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='inventorysource',
|
||||
name='deprecated_group',
|
||||
),
|
||||
migrations.RunPython(remove_manual_inventory_sources),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
|
||||
),
|
||||
]
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
# Django
|
||||
from django.conf import settings # noqa
|
||||
from django.db import connection, ProgrammingError
|
||||
from django.db import connection
|
||||
from django.db.models.signals import pre_delete # noqa
|
||||
|
||||
# AWX
|
||||
@@ -91,14 +91,13 @@ def enforce_bigint_pk_migration():
|
||||
'main_systemjobevent'
|
||||
):
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _old_{tblname}')
|
||||
if cursor.fetchone():
|
||||
from awx.main.tasks import migrate_legacy_event_data
|
||||
migrate_legacy_event_data.apply_async([tblname])
|
||||
except ProgrammingError:
|
||||
# the table is gone (migration is unnecessary)
|
||||
pass
|
||||
cursor.execute(
|
||||
'SELECT 1 FROM information_schema.tables WHERE table_name=%s',
|
||||
(f'_old_{tblname}',)
|
||||
)
|
||||
if bool(cursor.rowcount):
|
||||
from awx.main.tasks import migrate_legacy_event_data
|
||||
migrate_legacy_event_data.apply_async([tblname])
|
||||
|
||||
|
||||
def cleanup_created_modified_by(sender, **kwargs):
|
||||
|
||||
@@ -799,6 +799,10 @@ ManagedCredentialType(
|
||||
'id': 'project',
|
||||
'label': ugettext_noop('Project (Tenant Name)'),
|
||||
'type': 'string',
|
||||
}, {
|
||||
'id': 'project_domain_name',
|
||||
'label': ugettext_noop('Project (Domain Name)'),
|
||||
'type': 'string',
|
||||
}, {
|
||||
'id': 'domain',
|
||||
'label': ugettext_noop('Domain Name'),
|
||||
|
||||
@@ -77,6 +77,8 @@ def _openstack_data(cred):
|
||||
username=cred.get_input('username', default=''),
|
||||
password=cred.get_input('password', default=''),
|
||||
project_name=cred.get_input('project', default=''))
|
||||
if cred.has_input('project_domain_name'):
|
||||
openstack_auth['project_domain_name'] = cred.get_input('project_domain_name', default='')
|
||||
if cred.has_input('domain'):
|
||||
openstack_auth['domain_name'] = cred.get_input('domain', default='')
|
||||
verify_state = cred.get_input('verify_ssl', default=True)
|
||||
|
||||
@@ -821,7 +821,6 @@ class InventorySourceOptions(BaseModel):
|
||||
injectors = dict()
|
||||
|
||||
SOURCE_CHOICES = [
|
||||
('', _('Manual')),
|
||||
('file', _('File, Directory or Script')),
|
||||
('scm', _('Sourced from a Project')),
|
||||
('ec2', _('Amazon EC2')),
|
||||
@@ -932,8 +931,8 @@ class InventorySourceOptions(BaseModel):
|
||||
source = models.CharField(
|
||||
max_length=32,
|
||||
choices=SOURCE_CHOICES,
|
||||
blank=True,
|
||||
default='',
|
||||
blank=False,
|
||||
default=None,
|
||||
)
|
||||
source_path = models.CharField(
|
||||
max_length=1024,
|
||||
@@ -1237,14 +1236,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
|
||||
deprecated_group = models.OneToOneField(
|
||||
'Group',
|
||||
related_name='deprecated_inventory_source',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
|
||||
source_project = models.ForeignKey(
|
||||
'Project',
|
||||
related_name='scm_inventory_sources',
|
||||
@@ -1414,16 +1405,6 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
started=list(started_notification_templates),
|
||||
success=list(success_notification_templates))
|
||||
|
||||
def clean_source(self): # TODO: remove in 3.3
|
||||
source = self.source
|
||||
if source and self.deprecated_group:
|
||||
qs = self.deprecated_group.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
|
||||
existing_sources = qs.exclude(pk=self.pk)
|
||||
if existing_sources.count():
|
||||
s = u', '.join([x.deprecated_group.name for x in existing_sources])
|
||||
raise ValidationError(_('Unable to configure this item for cloud sync. It is already managed by %s.') % s)
|
||||
return source
|
||||
|
||||
def clean_update_on_project_update(self):
|
||||
if self.update_on_project_update is True and \
|
||||
self.source == 'scm' and \
|
||||
@@ -1512,8 +1493,6 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
|
||||
if self.inventory_source.inventory is not None:
|
||||
websocket_data.update(dict(inventory_id=self.inventory_source.inventory.pk))
|
||||
|
||||
if self.inventory_source.deprecated_group is not None: # TODO: remove in 3.3
|
||||
websocket_data.update(dict(group_id=self.inventory_source.deprecated_group.id))
|
||||
return websocket_data
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
@@ -1633,6 +1612,11 @@ class PluginFileInjector(object):
|
||||
# base injector should be one of None, "managed", or "template"
|
||||
# this dictates which logic to borrow from playbook injectors
|
||||
base_injector = None
|
||||
# every source should have collection, but these are set here
|
||||
# so that a source without a collection will have null values
|
||||
namespace = None
|
||||
collection = None
|
||||
collection_migration = '2.9' # Starting with this version, we use collections
|
||||
|
||||
def __init__(self, ansible_version):
|
||||
# This is InventoryOptions instance, could be source or inventory update
|
||||
@@ -1659,7 +1643,11 @@ class PluginFileInjector(object):
|
||||
"""
|
||||
if self.plugin_name is None:
|
||||
raise NotImplementedError('At minimum the plugin name is needed for inventory plugin use.')
|
||||
return {'plugin': self.plugin_name}
|
||||
if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
|
||||
proper_name = f'{self.namespace}.{self.collection}.{self.plugin_name}'
|
||||
else:
|
||||
proper_name = self.plugin_name
|
||||
return {'plugin': proper_name}
|
||||
|
||||
def inventory_contents(self, inventory_update, private_data_dir):
|
||||
"""Returns a string that is the content for the inventory file for the inventory plugin
|
||||
@@ -1714,7 +1702,10 @@ class PluginFileInjector(object):
|
||||
return injected_env
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
return self._get_shared_env(inventory_update, private_data_dir, private_data_files)
|
||||
env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
|
||||
if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.INVENTORY_COLLECTIONS_ROOT
|
||||
return env
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
injected_env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
|
||||
@@ -1759,6 +1750,8 @@ class azure_rm(PluginFileInjector):
|
||||
initial_version = '2.8' # Driven by unsafe group names issue, hostvars, host names
|
||||
ini_env_reference = 'AZURE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'azure'
|
||||
collection = 'azcollection'
|
||||
|
||||
def get_plugin_env(self, *args, **kwargs):
|
||||
ret = super(azure_rm, self).get_plugin_env(*args, **kwargs)
|
||||
@@ -1890,9 +1883,11 @@ class azure_rm(PluginFileInjector):
|
||||
class ec2(PluginFileInjector):
|
||||
plugin_name = 'aws_ec2'
|
||||
# blocked by https://github.com/ansible/ansible/issues/54059
|
||||
# initial_version = '2.8' # Driven by unsafe group names issue, parent_group templating, hostvars
|
||||
initial_version = '2.9' # Driven by unsafe group names issue, parent_group templating, hostvars
|
||||
ini_env_reference = 'EC2_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'amazon'
|
||||
collection = 'aws'
|
||||
|
||||
def get_plugin_env(self, *args, **kwargs):
|
||||
ret = super(ec2, self).get_plugin_env(*args, **kwargs)
|
||||
@@ -2032,6 +2027,9 @@ class ec2(PluginFileInjector):
|
||||
grouping_data['key'] += ' | regex_replace("{rx}", "_")'.format(rx=legacy_regex)
|
||||
# end compatibility content
|
||||
|
||||
if source_vars.get('iam_role_arn', None):
|
||||
ret['iam_role_arn'] = source_vars['iam_role_arn']
|
||||
|
||||
# This was an allowed ec2.ini option, also plugin option, so pass through
|
||||
if source_vars.get('boto_profile', None):
|
||||
ret['boto_profile'] = source_vars['boto_profile']
|
||||
@@ -2040,6 +2038,10 @@ class ec2(PluginFileInjector):
|
||||
# Using the plugin, but still want dashes whitelisted
|
||||
ret['use_contrib_script_compatible_sanitization'] = True
|
||||
|
||||
if source_vars.get('nested_groups') is False:
|
||||
for this_keyed_group in keyed_groups:
|
||||
this_keyed_group.pop('parent_group', None)
|
||||
|
||||
if keyed_groups:
|
||||
ret['keyed_groups'] = keyed_groups
|
||||
|
||||
@@ -2051,18 +2053,35 @@ class ec2(PluginFileInjector):
|
||||
compose_dict.update(self._compat_compose_vars())
|
||||
# plugin provides "aws_ec2", but not this which the script gave
|
||||
ret['groups'] = {'ec2': True}
|
||||
# public_ip as hostname is non-default plugin behavior, script behavior
|
||||
ret['hostnames'] = [
|
||||
'network-interface.addresses.association.public-ip',
|
||||
'dns-name',
|
||||
'private-dns-name'
|
||||
]
|
||||
if source_vars.get('hostname_variable') is not None:
|
||||
hnames = []
|
||||
for expr in source_vars.get('hostname_variable').split(','):
|
||||
if expr == 'public_dns_name':
|
||||
hnames.append('dns-name')
|
||||
elif not expr.startswith('tag:') and '_' in expr:
|
||||
hnames.append(expr.replace('_', '-'))
|
||||
else:
|
||||
hnames.append(expr)
|
||||
ret['hostnames'] = hnames
|
||||
else:
|
||||
# public_ip as hostname is non-default plugin behavior, script behavior
|
||||
ret['hostnames'] = [
|
||||
'network-interface.addresses.association.public-ip',
|
||||
'dns-name',
|
||||
'private-dns-name'
|
||||
]
|
||||
# The script returned only running state by default, the plugin does not
|
||||
# https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
|
||||
# options: pending | running | shutting-down | terminated | stopping | stopped
|
||||
inst_filters['instance-state-name'] = ['running']
|
||||
# end compatibility content
|
||||
|
||||
if source_vars.get('destination_variable') or source_vars.get('vpc_destination_variable'):
|
||||
for fd in ('destination_variable', 'vpc_destination_variable'):
|
||||
if source_vars.get(fd):
|
||||
compose_dict['ansible_host'] = source_vars.get(fd)
|
||||
break
|
||||
|
||||
if compose_dict:
|
||||
ret['compose'] = compose_dict
|
||||
|
||||
@@ -2129,6 +2148,8 @@ class gce(PluginFileInjector):
|
||||
initial_version = '2.8' # Driven by unsafe group names issue, hostvars
|
||||
ini_env_reference = 'GCE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'google'
|
||||
collection = 'cloud'
|
||||
|
||||
def get_plugin_env(self, *args, **kwargs):
|
||||
ret = super(gce, self).get_plugin_env(*args, **kwargs)
|
||||
@@ -2229,14 +2250,119 @@ class gce(PluginFileInjector):
|
||||
|
||||
|
||||
class vmware(PluginFileInjector):
|
||||
# plugin_name = 'vmware_vm_inventory' # FIXME: implement me
|
||||
plugin_name = 'vmware_vm_inventory'
|
||||
initial_version = '2.9'
|
||||
ini_env_reference = 'VMWARE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'community'
|
||||
collection = 'vmware'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'vmware_inventory.py' # exception
|
||||
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(vmware, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['strict'] = False
|
||||
# Documentation of props, see
|
||||
# https://github.com/ansible/ansible/blob/devel/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst
|
||||
UPPERCASE_PROPS = [
|
||||
"availableField",
|
||||
"configIssue",
|
||||
"configStatus",
|
||||
"customValue", # optional
|
||||
"datastore",
|
||||
"effectiveRole",
|
||||
"guestHeartbeatStatus", # optonal
|
||||
"layout", # optional
|
||||
"layoutEx", # optional
|
||||
"name",
|
||||
"network",
|
||||
"overallStatus",
|
||||
"parentVApp", # optional
|
||||
"permission",
|
||||
"recentTask",
|
||||
"resourcePool",
|
||||
"rootSnapshot",
|
||||
"snapshot", # optional
|
||||
"tag",
|
||||
"triggeredAlarmState",
|
||||
"value"
|
||||
]
|
||||
NESTED_PROPS = [
|
||||
"capability",
|
||||
"config",
|
||||
"guest",
|
||||
"runtime",
|
||||
"storage",
|
||||
"summary", # repeat of other properties
|
||||
]
|
||||
ret['properties'] = UPPERCASE_PROPS + NESTED_PROPS
|
||||
ret['compose'] = {'ansible_host': 'guest.ipAddress'} # default value
|
||||
ret['compose']['ansible_ssh_host'] = ret['compose']['ansible_host']
|
||||
# the ansible_uuid was unique every host, every import, from the script
|
||||
ret['compose']['ansible_uuid'] = '99999999 | random | to_uuid'
|
||||
for prop in UPPERCASE_PROPS:
|
||||
if prop == prop.lower():
|
||||
continue
|
||||
ret['compose'][prop.lower()] = prop
|
||||
ret['with_nested_properties'] = True
|
||||
# ret['property_name_format'] = 'lower_case' # only dacrystal/topic/vmware-inventory-plugin-property-format
|
||||
|
||||
# process custom options
|
||||
vmware_opts = dict(inventory_update.source_vars_dict.items())
|
||||
if inventory_update.instance_filters:
|
||||
vmware_opts.setdefault('host_filters', inventory_update.instance_filters)
|
||||
if inventory_update.group_by:
|
||||
vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)
|
||||
|
||||
alias_pattern = vmware_opts.get('alias_pattern')
|
||||
if alias_pattern:
|
||||
ret.setdefault('hostnames', [])
|
||||
for alias in alias_pattern.split(','): # make best effort
|
||||
striped_alias = alias.replace('{', '').replace('}', '').strip() # make best effort
|
||||
if not striped_alias:
|
||||
continue
|
||||
ret['hostnames'].append(striped_alias)
|
||||
|
||||
host_pattern = vmware_opts.get('host_pattern') # not working in script
|
||||
if host_pattern:
|
||||
stripped_hp = host_pattern.replace('{', '').replace('}', '').strip() # make best effort
|
||||
ret['compose']['ansible_host'] = stripped_hp
|
||||
ret['compose']['ansible_ssh_host'] = stripped_hp
|
||||
|
||||
host_filters = vmware_opts.get('host_filters')
|
||||
if host_filters:
|
||||
ret.setdefault('filters', [])
|
||||
for hf in host_filters.split(','):
|
||||
striped_hf = hf.replace('{', '').replace('}', '').strip() # make best effort
|
||||
if not striped_hf:
|
||||
continue
|
||||
ret['filters'].append(striped_hf)
|
||||
else:
|
||||
# default behavior filters by power state
|
||||
ret['filters'] = ['runtime.powerState == "poweredOn"']
|
||||
|
||||
groupby_patterns = vmware_opts.get('groupby_patterns')
|
||||
ret.setdefault('keyed_groups', [])
|
||||
if groupby_patterns:
|
||||
for pattern in groupby_patterns.split(','):
|
||||
stripped_pattern = pattern.replace('{', '').replace('}', '').strip() # make best effort
|
||||
ret['keyed_groups'].append({
|
||||
'prefix': '', 'separator': '',
|
||||
'key': stripped_pattern
|
||||
})
|
||||
else:
|
||||
# default groups from script
|
||||
for entry in ('guest.guestId', '"templates" if config.template else "guests"'):
|
||||
ret['keyed_groups'].append({
|
||||
'prefix': '', 'separator': '',
|
||||
'key': entry
|
||||
})
|
||||
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
@@ -2267,6 +2393,8 @@ class openstack(PluginFileInjector):
|
||||
plugin_name = 'openstack'
|
||||
# minimum version of 2.7.8 may be theoretically possible
|
||||
initial_version = '2.8' # Driven by consistency with other sources
|
||||
namespace = 'openstack'
|
||||
collection = 'cloud'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
@@ -2318,7 +2446,10 @@ class openstack(PluginFileInjector):
|
||||
return self.build_script_private_data(inventory_update, private_data_dir, mk_cache=False)
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
return self.get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
script_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
env.update(script_env)
|
||||
return env
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
def use_host_name_for_name(a_bool_maybe):
|
||||
@@ -2330,12 +2461,10 @@ class openstack(PluginFileInjector):
|
||||
else:
|
||||
return 'uuid'
|
||||
|
||||
ret = dict(
|
||||
plugin=self.plugin_name,
|
||||
fail_on_errors=True,
|
||||
expand_hostvars=True,
|
||||
inventory_hostname=use_host_name_for_name(False),
|
||||
)
|
||||
ret = super(openstack, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['fail_on_errors'] = True
|
||||
ret['expand_hostvars'] = True
|
||||
ret['inventory_hostname'] = use_host_name_for_name(False)
|
||||
# Note: mucking with defaults will break import integrity
|
||||
# For the plugin, we need to use the same defaults as the old script
|
||||
# or else imports will conflict. To find script defaults you have
|
||||
@@ -2360,8 +2489,10 @@ class openstack(PluginFileInjector):
|
||||
class rhv(PluginFileInjector):
|
||||
"""ovirt uses the custom credential templating, and that is all
|
||||
"""
|
||||
# plugin_name = 'FIXME' # contribute inventory plugin to Ansible
|
||||
plugin_name = 'ovirt'
|
||||
base_injector = 'template'
|
||||
namespace = 'ovirt'
|
||||
collection = 'ovirt_collection'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
@@ -2371,8 +2502,10 @@ class rhv(PluginFileInjector):
|
||||
class satellite6(PluginFileInjector):
|
||||
plugin_name = 'foreman'
|
||||
ini_env_reference = 'FOREMAN_INI_PATH'
|
||||
# initial_version = '2.8' # FIXME: turn on after plugin is validated
|
||||
initial_version = '2.9'
|
||||
# No base injector, because this does not work in playbooks. Bug??
|
||||
namespace = 'theforeman'
|
||||
collection = 'foreman'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
@@ -2434,18 +2567,60 @@ class satellite6(PluginFileInjector):
|
||||
# this assumes that this is merged
|
||||
# https://github.com/ansible/ansible/pull/52693
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
ret = {}
|
||||
ret = super(satellite6, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
if credential:
|
||||
ret['FOREMAN_SERVER'] = credential.get_input('host', default='')
|
||||
ret['FOREMAN_USER'] = credential.get_input('username', default='')
|
||||
ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
|
||||
return ret
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
|
||||
want_ansible_ssh_host = False
|
||||
foreman_opts = inventory_update.source_vars_dict.copy()
|
||||
for k, v in foreman_opts.items():
|
||||
if k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
want_ansible_ssh_host = v
|
||||
|
||||
# Compatibility content
|
||||
group_by_hostvar = {
|
||||
"environment": {"prefix": "foreman_environment_",
|
||||
"separator": "",
|
||||
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | "
|
||||
"regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')"}, # NOQA: W605
|
||||
"location": {"prefix": "foreman_location_",
|
||||
"separator": "",
|
||||
"key": "foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"organization": {"prefix": "foreman_organization_",
|
||||
"separator": "",
|
||||
"key": "foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"lifecycle_environment": {"prefix": "foreman_lifecycle_environment_",
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['lifecycle_environment_name'] | "
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"content_view": {"prefix": "foreman_content_view_",
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['content_view_name'] | "
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"}
|
||||
}
|
||||
ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by_hostvar]
|
||||
ret['legacy_hostvars'] = True
|
||||
ret['want_facts'] = True
|
||||
ret['want_params'] = True
|
||||
|
||||
if want_ansible_ssh_host:
|
||||
ret['compose'] = {'ansible_ssh_host': "foreman['ip6'] | default(foreman['ip'], true)"}
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
class cloudforms(PluginFileInjector):
|
||||
# plugin_name = 'FIXME' # contribute inventory plugin to Ansible
|
||||
ini_env_reference = 'CLOUDFORMS_INI_PATH'
|
||||
# Also no base_injector because this does not work in playbooks
|
||||
# namespace = '' # does not have a collection
|
||||
# collection = ''
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
@@ -2481,6 +2656,8 @@ class tower(PluginFileInjector):
|
||||
plugin_name = 'tower'
|
||||
base_injector = 'template'
|
||||
initial_version = '2.8' # Driven by "include_metadata" hostvars
|
||||
namespace = 'awx'
|
||||
collection = 'awx'
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(tower, self).get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
@@ -2489,6 +2666,7 @@ class tower(PluginFileInjector):
|
||||
return env
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(tower, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
# Credentials injected as env vars, same as script
|
||||
try:
|
||||
# plugin can take an actual int type
|
||||
@@ -2496,11 +2674,9 @@ class tower(PluginFileInjector):
|
||||
except ValueError:
|
||||
# inventory_id could be a named URL
|
||||
identifier = iri_to_uri(inventory_update.instance_filters)
|
||||
return {
|
||||
'plugin': self.plugin_name,
|
||||
'inventory_id': identifier,
|
||||
'include_metadata': True # used for license check
|
||||
}
|
||||
ret['inventory_id'] = identifier
|
||||
ret['include_metadata'] = True # used for license check
|
||||
return ret
|
||||
|
||||
|
||||
for cls in PluginFileInjector.__subclasses__():
|
||||
|
||||
@@ -199,7 +199,7 @@ class ProjectOptions(models.Model):
|
||||
results = []
|
||||
project_path = self.get_project_path()
|
||||
if project_path:
|
||||
for dirpath, dirnames, filenames in os.walk(smart_str(project_path)):
|
||||
for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=True):
|
||||
if skip_directory(dirpath):
|
||||
continue
|
||||
for filename in filenames:
|
||||
|
||||
@@ -191,7 +191,7 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
return rrule

@classmethod
def rrulestr(cls, rrule, **kwargs):
def rrulestr(cls, rrule, fast_forward=True, **kwargs):
"""
Apply our own custom rrule parsing requirements
"""
@@ -205,11 +205,17 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
'A valid TZID must be provided (e.g., America/New_York)'
)

if 'MINUTELY' in rrule or 'HOURLY' in rrule:
if fast_forward and ('MINUTELY' in rrule or 'HOURLY' in rrule):
try:
first_event = x[0]
if first_event < now() - datetime.timedelta(days=365 * 5):
raise ValueError('RRULE values with more than 1000 events are not allowed.')
if first_event < now():
# hourly/minutely rrules with far-past DTSTART values
# are *really* slow to precompute
# start *from* one week ago to speed things up drastically
dtstart = x._rrule[0]._dtstart.strftime(':%Y%m%dT')
new_start = (now() - datetime.timedelta(days=7)).strftime(':%Y%m%dT')
new_rrule = rrule.replace(dtstart, new_start)
return Schedule.rrulestr(new_rrule, fast_forward=False)
except IndexError:
pass
return x

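A standalone illustration of the fast-forward trick above: for MINUTELY/HOURLY rules whose DTSTART is years in the past, rewriting just the date portion of DTSTART to roughly a week ago keeps occurrence expansion cheap while leaving upcoming occurrences effectively unchanged (shifting by a whole number of days preserves the phase of minute/hour intervals that divide a day). The rule string below is an example, not taken from AWX:

import datetime
from dateutil import rrule

now = datetime.datetime.utcnow()
old = "DTSTART:20100101T000000\nRRULE:FREQ=MINUTELY;INTERVAL=5"

# Rewrite only the date portion of DTSTART to one week ago, mirroring the hunk above.
new_date = (now - datetime.timedelta(days=7)).strftime(":%Y%m%dT")
fast = old.replace(":20100101T", new_date)

# Expanding the rewritten rule only walks about a week of occurrences.
print(rrule.rrulestr(fast).after(now))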
@@ -707,7 +707,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
help_text=_('The Rampart/Instance group the job was run under'),
|
||||
help_text=_('The Instance group the job was run under'),
|
||||
)
|
||||
organization = models.ForeignKey(
|
||||
'Organization',
|
||||
|
||||
@@ -89,7 +89,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
grafana_data['isRegion'] = self.isRegion
|
||||
grafana_data['dashboardId'] = self.dashboardId
|
||||
grafana_data['panelId'] = self.panelId
|
||||
grafana_data['tags'] = self.annotation_tags
|
||||
if self.annotation_tags:
|
||||
grafana_data['tags'] = self.annotation_tags
|
||||
grafana_data['text'] = m.subject
|
||||
grafana_headers['Authorization'] = "Bearer {}".format(self.grafana_key)
|
||||
grafana_headers['Content-Type'] = "application/json"
|
||||
|
||||
@@ -123,7 +123,7 @@ class SimpleDAG(object):
|
||||
self.root_nodes.discard(to_obj_ord)
|
||||
|
||||
if from_obj_ord is None and to_obj_ord is None:
|
||||
raise LookupError("From object {} and to object not found".format(from_obj, to_obj))
|
||||
raise LookupError("From object {} and to object {} not found".format(from_obj, to_obj))
|
||||
elif from_obj_ord is None:
|
||||
raise LookupError("From object not found {}".format(from_obj))
|
||||
elif to_obj_ord is None:
|
||||
|
||||
@@ -226,7 +226,7 @@ class TaskManager():
|
||||
# non-Ansible jobs on isolated instances run on controller
|
||||
task.instance_group = rampart_group.controller
|
||||
task.execution_node = random.choice(list(rampart_group.controller.instances.all().values_list('hostname', flat=True)))
|
||||
logger.debug('Submitting isolated {} to queue {}.'.format(
|
||||
logger.debug('Submitting isolated {} to queue {} on node {}.'.format(
|
||||
        task.log_format, task.instance_group.name, task.execution_node))
    elif controller_node:
        task.instance_group = rampart_group

@@ -52,6 +52,7 @@ from awx.conf.utils import conf_to_dict

__all__ = []

logger = logging.getLogger('awx.main.signals')
analytics_logger = logging.getLogger('awx.analytics.activity_stream')

# Update has_active_failures for inventory/groups when a Host/Group is deleted,
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted

@@ -363,12 +364,24 @@ def model_serializer_mapping():
    }


def emit_activity_stream_change(instance):
    if 'migrate' in sys.argv:
        # don't emit activity stream external logs during migrations, it
        # could be really noisy
        return
    from awx.api.serializers import ActivityStreamSerializer
    actor = None
    if instance.actor:
        actor = instance.actor.username
    summary_fields = ActivityStreamSerializer(instance).get_summary_fields(instance)
    analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
                          extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
                                     actor=actor, operation=instance.operation,
                                     object1=instance.object1, object2=instance.object2, summary_fields=summary_fields))


def activity_stream_create(sender, instance, created, **kwargs):
    if created and activity_stream_enabled:
        # TODO: remove deprecated_group conditional in 3.3
        # Skip recording any inventory source directly associated with a group.
        if isinstance(instance, InventorySource) and instance.deprecated_group:
            return
        _type = type(instance)
        if getattr(_type, '_deferred', False):
            return

@@ -399,6 +412,9 @@ def activity_stream_create(sender, instance, created, **kwargs):
        else:
            activity_entry.setting = conf_to_dict(instance)
        activity_entry.save()
        connection.on_commit(
            lambda: emit_activity_stream_change(activity_entry)
        )


def activity_stream_update(sender, instance, **kwargs):

@@ -430,15 +446,14 @@ def activity_stream_update(sender, instance, **kwargs):
        else:
            activity_entry.setting = conf_to_dict(instance)
        activity_entry.save()
        connection.on_commit(
            lambda: emit_activity_stream_change(activity_entry)
        )


def activity_stream_delete(sender, instance, **kwargs):
    if not activity_stream_enabled:
        return
    # TODO: remove deprecated_group conditional in 3.3
    # Skip recording any inventory source directly associated with a group.
    if isinstance(instance, InventorySource) and instance.deprecated_group:
        return
    # Inventory delete happens in the task system rather than the request-response cycle.
    # If we trigger this handler there we may fall into db-integrity-related race conditions.
    # So we add flag verification to prevent normal signal handling. This function will be

@@ -467,6 +482,9 @@ def activity_stream_delete(sender, instance, **kwargs):
        object1=object1,
        actor=get_current_user_or_none())
    activity_entry.save()
    connection.on_commit(
        lambda: emit_activity_stream_change(activity_entry)
    )


def activity_stream_associate(sender, instance, **kwargs):

@@ -540,6 +558,9 @@ def activity_stream_associate(sender, instance, **kwargs):
            activity_entry.role.add(role)
        activity_entry.object_relationship_type = obj_rel
        activity_entry.save()
        connection.on_commit(
            lambda: emit_activity_stream_change(activity_entry)
        )


@receiver(current_user_getter)
@@ -73,6 +73,7 @@ from awx.main.utils import (get_ssh_version, update_scm_url,
                            get_awx_version)
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.common import _get_ansible_version, get_custom_venv_choices
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock

@@ -140,6 +141,9 @@ def dispatch_startup():
    # and Tower fall out of use/support, we can probably just _assume_ that
    # everybody has moved to bigint, and remove this code entirely
    enforce_bigint_pk_migration()

    # Update Tower's rsyslog.conf file based on logging settings in the db
    reconfigure_rsyslog()


def inform_cluster_of_shutdown():

@@ -280,6 +284,12 @@ def handle_setting_changes(setting_keys):
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)

    if any([
        setting.startswith('LOG_AGGREGATOR')
        for setting in setting_keys
    ]):
        connection.on_commit(reconfigure_rsyslog)
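The handle_setting_changes hunk above is the core of how logging reconfiguration is triggered: cached setting values are dropped, and if any LOG_AGGREGATOR key changed, the rsyslog rewrite is deferred until the surrounding transaction commits. Below is a minimal sketch of that flow; the cache-key naming and the free-standing helper are illustrative assumptions, not AWX's actual code.

# Sketch only: mirrors the hunk above. Assumes cache keys are derived from the
# setting names; reconfigure_rsyslog is imported exactly as in the diff.
from django.core.cache import cache
from django.db import connection

from awx.main.utils.external_logging import reconfigure_rsyslog


def handle_setting_changes_sketch(setting_keys):
    cache_keys = {'setting_{}'.format(k) for k in setting_keys}  # assumed key scheme
    cache.delete_many(cache_keys)
    if any(key.startswith('LOG_AGGREGATOR') for key in setting_keys):
        # Defer the rsyslog.conf rewrite until the transaction commits, so a
        # rolled-back settings change never reaches the log forwarder config.
        connection.on_commit(reconfigure_rsyslog)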
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
@@ -1466,7 +1476,7 @@ class BaseTask(object):
                params.get('module'),
                module_args,
                ident=str(self.instance.pk))
            self.event_ct = len(isolated_manager_instance.handled_events)
            self.finished_callback(None)
        else:
            res = ansible_runner.interface.run(**params)
            status = res.status
@@ -2064,29 +2074,34 @@ class RunProjectUpdate(BaseTask):
        if settings.GALAXY_IGNORE_CERTS:
            env['ANSIBLE_GALAXY_IGNORE'] = True
        # Set up the public Galaxy server, if enabled
        galaxy_configured = False
        if settings.PUBLIC_GALAXY_ENABLED:
            galaxy_servers = [settings.PUBLIC_GALAXY_SERVER]
            galaxy_servers = [settings.PUBLIC_GALAXY_SERVER]  # static setting
        else:
            galaxy_configured = True
            galaxy_servers = []
        # Set up fallback Galaxy servers, if configured
        if settings.FALLBACK_GALAXY_SERVERS:
            galaxy_configured = True
            galaxy_servers = settings.FALLBACK_GALAXY_SERVERS + galaxy_servers
        # Set up the primary Galaxy server, if configured
        if settings.PRIMARY_GALAXY_URL:
            galaxy_configured = True
            galaxy_servers = [{'id': 'primary_galaxy'}] + galaxy_servers
            for key in GALAXY_SERVER_FIELDS:
                value = getattr(settings, 'PRIMARY_GALAXY_{}'.format(key.upper()))
                if value:
                    galaxy_servers[0][key] = value
        for server in galaxy_servers:
            for key in GALAXY_SERVER_FIELDS:
                if not server.get(key):
                    continue
                env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
                env[env_key] = server[key]
        if galaxy_servers:
            # now set the precedence of galaxy servers
            env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
        if galaxy_configured:
            for server in galaxy_servers:
                for key in GALAXY_SERVER_FIELDS:
                    if not server.get(key):
                        continue
                    env_key = ('ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key)).upper()
                    env[env_key] = server[key]
            if galaxy_servers:
                # now set the precedence of galaxy servers
                env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join([server.get('id', 'unnamed') for server in galaxy_servers])
        return env
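To make the Galaxy hunk above easier to follow, here is a stripped-down sketch of how a list of Galaxy server definitions becomes ANSIBLE_GALAXY_SERVER_* environment variables plus the ANSIBLE_GALAXY_SERVER_LIST precedence entry. The helper and the exact contents of GALAXY_SERVER_FIELDS are assumptions for illustration; the real logic lives in RunProjectUpdate's env building shown in the diff.

# Illustrative sketch of the env-var composition shown in the hunk above.
# GALAXY_SERVER_FIELDS here is an assumed value, not quoted from settings.
GALAXY_SERVER_FIELDS = ('url', 'username', 'password', 'token', 'auth_url')


def build_galaxy_env(galaxy_servers):
    env = {}
    for server in galaxy_servers:
        for key in GALAXY_SERVER_FIELDS:
            if not server.get(key):
                continue
            env_key = 'ANSIBLE_GALAXY_SERVER_{}_{}'.format(server.get('id', 'unnamed'), key).upper()
            env[env_key] = server[key]
    if galaxy_servers:
        # first entry in the list wins; this mirrors the precedence comment above
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(s.get('id', 'unnamed') for s in galaxy_servers)
    return env

# build_galaxy_env([{'id': 'primary_galaxy', 'url': 'https://galaxy.example.com'}]) would yield
# {'ANSIBLE_GALAXY_SERVER_PRIMARY_GALAXY_URL': 'https://galaxy.example.com',
#  'ANSIBLE_GALAXY_SERVER_LIST': 'primary_galaxy'}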
    def _build_scm_url_extra_vars(self, project_update):

@@ -2397,7 +2412,7 @@ class RunInventoryUpdate(BaseTask):

    @property
    def proot_show_paths(self):
        return [self.get_path_to('..', 'plugins', 'inventory')]
        return [self.get_path_to('..', 'plugins', 'inventory'), settings.INVENTORY_COLLECTIONS_ROOT]

    def build_private_data(self, inventory_update, private_data_dir):
        """

@@ -2727,9 +2742,12 @@ class RunAdHocCommand(BaseTask):
        env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
        env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'

        # Specify empty SSH args (should disable ControlPersist entirely for
        # ad hoc commands).
        env.setdefault('ANSIBLE_SSH_ARGS', '')
        # Create a directory for ControlPath sockets that is unique to each
        # ad hoc command and visible inside the proot environment (when enabled).
        cp_dir = os.path.join(private_data_dir, 'cp')
        if not os.path.exists(cp_dir):
            os.mkdir(cp_dir, 0o700)
        env['ANSIBLE_SSH_CONTROL_PATH'] = cp_dir

        return env
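For orientation, the ad hoc command environment built above ends up with entries along these lines; the private data directory path is hypothetical and only the SSH-related keys from the hunk are shown.

# Hypothetical result of the ad hoc env setup above, for a private_data_dir
# of '/tmp/awx_123_abc'.
expected_env_subset = {
    'ANSIBLE_LOAD_CALLBACK_PLUGINS': '1',
    'ANSIBLE_SFTP_BATCH_MODE': 'False',
    'ANSIBLE_SSH_ARGS': '',                              # disables ControlPersist
    'ANSIBLE_SSH_CONTROL_PATH': '/tmp/awx_123_abc/cp',   # per-command socket dir, mode 0o700
}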
@@ -39,5 +39,5 @@ keyed_groups:
  prefix: ''
  separator: ''
plain_host_names: true
plugin: azure_rm
plugin: azure.azcollection.azure_rm
use_contrib_script_compatible_sanitization: true

@@ -1,6 +1,6 @@
boto_profile: /tmp/my_boto_stuff
compose:
  ansible_host: public_ip_address
  ansible_host: public_dns_name
  ec2_account_id: owner_id
  ec2_ami_launch_index: ami_launch_index | string
  ec2_architecture: architecture

@@ -50,9 +50,8 @@ filters:
groups:
  ec2: true
hostnames:
- network-interface.addresses.association.public-ip
- dns-name
- private-dns-name
iam_role_arn: arn:aws:iam::123456789012:role/test-role
keyed_groups:
- key: placement.availability_zone
  parent_group: zones

@@ -75,7 +74,7 @@ keyed_groups:
  parent_group: '{{ placement.region }}'
  prefix: ''
  separator: ''
plugin: aws_ec2
plugin: amazon.aws.aws_ec2
regions:
- us-east-2
- ap-south-1

@@ -40,7 +40,7 @@ keyed_groups:
- key: image
  prefix: ''
  separator: ''
plugin: gcp_compute
plugin: google.cloud.gcp_compute
projects:
- fooo
retrieve_image_info: true

@@ -8,6 +8,7 @@ clouds:
      auth_url: https://foo.invalid
      domain_name: fooo
      password: fooo
      project_domain_name: fooo
      project_name: fooo
      username: fooo
    private: false

@@ -1,4 +1,4 @@
expand_hostvars: true
fail_on_errors: true
inventory_hostname: uuid
plugin: openstack
plugin: openstack.cloud.openstack
awx/main/tests/data/inventory/plugins/rhv/env.json (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"OVIRT_INI_PATH": "{{ file_reference }}",
|
||||
"OVIRT_PASSWORD": "fooo",
|
||||
"OVIRT_URL": "https://foo.invalid",
|
||||
"OVIRT_USERNAME": "fooo"
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
[ovirt]
|
||||
ovirt_url=https://foo.invalid
|
||||
ovirt_username=fooo
|
||||
ovirt_password=fooo
|
||||
ovirt_ca_file=fooo
|
||||
@@ -0,0 +1 @@
|
||||
plugin: ovirt.ovirt_collection.ovirt
|
||||
@@ -1 +1,22 @@
|
||||
plugin: foreman
|
||||
compose:
|
||||
ansible_ssh_host: foreman['ip6'] | default(foreman['ip'], true)
|
||||
keyed_groups:
|
||||
- key: foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')
|
||||
prefix: foreman_environment_
|
||||
separator: ''
|
||||
- key: foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_location_
|
||||
separator: ''
|
||||
- key: foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_organization_
|
||||
separator: ''
|
||||
- key: foreman['content_facet_attributes']['lifecycle_environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_lifecycle_environment_
|
||||
separator: ''
|
||||
- key: foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_content_view_
|
||||
separator: ''
|
||||
legacy_hostvars: true
|
||||
plugin: theforeman.foreman.foreman
|
||||
want_facts: true
|
||||
want_params: true
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
include_metadata: true
|
||||
inventory_id: 42
|
||||
plugin: tower
|
||||
plugin: awx.awx.tower
|
||||
|
||||
awx/main/tests/data/inventory/plugins/vmware/env.json (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"VMWARE_HOST": "https://foo.invalid",
|
||||
"VMWARE_PASSWORD": "fooo",
|
||||
"VMWARE_USER": "fooo",
|
||||
"VMWARE_VALIDATE_CERTS": "False"
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
compose:
|
||||
ansible_host: guest.ipAddress
|
||||
ansible_ssh_host: guest.ipAddress
|
||||
ansible_uuid: 99999999 | random | to_uuid
|
||||
availablefield: availableField
|
||||
configissue: configIssue
|
||||
configstatus: configStatus
|
||||
customvalue: customValue
|
||||
effectiverole: effectiveRole
|
||||
guestheartbeatstatus: guestHeartbeatStatus
|
||||
layoutex: layoutEx
|
||||
overallstatus: overallStatus
|
||||
parentvapp: parentVApp
|
||||
recenttask: recentTask
|
||||
resourcepool: resourcePool
|
||||
rootsnapshot: rootSnapshot
|
||||
triggeredalarmstate: triggeredAlarmState
|
||||
filters:
|
||||
- config.zoo == "DC0_H0_VM0"
|
||||
hostnames:
|
||||
- config.foo
|
||||
keyed_groups:
|
||||
- key: config.asdf
|
||||
prefix: ''
|
||||
separator: ''
|
||||
plugin: community.vmware.vmware_vm_inventory
|
||||
properties:
|
||||
- availableField
|
||||
- configIssue
|
||||
- configStatus
|
||||
- customValue
|
||||
- datastore
|
||||
- effectiveRole
|
||||
- guestHeartbeatStatus
|
||||
- layout
|
||||
- layoutEx
|
||||
- name
|
||||
- network
|
||||
- overallStatus
|
||||
- parentVApp
|
||||
- permission
|
||||
- recentTask
|
||||
- resourcePool
|
||||
- rootSnapshot
|
||||
- snapshot
|
||||
- tag
|
||||
- triggeredAlarmState
|
||||
- value
|
||||
- capability
|
||||
- config
|
||||
- guest
|
||||
- runtime
|
||||
- storage
|
||||
- summary
|
||||
strict: false
|
||||
with_nested_properties: true
|
||||
@@ -1,9 +1,11 @@
|
||||
[ec2]
|
||||
base_source_var = value_of_var
|
||||
boto_profile = /tmp/my_boto_stuff
|
||||
iam_role_arn = arn:aws:iam::123456789012:role/test-role
|
||||
hostname_variable = public_dns_name
|
||||
destination_variable = public_dns_name
|
||||
regions = us-east-2,ap-south-1
|
||||
regions_exclude = us-gov-west-1,cn-north-1
|
||||
destination_variable = public_dns_name
|
||||
vpc_destination_variable = ip_address
|
||||
route53 = False
|
||||
all_instances = True
|
||||
|
||||
@@ -10,6 +10,7 @@ clouds:
|
||||
auth_url: https://foo.invalid
|
||||
domain_name: fooo
|
||||
password: fooo
|
||||
project_domain_name: fooo
|
||||
project_name: fooo
|
||||
username: fooo
|
||||
private: false
|
||||
|
||||
@@ -5,6 +5,7 @@ username = fooo
|
||||
password = fooo
|
||||
server = https://foo.invalid
|
||||
base_source_var = value_of_var
|
||||
host_filters = foobaa
|
||||
groupby_patterns = fouo
|
||||
alias_pattern = {{ config.foo }}
|
||||
host_filters = {{ config.zoo == "DC0_H0_VM0" }}
|
||||
groupby_patterns = {{ config.asdf }}
|
||||
|
||||
|
||||
@@ -220,7 +220,7 @@ def create_job_template(name, roles=None, persisted=True, webhook_service='', **
|
||||
if 'organization' in kwargs:
|
||||
org = kwargs['organization']
|
||||
if type(org) is not Organization:
|
||||
org = mk_organization(org, '%s-desc'.format(org), persisted=persisted)
|
||||
org = mk_organization(org, org, persisted=persisted)
|
||||
|
||||
if 'credential' in kwargs:
|
||||
cred = kwargs['credential']
|
||||
@@ -298,7 +298,7 @@ def create_organization(name, roles=None, persisted=True, **kwargs):
|
||||
labels = {}
|
||||
notification_templates = {}
|
||||
|
||||
org = mk_organization(name, '%s-desc'.format(name), persisted=persisted)
|
||||
org = mk_organization(name, name, persisted=persisted)
|
||||
|
||||
if 'inventories' in kwargs:
|
||||
for i in kwargs['inventories']:
|
||||
|
||||
awx/main/tests/functional/analytics/test_collectors.py (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
import pytest
|
||||
import tempfile
|
||||
import os
|
||||
import shutil
|
||||
import csv
|
||||
|
||||
from django.utils.timezone import now
|
||||
from datetime import timedelta
|
||||
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
|
||||
|
||||
from awx.main.analytics import collectors
|
||||
|
||||
from awx.main.models import (
|
||||
ProjectUpdate,
|
||||
InventorySource,
|
||||
WorkflowJob,
|
||||
WorkflowJobNode,
|
||||
JobTemplate,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sqlite_copy_expert(request):
|
||||
# copy_expert is postgres-specific, and SQLite doesn't support it; mock its
|
||||
# behavior to test that it writes a file that contains stdout from events
|
||||
path = tempfile.mkdtemp(prefix="copied_tables")
|
||||
|
||||
def write_stdout(self, sql, fd):
|
||||
# Would be cool if we instead properly dissected the SQL query and verified
# it that way. But instead, we just take the naive approach here.
|
||||
assert sql.startswith("COPY (")
|
||||
assert sql.endswith(") TO STDOUT WITH CSV HEADER")
|
||||
|
||||
sql = sql.replace("COPY (", "")
|
||||
sql = sql.replace(") TO STDOUT WITH CSV HEADER", "")
|
||||
# sqlite equivalent
|
||||
sql = sql.replace("ARRAY_AGG", "GROUP_CONCAT")
|
||||
|
||||
# Remove JSON style queries
|
||||
# TODO: could replace JSON style queries with sqlite kind of equivalents
|
||||
sql_new = []
|
||||
for line in sql.split("\n"):
|
||||
if line.find("main_jobevent.event_data::") == -1:
|
||||
sql_new.append(line)
|
||||
elif not line.endswith(","):
|
||||
sql_new[-1] = sql_new[-1].rstrip(",")
|
||||
sql = "\n".join(sql_new)
|
||||
|
||||
self.execute(sql)
|
||||
results = self.fetchall()
|
||||
headers = [i[0] for i in self.description]
|
||||
|
||||
csv_handle = csv.writer(
|
||||
fd,
|
||||
delimiter=",",
|
||||
quoting=csv.QUOTE_ALL,
|
||||
escapechar="\\",
|
||||
lineterminator="\n",
|
||||
)
|
||||
csv_handle.writerow(headers)
|
||||
csv_handle.writerows(results)
|
||||
|
||||
setattr(SQLiteCursorWrapper, "copy_expert", write_stdout)
|
||||
request.addfinalizer(lambda: shutil.rmtree(path))
|
||||
request.addfinalizer(lambda: delattr(SQLiteCursorWrapper, "copy_expert"))
|
||||
return path
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_copy_tables_unified_job_query(
|
||||
sqlite_copy_expert, project, inventory, job_template
|
||||
):
|
||||
"""
|
||||
Ensure that various unified job types are in the output of the query.
|
||||
"""
|
||||
|
||||
time_start = now() - timedelta(hours=9)
|
||||
inv_src = InventorySource.objects.create(
|
||||
name="inventory_update1", inventory=inventory, source="gce"
|
||||
)
|
||||
|
||||
project_update_name = ProjectUpdate.objects.create(
|
||||
project=project, name="project_update1"
|
||||
).name
|
||||
inventory_update_name = inv_src.create_unified_job().name
|
||||
job_name = job_template.create_unified_job().name
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
collectors.copy_tables(time_start, tmpdir, subset="unified_jobs")
|
||||
with open(os.path.join(tmpdir, "unified_jobs_table.csv")) as f:
|
||||
lines = "".join([l for l in f])
|
||||
|
||||
assert project_update_name in lines
|
||||
assert inventory_update_name in lines
|
||||
assert job_name in lines
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_job(states=["new", "new", "new", "new", "new"]):
|
||||
"""
|
||||
Workflow topology:
|
||||
node[0]
|
||||
/\
|
||||
s/ \f
|
||||
/ \
|
||||
node[1,5] node[3]
|
||||
/ \
|
||||
s/ \f
|
||||
/ \
|
||||
node[2] node[4]
|
||||
"""
|
||||
wfj = WorkflowJob.objects.create()
|
||||
jt = JobTemplate.objects.create(name="test-jt")
|
||||
nodes = [
|
||||
WorkflowJobNode.objects.create(workflow_job=wfj, unified_job_template=jt)
|
||||
for i in range(0, 6)
|
||||
]
|
||||
for node, state in zip(nodes, states):
|
||||
if state:
|
||||
node.job = jt.create_job()
|
||||
node.job.status = state
|
||||
node.job.save()
|
||||
node.save()
|
||||
nodes[0].success_nodes.add(nodes[1])
|
||||
nodes[0].success_nodes.add(nodes[5])
|
||||
nodes[1].success_nodes.add(nodes[2])
|
||||
nodes[0].failure_nodes.add(nodes[3])
|
||||
nodes[3].failure_nodes.add(nodes[4])
|
||||
return wfj
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_copy_tables_workflow_job_node_query(sqlite_copy_expert, workflow_job):
|
||||
time_start = now() - timedelta(hours=9)
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
collectors.copy_tables(time_start, tmpdir, subset="workflow_job_node_query")
|
||||
with open(os.path.join(tmpdir, "workflow_job_node_table.csv")) as f:
|
||||
reader = csv.reader(f)
|
||||
# Pop the headers
|
||||
next(reader)
|
||||
lines = [l for l in reader]
|
||||
|
||||
ids = [int(l[0]) for l in lines]
|
||||
|
||||
assert ids == list(
|
||||
workflow_job.workflow_nodes.all().values_list("id", flat=True)
|
||||
)
|
||||
|
||||
for index, relationship in zip(
|
||||
[7, 8, 9], ["success_nodes", "failure_nodes", "always_nodes"]
|
||||
):
|
||||
for i, l in enumerate(lines):
|
||||
related_nodes = (
|
||||
[int(e) for e in l[index].split(",")] if l[index] else []
|
||||
)
|
||||
assert related_nodes == list(
|
||||
getattr(workflow_job.workflow_nodes.all()[i], relationship)
|
||||
.all()
|
||||
.values_list("id", flat=True)
|
||||
), f"(right side) workflow_nodes.all()[{i}].{relationship}.all()"
|
||||
@@ -13,10 +13,10 @@ def test_empty():
|
||||
"active_host_count": 0,
|
||||
"credential": 0,
|
||||
"custom_inventory_script": 0,
|
||||
"custom_virtualenvs": 0, # dev env ansible3
|
||||
"custom_virtualenvs": 0, # dev env ansible3
|
||||
"host": 0,
|
||||
"inventory": 0,
|
||||
"inventories": {'normal': 0, 'smart': 0},
|
||||
"inventories": {"normal": 0, "smart": 0},
|
||||
"job_template": 0,
|
||||
"notification_template": 0,
|
||||
"organization": 0,
|
||||
@@ -27,28 +27,97 @@ def test_empty():
|
||||
"user": 0,
|
||||
"workflow_job_template": 0,
|
||||
"unified_job": 0,
|
||||
"pending_jobs": 0
|
||||
"pending_jobs": 0,
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_database_counts(organization_factory, job_template_factory,
|
||||
workflow_job_template_factory):
|
||||
objs = organization_factory('org', superusers=['admin'])
|
||||
jt = job_template_factory('test', organization=objs.organization,
|
||||
inventory='test_inv', project='test_project',
|
||||
credential='test_cred')
|
||||
workflow_job_template_factory('test')
|
||||
def test_database_counts(
|
||||
organization_factory, job_template_factory, workflow_job_template_factory
|
||||
):
|
||||
objs = organization_factory("org", superusers=["admin"])
|
||||
jt = job_template_factory(
|
||||
"test",
|
||||
organization=objs.organization,
|
||||
inventory="test_inv",
|
||||
project="test_project",
|
||||
credential="test_cred",
|
||||
)
|
||||
workflow_job_template_factory("test")
|
||||
models.Team(organization=objs.organization).save()
|
||||
models.Host(inventory=jt.inventory).save()
|
||||
models.Schedule(
|
||||
rrule='DTSTART;TZID=America/New_York:20300504T150000',
|
||||
unified_job_template=jt.job_template
|
||||
rrule="DTSTART;TZID=America/New_York:20300504T150000",
|
||||
unified_job_template=jt.job_template,
|
||||
).save()
|
||||
models.CustomInventoryScript(organization=objs.organization).save()
|
||||
|
||||
counts = collectors.counts(None)
|
||||
for key in ('organization', 'team', 'user', 'inventory', 'credential',
|
||||
'project', 'job_template', 'workflow_job_template', 'host',
|
||||
'schedule', 'custom_inventory_script'):
|
||||
for key in (
|
||||
"organization",
|
||||
"team",
|
||||
"user",
|
||||
"inventory",
|
||||
"credential",
|
||||
"project",
|
||||
"job_template",
|
||||
"workflow_job_template",
|
||||
"host",
|
||||
"schedule",
|
||||
"custom_inventory_script",
|
||||
):
|
||||
assert counts[key] == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_inventory_counts(organization_factory, inventory_factory):
|
||||
(inv1, inv2, inv3) = [inventory_factory(f"inv-{i}") for i in range(3)]
|
||||
|
||||
s1 = inv1.inventory_sources.create(name="src1", source="ec2")
|
||||
s2 = inv1.inventory_sources.create(name="src2", source="file")
|
||||
s3 = inv1.inventory_sources.create(name="src3", source="gce")
|
||||
|
||||
s1.hosts.create(name="host1", inventory=inv1)
|
||||
s1.hosts.create(name="host2", inventory=inv1)
|
||||
s1.hosts.create(name="host3", inventory=inv1)
|
||||
|
||||
s2.hosts.create(name="host4", inventory=inv1)
|
||||
s2.hosts.create(name="host5", inventory=inv1)
|
||||
|
||||
s3.hosts.create(name="host6", inventory=inv1)
|
||||
|
||||
s1 = inv2.inventory_sources.create(name="src1", source="ec2")
|
||||
|
||||
s1.hosts.create(name="host1", inventory=inv2)
|
||||
s1.hosts.create(name="host2", inventory=inv2)
|
||||
s1.hosts.create(name="host3", inventory=inv2)
|
||||
|
||||
inv_counts = collectors.inventory_counts(None)
|
||||
|
||||
assert {
|
||||
inv1.id: {
|
||||
"name": "inv-0",
|
||||
"kind": "",
|
||||
"hosts": 6,
|
||||
"sources": 3,
|
||||
"source_list": [
|
||||
{"name": "src1", "source": "ec2", "num_hosts": 3},
|
||||
{"name": "src2", "source": "file", "num_hosts": 2},
|
||||
{"name": "src3", "source": "gce", "num_hosts": 1},
|
||||
],
|
||||
},
|
||||
inv2.id: {
|
||||
"name": "inv-1",
|
||||
"kind": "",
|
||||
"hosts": 3,
|
||||
"sources": 1,
|
||||
"source_list": [{"name": "src1", "source": "ec2", "num_hosts": 3}],
|
||||
},
|
||||
inv3.id: {
|
||||
"name": "inv-2",
|
||||
"kind": "",
|
||||
"hosts": 0,
|
||||
"sources": 0,
|
||||
"source_list": [],
|
||||
},
|
||||
} == inv_counts
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.middleware import ActivityStreamMiddleware
|
||||
from awx.main.models.activity_stream import ActivityStream
|
||||
from awx.main.access import ActivityStreamAccess
|
||||
from awx.conf.models import Setting
|
||||
@@ -61,28 +60,6 @@ def test_ctint_activity_stream(monkeypatch, get, user, settings):
|
||||
assert response.data['summary_fields']['setting'][0]['name'] == 'FOO'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_middleware_actor_added(monkeypatch, post, get, user, settings):
|
||||
settings.ACTIVITY_STREAM_ENABLED = True
|
||||
u = user('admin-poster', True)
|
||||
|
||||
url = reverse('api:organization_list')
|
||||
response = post(url,
|
||||
dict(name='test-org', description='test-desc'),
|
||||
u,
|
||||
middleware=ActivityStreamMiddleware())
|
||||
assert response.status_code == 201
|
||||
|
||||
org_id = response.data['id']
|
||||
activity_stream = ActivityStream.objects.filter(organization__pk=org_id).first()
|
||||
|
||||
url = reverse('api:activity_stream_detail', kwargs={'pk': activity_stream.pk})
|
||||
response = get(url, u)
|
||||
|
||||
assert response.status_code == 200
|
||||
assert response.data['summary_fields']['actor']['username'] == 'admin-poster'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_rbac_stream_resource_roles(activity_stream_entry, organization, org_admin, settings):
|
||||
settings.ACTIVITY_STREAM_ENABLED = True
|
||||
|
||||
@@ -972,7 +972,7 @@ def test_field_removal(put, organization, admin, credentialtype_ssh):
|
||||
['insights_inventories', Inventory()],
|
||||
['unifiedjobs', Job()],
|
||||
['unifiedjobtemplates', JobTemplate()],
|
||||
['unifiedjobtemplates', InventorySource()],
|
||||
['unifiedjobtemplates', InventorySource(source='ec2')],
|
||||
['projects', Project()],
|
||||
['workflowjobnodes', WorkflowJobNode()],
|
||||
])
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import pytest
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
from django.db import connection
|
||||
from django.test.utils import override_settings
|
||||
@@ -326,6 +328,38 @@ def test_refresh_accesstoken(oauth_application, post, get, delete, admin):
|
||||
assert original_refresh_token.revoked # is not None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_refresh_token_expiration_is_respected(oauth_application, post, get, delete, admin):
|
||||
response = post(
|
||||
reverse('api:o_auth2_application_token_list', kwargs={'pk': oauth_application.pk}),
|
||||
{'scope': 'read'}, admin, expect=201
|
||||
)
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
refresh_token = RefreshToken.objects.get(token=response.data['refresh_token'])
|
||||
refresh_url = drf_reverse('api:oauth_authorization_root_view') + 'token/'
|
||||
short_lived = {
|
||||
'ACCESS_TOKEN_EXPIRE_SECONDS': 1,
|
||||
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 1,
|
||||
'REFRESH_TOKEN_EXPIRE_SECONDS': 1
|
||||
}
|
||||
time.sleep(1)
|
||||
with override_settings(OAUTH2_PROVIDER=short_lived):
|
||||
response = post(
|
||||
refresh_url,
|
||||
data='grant_type=refresh_token&refresh_token=' + refresh_token.token,
|
||||
content_type='application/x-www-form-urlencoded',
|
||||
HTTP_AUTHORIZATION='Basic ' + smart_str(base64.b64encode(smart_bytes(':'.join([
|
||||
oauth_application.client_id, oauth_application.client_secret
|
||||
]))))
|
||||
)
|
||||
assert response.status_code == 403
|
||||
assert b'The refresh token has expired.' in response.content
|
||||
assert RefreshToken.objects.filter(token=refresh_token).exists()
|
||||
assert AccessToken.objects.count() == 1
|
||||
assert RefreshToken.objects.count() == 1
|
||||
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_revoke_access_then_refreshtoken(oauth_application, post, get, delete, admin):
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import datetime
|
||||
import pytest
|
||||
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import JobTemplate, Schedule
|
||||
@@ -140,7 +142,6 @@ def test_encrypted_survey_answer(post, patch, admin_user, project, inventory, su
|
||||
("DTSTART:20030925T104941Z RRULE:FREQ=DAILY;INTERVAL=10;COUNT=500;UNTIL=20040925T104941Z", "RRULE may not contain both COUNT and UNTIL"), # noqa
|
||||
("DTSTART;TZID=America/New_York:20300308T050000Z RRULE:FREQ=DAILY;INTERVAL=1", "rrule parsing failed validation"),
|
||||
("DTSTART:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1", "DTSTART cannot be a naive datetime"),
|
||||
("DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1", "more than 1000 events are not allowed"), # noqa
|
||||
])
|
||||
def test_invalid_rrules(post, admin_user, project, inventory, rrule, error):
|
||||
job_template = JobTemplate.objects.create(
|
||||
@@ -342,6 +343,40 @@ def test_months_with_31_days(post, admin_user):
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.timeout(3)
|
||||
@pytest.mark.parametrize('freq, delta, total_seconds', (
|
||||
('MINUTELY', 1, 60),
|
||||
('MINUTELY', 15, 15 * 60),
|
||||
('HOURLY', 1, 3600),
|
||||
('HOURLY', 4, 3600 * 4),
|
||||
))
|
||||
def test_really_old_dtstart(post, admin_user, freq, delta, total_seconds):
|
||||
url = reverse('api:schedule_rrule')
|
||||
# every <interval>, at the :30 second mark
|
||||
rrule = f'DTSTART;TZID=America/New_York:20051231T000030 RRULE:FREQ={freq};INTERVAL={delta}'
|
||||
start = now()
|
||||
next_ten = post(url, {'rrule': rrule}, admin_user, expect=200).data['utc']
|
||||
|
||||
assert len(next_ten) == 10
|
||||
|
||||
# the first date is *in the future*
|
||||
assert next_ten[0] >= start
|
||||
|
||||
# ...but *no more than* <interval> into the future
|
||||
assert now() + datetime.timedelta(**{
|
||||
'minutes' if freq == 'MINUTELY' else 'hours': delta
|
||||
})
|
||||
|
||||
# every date in the list is <interval> greater than the last
|
||||
for i, x in enumerate(next_ten):
|
||||
if i == 0:
|
||||
continue
|
||||
assert x.second == 30
|
||||
delta = (x - next_ten[i - 1])
|
||||
assert delta.total_seconds() == total_seconds
|
||||
|
||||
|
||||
def test_dst_rollback_duplicates(post, admin_user):
|
||||
# From Nov 2 -> Nov 3, 2030, daylight savings ends and we "roll back" an hour.
|
||||
# Make sure we don't "double count" duplicate times in the "rolled back"
|
||||
|
||||
@@ -5,17 +5,13 @@
|
||||
# Python
|
||||
import pytest
|
||||
import os
|
||||
import time
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
# Mock
|
||||
from unittest import mock
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.conf.models import Setting
|
||||
from awx.main.utils.handlers import AWXProxyHandler, LoggingConnectivityException
|
||||
from awx.conf.registry import settings_registry
|
||||
|
||||
|
||||
TEST_GIF_LOGO = 'data:image/gif;base64,R0lGODlhIQAjAPIAAP//////AP8AAMzMAJmZADNmAAAAAAAAACH/C05FVFNDQVBFMi4wAwEAAAAh+QQJCgAHACwAAAAAIQAjAAADo3i63P4wykmrvTjrzZsxXfR94WMQBFh6RECuixHMLyzPQ13ewZCvow9OpzEAjIBj79cJJmU+FceIVEZ3QRozxBttmyOBwPBtisdX4Bha3oxmS+llFIPHQXQKkiSEXz9PeklHBzx3hYNyEHt4fmmAhHp8Nz45KgV5FgWFOFEGmwWbGqEfniChohmoQZ+oqRiZDZhEgk81I4mwg4EKVbxzrDHBEAkAIfkECQoABwAsAAAAACEAIwAAA6V4utz+MMpJq724GpP15p1kEAQYQmOwnWjgrmxjuMEAx8rsDjZ+fJvdLWQAFAHGWo8FRM54JqIRmYTigDrDMqZTbbbMj0CgjTLHZKvPQH6CTx+a2vKR0XbbOsoZ7SphG057gjl+c0dGgzeGNiaBiSgbBQUHBV08NpOVlkMSk0FKjZuURHiiOJxQnSGfQJuoEKREejK0dFRGjoiQt7iOuLx0rgxYEQkAIfkECQoABwAsAAAAACEAIwAAA7h4utxnxslJDSGR6nrz/owxYB64QUEwlGaVqlB7vrAJscsd3Lhy+wBArGEICo3DUFH4QDqK0GMy51xOgcGlEAfJ+iAFie62chR+jYKaSAuQGOqwJp7jGQRDuol+F/jxZWsyCmoQfwYwgoM5Oyg1i2w0A2WQIW2TPYOIkleQmy+UlYygoaIPnJmapKmqKiusMmSdpjxypnALtrcHioq3ury7hGm3dnVosVpMWFmwREZbddDOSsjVswcJACH5BAkKAAcALAAAAAAhACMAAAOxeLrc/jDKSZUxNS9DCNYV54HURQwfGRlDEFwqdLVuGjOsW9/Odb0wnsUAKBKNwsMFQGwyNUHckVl8bqI4o43lA26PNkv1S9DtNuOeVirw+aTI3qWAQwnud1vhLSnQLS0GeFF+GoVKNF0fh4Z+LDQ6Bn5/MTNmL0mAl2E3j2aclTmRmYCQoKEDiaRDKFhJez6UmbKyQowHtzy1uEl8DLCnEktrQ2PBD1NxSlXKIW5hz6cJACH5BAkKAAcALAAAAAAhACMAAAOkeLrc/jDKSau9OOvNlTFd9H3hYxAEWDJfkK5LGwTq+g0zDR/GgM+10A04Cm56OANgqTRmkDTmSOiLMgFOTM9AnFJHuexzYBAIijZf2SweJ8ttbbXLmd5+wBiJosSCoGF/fXEeS1g8gHl9hxODKkh4gkwVIwUekESIhA4FlgV3PyCWG52WI2oGnR2lnUWpqhqVEF4Xi7QjhpsshpOFvLosrnpoEAkAIfkECQoABwAsAAAAACEAIwAAA6l4utz+MMpJq71YGpPr3t1kEAQXQltQnk8aBCa7bMMLy4wx1G8s072PL6SrGQDI4zBThCU/v50zCVhidIYgNPqxWZkDg0AgxB2K4vEXbBSvr1JtZ3uOext0x7FqovF6OXtfe1UzdjAxhINPM013ChtJER8FBQeVRX8GlpggFZWWfjwblTiigGZnfqRmpUKbljKxDrNMeY2eF4R8jUiSur6/Z8GFV2WBtwwJACH5BAkKAAcALAAAAAAhACMAAAO6eLrcZi3KyQwhkGpq8f6ONWQgaAxB8JTfg6YkO50pzD5xhaurhCsGAKCnEw6NucNDCAkyI8ugdAhFKpnJJdMaeiofBejowUseCr9GYa0j1GyMdVgjBxoEuPSZXWKf7gKBeHtzMms0gHgGfDIVLztmjScvNZEyk28qjT40b5aXlHCbDgOhnzedoqOOlKeopaqrCy56sgtotbYKhYW6e7e9tsHBssO6eSTIm1peV0iuFUZDyU7NJnmcuQsJACH5BAkKAAcALAAAAAAhACMAAAOteLrc/jDKSZsxNS9DCNYV54Hh4H0kdAXBgKaOwbYX/Miza1vrVe8KA2AoJL5gwiQgeZz4GMXlcHl8xozQ3kW3KTajL9zsBJ1+sV2fQfALem+XAlRApxu4ioI1UpC76zJ4fRqDBzI+LFyFhH1iiS59fkgziW07jjRAG5QDeECOLk2Tj6KjnZafW6hAej6Smgevr6yysza2tiCuMasUF2Yov2gZUUQbU8YaaqjLpQkAOw==' # NOQA
|
||||
@@ -237,73 +233,95 @@ def test_ui_settings(get, put, patch, delete, admin):
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_requires_superuser(get, post, alice):
|
||||
def test_logging_aggregator_connection_test_requires_superuser(post, alice):
|
||||
url = reverse('api:setting_logging_test')
|
||||
post(url, {}, user=alice, expect=403)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('key', [
|
||||
'LOG_AGGREGATOR_TYPE',
|
||||
'LOG_AGGREGATOR_HOST',
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregator_connection_test_not_enabled(post, admin):
|
||||
url = reverse('api:setting_logging_test')
|
||||
resp = post(url, {}, user=admin, expect=409)
|
||||
assert 'Logging not enabled' in resp.data.get('error')
|
||||
|
||||
|
||||
def _mock_logging_defaults():
|
||||
# Pre-populate settings obj with defaults
|
||||
class MockSettings:
|
||||
pass
|
||||
mock_settings_obj = MockSettings()
|
||||
mock_settings_json = dict()
|
||||
for key in settings_registry.get_registered_settings(category_slug='logging'):
|
||||
value = settings_registry.get_setting_field(key).get_default()
|
||||
setattr(mock_settings_obj, key, value)
|
||||
mock_settings_json[key] = value
|
||||
setattr(mock_settings_obj, 'MAX_EVENT_RES_DATA', 700000)
|
||||
return mock_settings_obj, mock_settings_json
|
||||
|
||||
|
||||
|
||||
@pytest.mark.parametrize('key, value, error', [
|
||||
['LOG_AGGREGATOR_TYPE', 'logstash', 'Cannot enable log aggregator without providing host.'],
|
||||
['LOG_AGGREGATOR_HOST', 'https://logstash', 'Cannot enable log aggregator without providing type.']
|
||||
])
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_bad_request(get, post, admin, key):
|
||||
url = reverse('api:setting_logging_test')
|
||||
resp = post(url, {}, user=admin, expect=400)
|
||||
assert 'This field is required.' in resp.data.get(key, [])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_valid(mocker, get, post, admin):
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
url = reverse('api:setting_logging_test')
|
||||
user_data = {
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_HOST': 'localhost',
|
||||
'LOG_AGGREGATOR_PORT': 8080,
|
||||
'LOG_AGGREGATOR_USERNAME': 'logger',
|
||||
'LOG_AGGREGATOR_PASSWORD': 'mcstash'
|
||||
}
|
||||
post(url, user_data, user=admin, expect=200)
|
||||
args, kwargs = perform_test.call_args_list[0]
|
||||
create_settings = kwargs['custom_settings']
|
||||
for k, v in user_data.items():
|
||||
assert hasattr(create_settings, k)
|
||||
assert getattr(create_settings, k) == v
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_with_masked_password(mocker, patch, post, admin):
|
||||
def test_logging_aggregator_missing_settings(put, post, admin, key, value, error):
|
||||
_, mock_settings = _mock_logging_defaults()
|
||||
mock_settings['LOG_AGGREGATOR_ENABLED'] = True
|
||||
mock_settings[key] = value
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
patch(url, user=admin, data={'LOG_AGGREGATOR_PASSWORD': 'password123'}, expect=200)
|
||||
time.sleep(1) # log settings are cached slightly
|
||||
response = put(url, data=mock_settings, user=admin, expect=400)
|
||||
assert error in str(response.data)
|
||||
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
url = reverse('api:setting_logging_test')
|
||||
user_data = {
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_HOST': 'localhost',
|
||||
'LOG_AGGREGATOR_PORT': 8080,
|
||||
'LOG_AGGREGATOR_USERNAME': 'logger',
|
||||
'LOG_AGGREGATOR_PASSWORD': '$encrypted$'
|
||||
}
|
||||
post(url, user_data, user=admin, expect=200)
|
||||
args, kwargs = perform_test.call_args_list[0]
|
||||
create_settings = kwargs['custom_settings']
|
||||
assert getattr(create_settings, 'LOG_AGGREGATOR_PASSWORD') == 'password123'
|
||||
|
||||
@pytest.mark.parametrize('type, host, port, username, password', [
|
||||
['logstash', 'localhost', 8080, 'logger', 'mcstash'],
|
||||
['loggly', 'http://logs-01.loggly.com/inputs/1fd38090-hash-h4a$h-8d80-t0k3n71/tag/http/', None, None, None],
|
||||
['splunk', 'https://yoursplunk:8088/services/collector/event', None, None, None],
|
||||
['other', '97.221.40.41', 9000, 'logger', 'mcstash'],
|
||||
['sumologic', 'https://endpoint5.collection.us2.sumologic.com/receiver/v1/http/Zagnw_f9XGr_zZgd-_EPM0hb8_rUU7_RU8Q==',
|
||||
None, None, None]
|
||||
])
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregator_valid_settings(put, post, admin, type, host, port, username, password):
|
||||
_, mock_settings = _mock_logging_defaults()
|
||||
# type = 'splunk'
|
||||
# host = 'https://yoursplunk:8088/services/collector/event'
|
||||
mock_settings['LOG_AGGREGATOR_ENABLED'] = True
|
||||
mock_settings['LOG_AGGREGATOR_TYPE'] = type
|
||||
mock_settings['LOG_AGGREGATOR_HOST'] = host
|
||||
if port:
|
||||
mock_settings['LOG_AGGREGATOR_PORT'] = port
|
||||
if username:
|
||||
mock_settings['LOG_AGGREGATOR_USERNAME'] = username
|
||||
if password:
|
||||
mock_settings['LOG_AGGREGATOR_PASSWORD'] = password
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
response = put(url, data=mock_settings, user=admin, expect=200)
|
||||
assert type in response.data.get('LOG_AGGREGATOR_TYPE')
|
||||
assert host in response.data.get('LOG_AGGREGATOR_HOST')
|
||||
if port:
|
||||
assert port == response.data.get('LOG_AGGREGATOR_PORT')
|
||||
if username:
|
||||
assert username in response.data.get('LOG_AGGREGATOR_USERNAME')
|
||||
if password: # Note: password should be encrypted
|
||||
assert '$encrypted$' in response.data.get('LOG_AGGREGATOR_PASSWORD')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_logging_aggregrator_connection_test_invalid(mocker, get, post, admin):
|
||||
with mock.patch.object(AWXProxyHandler, 'perform_test') as perform_test:
|
||||
perform_test.side_effect = LoggingConnectivityException('404: Not Found')
|
||||
url = reverse('api:setting_logging_test')
|
||||
resp = post(url, {
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_HOST': 'localhost',
|
||||
'LOG_AGGREGATOR_PORT': 8080
|
||||
}, user=admin, expect=500)
|
||||
assert resp.data == {'error': '404: Not Found'}
|
||||
def test_logging_aggregator_connection_test_valid(put, post, admin):
|
||||
_, mock_settings = _mock_logging_defaults()
|
||||
type = 'other'
|
||||
host = 'https://localhost'
|
||||
mock_settings['LOG_AGGREGATOR_ENABLED'] = True
|
||||
mock_settings['LOG_AGGREGATOR_TYPE'] = type
|
||||
mock_settings['LOG_AGGREGATOR_HOST'] = host
|
||||
# POST to save these mock settings
|
||||
url = reverse('api:setting_singleton_detail', kwargs={'category_slug': 'logging'})
|
||||
put(url, data=mock_settings, user=admin, expect=200)
|
||||
# "Test" the logger
|
||||
url = reverse('api:setting_logging_test')
|
||||
post(url, {}, user=admin, expect=202)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -23,9 +23,9 @@ def _mk_project_update():
|
||||
|
||||
|
||||
def _mk_inventory_update():
|
||||
source = InventorySource()
|
||||
source = InventorySource(source='ec2')
|
||||
source.save()
|
||||
iu = InventoryUpdate(inventory_source=source)
|
||||
iu = InventoryUpdate(inventory_source=source, source='e2')
|
||||
return iu
|
||||
|
||||
|
||||
|
||||
@@ -123,7 +123,11 @@ def test_delete_project_update_in_active_state(project, delete, admin, status):
|
||||
@pytest.mark.parametrize("status", list(TEST_STATES))
|
||||
@pytest.mark.django_db
|
||||
def test_delete_inventory_update_in_active_state(inventory_source, delete, admin, status):
|
||||
i = InventoryUpdate.objects.create(inventory_source=inventory_source, status=status)
|
||||
i = InventoryUpdate.objects.create(
|
||||
inventory_source=inventory_source,
|
||||
status=status,
|
||||
source=inventory_source.source
|
||||
)
|
||||
url = reverse('api:inventory_update_detail', kwargs={'pk': i.pk})
|
||||
delete(url, None, admin, expect=403)
|
||||
|
||||
|
||||
@@ -228,7 +228,7 @@ class TestINIImports:
|
||||
assert inventory.hosts.count() == 1 # baseline worked
|
||||
|
||||
inv_src2 = inventory.inventory_sources.create(
|
||||
name='bar', overwrite=True
|
||||
name='bar', overwrite=True, source='ec2'
|
||||
)
|
||||
os.environ['INVENTORY_SOURCE_ID'] = str(inv_src2.pk)
|
||||
os.environ['INVENTORY_UPDATE_ID'] = str(inv_src2.create_unified_job().pk)
|
||||
|
||||
@@ -568,7 +568,10 @@ def inventory_source_factory(inventory_factory):
|
||||
|
||||
@pytest.fixture
|
||||
def inventory_update(inventory_source):
|
||||
return InventoryUpdate.objects.create(inventory_source=inventory_source)
|
||||
return InventoryUpdate.objects.create(
|
||||
inventory_source=inventory_source,
|
||||
source=inventory_source.source
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -197,9 +197,10 @@ class TestRelatedJobs:
|
||||
assert job.id in [jerb.id for jerb in group._get_related_jobs()]
|
||||
|
||||
def test_related_group_update(self, group):
|
||||
src = group.inventory_sources.create(name='foo')
|
||||
src = group.inventory_sources.create(name='foo', source='ec2')
|
||||
job = InventoryUpdate.objects.create(
|
||||
inventory_source=src
|
||||
inventory_source=src,
|
||||
source=src.source
|
||||
)
|
||||
assert job.id in [jerb.id for jerb in group._get_related_jobs()]
|
||||
|
||||
|
||||
@@ -109,6 +109,7 @@ class TestJobNotificationMixin(object):
|
||||
kwargs = {}
|
||||
if JobClass is InventoryUpdate:
|
||||
kwargs['inventory_source'] = inventory_source
|
||||
kwargs['source'] = inventory_source.source
|
||||
elif JobClass is ProjectUpdate:
|
||||
kwargs['project'] = project
|
||||
|
||||
|
||||
@@ -325,16 +325,19 @@ def test_dst_phantom_hour(job_template):
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.timeout(3)
|
||||
def test_beginning_of_time(job_template):
|
||||
# ensure that really large generators don't have performance issues
|
||||
start = now()
|
||||
rrule = 'DTSTART:19700101T000000Z RRULE:FREQ=MINUTELY;INTERVAL=1'
|
||||
s = Schedule(
|
||||
name='Some Schedule',
|
||||
rrule=rrule,
|
||||
unified_job_template=job_template
|
||||
)
|
||||
with pytest.raises(ValueError):
|
||||
s.save()
|
||||
s.save()
|
||||
assert s.next_run > start
|
||||
assert (s.next_run - start).total_seconds() < 60
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -297,7 +297,10 @@ class TestInstanceGroupOrdering:
|
||||
assert ad_hoc.preferred_instance_groups == [ig_inv, ig_org]
|
||||
|
||||
def test_inventory_update_instance_groups(self, instance_group_factory, inventory_source, default_instance_group):
|
||||
iu = InventoryUpdate.objects.create(inventory_source=inventory_source)
|
||||
iu = InventoryUpdate.objects.create(
|
||||
inventory_source=inventory_source,
|
||||
source=inventory_source.source
|
||||
)
|
||||
assert iu.preferred_instance_groups == [default_instance_group]
|
||||
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
|
||||
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
|
||||
|
||||
@@ -16,7 +16,7 @@ DATA = os.path.join(os.path.dirname(data.__file__), 'inventory')
|
||||
|
||||
TEST_SOURCE_FIELDS = {
|
||||
'vmware': {
|
||||
'instance_filters': 'foobaa',
|
||||
'instance_filters': '{{ config.name == "only_my_server" }},{{ somevar == "bar"}}',
|
||||
'group_by': 'fouo'
|
||||
},
|
||||
'ec2': {
|
||||
@@ -38,7 +38,10 @@ TEST_SOURCE_FIELDS = {
|
||||
|
||||
INI_TEST_VARS = {
|
||||
'ec2': {
|
||||
'boto_profile': '/tmp/my_boto_stuff'
|
||||
'boto_profile': '/tmp/my_boto_stuff',
|
||||
'iam_role_arn': 'arn:aws:iam::123456789012:role/test-role',
|
||||
'hostname_variable': 'public_dns_name',
|
||||
'destination_variable': 'public_dns_name'
|
||||
},
|
||||
'gce': {},
|
||||
'openstack': {
|
||||
@@ -50,6 +53,9 @@ INI_TEST_VARS = {
|
||||
'rhv': {}, # there are none
|
||||
'tower': {}, # there are none
|
||||
'vmware': {
|
||||
'alias_pattern': "{{ config.foo }}",
|
||||
'host_filters': '{{ config.zoo == "DC0_H0_VM0" }}',
|
||||
'groupby_patterns': "{{ config.asdf }}",
|
||||
# setting VMWARE_VALIDATE_CERTS is duplicated with env var
|
||||
},
|
||||
'azure_rm': {
|
||||
@@ -315,9 +321,10 @@ def test_inventory_update_injected_content(this_kind, script_or_plugin, inventor
|
||||
with mock.patch('awx.main.models.inventory.PluginFileInjector.should_use_plugin', return_value=use_plugin):
|
||||
# Also do not send websocket status updates
|
||||
with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
|
||||
# The point of this test is that we replace run with assertions
|
||||
with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
|
||||
# mocking the licenser is necessary for the tower source
|
||||
with mock.patch('awx.main.models.inventory.get_licenser', mock_licenser):
|
||||
# so this sets up everything for a run and then yields control over to substitute_run
|
||||
task.run(inventory_update.pk)
|
||||
with mock.patch.object(task, 'get_ansible_version', return_value='2.13'):
|
||||
# The point of this test is that we replace run with assertions
|
||||
with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
|
||||
# mocking the licenser is necessary for the tower source
|
||||
with mock.patch('awx.main.models.inventory.get_licenser', mock_licenser):
|
||||
# so this sets up everything for a run and then yields control over to substitute_run
|
||||
task.run(inventory_update.pk)
|
||||
|
||||
@@ -186,7 +186,11 @@ def test_group(get, admin_user):
|
||||
def test_inventory_source(get, admin_user):
|
||||
test_org = Organization.objects.create(name='test_org')
|
||||
test_inv = Inventory.objects.create(name='test_inv', organization=test_org)
|
||||
test_source = InventorySource.objects.create(name='test_source', inventory=test_inv)
|
||||
test_source = InventorySource.objects.create(
|
||||
name='test_source',
|
||||
inventory=test_inv,
|
||||
source='ec2'
|
||||
)
|
||||
url = reverse('api:inventory_source_detail', kwargs={'pk': test_source.pk})
|
||||
response = get(url, user=admin_user, expect=200)
|
||||
assert response.data['related']['named_url'].endswith('/test_source++test_inv++test_org/')
|
||||
|
||||
@@ -90,7 +90,7 @@ def test_inherited_notification_templates(get, post, user, organization, project
|
||||
notification_templates.append(response.data['id'])
|
||||
i = Inventory.objects.create(name='test', organization=organization)
|
||||
i.save()
|
||||
isrc = InventorySource.objects.create(name='test', inventory=i)
|
||||
isrc = InventorySource.objects.create(name='test', inventory=i, source='ec2')
|
||||
isrc.save()
|
||||
jt = JobTemplate.objects.create(name='test', inventory=i, project=project, playbook='debug.yml')
|
||||
jt.save()
|
||||
|
||||
@@ -65,14 +65,29 @@ def test_job_template_access_read_level(jt_linked, rando):
|
||||
assert not access.can_unattach(jt_linked, cred, 'credentials', {})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_project_use_access(project, rando):
|
||||
project.use_role.members.add(rando)
|
||||
access = JobTemplateAccess(rando)
|
||||
assert access.can_add(None)
|
||||
assert access.can_add({'project': project.id, 'ask_inventory_on_launch': True})
|
||||
project2 = Project.objects.create(
|
||||
name='second-project', scm_type=project.scm_type, playbook_files=project.playbook_files,
|
||||
organization=project.organization,
|
||||
)
|
||||
project2.use_role.members.add(rando)
|
||||
jt = JobTemplate.objects.create(project=project, ask_inventory_on_launch=True)
|
||||
jt.admin_role.members.add(rando)
|
||||
assert access.can_change(jt, {'project': project2.pk})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_template_access_use_level(jt_linked, rando):
|
||||
access = JobTemplateAccess(rando)
|
||||
jt_linked.project.use_role.members.add(rando)
|
||||
jt_linked.inventory.use_role.members.add(rando)
|
||||
jt_linked.organization.job_template_admin_role.members.add(rando)
|
||||
jt_linked.admin_role.members.add(rando)
|
||||
proj_pk = jt_linked.project.pk
|
||||
org_pk = jt_linked.organization_id
|
||||
|
||||
assert access.can_change(jt_linked, {'job_type': 'check', 'project': proj_pk})
|
||||
assert access.can_change(jt_linked, {'job_type': 'check', 'inventory': None})
|
||||
@@ -80,8 +95,8 @@ def test_job_template_access_use_level(jt_linked, rando):
|
||||
for cred in jt_linked.credentials.all():
|
||||
assert access.can_unattach(jt_linked, cred, 'credentials', {})
|
||||
|
||||
assert access.can_add(dict(inventory=jt_linked.inventory.pk, project=proj_pk, organization=org_pk))
|
||||
assert access.can_add(dict(project=proj_pk, organization=org_pk))
|
||||
assert access.can_add(dict(inventory=jt_linked.inventory.pk, project=proj_pk))
|
||||
assert access.can_add(dict(project=proj_pk))
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -94,17 +109,16 @@ def test_job_template_access_admin(role_names, jt_linked, rando):
|
||||
assert not access.can_read(jt_linked)
|
||||
assert not access.can_delete(jt_linked)
|
||||
|
||||
# Appoint this user as admin of the organization
|
||||
jt_linked.organization.admin_role.members.add(rando)
|
||||
org_pk = jt_linked.organization.id
|
||||
# Appoint this user to the org role
|
||||
organization = jt_linked.organization
|
||||
for role_name in role_names:
|
||||
getattr(organization, role_name).members.add(rando)
|
||||
|
||||
# Assign organization permission in the same way the create view does
|
||||
organization = jt_linked.inventory.organization
|
||||
ssh_cred.admin_role.parents.add(organization.admin_role)
|
||||
|
||||
proj_pk = jt_linked.project.pk
|
||||
assert access.can_add(dict(inventory=jt_linked.inventory.pk, project=proj_pk, organization=org_pk))
|
||||
assert access.can_add(dict(credential=ssh_cred.pk, project=proj_pk, organization=org_pk))
|
||||
assert access.can_add(dict(inventory=jt_linked.inventory.pk, project=proj_pk))
|
||||
|
||||
for cred in jt_linked.credentials.all():
|
||||
assert access.can_unattach(jt_linked, cred, 'credentials', {})
|
||||
@@ -170,12 +184,10 @@ class TestOrphanJobTemplate:
|
||||
@pytest.mark.job_permissions
|
||||
def test_job_template_creator_access(project, organization, rando, post):
|
||||
project.use_role.members.add(rando)
|
||||
organization.job_template_admin_role.members.add(rando)
|
||||
response = post(url=reverse('api:job_template_list'), data=dict(
|
||||
name='newly-created-jt',
|
||||
ask_inventory_on_launch=True,
|
||||
project=project.pk,
|
||||
organization=organization.id,
|
||||
playbook='helloworld.yml'
|
||||
), user=rando, expect=201)
|
||||
|
||||
|
||||
@@ -24,7 +24,11 @@ def test_implied_organization_subquery_inventory():
|
||||
inventory = Inventory.objects.create(name='foo{}'.format(i))
|
||||
else:
|
||||
inventory = Inventory.objects.create(name='foo{}'.format(i), organization=org)
|
||||
inv_src = InventorySource.objects.create(name='foo{}'.format(i), inventory=inventory)
|
||||
inv_src = InventorySource.objects.create(
|
||||
name='foo{}'.format(i),
|
||||
inventory=inventory,
|
||||
source='ec2'
|
||||
)
|
||||
sources = UnifiedJobTemplate.objects.annotate(
|
||||
test_field=rbac.implicit_org_subquery(UnifiedJobTemplate, InventorySource)
|
||||
)
|
||||
|
||||
@@ -60,6 +60,8 @@ def test_org_user_role_attach(user, organization, inventory):
|
||||
'''
|
||||
admin = user('admin')
|
||||
nonmember = user('nonmember')
|
||||
other_org = Organization.objects.create(name="other_org")
|
||||
other_org.member_role.members.add(nonmember)
|
||||
inventory.admin_role.members.add(nonmember)
|
||||
|
||||
organization.admin_role.members.add(admin)
|
||||
@@ -186,13 +188,17 @@ def test_need_all_orgs_to_admin_user(user):
|
||||
|
||||
# Orphaned user can be added to member role, only in special cases
|
||||
@pytest.mark.django_db
|
||||
def test_orphaned_user_allowed(org_admin, rando, organization):
|
||||
def test_orphaned_user_allowed(org_admin, rando, organization, org_credential):
|
||||
'''
|
||||
We still allow adoption of orphaned* users by assigning them to
|
||||
organization member role, but only in the situation where the
|
||||
org admin already possesses indirect access to all of the user's roles
|
||||
*orphaned means user is not a member of any organization
|
||||
'''
|
||||
# give a descendent role to rando, to trigger the conditional
|
||||
# where all ancestor roles of rando should be in the set of
|
||||
# org_admin roles.
|
||||
org_credential.admin_role.members.add(rando)
|
||||
role_access = RoleAccess(org_admin)
|
||||
org_access = OrganizationAccess(org_admin)
|
||||
assert role_access.can_attach(organization.member_role, rando, 'members', None)
|
||||
|
||||
awx/main/tests/unit/analytics/test_broadcast_websocket.py (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
import datetime
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import FixedSlidingWindow
|
||||
from awx.main.analytics.broadcast_websocket import dt_to_seconds
|
||||
|
||||
|
||||
class TestFixedSlidingWindow():
|
||||
|
||||
def ts(self, **kwargs):
|
||||
e = {
|
||||
'year': 1985,
|
||||
'month': 1,
|
||||
'day': 1,
|
||||
'hour': 1,
|
||||
}
|
||||
return dt_to_seconds(datetime.datetime(**kwargs, **e))
|
||||
|
||||
def test_record_same_minute(self):
|
||||
"""
|
||||
Legend:
|
||||
- = record()
|
||||
^ = render()
|
||||
|---| = 1 minute, 60 seconds
|
||||
|
||||
....................
|
||||
|------------------------------------------------------------|
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
"""
|
||||
|
||||
fsw = FixedSlidingWindow(self.ts(minute=0, second=0, microsecond=0))
|
||||
for i in range(20):
|
||||
fsw.record(self.ts(minute=0, second=i, microsecond=0))
|
||||
assert (i + 1) == fsw.render(self.ts(minute=0, second=i, microsecond=0))
|
||||
|
||||
|
||||
def test_record_same_minute_render_diff_minute(self):
|
||||
"""
|
||||
Legend:
|
||||
- = record()
|
||||
^ = render()
|
||||
|---| = 1 minute, 60 seconds
|
||||
|
||||
....................
|
||||
|------------------------------------------------------------|
|
||||
^^ ^
|
||||
AB C
|
||||
|------------------------------------------------------------|
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
DEEEEEEEEEEEEEEEEEEEF
|
||||
"""
|
||||
|
||||
fsw = FixedSlidingWindow(self.ts(minute=0, second=0, microsecond=0))
|
||||
for i in range(20):
|
||||
fsw.record(self.ts(minute=0, second=i, microsecond=0))
|
||||
|
||||
assert 20 == fsw.render(self.ts(minute=0, second=19, microsecond=0)), \
|
||||
"A. The second of the last record() call"
|
||||
assert 20 == fsw.render(self.ts(minute=0, second=20, microsecond=0)), \
|
||||
"B. The second after the last record() call"
|
||||
assert 20 == fsw.render(self.ts(minute=0, second=59, microsecond=0)), \
|
||||
"C. Last second in the same minute that all record() called in"
|
||||
assert 20 == fsw.render(self.ts(minute=1, second=0, microsecond=0)), \
|
||||
"D. First second of the minute following the minute that all record() calls in"
|
||||
for i in range(20):
|
||||
assert 20 - i == fsw.render(self.ts(minute=1, second=i, microsecond=0)), \
|
||||
"E. Sliding window where 1 record() should drop from the results each time"
|
||||
|
||||
assert 0 == fsw.render(self.ts(minute=1, second=20, microsecond=0)), \
|
||||
"F. First second one minute after all record() calls"
|
||||
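The test file above pins down the behaviour of FixedSlidingWindow: a render() one minute or more after a record() no longer counts it, and the count decays one entry per second once the window slides past the recorded minute. The real implementation lives in awx.main.analytics.broadcast_websocket and is not shown in this diff; the following is only a guess at a minimal structure consistent with the assertions above.

# Sketch of a per-second bucketed counter that satisfies the assertions above.
# Not the AWX implementation; the constructor argument exists only to match
# the FixedSlidingWindow(start_time) call signature used in the tests.
class FixedSlidingWindowSketch:
    def __init__(self, start_time=None):
        self.buckets = {}  # second (int) -> number of record() calls in that second

    def record(self, ts):
        sec = int(ts)
        self.buckets[sec] = self.buckets.get(sec, 0) + 1

    def render(self, ts):
        # A record made at second s still counts at time t while t - s <= 60.
        cutoff = int(ts) - 60
        self.buckets = {sec: n for sec, n in self.buckets.items() if sec >= cutoff}
        return sum(self.buckets.values())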
awx/main/tests/unit/api/test_logger.py (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
import pytest
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.utils.external_logging import construct_rsyslog_conf_template
|
||||
from awx.main.tests.functional.api.test_settings import _mock_logging_defaults
|
||||
|
||||
'''
|
||||
# Example User Data
|
||||
data_logstash = {
|
||||
"LOG_AGGREGATOR_TYPE": "logstash",
|
||||
"LOG_AGGREGATOR_HOST": "localhost",
|
||||
"LOG_AGGREGATOR_PORT": 8080,
|
||||
"LOG_AGGREGATOR_PROTOCOL": "tcp",
|
||||
"LOG_AGGREGATOR_USERNAME": "logger",
|
||||
"LOG_AGGREGATOR_PASSWORD": "mcstash"
|
||||
}
|
||||
|
||||
data_netcat = {
|
||||
"LOG_AGGREGATOR_TYPE": "other",
|
||||
"LOG_AGGREGATOR_HOST": "localhost",
|
||||
"LOG_AGGREGATOR_PORT": 9000,
|
||||
"LOG_AGGREGATOR_PROTOCOL": "udp",
|
||||
}
|
||||
|
||||
data_loggly = {
|
||||
"LOG_AGGREGATOR_TYPE": "loggly",
|
||||
"LOG_AGGREGATOR_HOST": "http://logs-01.loggly.com/inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/",
|
||||
"LOG_AGGREGATOR_PORT": 8080,
|
||||
"LOG_AGGREGATOR_PROTOCOL": "https"
|
||||
}
|
||||
'''
|
||||
|
||||
|
||||
# Test reconfigure logging settings function
|
||||
# name this whatever you want
|
||||
@pytest.mark.parametrize(
|
||||
'enabled, type, host, port, protocol, expected_config', [
|
||||
(
|
||||
True,
|
||||
'loggly',
|
||||
'http://logs-01.loggly.com/inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/',
|
||||
None,
|
||||
'https',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # localhost w/ custom UDP port
|
||||
'other',
|
||||
'localhost',
|
||||
9000,
|
||||
'udp',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # localhost w/ custom TCP port
|
||||
'other',
|
||||
'localhost',
|
||||
9000,
|
||||
'tcp',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # https, default port 443
|
||||
'splunk',
|
||||
'https://yoursplunk/services/collector/event',
|
||||
None,
|
||||
None,
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # http, default port 80
|
||||
'splunk',
|
||||
'http://yoursplunk/services/collector/event',
|
||||
None,
|
||||
None,
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # https, custom port in URL string
|
||||
'splunk',
|
||||
'https://yoursplunk:8088/services/collector/event',
|
||||
None,
|
||||
None,
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # https, custom port explicitly specified
|
||||
'splunk',
|
||||
'https://yoursplunk/services/collector/event',
|
||||
8088,
|
||||
None,
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # no scheme specified in URL, default to https, respect custom port
|
||||
'splunk',
|
||||
'yoursplunk.org/services/collector/event',
|
||||
8088,
|
||||
'https',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # respect custom http-only port
|
||||
'splunk',
|
||||
'http://yoursplunk.org/services/collector/event',
|
||||
8088,
|
||||
None,
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_rsyslog_conf_template(enabled, type, host, port, protocol, expected_config):
|
||||
|
||||
mock_settings, _ = _mock_logging_defaults()
|
||||
|
||||
# Set test settings
|
||||
logging_defaults = getattr(settings, 'LOGGING')
|
||||
setattr(mock_settings, 'LOGGING', logging_defaults)
|
||||
setattr(mock_settings, 'LOGGING["handlers"]["external_logger"]["address"]', '/var/run/awx-rsyslog/rsyslog.sock')
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_ENABLED', enabled)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_TYPE', type)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_HOST', host)
|
||||
if port:
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_PORT', port)
|
||||
if protocol:
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_PROTOCOL', protocol)
|
||||
|
||||
# create rsyslog conf template
|
||||
tmpl = construct_rsyslog_conf_template(mock_settings)
|
||||
|
||||
# check validity of created template
|
||||
assert expected_config in tmpl
|
||||
|
||||
|
||||
def test_splunk_auth():
|
||||
mock_settings, _ = _mock_logging_defaults()
|
||||
# Set test settings
|
||||
logging_defaults = getattr(settings, 'LOGGING')
|
||||
setattr(mock_settings, 'LOGGING', logging_defaults)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_ENABLED', True)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_TYPE', 'splunk')
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_HOST', 'example.org')
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_PASSWORD', 'SECRET-TOKEN')
|
||||
|
||||
tmpl = construct_rsyslog_conf_template(mock_settings)
|
||||
assert 'httpheaderkey="Authorization" httpheadervalue="Splunk SECRET-TOKEN"' in tmpl
|
||||
@@ -18,7 +18,7 @@ def test_send_messages():
|
||||
requests_mock.post.assert_called_once_with(
|
||||
'https://example.com/api/annotations',
|
||||
headers={'Content-Type': 'application/json', 'Authorization': 'Bearer testapikey'},
|
||||
json={'tags': [], 'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': None, 'time': 60000, 'dashboardId': None},
|
||||
json={'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': None, 'time': 60000, 'dashboardId': None},
|
||||
verify=True)
|
||||
assert sent_messages == 1
|
||||
|
||||
@@ -36,7 +36,7 @@ def test_send_messages_with_no_verify_ssl():
|
||||
requests_mock.post.assert_called_once_with(
|
||||
'https://example.com/api/annotations',
|
||||
headers={'Content-Type': 'application/json', 'Authorization': 'Bearer testapikey'},
|
||||
json={'tags': [], 'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': None,'time': 60000, 'dashboardId': None},
|
||||
json={'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': None,'time': 60000, 'dashboardId': None},
|
||||
verify=False)
|
||||
assert sent_messages == 1
|
||||
|
||||
@@ -54,7 +54,7 @@ def test_send_messages_with_dashboardid():
|
||||
requests_mock.post.assert_called_once_with(
|
||||
'https://example.com/api/annotations',
|
||||
headers={'Content-Type': 'application/json', 'Authorization': 'Bearer testapikey'},
|
||||
json={'tags': [], 'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': None, 'time': 60000, 'dashboardId': 42},
|
||||
json={'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': None, 'time': 60000, 'dashboardId': 42},
|
||||
verify=True)
|
||||
assert sent_messages == 1
|
||||
|
||||
@@ -72,7 +72,7 @@ def test_send_messages_with_panelid():
|
||||
requests_mock.post.assert_called_once_with(
|
||||
'https://example.com/api/annotations',
|
||||
headers={'Content-Type': 'application/json', 'Authorization': 'Bearer testapikey'},
|
||||
json={'tags': [], 'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': 42, 'time': 60000, 'dashboardId': None},
|
||||
json={'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': 42, 'time': 60000, 'dashboardId': None},
|
||||
verify=True)
|
||||
assert sent_messages == 1
|
||||
|
||||
@@ -90,7 +90,7 @@ def test_send_messages_with_bothids():
|
||||
requests_mock.post.assert_called_once_with(
|
||||
'https://example.com/api/annotations',
|
||||
headers={'Content-Type': 'application/json', 'Authorization': 'Bearer testapikey'},
|
||||
json={'tags': [], 'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': 42, 'time': 60000, 'dashboardId': 42},
|
||||
json={'text': 'test subject', 'isRegion': True, 'timeEnd': 120000, 'panelId': 42, 'time': 60000, 'dashboardId': 42},
|
||||
verify=True)
|
||||
assert sent_messages == 1
|
||||
|
||||
|
||||
@@ -183,6 +183,51 @@ def test_openstack_client_config_generation(mocker, source, expected, private_da
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.parametrize("source,expected", [
|
||||
(None, True), (False, False), (True, True)
|
||||
])
|
||||
def test_openstack_client_config_generation_with_project_domain_name(mocker, source, expected, private_data_dir):
|
||||
update = tasks.RunInventoryUpdate()
|
||||
credential_type = CredentialType.defaults['openstack']()
|
||||
inputs = {
|
||||
'host': 'https://keystone.openstack.example.org',
|
||||
'username': 'demo',
|
||||
'password': 'secrete',
|
||||
'project': 'demo-project',
|
||||
'domain': 'my-demo-domain',
|
||||
'project_domain_name': 'project-domain',
|
||||
}
|
||||
if source is not None:
|
||||
inputs['verify_ssl'] = source
|
||||
credential = Credential(pk=1, credential_type=credential_type, inputs=inputs)
|
||||
|
||||
inventory_update = mocker.Mock(**{
|
||||
'source': 'openstack',
|
||||
'source_vars_dict': {},
|
||||
'get_cloud_credential': mocker.Mock(return_value=credential),
|
||||
'get_extra_credentials': lambda x: [],
|
||||
'ansible_virtualenv_path': '/venv/foo'
|
||||
})
|
||||
cloud_config = update.build_private_data(inventory_update, private_data_dir)
|
||||
cloud_credential = yaml.safe_load(
|
||||
cloud_config.get('credentials')[credential]
|
||||
)
|
||||
assert cloud_credential['clouds'] == {
|
||||
'devstack': {
|
||||
'auth': {
|
||||
'auth_url': 'https://keystone.openstack.example.org',
|
||||
'password': 'secrete',
|
||||
'project_name': 'demo-project',
|
||||
'username': 'demo',
|
||||
'domain_name': 'my-demo-domain',
|
||||
'project_domain_name': 'project-domain',
|
||||
},
|
||||
'verify': expected,
|
||||
'private': True,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.parametrize("source,expected", [
|
||||
(False, False), (True, True)
|
||||
])
|
||||
@@ -1807,8 +1852,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = mocker.Mock(return_value=None)
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
assert 'AWS_ACCESS_KEY_ID' not in env
|
||||
assert 'AWS_SECRET_ACCESS_KEY' not in env
|
||||
@@ -1915,8 +1961,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
safe_env = {}
|
||||
credentials = task.build_credentials_list(inventory_update)
|
||||
@@ -2153,8 +2200,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
'satellite6_want_facts': False
|
||||
}
|
||||
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['FOREMAN_INI_PATH'])
|
||||
|
||||
@@ -1,393 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import base64
|
||||
import logging
|
||||
import socket
|
||||
import datetime
|
||||
from dateutil.tz import tzutc
|
||||
from io import StringIO
|
||||
from uuid import uuid4
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from django.conf import LazySettings
|
||||
from django.utils.encoding import smart_str
|
||||
import pytest
|
||||
import requests
|
||||
from requests_futures.sessions import FuturesSession
|
||||
|
||||
from awx.main.utils.handlers import (BaseHandler, BaseHTTPSHandler as HTTPSHandler,
|
||||
TCPHandler, UDPHandler, _encode_payload_for_socket,
|
||||
PARAM_NAMES, LoggingConnectivityException,
|
||||
AWXProxyHandler)
|
||||
from awx.main.utils.formatters import LogstashFormatter
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def https_adapter():
|
||||
class FakeHTTPSAdapter(requests.adapters.HTTPAdapter):
|
||||
requests = []
|
||||
status = 200
|
||||
reason = None
|
||||
|
||||
def send(self, request, **kwargs):
|
||||
self.requests.append(request)
|
||||
resp = requests.models.Response()
|
||||
resp.status_code = self.status
|
||||
resp.reason = self.reason
|
||||
resp.request = request
|
||||
return resp
|
||||
|
||||
return FakeHTTPSAdapter()
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def connection_error_adapter():
|
||||
class ConnectionErrorAdapter(requests.adapters.HTTPAdapter):
|
||||
|
||||
def send(self, request, **kwargs):
|
||||
err = requests.packages.urllib3.exceptions.SSLError()
|
||||
raise requests.exceptions.ConnectionError(err, request=request)
|
||||
|
||||
return ConnectionErrorAdapter()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def fake_socket(tmpdir_factory, request):
|
||||
sok = socket.socket
|
||||
sok.send = mock.MagicMock()
|
||||
sok.connect = mock.MagicMock()
|
||||
sok.setblocking = mock.MagicMock()
|
||||
sok.close = mock.MagicMock()
|
||||
return sok
|
||||
|
||||
|
||||
def test_https_logging_handler_requests_async_implementation():
|
||||
handler = HTTPSHandler()
|
||||
assert isinstance(handler.session, FuturesSession)
|
||||
|
||||
|
||||
def test_https_logging_handler_has_default_http_timeout():
|
||||
handler = TCPHandler()
|
||||
assert handler.tcp_timeout == 5
|
||||
|
||||
|
||||
@pytest.mark.parametrize('param', ['host', 'port', 'indv_facts'])
|
||||
def test_base_logging_handler_defaults(param):
|
||||
handler = BaseHandler()
|
||||
assert hasattr(handler, param) and getattr(handler, param) is None
|
||||
|
||||
|
||||
@pytest.mark.parametrize('param', ['host', 'port', 'indv_facts'])
|
||||
def test_base_logging_handler_kwargs(param):
|
||||
handler = BaseHandler(**{param: 'EXAMPLE'})
|
||||
assert hasattr(handler, param) and getattr(handler, param) == 'EXAMPLE'
|
||||
|
||||
|
||||
@pytest.mark.parametrize('params', [
|
||||
{
|
||||
'LOG_AGGREGATOR_HOST': 'https://server.invalid',
|
||||
'LOG_AGGREGATOR_PORT': 22222,
|
||||
'LOG_AGGREGATOR_TYPE': 'loggly',
|
||||
'LOG_AGGREGATOR_USERNAME': 'foo',
|
||||
'LOG_AGGREGATOR_PASSWORD': 'bar',
|
||||
'LOG_AGGREGATOR_INDIVIDUAL_FACTS': True,
|
||||
'LOG_AGGREGATOR_TCP_TIMEOUT': 96,
|
||||
'LOG_AGGREGATOR_VERIFY_CERT': False,
|
||||
'LOG_AGGREGATOR_PROTOCOL': 'https'
|
||||
},
|
||||
{
|
||||
'LOG_AGGREGATOR_HOST': 'https://server.invalid',
|
||||
'LOG_AGGREGATOR_PORT': 22222,
|
||||
'LOG_AGGREGATOR_PROTOCOL': 'udp'
|
||||
}
|
||||
])
|
||||
def test_real_handler_from_django_settings(params):
|
||||
settings = LazySettings()
|
||||
settings.configure(**params)
|
||||
handler = AWXProxyHandler().get_handler(custom_settings=settings)
|
||||
# need the _reverse_ dictionary from PARAM_NAMES
|
||||
attr_lookup = {}
|
||||
for attr_name, setting_name in PARAM_NAMES.items():
|
||||
attr_lookup[setting_name] = attr_name
|
||||
for setting_name, val in params.items():
|
||||
attr_name = attr_lookup[setting_name]
|
||||
if attr_name == 'protocol':
|
||||
continue
|
||||
assert hasattr(handler, attr_name)
|
||||
|
||||
|
||||
def test_invalid_kwarg_to_real_handler():
|
||||
settings = LazySettings()
|
||||
settings.configure(**{
|
||||
'LOG_AGGREGATOR_HOST': 'https://server.invalid',
|
||||
'LOG_AGGREGATOR_PORT': 22222,
|
||||
'LOG_AGGREGATOR_PROTOCOL': 'udp',
|
||||
'LOG_AGGREGATOR_VERIFY_CERT': False # setting not valid for UDP handler
|
||||
})
|
||||
handler = AWXProxyHandler().get_handler(custom_settings=settings)
|
||||
assert not hasattr(handler, 'verify_cert')
|
||||
|
||||
|
||||
def test_protocol_not_specified():
|
||||
settings = LazySettings()
|
||||
settings.configure(**{
|
||||
'LOG_AGGREGATOR_HOST': 'https://server.invalid',
|
||||
'LOG_AGGREGATOR_PORT': 22222,
|
||||
'LOG_AGGREGATOR_PROTOCOL': None # awx/settings/defaults.py
|
||||
})
|
||||
handler = AWXProxyHandler().get_handler(custom_settings=settings)
|
||||
assert isinstance(handler, logging.NullHandler)
|
||||
|
||||
|
||||
def test_base_logging_handler_emit_system_tracking(dummy_log_record):
|
||||
handler = BaseHandler(host='127.0.0.1', indv_facts=True)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
dummy_log_record.name = 'awx.analytics.system_tracking'
|
||||
dummy_log_record.msg = None
|
||||
dummy_log_record.inventory_id = 11
|
||||
dummy_log_record.host_name = 'my_lucky_host'
|
||||
dummy_log_record.job_id = 777
|
||||
dummy_log_record.ansible_facts = {
|
||||
"ansible_kernel": "4.4.66-boot2docker",
|
||||
"ansible_machine": "x86_64",
|
||||
"ansible_swapfree_mb": 4663,
|
||||
}
|
||||
dummy_log_record.ansible_facts_modified = datetime.datetime.now(tzutc()).isoformat()
|
||||
sent_payloads = handler.emit(dummy_log_record)
|
||||
|
||||
assert len(sent_payloads) == 1
|
||||
assert sent_payloads[0]['ansible_facts'] == dummy_log_record.ansible_facts
|
||||
assert sent_payloads[0]['ansible_facts_modified'] == dummy_log_record.ansible_facts_modified
|
||||
assert sent_payloads[0]['level'] == 'INFO'
|
||||
assert sent_payloads[0]['logger_name'] == 'awx.analytics.system_tracking'
|
||||
assert sent_payloads[0]['job_id'] == dummy_log_record.job_id
|
||||
assert sent_payloads[0]['inventory_id'] == dummy_log_record.inventory_id
|
||||
assert sent_payloads[0]['host_name'] == dummy_log_record.host_name
|
||||
|
||||
|
||||
@pytest.mark.parametrize('host, port, normalized, hostname_only', [
|
||||
('http://localhost', None, 'http://localhost', False),
|
||||
('http://localhost', 8080, 'http://localhost:8080', False),
|
||||
('https://localhost', 443, 'https://localhost:443', False),
|
||||
('ftp://localhost', 443, 'ftp://localhost:443', False),
|
||||
('https://localhost:550', 443, 'https://localhost:550', False),
|
||||
('https://localhost:yoho/foobar', 443, 'https://localhost:443/foobar', False),
|
||||
('https://localhost:yoho/foobar', None, 'https://localhost:yoho/foobar', False),
|
||||
('http://splunk.server:8088/services/collector/event', 80,
|
||||
'http://splunk.server:8088/services/collector/event', False),
|
||||
('http://splunk.server/services/collector/event', 8088,
|
||||
'http://splunk.server:8088/services/collector/event', False),
|
||||
('splunk.server:8088/services/collector/event', 80,
|
||||
'http://splunk.server:8088/services/collector/event', False),
|
||||
('splunk.server/services/collector/event', 8088,
|
||||
'http://splunk.server:8088/services/collector/event', False),
|
||||
('localhost', None, 'http://localhost', False),
|
||||
('localhost', 8080, 'http://localhost:8080', False),
|
||||
('localhost', 4399, 'localhost', True),
|
||||
('tcp://localhost:4399/foo/bar', 4399, 'localhost', True),
|
||||
])
|
||||
def test_base_logging_handler_host_format(host, port, normalized, hostname_only):
|
||||
handler = BaseHandler(host=host, port=port)
|
||||
assert handler._get_host(scheme='http', hostname_only=hostname_only) == normalized
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'status, reason, exc',
|
||||
[(200, '200 OK', None), (404, 'Not Found', LoggingConnectivityException)]
|
||||
)
|
||||
@pytest.mark.parametrize('protocol', ['http', 'https', None])
|
||||
def test_https_logging_handler_connectivity_test(https_adapter, status, reason, exc, protocol):
|
||||
host = 'example.org'
|
||||
if protocol:
|
||||
host = '://'.join([protocol, host])
|
||||
https_adapter.status = status
|
||||
https_adapter.reason = reason
|
||||
settings = LazySettings()
|
||||
settings.configure(**{
|
||||
'LOG_AGGREGATOR_HOST': host,
|
||||
'LOG_AGGREGATOR_PORT': 8080,
|
||||
'LOG_AGGREGATOR_TYPE': 'logstash',
|
||||
'LOG_AGGREGATOR_USERNAME': 'user',
|
||||
'LOG_AGGREGATOR_PASSWORD': 'password',
|
||||
'LOG_AGGREGATOR_LOGGERS': ['awx', 'activity_stream', 'job_events', 'system_tracking'],
|
||||
'LOG_AGGREGATOR_PROTOCOL': 'https',
|
||||
'CLUSTER_HOST_ID': '',
|
||||
'LOG_AGGREGATOR_TOWER_UUID': str(uuid4()),
|
||||
'LOG_AGGREGATOR_LEVEL': 'DEBUG',
|
||||
})
|
||||
|
||||
class FakeHTTPSHandler(HTTPSHandler):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(FakeHTTPSHandler, self).__init__(*args, **kwargs)
|
||||
self.session.mount('{}://'.format(protocol or 'https'), https_adapter)
|
||||
|
||||
def emit(self, record):
|
||||
return super(FakeHTTPSHandler, self).emit(record)
|
||||
|
||||
with mock.patch.object(AWXProxyHandler, 'get_handler_class') as mock_get_class:
|
||||
mock_get_class.return_value = FakeHTTPSHandler
|
||||
if exc:
|
||||
with pytest.raises(exc) as e:
|
||||
AWXProxyHandler().perform_test(settings)
|
||||
assert str(e).endswith('%s: %s' % (status, reason))
|
||||
else:
|
||||
assert AWXProxyHandler().perform_test(settings) is None
|
||||
|
||||
|
||||
def test_https_logging_handler_logstash_auth_info():
|
||||
handler = HTTPSHandler(message_type='logstash', username='bob', password='ansible')
|
||||
handler._add_auth_information()
|
||||
assert isinstance(handler.session.auth, requests.auth.HTTPBasicAuth)
|
||||
assert handler.session.auth.username == 'bob'
|
||||
assert handler.session.auth.password == 'ansible'
|
||||
|
||||
|
||||
def test_https_logging_handler_splunk_auth_info():
|
||||
handler = HTTPSHandler(message_type='splunk', password='ansible')
|
||||
handler._add_auth_information()
|
||||
assert handler.session.headers['Authorization'] == 'Splunk ansible'
|
||||
assert handler.session.headers['Content-Type'] == 'application/json'
|
||||
|
||||
|
||||
def test_https_logging_handler_connection_error(connection_error_adapter,
|
||||
dummy_log_record):
|
||||
handler = HTTPSHandler(host='127.0.0.1', message_type='logstash')
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
handler.session.mount('http://', connection_error_adapter)
|
||||
|
||||
buff = StringIO()
|
||||
logging.getLogger('awx.main.utils.handlers').addHandler(
|
||||
logging.StreamHandler(buff)
|
||||
)
|
||||
|
||||
async_futures = handler.emit(dummy_log_record)
|
||||
with pytest.raises(requests.exceptions.ConnectionError):
|
||||
[future.result() for future in async_futures]
|
||||
assert 'failed to emit log to external aggregator\nTraceback' in buff.getvalue()
|
||||
|
||||
# we should only log failures *periodically*, so causing *another*
|
||||
# immediate failure shouldn't report a second ConnectionError
|
||||
buff.truncate(0)
|
||||
async_futures = handler.emit(dummy_log_record)
|
||||
with pytest.raises(requests.exceptions.ConnectionError):
|
||||
[future.result() for future in async_futures]
|
||||
assert buff.getvalue() == ''
|
||||
|
||||
|
||||
@pytest.mark.parametrize('message_type', ['logstash', 'splunk'])
|
||||
def test_https_logging_handler_emit_without_cred(https_adapter, dummy_log_record,
|
||||
message_type):
|
||||
handler = HTTPSHandler(host='127.0.0.1', message_type=message_type)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
handler.session.mount('https://', https_adapter)
|
||||
async_futures = handler.emit(dummy_log_record)
|
||||
[future.result() for future in async_futures]
|
||||
|
||||
assert len(https_adapter.requests) == 1
|
||||
request = https_adapter.requests[0]
|
||||
assert request.url == 'https://127.0.0.1/'
|
||||
assert request.method == 'POST'
|
||||
|
||||
if message_type == 'logstash':
|
||||
# A username + password weren't used, so this header should be missing
|
||||
assert 'Authorization' not in request.headers
|
||||
|
||||
if message_type == 'splunk':
|
||||
assert request.headers['Authorization'] == 'Splunk None'
|
||||
|
||||
|
||||
def test_https_logging_handler_emit_logstash_with_creds(https_adapter,
|
||||
dummy_log_record):
|
||||
handler = HTTPSHandler(host='127.0.0.1',
|
||||
username='user', password='pass',
|
||||
message_type='logstash')
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
handler.session.mount('https://', https_adapter)
|
||||
async_futures = handler.emit(dummy_log_record)
|
||||
[future.result() for future in async_futures]
|
||||
|
||||
assert len(https_adapter.requests) == 1
|
||||
request = https_adapter.requests[0]
|
||||
assert request.headers['Authorization'] == 'Basic %s' % smart_str(base64.b64encode(b"user:pass"))
|
||||
|
||||
|
||||
def test_https_logging_handler_emit_splunk_with_creds(https_adapter,
|
||||
dummy_log_record):
|
||||
handler = HTTPSHandler(host='127.0.0.1',
|
||||
password='pass', message_type='splunk')
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
handler.session.mount('https://', https_adapter)
|
||||
async_futures = handler.emit(dummy_log_record)
|
||||
[future.result() for future in async_futures]
|
||||
|
||||
assert len(https_adapter.requests) == 1
|
||||
request = https_adapter.requests[0]
|
||||
assert request.headers['Authorization'] == 'Splunk pass'
|
||||
|
||||
|
||||
@pytest.mark.parametrize('payload, encoded_payload', [
|
||||
('foobar', 'foobar'),
|
||||
({'foo': 'bar'}, '{"foo": "bar"}'),
|
||||
({u'测试键': u'测试值'}, '{"测试键": "测试值"}'),
|
||||
])
|
||||
def test_encode_payload_for_socket(payload, encoded_payload):
|
||||
assert _encode_payload_for_socket(payload).decode('utf-8') == encoded_payload
|
||||
|
||||
|
||||
def test_udp_handler_create_socket_at_init():
|
||||
handler = UDPHandler(host='127.0.0.1', port=4399)
|
||||
assert hasattr(handler, 'socket')
|
||||
assert isinstance(handler.socket, socket.socket)
|
||||
assert handler.socket.family == socket.AF_INET
|
||||
assert handler.socket.type == socket.SOCK_DGRAM
|
||||
|
||||
|
||||
def test_udp_handler_send(dummy_log_record):
|
||||
handler = UDPHandler(host='127.0.0.1', port=4399)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
with mock.patch('awx.main.utils.handlers._encode_payload_for_socket', return_value="des") as encode_mock,\
|
||||
mock.patch.object(handler, 'socket') as socket_mock:
|
||||
handler.emit(dummy_log_record)
|
||||
encode_mock.assert_called_once_with(handler.format(dummy_log_record))
|
||||
socket_mock.sendto.assert_called_once_with("des", ('127.0.0.1', 4399))
|
||||
|
||||
|
||||
def test_tcp_handler_send(fake_socket, dummy_log_record):
|
||||
handler = TCPHandler(host='127.0.0.1', port=4399, tcp_timeout=5)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
with mock.patch('socket.socket', return_value=fake_socket) as sok_init_mock,\
|
||||
mock.patch('select.select', return_value=([], [fake_socket], [])):
|
||||
handler.emit(dummy_log_record)
|
||||
sok_init_mock.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
|
||||
fake_socket.connect.assert_called_once_with(('127.0.0.1', 4399))
|
||||
fake_socket.setblocking.assert_called_once_with(0)
|
||||
fake_socket.send.assert_called_once_with(handler.format(dummy_log_record))
|
||||
fake_socket.close.assert_called_once()
|
||||
|
||||
|
||||
def test_tcp_handler_return_if_socket_unavailable(fake_socket, dummy_log_record):
|
||||
handler = TCPHandler(host='127.0.0.1', port=4399, tcp_timeout=5)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
with mock.patch('socket.socket', return_value=fake_socket) as sok_init_mock,\
|
||||
mock.patch('select.select', return_value=([], [], [])):
|
||||
handler.emit(dummy_log_record)
|
||||
sok_init_mock.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
|
||||
fake_socket.connect.assert_called_once_with(('127.0.0.1', 4399))
|
||||
fake_socket.setblocking.assert_called_once_with(0)
|
||||
assert not fake_socket.send.called
|
||||
fake_socket.close.assert_called_once()
|
||||
|
||||
|
||||
def test_tcp_handler_log_exception(fake_socket, dummy_log_record):
|
||||
handler = TCPHandler(host='127.0.0.1', port=4399, tcp_timeout=5)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
with mock.patch('socket.socket', return_value=fake_socket) as sok_init_mock,\
|
||||
mock.patch('select.select', return_value=([], [], [])),\
|
||||
mock.patch('awx.main.utils.handlers.logger') as logger_mock:
|
||||
fake_socket.connect.side_effect = Exception("foo")
|
||||
handler.emit(dummy_log_record)
|
||||
sok_init_mock.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM)
|
||||
logger_mock.exception.assert_called_once()
|
||||
fake_socket.close.assert_called_once()
|
||||
assert not fake_socket.send.called
|
||||
@@ -8,7 +8,7 @@ def test_produce_supervisor_command(mocker):
|
||||
mock_process.communicate = communicate_mock
|
||||
Popen_mock = mocker.MagicMock(return_value=mock_process)
|
||||
with mocker.patch.object(reload.subprocess, 'Popen', Popen_mock):
|
||||
reload._supervisor_service_command("restart")
|
||||
reload.supervisor_service_command("restart")
|
||||
reload.subprocess.Popen.assert_called_once_with(
|
||||
['supervisorctl', 'restart', 'tower-processes:*',],
|
||||
stderr=-1, stdin=-1, stdout=-1)
|
||||
|
||||
@@ -64,6 +64,7 @@ def could_be_playbook(project_path, dir_path, filename):
|
||||
matched = True
|
||||
break
|
||||
except IOError:
|
||||
logger.exception(f'failed to open {playbook_path}')
|
||||
return None
|
||||
if not matched:
|
||||
return None
|
||||
|
||||
@@ -56,6 +56,7 @@ __all__ = [
|
||||
'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError',
|
||||
'get_custom_venv_choices', 'get_external_account', 'task_manager_bulk_reschedule',
|
||||
'schedule_task_manager', 'classproperty', 'create_temporary_fifo', 'truncate_stdout',
|
||||
'StubLicense'
|
||||
]
|
||||
|
||||
|
||||
|
||||
117
awx/main/utils/external_logging.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import os
|
||||
|
||||
import urllib.parse as urlparse
|
||||
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.utils.reload import supervisor_service_command
|
||||
|
||||
|
||||
def construct_rsyslog_conf_template(settings=settings):
|
||||
tmpl = ''
|
||||
parts = []
|
||||
enabled = getattr(settings, 'LOG_AGGREGATOR_ENABLED')
|
||||
host = getattr(settings, 'LOG_AGGREGATOR_HOST', '')
|
||||
port = getattr(settings, 'LOG_AGGREGATOR_PORT', '')
|
||||
protocol = getattr(settings, 'LOG_AGGREGATOR_PROTOCOL', '')
|
||||
timeout = getattr(settings, 'LOG_AGGREGATOR_TCP_TIMEOUT', 5)
|
||||
max_disk_space = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_GB', 1)
|
||||
spool_directory = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_PATH', '/var/lib/awx').rstrip('/')
|
||||
|
||||
if not os.access(spool_directory, os.W_OK):
|
||||
spool_directory = '/var/lib/awx'
|
||||
|
||||
max_bytes = settings.MAX_EVENT_RES_DATA
|
||||
if settings.LOG_AGGREGATOR_RSYSLOGD_DEBUG:
|
||||
parts.append('$DebugLevel 2')
|
||||
parts.extend([
|
||||
'$WorkDirectory /var/lib/awx/rsyslog',
|
||||
f'$MaxMessageSize {max_bytes}',
|
||||
'$IncludeConfig /var/lib/awx/rsyslog/conf.d/*.conf',
|
||||
f'main_queue(queue.spoolDirectory="{spool_directory}" queue.maxdiskspace="{max_disk_space}g" queue.type="Disk" queue.filename="awx-external-logger-backlog")', # noqa
|
||||
'module(load="imuxsock" SysSock.Use="off")',
|
||||
'input(type="imuxsock" Socket="' + settings.LOGGING['handlers']['external_logger']['address'] + '" unlink="on")',
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
])
|
||||
|
||||
def escape_quotes(x):
|
||||
return x.replace('"', '\\"')
|
||||
|
||||
if not enabled:
|
||||
parts.append('action(type="omfile" file="/dev/null")') # rsyslog needs *at least* one valid action to start
|
||||
tmpl = '\n'.join(parts)
|
||||
return tmpl
|
||||
|
||||
if protocol.startswith('http'):
|
||||
scheme = 'https'
|
||||
# urlparse requires '//' to be provided if scheme is not specified
|
||||
original_parsed = urlparse.urlsplit(host)
|
||||
if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:
|
||||
host = '%s://%s' % (scheme, host) if scheme else '//%s' % host
|
||||
parsed = urlparse.urlsplit(host)
|
||||
|
||||
host = escape_quotes(parsed.hostname)
|
||||
try:
|
||||
if parsed.port:
|
||||
port = parsed.port
|
||||
except ValueError:
|
||||
port = settings.LOG_AGGREGATOR_PORT
|
||||
|
||||
# https://github.com/rsyslog/rsyslog-doc/blob/master/source/configuration/modules/omhttp.rst
|
||||
ssl = 'on' if parsed.scheme == 'https' else 'off'
|
||||
skip_verify = 'off' if settings.LOG_AGGREGATOR_VERIFY_CERT else 'on'
|
||||
allow_unsigned = 'off' if settings.LOG_AGGREGATOR_VERIFY_CERT else 'on'
|
||||
if not port:
|
||||
port = 443 if parsed.scheme == 'https' else 80
|
||||
|
||||
params = [
|
||||
'type="omhttp"',
|
||||
f'server="{host}"',
|
||||
f'serverport="{port}"',
|
||||
f'usehttps="{ssl}"',
|
||||
f'allowunsignedcerts="{allow_unsigned}"',
|
||||
f'skipverifyhost="{skip_verify}"',
|
||||
'action.resumeRetryCount="-1"',
|
||||
'template="awx"',
|
||||
'errorfile="/var/log/tower/rsyslog.err"',
|
||||
f'action.resumeInterval="{timeout}"'
|
||||
]
|
||||
if parsed.path:
|
||||
path = urlparse.quote(parsed.path[1:])
|
||||
if parsed.query:
|
||||
path = f'{path}?{urlparse.quote(parsed.query)}'
|
||||
params.append(f'restpath="{path}"')
|
||||
username = escape_quotes(getattr(settings, 'LOG_AGGREGATOR_USERNAME', ''))
|
||||
password = escape_quotes(getattr(settings, 'LOG_AGGREGATOR_PASSWORD', ''))
|
||||
if getattr(settings, 'LOG_AGGREGATOR_TYPE', None) == 'splunk':
|
||||
# splunk has a weird authorization header <shrug>
|
||||
if password:
|
||||
# from omhttp docs:
|
||||
# https://www.rsyslog.com/doc/v8-stable/configuration/modules/omhttp.html
|
||||
# > Currently only a single additional header/key pair is
|
||||
# > configurable, further development is needed to support
|
||||
# > arbitrary header key/value lists.
|
||||
params.append('httpheaderkey="Authorization"')
|
||||
params.append(f'httpheadervalue="Splunk {password}"')
|
||||
elif username:
|
||||
params.append(f'uid="{username}"')
|
||||
if password:
|
||||
# you can only have a basic auth password if there's a username
|
||||
params.append(f'pwd="{password}"')
|
||||
params = ' '.join(params)
|
||||
parts.extend(['module(load="omhttp")', f'action({params})'])
|
||||
elif protocol and host and port:
|
||||
parts.append(
|
||||
f'action(type="omfwd" target="{host}" port="{port}" protocol="{protocol}" action.resumeRetryCount="-1" action.resumeInterval="{timeout}" template="awx")' # noqa
|
||||
)
|
||||
else:
|
||||
parts.append('action(type="omfile" file="/dev/null")') # rsyslog needs *at least* one valid action to start
|
||||
tmpl = '\n'.join(parts)
|
||||
return tmpl
|
||||
|
||||
|
||||
def reconfigure_rsyslog():
|
||||
tmpl = construct_rsyslog_conf_template()
|
||||
with open('/var/lib/awx/rsyslog/rsyslog.conf', 'w') as f:
|
||||
f.write(tmpl + '\n')
|
||||
supervisor_service_command(command='restart', service='awx-rsyslogd')
|
||||
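Because construct_rsyslog_conf_template only reads attributes off whatever settings object it is handed (the unit tests above lean on exactly that), the generated configuration can be previewed with a stand-in object. The snippet below is a sketch: the attribute values, the MAX_EVENT_RES_DATA figure, and the use of SimpleNamespace are assumptions, and importing the module still requires an AWX/Django environment on the path.

from types import SimpleNamespace

from awx.main.utils.external_logging import construct_rsyslog_conf_template

# Stand-in for django.conf.settings; only the attributes the function reads are provided.
fake_settings = SimpleNamespace(
    LOG_AGGREGATOR_ENABLED=True,
    LOG_AGGREGATOR_HOST='logs.example.org',
    LOG_AGGREGATOR_PORT=9000,
    LOG_AGGREGATOR_PROTOCOL='tcp',
    LOG_AGGREGATOR_TCP_TIMEOUT=5,
    LOG_AGGREGATOR_MAX_DISK_USAGE_GB=1,
    LOG_AGGREGATOR_MAX_DISK_USAGE_PATH='/var/lib/awx',
    LOG_AGGREGATOR_RSYSLOGD_DEBUG=False,
    MAX_EVENT_RES_DATA=700000,  # assumed value; it only feeds the $MaxMessageSize directive
    LOGGING={'handlers': {'external_logger': {'address': '/var/run/awx-rsyslog/rsyslog.sock'}}},
)

conf = construct_rsyslog_conf_template(fake_settings)
print(conf)
# The last line should be the omfwd action targeting logs.example.org:9000 over TCP.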
@@ -3,13 +3,13 @@
|
||||
|
||||
from copy import copy
|
||||
import json
|
||||
import time
|
||||
import logging
|
||||
import traceback
|
||||
import socket
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
from dateutil.tz import tzutc
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
@@ -91,18 +91,13 @@ class LogstashFormatterBase(logging.Formatter):
|
||||
'processName': record.processName,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def format_timestamp(cls, time):
|
||||
tstamp = datetime.utcfromtimestamp(time)
|
||||
return tstamp.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (tstamp.microsecond / 1000) + "Z"
|
||||
|
||||
@classmethod
|
||||
def format_exception(cls, exc_info):
|
||||
return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''
|
||||
|
||||
@classmethod
|
||||
def serialize(cls, message):
|
||||
return bytes(json.dumps(message), 'utf-8')
|
||||
return json.dumps(message, cls=DjangoJSONEncoder) + '\n'
|
||||
|
||||
|
||||
class LogstashFormatter(LogstashFormatterBase):
|
||||
@@ -132,14 +127,14 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
pass # best effort here, if it's not valid JSON, then meh
|
||||
return raw_data
|
||||
elif kind == 'system_tracking':
|
||||
data = copy(raw_data['ansible_facts'])
|
||||
data = copy(raw_data.get('ansible_facts', {}))
|
||||
else:
|
||||
data = copy(raw_data)
|
||||
if isinstance(data, str):
|
||||
data = json.loads(data)
|
||||
data_for_log = {}
|
||||
|
||||
if kind == 'job_events':
|
||||
if kind == 'job_events' and raw_data.get('python_objects', {}).get('job_event'):
|
||||
job_event = raw_data['python_objects']['job_event']
|
||||
for field_object in job_event._meta.fields:
|
||||
|
||||
@@ -157,9 +152,6 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
|
||||
try:
|
||||
data_for_log[key] = getattr(job_event, fd)
|
||||
if fd in ['created', 'modified'] and data_for_log[key] is not None:
|
||||
time_float = time.mktime(data_for_log[key].timetuple())
|
||||
data_for_log[key] = self.format_timestamp(time_float)
|
||||
except Exception as e:
|
||||
data_for_log[key] = 'Exception `{}` producing field'.format(e)
|
||||
|
||||
@@ -173,10 +165,10 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
data['ansible_python'].pop('version_info', None)
|
||||
|
||||
data_for_log['ansible_facts'] = data
|
||||
data_for_log['ansible_facts_modified'] = raw_data['ansible_facts_modified']
|
||||
data_for_log['inventory_id'] = raw_data['inventory_id']
|
||||
data_for_log['host_name'] = raw_data['host_name']
|
||||
data_for_log['job_id'] = raw_data['job_id']
|
||||
data_for_log['ansible_facts_modified'] = raw_data.get('ansible_facts_modified')
|
||||
data_for_log['inventory_id'] = raw_data.get('inventory_id')
|
||||
data_for_log['host_name'] = raw_data.get('host_name')
|
||||
data_for_log['job_id'] = raw_data.get('job_id')
|
||||
elif kind == 'performance':
|
||||
def convert_to_type(t, val):
|
||||
if t is float:
|
||||
@@ -231,10 +223,12 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
return fields
|
||||
|
||||
def format(self, record):
|
||||
stamp = datetime.utcfromtimestamp(record.created)
|
||||
stamp = stamp.replace(tzinfo=tzutc())
|
||||
message = {
|
||||
# Field not included, but exist in related logs
|
||||
# 'path': record.pathname
|
||||
'@timestamp': self.format_timestamp(record.created),
|
||||
'@timestamp': stamp,
|
||||
'message': record.getMessage(),
|
||||
'host': self.host,
|
||||
|
||||
@@ -250,4 +244,7 @@ class LogstashFormatter(LogstashFormatterBase):
|
||||
if record.exc_info:
|
||||
message.update(self.get_debug_fields(record))
|
||||
|
||||
if settings.LOG_AGGREGATOR_TYPE == 'splunk':
|
||||
# splunk messages must have a top level "event" key
|
||||
message = {'event': message}
|
||||
return self.serialize(message)
|
||||
|
||||
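The two changes above go together: format() now places an aware datetime under '@timestamp', and serialize() hands the dict to json.dumps with DjangoJSONEncoder, which renders datetimes (and other Django-friendly types) as ISO-8601 strings. A quick illustration, assuming Django and python-dateutil are importable:

import json
from datetime import datetime

from dateutil.tz import tzutc
from django.core.serializers.json import DjangoJSONEncoder

stamp = datetime.utcfromtimestamp(1580000000).replace(tzinfo=tzutc())
print(json.dumps({'@timestamp': stamp}, cls=DjangoJSONEncoder))
# {"@timestamp": "2020-01-26T00:53:20Z"}

The trailing newline appended in serialize() presumably keeps each serialized record on its own line when it is written to the rsyslog socket.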
@@ -3,404 +3,29 @@
|
||||
|
||||
# Python
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import requests
|
||||
import time
|
||||
import threading
|
||||
import socket
|
||||
import select
|
||||
from urllib import parse as urlparse
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from requests.exceptions import RequestException
|
||||
import os.path
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
|
||||
# requests futures, a dependency used by these handlers
|
||||
from requests_futures.sessions import FuturesSession
|
||||
import cachetools
|
||||
|
||||
# AWX
|
||||
from awx.main.utils.formatters import LogstashFormatter
|
||||
class RSysLogHandler(logging.handlers.SysLogHandler):
|
||||
|
||||
append_nul = False
|
||||
|
||||
__all__ = ['BaseHTTPSHandler', 'TCPHandler', 'UDPHandler',
|
||||
'AWXProxyHandler']
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.utils.handlers')
|
||||
|
||||
|
||||
# Translation of parameter names to names in Django settings
|
||||
# logging settings category, only those related to handler / log emission
|
||||
PARAM_NAMES = {
|
||||
'host': 'LOG_AGGREGATOR_HOST',
|
||||
'port': 'LOG_AGGREGATOR_PORT',
|
||||
'message_type': 'LOG_AGGREGATOR_TYPE',
|
||||
'username': 'LOG_AGGREGATOR_USERNAME',
|
||||
'password': 'LOG_AGGREGATOR_PASSWORD',
|
||||
'indv_facts': 'LOG_AGGREGATOR_INDIVIDUAL_FACTS',
|
||||
'tcp_timeout': 'LOG_AGGREGATOR_TCP_TIMEOUT',
|
||||
'verify_cert': 'LOG_AGGREGATOR_VERIFY_CERT',
|
||||
'protocol': 'LOG_AGGREGATOR_PROTOCOL'
|
||||
}
|
||||
|
||||
|
||||
def unused_callback(sess, resp):
|
||||
pass
|
||||
|
||||
|
||||
class LoggingConnectivityException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class VerboseThreadPoolExecutor(ThreadPoolExecutor):
|
||||
|
||||
last_log_emit = 0
|
||||
|
||||
def submit(self, func, *args, **kwargs):
|
||||
def _wrapped(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except Exception:
|
||||
# If an exception occurs in a concurrent thread worker (like
|
||||
# a ConnectionError or a read timeout), periodically log
|
||||
# that failure.
|
||||
#
|
||||
# This approach isn't really thread-safe, so we could
|
||||
# potentially log once per thread every 10 seconds, but it
|
||||
# beats logging *every* failed HTTP request in a scenario where
|
||||
# you've typo'd your log aggregator hostname.
|
||||
now = time.time()
|
||||
if now - self.last_log_emit > 10:
|
||||
logger.exception('failed to emit log to external aggregator')
|
||||
self.last_log_emit = now
|
||||
raise
|
||||
return super(VerboseThreadPoolExecutor, self).submit(_wrapped, *args,
|
||||
**kwargs)
|
||||
|
||||
|
||||
class SocketResult:
|
||||
'''
|
||||
A class to be the return type of methods that send data over a socket
|
||||
allows object to be used in the same way as a request futures object
|
||||
'''
|
||||
def __init__(self, ok, reason=None):
|
||||
self.ok = ok
|
||||
self.reason = reason
|
||||
|
||||
def result(self):
|
||||
return self
|
||||
|
||||
|
||||
class BaseHandler(logging.Handler):
|
||||
def __init__(self, host=None, port=None, indv_facts=None, **kwargs):
|
||||
super(BaseHandler, self).__init__()
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.indv_facts = indv_facts
|
||||
|
||||
def _send(self, payload):
|
||||
"""Actually send message to log aggregator.
|
||||
"""
|
||||
return payload
|
||||
|
||||
def _format_and_send_record(self, record):
|
||||
if self.indv_facts:
|
||||
return [self._send(json.loads(self.format(record)))]
|
||||
return [self._send(self.format(record))]
|
||||
|
||||
def emit(self, record):
|
||||
"""
|
||||
Emit a log record. Returns a list of zero or more
|
||||
implementation-specific objects for tests.
|
||||
"""
|
||||
def emit(self, msg):
|
||||
if not settings.LOG_AGGREGATOR_ENABLED:
|
||||
return
|
||||
if not os.path.exists(settings.LOGGING['handlers']['external_logger']['address']):
|
||||
return
|
||||
try:
|
||||
return self._format_and_send_record(record)
|
||||
except (KeyboardInterrupt, SystemExit):
|
||||
raise
|
||||
except Exception:
|
||||
self.handleError(record)
|
||||
|
||||
def _get_host(self, scheme='', hostname_only=False):
|
||||
"""Return the host name of log aggregator.
|
||||
"""
|
||||
host = self.host or ''
|
||||
# urlparse requires '//' to be provided if scheme is not specified
|
||||
original_parsed = urlparse.urlsplit(host)
|
||||
if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:
|
||||
host = '%s://%s' % (scheme, host) if scheme else '//%s' % host
|
||||
parsed = urlparse.urlsplit(host)
|
||||
|
||||
if hostname_only:
|
||||
return parsed.hostname
|
||||
|
||||
try:
|
||||
port = parsed.port or self.port
|
||||
except ValueError:
|
||||
port = self.port
|
||||
netloc = parsed.netloc if port is None else '%s:%s' % (parsed.hostname, port)
|
||||
|
||||
url_components = list(parsed)
|
||||
url_components[1] = netloc
|
||||
ret = urlparse.urlunsplit(url_components)
|
||||
return ret.lstrip('/')
|
||||
|
||||
|
||||
class BaseHTTPSHandler(BaseHandler):
|
||||
'''
|
||||
Originally derived from python-logstash library
|
||||
Non-blocking request accomplished by FuturesSession, similar
|
||||
to the loggly-python-handler library
|
||||
'''
|
||||
def _add_auth_information(self):
|
||||
if self.message_type == 'logstash':
|
||||
if not self.username:
|
||||
# Logstash authentication not enabled
|
||||
return
|
||||
logstash_auth = requests.auth.HTTPBasicAuth(self.username, self.password)
|
||||
self.session.auth = logstash_auth
|
||||
elif self.message_type == 'splunk':
|
||||
auth_header = "Splunk %s" % self.password
|
||||
headers = {
|
||||
"Authorization": auth_header,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
self.session.headers.update(headers)
|
||||
|
||||
def __init__(self, fqdn=False, message_type=None, username=None, password=None,
|
||||
tcp_timeout=5, verify_cert=True, **kwargs):
|
||||
self.fqdn = fqdn
|
||||
self.message_type = message_type
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.tcp_timeout = tcp_timeout
|
||||
self.verify_cert = verify_cert
|
||||
super(BaseHTTPSHandler, self).__init__(**kwargs)
|
||||
self.session = FuturesSession(executor=VerboseThreadPoolExecutor(
|
||||
max_workers=2 # this is the default used by requests_futures
|
||||
))
|
||||
self._add_auth_information()
|
||||
|
||||
def _get_post_kwargs(self, payload_input):
|
||||
if self.message_type == 'splunk':
|
||||
# Splunk needs data nested under key "event"
|
||||
if not isinstance(payload_input, dict):
|
||||
payload_input = json.loads(payload_input)
|
||||
payload_input = {'event': payload_input}
|
||||
if isinstance(payload_input, dict):
|
||||
payload_str = json.dumps(payload_input)
|
||||
else:
|
||||
payload_str = payload_input
|
||||
kwargs = dict(data=payload_str, background_callback=unused_callback,
|
||||
timeout=self.tcp_timeout)
|
||||
if self.verify_cert is False:
|
||||
kwargs['verify'] = False
|
||||
return kwargs
|
||||
|
||||
|
||||
def _send(self, payload):
|
||||
"""See:
|
||||
https://docs.python.org/3/library/concurrent.futures.html#future-objects
|
||||
http://pythonhosted.org/futures/
|
||||
"""
|
||||
return self.session.post(self._get_host(scheme='https'),
|
||||
**self._get_post_kwargs(payload))
|
||||
|
||||
|
||||
def _encode_payload_for_socket(payload):
|
||||
encoded_payload = payload
|
||||
if isinstance(encoded_payload, dict):
|
||||
encoded_payload = json.dumps(encoded_payload, ensure_ascii=False)
|
||||
if isinstance(encoded_payload, str):
|
||||
encoded_payload = encoded_payload.encode('utf-8')
|
||||
return encoded_payload
|
||||
|
||||
|
||||
class TCPHandler(BaseHandler):
|
||||
def __init__(self, tcp_timeout=5, **kwargs):
|
||||
self.tcp_timeout = tcp_timeout
|
||||
super(TCPHandler, self).__init__(**kwargs)
|
||||
|
||||
def _send(self, payload):
|
||||
payload = _encode_payload_for_socket(payload)
|
||||
sok = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
try:
|
||||
sok.connect((self._get_host(hostname_only=True), self.port or 0))
|
||||
sok.setblocking(0)
|
||||
_, ready_to_send, _ = select.select([], [sok], [], float(self.tcp_timeout))
|
||||
if len(ready_to_send) == 0:
|
||||
ret = SocketResult(False, "Socket currently busy, failed to send message")
|
||||
logger.warning(ret.reason)
|
||||
else:
|
||||
sok.send(payload)
|
||||
ret = SocketResult(True) # success!
|
||||
except Exception as e:
|
||||
ret = SocketResult(False, "Error sending message from %s: %s" %
|
||||
(TCPHandler.__name__,
|
||||
' '.join(str(arg) for arg in e.args)))
|
||||
logger.exception(ret.reason)
|
||||
finally:
|
||||
sok.close()
|
||||
return ret
|
||||
|
||||
|
||||
class UDPHandler(BaseHandler):
|
||||
message = "Cannot determine if UDP messages are received."
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(UDPHandler, self).__init__(**kwargs)
|
||||
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
|
||||
def _send(self, payload):
|
||||
payload = _encode_payload_for_socket(payload)
|
||||
self.socket.sendto(payload, (self._get_host(hostname_only=True), self.port or 0))
|
||||
return SocketResult(True, reason=self.message)
|
||||
|
||||
|
||||
class AWXNullHandler(logging.NullHandler):
|
||||
'''
|
||||
Only additional this does is accept arbitrary __init__ params because
|
||||
the proxy handler does not (yet) work with arbitrary handler classes
|
||||
'''
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(AWXNullHandler, self).__init__()
|
||||
|
||||
|
||||
HANDLER_MAPPING = {
|
||||
'https': BaseHTTPSHandler,
|
||||
'tcp': TCPHandler,
|
||||
'udp': UDPHandler,
|
||||
}
|
||||
|
||||
|
||||
TTLCache = cachetools.TTLCache
|
||||
|
||||
if 'py.test' in os.environ.get('_', ''):
|
||||
# don't cache settings in unit tests
|
||||
class TTLCache(TTLCache):
|
||||
|
||||
def __getitem__(self, item):
|
||||
raise KeyError()
|
||||
|
||||
|
||||
class AWXProxyHandler(logging.Handler):
|
||||
'''
|
||||
Handler specific to the AWX external logging feature
|
||||
|
||||
Will dynamically create a handler specific to the configured
|
||||
protocol, and will create a new one automatically on setting change
|
||||
|
||||
Managing parameters:
|
||||
All parameters will get their value from settings as a default
|
||||
if the parameter was either provided on init, or set manually,
|
||||
this value will take precedence.
|
||||
Parameters match same parameters in the actualized handler classes.
|
||||
'''
|
||||
|
||||
thread_local = threading.local()
|
||||
_auditor = None
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
# TODO: process 'level' kwarg
|
||||
super(AWXProxyHandler, self).__init__(**kwargs)
|
||||
self._handler = None
|
||||
self._old_kwargs = {}
|
||||
|
||||
@property
|
||||
def auditor(self):
|
||||
if not self._auditor:
|
||||
self._auditor = logging.handlers.RotatingFileHandler(
|
||||
filename='/var/log/tower/external.log',
|
||||
maxBytes=1024 * 1024 * 50, # 50 MB
|
||||
backupCount=5,
|
||||
)
|
||||
|
||||
class WritableLogstashFormatter(LogstashFormatter):
|
||||
@classmethod
|
||||
def serialize(cls, message):
|
||||
return json.dumps(message)
|
||||
|
||||
self._auditor.setFormatter(WritableLogstashFormatter())
|
||||
return self._auditor
|
||||
|
||||
def get_handler_class(self, protocol):
|
||||
return HANDLER_MAPPING.get(protocol, AWXNullHandler)
|
||||
|
||||
@cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'get_handler')
|
||||
def get_handler(self, custom_settings=None, force_create=False):
|
||||
new_kwargs = {}
|
||||
use_settings = custom_settings or settings
|
||||
for field_name, setting_name in PARAM_NAMES.items():
|
||||
val = getattr(use_settings, setting_name, None)
|
||||
if val is None:
|
||||
continue
|
||||
new_kwargs[field_name] = val
|
||||
if new_kwargs == self._old_kwargs and self._handler and (not force_create):
|
||||
# avoids re-creating session objects, and other such things
|
||||
return self._handler
|
||||
self._old_kwargs = new_kwargs.copy()
|
||||
# TODO: remove any kwargs no applicable to that particular handler
|
||||
protocol = new_kwargs.pop('protocol', None)
|
||||
HandlerClass = self.get_handler_class(protocol)
|
||||
# cleanup old handler and make new one
|
||||
if self._handler:
|
||||
self._handler.close()
|
||||
logger.debug('Creating external log handler due to startup or settings change.')
|
||||
self._handler = HandlerClass(**new_kwargs)
|
||||
if self.formatter:
|
||||
# self.format(record) is called inside of emit method
|
||||
# so not safe to assume this can be handled within self
|
||||
self._handler.setFormatter(self.formatter)
|
||||
return self._handler
|
||||
|
||||
@cachetools.cached(cache=TTLCache(maxsize=1, ttl=3), key=lambda *args, **kw: 'should_audit')
|
||||
def should_audit(self):
|
||||
return settings.LOG_AGGREGATOR_AUDIT
|
||||
|
||||
def emit(self, record):
|
||||
if AWXProxyHandler.thread_local.enabled:
|
||||
actual_handler = self.get_handler()
|
||||
if self.should_audit():
|
||||
self.auditor.setLevel(settings.LOG_AGGREGATOR_LEVEL)
|
||||
self.auditor.emit(record)
|
||||
return actual_handler.emit(record)
|
||||
|
||||
def perform_test(self, custom_settings):
|
||||
"""
|
||||
Tests logging connectivity for given settings module.
|
||||
@raises LoggingConnectivityException
|
||||
"""
|
||||
handler = self.get_handler(custom_settings=custom_settings, force_create=True)
|
||||
handler.setFormatter(LogstashFormatter())
|
||||
logger = logging.getLogger(__file__)
|
||||
fn, lno, func, _ = logger.findCaller()
|
||||
record = logger.makeRecord('awx', 10, fn, lno,
|
||||
'AWX Connection Test', tuple(),
|
||||
None, func)
|
||||
futures = handler.emit(record)
|
||||
for future in futures:
|
||||
try:
|
||||
resp = future.result()
|
||||
if not resp.ok:
|
||||
if isinstance(resp, SocketResult):
|
||||
raise LoggingConnectivityException(
|
||||
'Socket error: {}'.format(resp.reason or '')
|
||||
)
|
||||
else:
|
||||
raise LoggingConnectivityException(
|
||||
': '.join([str(resp.status_code), resp.reason or ''])
|
||||
)
|
||||
except RequestException as e:
|
||||
raise LoggingConnectivityException(str(e))
|
||||
|
||||
@classmethod
|
||||
def disable(cls):
|
||||
cls.thread_local.enabled = False
|
||||
|
||||
|
||||
AWXProxyHandler.thread_local.enabled = True
|
||||
return super(RSysLogHandler, self).emit(msg)
|
||||
except ConnectionRefusedError:
|
||||
# rsyslogd has gone to lunch; this generally means that it's just
|
||||
# been restarted (due to a configuration change)
|
||||
# unfortunately, we can't log that because...rsyslogd is down (and
|
||||
# would just send us back down this code path)
|
||||
pass
|
||||
|
||||
|
||||
ColorHandler = logging.StreamHandler
|
||||
|
||||
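With the HTTP/TCP/UDP handler stack deleted, the Python side is reduced to RSysLogHandler, a thin SysLogHandler that writes to a local unix socket and leaves forwarding to rsyslogd. A settings entry along the following lines is what it is meant to slot into; the dotted path and the socket address come from elsewhere in this diff, while the overall LOGGING layout shown here is an assumption rather than a copy of awx/settings:

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'external_logger': {
            # the new handler defined in awx.main.utils.handlers
            'class': 'awx.main.utils.handlers.RSysLogHandler',
            # unix socket that the imuxsock input in the generated rsyslog.conf listens on
            'address': '/var/run/awx-rsyslog/rsyslog.sock',
        },
    },
    'loggers': {
        'awx': {'handlers': ['external_logger'], 'level': 'INFO'},
    },
}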
@@ -4,25 +4,24 @@
# Python
import subprocess
import logging
import os

# Django
from django.conf import settings

logger = logging.getLogger('awx.main.utils.reload')


def _supervisor_service_command(command, communicate=True):
def supervisor_service_command(command, service='*', communicate=True):
    '''
    example use pattern of supervisorctl:
    # supervisorctl restart tower-processes:receiver tower-processes:factcacher
    '''
    group_name = 'tower-processes'
    if settings.DEBUG:
        group_name = 'awx-processes'
    args = ['supervisorctl']
    if settings.DEBUG:
        args.extend(['-c', '/supervisor.conf'])
    args.extend([command, '{}:*'.format(group_name)])

    supervisor_config_path = os.getenv('SUPERVISOR_WEB_CONFIG_PATH', None)
    if supervisor_config_path:
        args.extend(['-c', supervisor_config_path])

    args.extend([command, ':'.join(['tower-processes', service])])
    logger.debug('Issuing command to {} services, args={}'.format(command, args))
    supervisor_process = subprocess.Popen(args, stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -30,15 +29,16 @@ def _supervisor_service_command(command, communicate=True):
        restart_stdout, restart_err = supervisor_process.communicate()
        restart_code = supervisor_process.returncode
        if restart_code or restart_err:
            logger.error('supervisorctl {} errored with exit code `{}`, stdout:\n{}stderr:\n{}'.format(
                command, restart_code, restart_stdout.strip(), restart_err.strip()))
            logger.error('supervisorctl {} {} errored with exit code `{}`, stdout:\n{}stderr:\n{}'.format(
                command, service, restart_code, restart_stdout.strip(), restart_err.strip()))
        else:
            logger.info('supervisorctl {} finished, stdout:\n{}'.format(
                command, restart_stdout.strip()))
            logger.debug(
                'supervisorctl {} {} succeeded'.format(command, service)
            )
    else:
        logger.info('Submitted supervisorctl {} command, not waiting for result'.format(command))


def stop_local_services(communicate=True):
    logger.warn('Stopping services on this node in response to user action')
    _supervisor_service_command(command='stop', communicate=communicate)
    supervisor_service_command(command='stop', communicate=communicate)

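A hedged usage sketch of the reworked helper: the new `service` argument scopes the supervisorctl action to one program in the `tower-processes` group instead of the whole group. The service name below is illustrative; the real program names come from the supervisor configuration.

# Illustrative calls only; 'dispatcher' is a placeholder program name.
from awx.main.utils.reload import supervisor_service_command, stop_local_services

supervisor_service_command('restart', service='dispatcher')  # tower-processes:dispatcher
supervisor_service_command('restart')                        # default service='*' hits the whole group
stop_local_services(communicate=False)                       # fire-and-forget stop, result not awaited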
@@ -70,7 +70,7 @@ class WebsocketTask():

    async def connect(self, attempt):
        from awx.main.consumers import WebsocketSecretAuthHelper  # noqa
        logger.debug(f"{self.name} connect attempt {attempt} to {self.remote_host}")
        logger.debug(f"Connection from {self.name} to {self.remote_host} attempt number {attempt}.")

        '''
        Can not put get_channel_layer() in the init code because it is in the init
@@ -83,7 +83,7 @@ class WebsocketTask():
            if attempt > 0:
                await asyncio.sleep(settings.BROADCAST_WEBSOCKET_RECONNECT_RETRY_RATE_SECONDS)
        except asyncio.CancelledError:
            logger.warn(f"{self.name} connection to {self.remote_host} cancelled")
            logger.warn(f"Connection from {self.name} to {self.remote_host} cancelled")
            raise

        uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/{self.endpoint}/"
@@ -93,29 +93,29 @@ class WebsocketTask():
        try:
            async with aiohttp.ClientSession(headers={'secret': secret_val},
                                             timeout=timeout) as session:
                async with session.ws_connect(uri, ssl=self.verify_ssl) as websocket:
                async with session.ws_connect(uri, ssl=self.verify_ssl, heartbeat=20) as websocket:
                    logger.info(f"Connection from {self.name} to {self.remote_host} established.")
                    self.stats.record_connection_established()
                    attempt = 0
                    await self.run_loop(websocket)
        except asyncio.CancelledError:
            # TODO: Check if connected and disconnect
            # Possibly use run_until_complete() if disconnect is async
            logger.warn(f"{self.name} connection to {self.remote_host} cancelled")
            logger.warn(f"Connection from {self.name} to {self.remote_host} cancelled.")
            self.stats.record_connection_lost()
            raise
        except client_exceptions.ClientConnectorError as e:
            logger.warn(f"Failed to connect to {self.remote_host}: '{e}'. Reconnecting ...")
            self.stats.record_connection_lost()
            self.start(attempt=attempt + 1)
            logger.warn(f"Connection from {self.name} to {self.remote_host} failed: '{e}'.")
        except asyncio.TimeoutError:
            logger.warn(f"Timeout while trying to connect to {self.remote_host}. Reconnecting ...")
            self.stats.record_connection_lost()
            self.start(attempt=attempt + 1)
            logger.warn(f"Connection from {self.name} to {self.remote_host} timed out.")
        except Exception as e:
            # Early on, this is our canary. I'm not sure what exceptions we can really encounter.
            logger.warn(f"Websocket broadcast client exception {type(e)} {e}")
            self.stats.record_connection_lost()
            self.start(attempt=attempt + 1)
            logger.warn(f"Connection from {self.name} to {self.remote_host} failed for unknown reason: '{e}'.")
        else:
            logger.warn(f"Connection from {self.name} to {self.remote_host} lost.")

        self.stats.record_connection_lost()
        self.start(attempt=attempt + 1)

    def start(self, attempt=0):
        self.async_task = self.event_loop.create_task(self.connect(attempt=attempt))
@@ -163,9 +163,9 @@ class BroadcastWebsocketManager(object):
        new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)

        if deleted_remote_hosts:
            logger.warn(f"{self.local_hostname} going to remove {deleted_remote_hosts} from the websocket broadcast list")
            logger.warn(f"Removing {deleted_remote_hosts} from websocket broadcast list")
        if new_remote_hosts:
            logger.warn(f"{self.local_hostname} going to add {new_remote_hosts} to the websocket broadcast list")
            logger.warn(f"Adding {new_remote_hosts} to websocket broadcast list")

        for h in deleted_remote_hosts:
            self.broadcast_tasks[h].cancel()

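The `heartbeat=20` addition and the retry bookkeeping above follow a common aiohttp pattern; a self-contained sketch is below. The URL, secret header, and the fixed 5-second retry delay are placeholders, not AWX settings.

# Minimal reconnect loop with aiohttp heartbeats, mirroring the pattern above.
import asyncio

import aiohttp
from aiohttp import client_exceptions


async def broadcast_client(uri, secret):
    attempt = 0
    while True:
        if attempt > 0:
            await asyncio.sleep(5)  # crude fixed backoff between attempts
        attempt += 1
        try:
            async with aiohttp.ClientSession(headers={'secret': secret}) as session:
                # heartbeat=20 makes aiohttp send a ping every 20 seconds and
                # close the connection if the peer stops answering, so a dead
                # link is noticed instead of hanging forever.
                async with session.ws_connect(uri, heartbeat=20) as websocket:
                    attempt = 0  # reset once a connection is established
                    async for msg in websocket:
                        if msg.type == aiohttp.WSMsgType.TEXT:
                            print(msg.data)
        except (client_exceptions.ClientConnectorError, asyncio.TimeoutError) as exc:
            print(f"connection to {uri} failed: {exc!r}; retrying")


# Example invocation (placeholder endpoint and secret):
# asyncio.run(broadcast_client("wss://awx-node/websocket/broadcast/", "not-a-real-secret"))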
@@ -38,6 +38,7 @@
      recursive: true
      set_remote_user: false
      rsync_opts:
        - "--blocking-io"
        - "--rsh=$RSH"
    environment:
      RSH: "oc rsh --config={{ ansible_kubectl_config }}"
@@ -51,6 +52,7 @@
      mode: pull
      set_remote_user: false
      rsync_opts:
        - "--blocking-io"
        - "--rsh=$RSH"
    environment:
      RSH: "oc rsh --config={{ ansible_kubectl_config }}"

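Roughly, the synchronize options above drive rsync over a custom remote shell (`oc rsh`), so the transfer goes through the OpenShift API rather than SSH. A rough subprocess sketch of the same idea, with placeholder paths, pod name, and kubeconfig:

# Placeholder pod name and paths; the real playbook derives these from inventory vars.
import subprocess

rsh = "oc rsh --config=/tmp/kubeconfig"
subprocess.run(
    [
        "rsync", "-a", "--blocking-io", f"--rsh={rsh}",
        "awx-pod-0:/var/lib/awx/projects/",  # remote source (mode: pull)
        "/backup/projects/",                 # local destination
    ],
    check=True,
)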
@@ -136,14 +136,15 @@
      register: doesRequirementsExist

    - name: fetch galaxy roles from requirements.yml
      command: ansible-galaxy install -r requirements.yml -p {{roles_destination|quote}}{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
      command: ansible-galaxy install -r roles/requirements.yml -p {{roles_destination|quote}}{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
      args:
        chdir: "{{project_path|quote}}/roles"
        chdir: "{{project_path|quote}}"
      register: galaxy_result
      when: doesRequirementsExist.stat.exists
      changed_when: "'was installed successfully' in galaxy_result.stdout"
      environment:
        ANSIBLE_FORCE_COLOR: false
        GIT_SSH_COMMAND: "ssh -o StrictHostKeyChecking=no"

  when: roles_enabled|bool
  tags:
@@ -156,15 +157,16 @@
      register: doesCollectionRequirementsExist

    - name: fetch galaxy collections from collections/requirements.yml
      command: ansible-galaxy collection install -r requirements.yml -p {{collections_destination|quote}}{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
      command: ansible-galaxy collection install -r collections/requirements.yml -p {{collections_destination|quote}}{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
      args:
        chdir: "{{project_path|quote}}/collections"
        chdir: "{{project_path|quote}}"
      register: galaxy_collection_result
      when: doesCollectionRequirementsExist.stat.exists
      changed_when: "'Installing ' in galaxy_collection_result.stdout"
      environment:
        ANSIBLE_FORCE_COLOR: false
        ANSIBLE_COLLECTIONS_PATHS: "{{ collections_destination }}"
        GIT_SSH_COMMAND: "ssh -o StrictHostKeyChecking=no"

  when:
    - "ansible_version.full is version_compare('2.8', '>=')"

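The last two hunks run ansible-galaxy from the project root and point `-r` at the requirements files' real locations, rather than changing into `roles/` or `collections/` first. A hedged Python equivalent of the corrected behaviour follows; the helper, paths, and verbosity handling are illustrative, not AWX's playbook logic.

# Hypothetical helper mirroring the corrected tasks above.
import os
import subprocess


def install_galaxy_requirements(project_path, roles_dest, collections_dest, verbosity=0):
    verbose = ['-' + 'v' * verbosity] if verbosity else []
    # roles: only run when roles/requirements.yml exists, from the project root
    if os.path.exists(os.path.join(project_path, 'roles', 'requirements.yml')):
        subprocess.run(['ansible-galaxy', 'install', '-r', 'roles/requirements.yml',
                        '-p', roles_dest] + verbose,
                       cwd=project_path, check=True)
    # collections: same pattern against collections/requirements.yml
    if os.path.exists(os.path.join(project_path, 'collections', 'requirements.yml')):
        subprocess.run(['ansible-galaxy', 'collection', 'install', '-r',
                        'collections/requirements.yml', '-p', collections_dest] + verbose,
                       cwd=project_path, check=True)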
Some files were not shown because too many files have changed in this diff.