mirror of
https://github.com/ansible/awx.git
synced 2026-02-08 13:04:43 -03:30
Compare commits
491 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
442d539ff8 | ||
|
|
3952946d9c | ||
|
|
3fa34dad04 | ||
|
|
1233462419 | ||
|
|
0ad78874ce | ||
|
|
2bbbb04499 | ||
|
|
fa1294922b | ||
|
|
277b6897fa | ||
|
|
d7f9e66710 | ||
|
|
81d388d137 | ||
|
|
60800d6740 | ||
|
|
02cf4585f8 | ||
|
|
669d4535b1 | ||
|
|
b09d9cbe41 | ||
|
|
d2bbe7aa1a | ||
|
|
246aee623b | ||
|
|
6b3ec46fe8 | ||
|
|
9dec359898 | ||
|
|
222998281f | ||
|
|
7164049062 | ||
|
|
e768d5e7fc | ||
|
|
e5ec761434 | ||
|
|
40b020c370 | ||
|
|
2a566f575f | ||
|
|
2784409c46 | ||
|
|
946a86f350 | ||
|
|
57a6c8d693 | ||
|
|
142c0da9f0 | ||
|
|
28baa8b24b | ||
|
|
3f8bc0d7c8 | ||
|
|
0df4047d3d | ||
|
|
feb9c5700c | ||
|
|
cf6235f6ea | ||
|
|
b457c8f133 | ||
|
|
824d798d81 | ||
|
|
76dcd6d72a | ||
|
|
64846c3347 | ||
|
|
95b8bd63ea | ||
|
|
3b60529488 | ||
|
|
7a59922f0f | ||
|
|
360352b78e | ||
|
|
21abf5a788 | ||
|
|
62dbb6bfdb | ||
|
|
460f31a05d | ||
|
|
91dbc2de30 | ||
|
|
e603cfd7ab | ||
|
|
3cdeb48d3f | ||
|
|
1f17e02fe9 | ||
|
|
0e54f76f80 | ||
|
|
5df08711e9 | ||
|
|
41d0548af6 | ||
|
|
11da8e254d | ||
|
|
30346618f1 | ||
|
|
7c88a51992 | ||
|
|
1a0407ba50 | ||
|
|
1dbea4614b | ||
|
|
6e9a43513e | ||
|
|
e50c8fc9c9 | ||
|
|
e6416d770b | ||
|
|
4de7de3ce9 | ||
|
|
e6b1e55274 | ||
|
|
f9a7db6045 | ||
|
|
edb0df788b | ||
|
|
40cd87f253 | ||
|
|
22a9c29961 | ||
|
|
951f13c066 | ||
|
|
4a5edf7b88 | ||
|
|
ced8f42835 | ||
|
|
8c51993278 | ||
|
|
5fcf1a2d5e | ||
|
|
61a1cfa35a | ||
|
|
4531c418e2 | ||
|
|
7623257a6c | ||
|
|
3fa3ddf04b | ||
|
|
fdb53bd1af | ||
|
|
0615252cf7 | ||
|
|
cb453de6a4 | ||
|
|
9c57f550ed | ||
|
|
7060fbd3c2 | ||
|
|
974b219858 | ||
|
|
4657680f9e | ||
|
|
64e3135754 | ||
|
|
68a8dda869 | ||
|
|
fb8b90254c | ||
|
|
86f1ba984a | ||
|
|
e3814c6f0f | ||
|
|
559d917184 | ||
|
|
ec2c121762 | ||
|
|
0c3d27e818 | ||
|
|
0a3ac25c1a | ||
|
|
101c70c152 | ||
|
|
80fb713f91 | ||
|
|
22d1c8a59c | ||
|
|
68568be235 | ||
|
|
aa54cf097b | ||
|
|
24571166f6 | ||
|
|
af70e3bb49 | ||
|
|
990eead3ac | ||
|
|
5e5026aae8 | ||
|
|
3dec277331 | ||
|
|
41894e30ac | ||
|
|
8cdb05e4a2 | ||
|
|
66df922956 | ||
|
|
455cd74492 | ||
|
|
e57bd88bd2 | ||
|
|
af118fec99 | ||
|
|
9d3b19341d | ||
|
|
87607dd997 | ||
|
|
15d6c5fb7a | ||
|
|
718d3728dd | ||
|
|
7e0dc41bf7 | ||
|
|
72c7ace1a0 | ||
|
|
644ecdb1fb | ||
|
|
0abfa428c4 | ||
|
|
b01e312b8f | ||
|
|
78ade1d99d | ||
|
|
568e70b68b | ||
|
|
5367bc4d3b | ||
|
|
df069f3874 | ||
|
|
e927680cc2 | ||
|
|
4b95297bd4 | ||
|
|
068d9660b3 | ||
|
|
58737a64e1 | ||
|
|
cfe8a1722c | ||
|
|
e373ae1e27 | ||
|
|
9d42b8f0f2 | ||
|
|
54167d9693 | ||
|
|
747fdf38d8 | ||
|
|
0f0e401c98 | ||
|
|
80e22ff2ce | ||
|
|
d205685541 | ||
|
|
3b06d7b02b | ||
|
|
ca6ae24032 | ||
|
|
585ca082e3 | ||
|
|
4f6d7e56eb | ||
|
|
8527991cb2 | ||
|
|
9de83fdcfe | ||
|
|
c1ec84d4a9 | ||
|
|
1f2481211b | ||
|
|
d3086206b4 | ||
|
|
b1853d815b | ||
|
|
131f5ff018 | ||
|
|
efa5a95cf1 | ||
|
|
8d6d5eeed8 | ||
|
|
38025e1926 | ||
|
|
0e24cb3eac | ||
|
|
6cf195a27e | ||
|
|
39b37817a1 | ||
|
|
6d46ee7a01 | ||
|
|
90ca2fd59b | ||
|
|
a5602cc2e7 | ||
|
|
4fbab7f1b3 | ||
|
|
5d5edf6535 | ||
|
|
a599afa81c | ||
|
|
49dfb5dcaf | ||
|
|
1835787772 | ||
|
|
00060c9572 | ||
|
|
01c89398b7 | ||
|
|
fe7df910e2 | ||
|
|
1c8fb0636c | ||
|
|
5eeb8b0337 | ||
|
|
f4dfbcdf18 | ||
|
|
02171ce2a1 | ||
|
|
193bd6d05b | ||
|
|
0d7af90e8c | ||
|
|
4d5af1d191 | ||
|
|
df9a8d537f | ||
|
|
09caf729f1 | ||
|
|
f042b8adf5 | ||
|
|
8b97e3f8bb | ||
|
|
61d5dc4cb2 | ||
|
|
8ab885de93 | ||
|
|
1b1a93dd4b | ||
|
|
1e8a9a7df3 | ||
|
|
e2d3407f66 | ||
|
|
d42f57d726 | ||
|
|
d239c82ff5 | ||
|
|
a480e79e21 | ||
|
|
09e62df84a | ||
|
|
b939637266 | ||
|
|
ffe328dcf9 | ||
|
|
10a6ad92b7 | ||
|
|
ca1c7b38db | ||
|
|
dbeb7ec67c | ||
|
|
cb1b42a93e | ||
|
|
75fd703530 | ||
|
|
33aee7f830 | ||
|
|
4042e78757 | ||
|
|
1bb29ec5f7 | ||
|
|
b81c9dbeea | ||
|
|
0dab3e920f | ||
|
|
496c0c5921 | ||
|
|
ebade7b9b7 | ||
|
|
902a31d073 | ||
|
|
43825faa72 | ||
|
|
d40497aca5 | ||
|
|
c61e875bf8 | ||
|
|
24691f6c75 | ||
|
|
f5eb673898 | ||
|
|
d35e87ace7 | ||
|
|
95a722255b | ||
|
|
385365a78f | ||
|
|
0c6b7fdb59 | ||
|
|
580f098990 | ||
|
|
d733903a22 | ||
|
|
a2243d91d2 | ||
|
|
d4084c0b13 | ||
|
|
00f8dd9e6a | ||
|
|
77691a9631 | ||
|
|
7939f2d320 | ||
|
|
53be991cfe | ||
|
|
ce5272eae6 | ||
|
|
a4ec6f6763 | ||
|
|
cc037cb4b5 | ||
|
|
226dac7b24 | ||
|
|
ea5e35910f | ||
|
|
455dfa6caa | ||
|
|
c2c6f2a197 | ||
|
|
17139f1f82 | ||
|
|
574838740f | ||
|
|
f8681cfb6b | ||
|
|
5a69074b09 | ||
|
|
dea7ec7845 | ||
|
|
81272a8150 | ||
|
|
5dfe53f158 | ||
|
|
fe7a51f8f7 | ||
|
|
7ea96ad468 | ||
|
|
63494c94b7 | ||
|
|
0618822fe5 | ||
|
|
a306397586 | ||
|
|
785a8d9c77 | ||
|
|
a2e5639128 | ||
|
|
bc4d789da0 | ||
|
|
296995e8bd | ||
|
|
1f7a36490d | ||
|
|
f9fb9b120b | ||
|
|
fbe679e651 | ||
|
|
751ca57390 | ||
|
|
10f8983554 | ||
|
|
bfdd136a46 | ||
|
|
e74fb194bc | ||
|
|
17efb34c6a | ||
|
|
39c32fb5d0 | ||
|
|
87e3d62684 | ||
|
|
057933f030 | ||
|
|
73b9d25371 | ||
|
|
73b0506e96 | ||
|
|
d6f7692a7d | ||
|
|
973e6f4213 | ||
|
|
da1a19ce88 | ||
|
|
38f1393030 | ||
|
|
9a66364a9d | ||
|
|
67b826b438 | ||
|
|
428527052c | ||
|
|
4a9d39c3fa | ||
|
|
f29144ba91 | ||
|
|
49edaab861 | ||
|
|
e509bbfbb3 | ||
|
|
4b02e4ab57 | ||
|
|
60d2409321 | ||
|
|
9d420df632 | ||
|
|
71ef219ffb | ||
|
|
22cdc129ad | ||
|
|
048c394897 | ||
|
|
19ccb5e213 | ||
|
|
8006b24ae3 | ||
|
|
95dd3dbfbd | ||
|
|
7e0ef6dd7b | ||
|
|
a66d44d2ff | ||
|
|
d242932837 | ||
|
|
99701e4112 | ||
|
|
556cd40440 | ||
|
|
c7bb5a3e7b | ||
|
|
3c195eed9f | ||
|
|
c7db982ab1 | ||
|
|
54178a1982 | ||
|
|
b69b53f527 | ||
|
|
d0270a1bac | ||
|
|
438929007e | ||
|
|
b904ad68a6 | ||
|
|
b7ab6ba9bb | ||
|
|
b80127dd40 | ||
|
|
09c10a6f59 | ||
|
|
7478a2aa5e | ||
|
|
6a9423626c | ||
|
|
31a11cf6bb | ||
|
|
4321c63165 | ||
|
|
8c5d236066 | ||
|
|
81eb9bb78a | ||
|
|
762d8a287e | ||
|
|
c370cb4d93 | ||
|
|
736e7a33ac | ||
|
|
83bb3bba08 | ||
|
|
30610f1a62 | ||
|
|
a3e95ab171 | ||
|
|
275c43bd4a | ||
|
|
479ab8550d | ||
|
|
6924466c0b | ||
|
|
1808bed2e8 | ||
|
|
d5817fd87a | ||
|
|
8b51e8eb82 | ||
|
|
66245d3094 | ||
|
|
a057277823 | ||
|
|
7852f4f054 | ||
|
|
d0bbf8c711 | ||
|
|
c02f6b9ece | ||
|
|
e078ac1c80 | ||
|
|
5d82d89dc2 | ||
|
|
78e51d5159 | ||
|
|
2d1bc58bb2 | ||
|
|
5a47cd8f94 | ||
|
|
6c4bf5bf7d | ||
|
|
ca992246d1 | ||
|
|
ca0130fc64 | ||
|
|
b41a55f297 | ||
|
|
b3323a24e4 | ||
|
|
1b144470b0 | ||
|
|
3c77e5b005 | ||
|
|
660ca5f6ff | ||
|
|
aa28909313 | ||
|
|
e532f4c0c5 | ||
|
|
ce7ea1fbcb | ||
|
|
23a20b9db0 | ||
|
|
8bf426479c | ||
|
|
155faa0138 | ||
|
|
bba9d86078 | ||
|
|
09e5093b96 | ||
|
|
4da0e0dd80 | ||
|
|
90c02db386 | ||
|
|
ba4ae7c104 | ||
|
|
d2acd15783 | ||
|
|
7971cc5c17 | ||
|
|
6098ee8f7f | ||
|
|
41fa3e0473 | ||
|
|
3ad62f586b | ||
|
|
43f32f98a9 | ||
|
|
abcf8fea96 | ||
|
|
10f324110e | ||
|
|
0f335170c5 | ||
|
|
12a04bf42e | ||
|
|
4378dc62bb | ||
|
|
99de79dfc2 | ||
|
|
74308f3dad | ||
|
|
c3ff7ab247 | ||
|
|
74c7d9686a | ||
|
|
d566b465aa | ||
|
|
6aa972a6e9 | ||
|
|
917c6b405e | ||
|
|
deadf197a3 | ||
|
|
90f6d4ed05 | ||
|
|
70a9a72c25 | ||
|
|
d2698c2cb1 | ||
|
|
09e72bc0ae | ||
|
|
a38a7ad9b6 | ||
|
|
c3afe3f815 | ||
|
|
8f6b654696 | ||
|
|
3cb2475307 | ||
|
|
732f2fb828 | ||
|
|
2e2fe40d2a | ||
|
|
caa4e90fa0 | ||
|
|
bb0abf37e0 | ||
|
|
e3a3a47229 | ||
|
|
8b2c65a3fa | ||
|
|
e7347d15c1 | ||
|
|
1590c69590 | ||
|
|
d6b56d8794 | ||
|
|
d96fd7e06f | ||
|
|
4e8bbdaae7 | ||
|
|
cb7036382b | ||
|
|
725437571d | ||
|
|
9fd396cbe0 | ||
|
|
10ccb57062 | ||
|
|
1d3efecd99 | ||
|
|
6a003919c0 | ||
|
|
3f94657cdb | ||
|
|
dfecd4ad9d | ||
|
|
0b207e02ab | ||
|
|
59e3306a3c | ||
|
|
da8f486c5d | ||
|
|
1ac92b0493 | ||
|
|
11752e123d | ||
|
|
5c2eebf692 | ||
|
|
9b3b20c96b | ||
|
|
91d4948564 | ||
|
|
345f1db994 | ||
|
|
9360d3fabc | ||
|
|
1ca3d1fe3b | ||
|
|
216454d298 | ||
|
|
6b8359fc2f | ||
|
|
086d6951b4 | ||
|
|
8cdffd0dd0 | ||
|
|
9668d18203 | ||
|
|
5e02e6e4a4 | ||
|
|
6f872bf752 | ||
|
|
814f033d46 | ||
|
|
ff573e06b3 | ||
|
|
2cbcbddc52 | ||
|
|
68d56d5616 | ||
|
|
0b8aabbd16 | ||
|
|
8c57a92a65 | ||
|
|
77ee2191ed | ||
|
|
a3463e87b5 | ||
|
|
c494c38966 | ||
|
|
58c85ab03f | ||
|
|
64add6e907 | ||
|
|
10be375137 | ||
|
|
4c6cac90fd | ||
|
|
1d91387f58 | ||
|
|
d825cca9f2 | ||
|
|
694c7e8af5 | ||
|
|
2ed3a39b46 | ||
|
|
dd49f747a0 | ||
|
|
0eb7e22d1f | ||
|
|
677ff99eb4 | ||
|
|
12805954e0 | ||
|
|
0e1a2b899a | ||
|
|
bae9c03258 | ||
|
|
420c75f76f | ||
|
|
d58ea85584 | ||
|
|
37125102ab | ||
|
|
c6d77a1183 | ||
|
|
8f1fccefeb | ||
|
|
4b53875a71 | ||
|
|
b9e45e62c0 | ||
|
|
0675b9e8fa | ||
|
|
e64f9c6963 | ||
|
|
d0a7f7f4e9 | ||
|
|
e24c511aef | ||
|
|
a461df0e4d | ||
|
|
3aa6e8a457 | ||
|
|
a3cd858665 | ||
|
|
84c854bdf3 | ||
|
|
e243513a0d | ||
|
|
9a23056073 | ||
|
|
e7e716742a | ||
|
|
961c5589c1 | ||
|
|
1ca29df0de | ||
|
|
b717aabcc9 | ||
|
|
52a272e8e4 | ||
|
|
e41d33991a | ||
|
|
c565130b35 | ||
|
|
7782b1ddf4 | ||
|
|
9a891794d9 | ||
|
|
de8c37fd3d | ||
|
|
a7ca6e2eea | ||
|
|
e080c1f4c2 | ||
|
|
5fd11d8829 | ||
|
|
44311c163c | ||
|
|
ba7e2c9bc4 | ||
|
|
e9cda0c819 | ||
|
|
60976b6aca | ||
|
|
a98887deb0 | ||
|
|
b12c0def7d | ||
|
|
63fec77f82 | ||
|
|
462cfa2344 | ||
|
|
503b86d41c | ||
|
|
c6d6536078 | ||
|
|
d688f7b833 | ||
|
|
d54e5e5ed8 | ||
|
|
2997911fd4 | ||
|
|
9c5f04b1e0 | ||
|
|
b4b261b918 | ||
|
|
6d1746f99c | ||
|
|
cd21dd69f5 | ||
|
|
bf65b40241 | ||
|
|
fcca0cee37 | ||
|
|
1ea924aa13 | ||
|
|
d1671d72dc | ||
|
|
4cf38db19c | ||
|
|
77fd2d677a | ||
|
|
b0ab3fbe10 | ||
|
|
008cd9985a | ||
|
|
f9b3fb4321 | ||
|
|
9be1fd56d2 | ||
|
|
6d07064ca2 | ||
|
|
18607107a7 | ||
|
|
697b0c634d | ||
|
|
fd91c8e329 | ||
|
|
6f80e5b67b | ||
|
|
8d31d09d4a | ||
|
|
271b19bf09 | ||
|
|
99c7f2f70d | ||
|
|
e4921abfff | ||
|
|
cd15a5c082 | ||
|
|
6b976c4239 | ||
|
|
a8f52c1639 | ||
|
|
867475ad49 | ||
|
|
2ecd055d1e | ||
|
|
df1489bcee | ||
|
|
4c72ab896a | ||
|
|
6cd4b1c666 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -35,6 +35,7 @@ rsyslog.pid
|
||||
/tower-license
|
||||
/tower-license/**
|
||||
tools/prometheus/data
|
||||
tools/docker-compose/Dockerfile
|
||||
|
||||
# Tower setup playbook testing
|
||||
setup/test/roles/postgresql
|
||||
|
||||
23
CHANGELOG.md
23
CHANGELOG.md
@@ -2,6 +2,27 @@
|
||||
|
||||
This is a list of high-level changes for each release of AWX. A full list of commits can be found at `https://github.com/ansible/awx/releases/tag/<version>`.
|
||||
|
||||
## 12.0.0 (Jun 9, 2020)
|
||||
- Removed memcached as a dependency of AWX (https://github.com/ansible/awx/pull/7240)
|
||||
- Moved to a single container image build instead of separate awx_web and awx_task images. The container image is just `awx` (https://github.com/ansible/awx/pull/7228)
|
||||
- Official AWX container image builds now use a two-stage container build process that notably reduces the size of our published images (https://github.com/ansible/awx/pull/7017)
|
||||
- Removed support for HipChat notifications ([EoL announcement](https://www.atlassian.com/partnerships/slack/faq#faq-98b17ca3-247f-423b-9a78-70a91681eff0)); all previously-created HipChat notification templates will be deleted due to this removal.
|
||||
- Fixed a bug which broke AWX installations with oc version 4.3 (https://github.com/ansible/awx/pull/6948/files)
|
||||
- Fixed a performance issue that caused notable delay of stdout processing for playbooks run against large numbers of hosts (https://github.com/ansible/awx/issues/6991)
|
||||
- Fixed a bug that caused CyberArk AIM credential plugin lookups to hang forever in some environments (https://github.com/ansible/awx/issues/6986)
|
||||
- Fixed a bug that caused ANY/ALL convergence settings not to properly save when editing approval nodes in the UI (https://github.com/ansible/awx/issues/6998)
|
||||
- Fixed a bug that broke support for the satellite6_group_prefix source variable (https://github.com/ansible/awx/issues/7031)
|
||||
- Fixed a bug that prevented changes to workflow node convergence settings when approval nodes were in use (https://github.com/ansible/awx/issues/7063)
|
||||
- Fixed a bug that caused notifications to fail on newer version of Mattermost (https://github.com/ansible/awx/issues/7264)
|
||||
- Fixed a bug (by upgrading to 0.8.1 of the foreman collection) that prevented host_filters from working properly with Foreman-based inventory (https://github.com/ansible/awx/issues/7225)
|
||||
- Fixed a bug that prevented the usage of the Conjur credential plugin with secrets that contain spaces (https://github.com/ansible/awx/issues/7191)
|
||||
- Fixed a bug in awx-manage run_wsbroadcast --status in kubernetes (https://github.com/ansible/awx/pull/7009)
|
||||
- Fixed a bug that broke notification toggles for system jobs in the UI (https://github.com/ansible/awx/pull/7042)
|
||||
- Fixed a bug that broke local pip installs of awxkit (https://github.com/ansible/awx/issues/7107)
|
||||
- Fixed a bug that prevented PagerDuty notifications from sending for workflow job template approvals (https://github.com/ansible/awx/issues/7094)
|
||||
- Fixed a bug that broke external log aggregation support for URL paths that include the = character (such as the tokens for SumoLogic) (https://github.com/ansible/awx/issues/7139)
|
||||
- Fixed a bug that prevented organization admins from removing labels from workflow job templates (https://github.com/ansible/awx/pull/7143)
|
||||
|
||||
## 11.2.0 (Apr 29, 2020)
|
||||
|
||||
- Inventory updates now use collection-based plugins by default (in Ansible 2.9+):
|
||||
@@ -103,7 +124,7 @@ This is a list of high-level changes for each release of AWX. A full list of com
|
||||
- Updated the bundled version of openstacksdk to address a known issue https://github.com/ansible/awx/issues/5821
|
||||
- Updated the bundled vmware_inventory plugin to the latest version to address a bug https://github.com/ansible/awx/pull/5668
|
||||
- Fixed a bug that can cause inventory updates to fail to properly save their output when run within a workflow https://github.com/ansible/awx/pull/5666
|
||||
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448
|
||||
- Removed a number of pre-computed fields from the Host and Group models to improve AWX performance. As part of this change, inventory group UIs throughout the interface no longer display status icons https://github.com/ansible/awx/pull/5448
|
||||
|
||||
## 9.1.1 (Jan 14, 2020)
|
||||
|
||||
|
||||
@@ -157,8 +157,7 @@ If you start a second terminal session, you can take a look at the running conta
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
44251b476f98 gcr.io/ansible-tower-engineering/awx_devel:devel "/entrypoint.sh /bin…" 27 seconds ago Up 23 seconds 0.0.0.0:6899->6899/tcp, 0.0.0.0:7899-7999->7899-7999/tcp, 0.0.0.0:8013->8013/tcp, 0.0.0.0:8043->8043/tcp, 0.0.0.0:8080->8080/tcp, 22/tcp, 0.0.0.0:8888->8888/tcp tools_awx_run_9e820694d57e
|
||||
b049a43817b4 memcached:alpine "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:11211->11211/tcp tools_memcached_1
|
||||
40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:6379->6379/tcp tools_redis_1
|
||||
40de380e3c2e redis:latest "docker-entrypoint.s…" 28 seconds ago Up 26 seconds
|
||||
b66a506d3007 postgres:10 "docker-entrypoint.s…" 28 seconds ago Up 26 seconds 0.0.0.0:5432->5432/tcp tools_postgres_1
|
||||
```
|
||||
**NOTE**
|
||||
|
||||
18
INSTALL.md
18
INSTALL.md
@@ -80,9 +80,11 @@ Before you can run a deployment, you'll need the following installed in your loc
|
||||
+ We use this module instead of `docker-py` because it is what the `docker-compose` Python module requires.
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Git](https://git-scm.com/) Requires Version 1.8.4+
|
||||
- [Node 10.x LTS version](https://nodejs.org/en/download/)
|
||||
- [NPM 6.x LTS](https://docs.npmjs.com/)
|
||||
- Python 3.6+
|
||||
- [Node 10.x LTS version](https://nodejs.org/en/download/)
|
||||
+ This is only required if you're [building your own container images](#official-vs-building-images) with `use_container_for_build=false`
|
||||
- [NPM 6.x LTS](https://docs.npmjs.com/)
|
||||
+ This is only required if you're [building your own container images](#official-vs-building-images) with `use_container_for_build=false`
|
||||
|
||||
### System Requirements
|
||||
|
||||
@@ -107,7 +109,7 @@ In the sections below, you'll find deployment details and instructions for each
|
||||
|
||||
### Official vs Building Images
|
||||
|
||||
When installing AWX you have the option of building your own images or using the images provided on DockerHub (see [awx_web](https://hub.docker.com/r/ansible/awx_web/) and [awx_task](https://hub.docker.com/r/ansible/awx_task/))
|
||||
When installing AWX you have the option of building your own image or using the image provided on DockerHub (see [awx](https://hub.docker.com/r/ansible/awx/))
|
||||
|
||||
This is controlled by the following variables in the `inventory` file
|
||||
|
||||
@@ -120,12 +122,16 @@ If these variables are present then all deployments will use these hosted images
|
||||
|
||||
*dockerhub_base*
|
||||
|
||||
> The base location on DockerHub where the images are hosted (by default this pulls container images named `ansible/awx_web:tag` and `ansible/awx_task:tag`)
|
||||
> The base location on DockerHub where the images are hosted (by default this pulls a container image named `ansible/awx:tag`)
|
||||
|
||||
*dockerhub_version*
|
||||
|
||||
> Multiple versions are provided. `latest` always pulls the most recent. You may also select version numbers at different granularities: 1, 1.0, 1.0.1, 1.0.0.123
|
||||
|
||||
*use_container_for_build*
|
||||
|
||||
> Use a local distribution build container image for building the AWX package. This is helpful if you don't want to bother installing the build-time dependencies as it is taken care of already.
|
||||
|
||||
|
||||
## Upgrading from previous versions
|
||||
|
||||
@@ -475,11 +481,11 @@ Before starting the install process, review the [inventory](./installer/inventor
|
||||
|
||||
*host_port*
|
||||
|
||||
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. Defaults to *80*.
|
||||
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container. If undefined no port will be exposed. Defaults to *80*.
|
||||
|
||||
*host_port_ssl*
|
||||
|
||||
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container for SSL support. Defaults to *443*, only works if you also set `ssl_certificate` (see below).
|
||||
> Provide a port number that can be mapped from the Docker daemon host to the web server running inside the AWX container for SSL support. If undefined no port will be exposed. Defaults to *443*, only works if you also set `ssl_certificate` (see below).
|
||||
|
||||
*ssl_certificate*
|
||||
|
||||
|
||||
45
Makefile
45
Makefile
@@ -6,12 +6,14 @@ PACKER ?= packer
|
||||
PACKER_BUILD_OPTS ?= -var 'official=$(OFFICIAL)' -var 'aw_repo_url=$(AW_REPO_URL)'
|
||||
NODE ?= node
|
||||
NPM_BIN ?= npm
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
DEPS_SCRIPT ?= packaging/bundle/deps.py
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
IMAGE_REPOSITORY_AUTH ?=
|
||||
IMAGE_REPOSITORY_BASE ?= https://gcr.io
|
||||
VERSION := $(shell cat VERSION)
|
||||
PYCURL_SSL_LIBRARY ?= openssl
|
||||
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
@@ -24,7 +26,7 @@ CELERY_SCHEDULE_FILE ?= /var/lib/awx/beat.db
|
||||
DEV_DOCKER_TAG_BASE ?= gcr.io/ansible-tower-engineering
|
||||
# Python packages to install only from source (not from binary wheels)
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio,pycurl
|
||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||
# to install the actual requirements
|
||||
VENV_BOOTSTRAP ?= pip==19.3.1 setuptools==41.6.0
|
||||
@@ -173,9 +175,9 @@ virtualenv_awx:
|
||||
# --ignore-install flag is not used because *.txt files should specify exact versions
|
||||
requirements_ansible: virtualenv_ansible
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/ansible/bin/pip uninstall --yes -r requirements/requirements_ansible_uninstall.txt
|
||||
# Same effect as using --system-site-packages flag on venv creation
|
||||
@@ -183,9 +185,9 @@ requirements_ansible: virtualenv_ansible
|
||||
|
||||
requirements_ansible_py3: virtualenv_ansible_py3
|
||||
if [[ "$(PIP_OPTIONS)" == *"--no-index"* ]]; then \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_local.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) -r /dev/stdin ; \
|
||||
else \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
cat requirements/requirements_ansible.txt requirements/requirements_ansible_git.txt | PYCURL_SSL_LIBRARY=$(PYCURL_SSL_LIBRARY) $(VENV_BASE)/ansible/bin/pip3 install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) -r /dev/stdin ; \
|
||||
fi
|
||||
$(VENV_BASE)/ansible/bin/pip3 uninstall --yes -r requirements/requirements_ansible_uninstall.txt
|
||||
# Same effect as using --system-site-packages flag on venv creation
|
||||
@@ -353,8 +355,7 @@ swagger: reports
|
||||
check: flake8 pep8 # pyflakes pylint
|
||||
|
||||
awx-link:
|
||||
cp -R /tmp/awx.egg-info /awx_devel/ || true
|
||||
sed -i "s/placeholder/$(shell cat VERSION)/" /awx_devel/awx.egg-info/PKG-INFO
|
||||
[ -d "/awx_devel/awx.egg-info" ] || python3 /awx_devel/setup.py egg_info_dev
|
||||
cp -f /tmp/awx.egg-link /venv/awx/lib/python$(PYTHON_VERSION)/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
@@ -365,6 +366,7 @@ test:
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider -n auto $(TEST_DIRS)
|
||||
cmp VERSION awxkit/VERSION || "VERSION and awxkit/VERSION *must* match"
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py2,py3
|
||||
awx-manage check_migrations --dry-run --check -n 'vNNN_missing_migration_file'
|
||||
|
||||
@@ -378,7 +380,11 @@ test_collection:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONPATH=$PYTHONPATH:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
|
||||
PYTHONPATH=$(PYTHONPATH):$(VENV_BASE)/awx/lib/python3.6/site-packages:/usr/lib/python3.6/site-packages py.test $(COLLECTION_TEST_DIRS)
|
||||
# The python path needs to be modified so that the tests can find Ansible within the container
|
||||
# First we will use anything explicitly set as PYTHONPATH
|
||||
# Second we will load any libraries out of the virtualenv (if it's unspecified that should be ok because python should not load out of an empty directory)
|
||||
# Finally we will add the system path so that the tests can find the ansible libraries
|
||||
|
||||
flake8_collection:
|
||||
flake8 awx_collection/ # Different settings, in main exclude list
|
||||
@@ -393,7 +399,7 @@ symlink_collection:
|
||||
ln -s $(shell pwd)/awx_collection $(COLLECTION_INSTALL)
|
||||
|
||||
build_collection:
|
||||
ansible-playbook -i localhost, awx_collection/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION)
|
||||
ansible-playbook -i localhost, awx_collection/tools/template_galaxy.yml -e collection_package=$(COLLECTION_PACKAGE) -e collection_namespace=$(COLLECTION_NAMESPACE) -e collection_version=$(VERSION) -e '{"awx_template_version":false}'
|
||||
ansible-galaxy collection build awx_collection --force --output-path=awx_collection
|
||||
|
||||
install_collection: build_collection
|
||||
@@ -547,11 +553,13 @@ jshint: $(UI_DEPS_FLAG_FILE)
|
||||
$(NPM_BIN) run --prefix awx/ui jshint
|
||||
$(NPM_BIN) run --prefix awx/ui lint
|
||||
|
||||
ui-zuul-lint-and-test: $(UI_DEPS_FLAG_FILE)
|
||||
$(NPM_BIN) run --prefix awx/ui jshint
|
||||
$(NPM_BIN) run --prefix awx/ui lint
|
||||
$(NPM_BIN) --prefix awx/ui run test:ci
|
||||
$(NPM_BIN) --prefix awx/ui run unit
|
||||
ui-zuul-lint-and-test:
|
||||
CHROMIUM_BIN=$(CHROMIUM_BIN) ./awx/ui/build/zuul_download_chromium.sh
|
||||
CHROMIUM_BIN=$(CHROMIUM_BIN) PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 $(NPM_BIN) --unsafe-perm --prefix awx/ui ci --no-save awx/ui
|
||||
CHROMIUM_BIN=$(CHROMIUM_BIN) $(NPM_BIN) run --prefix awx/ui jshint
|
||||
CHROMIUM_BIN=$(CHROMIUM_BIN) $(NPM_BIN) run --prefix awx/ui lint
|
||||
CHROME_BIN=$(CHROMIUM_BIN) $(NPM_BIN) --prefix awx/ui run test:ci
|
||||
CHROME_BIN=$(CHROMIUM_BIN) $(NPM_BIN) --prefix awx/ui run unit
|
||||
|
||||
# END UI TASKS
|
||||
# --------------------------------------
|
||||
@@ -640,7 +648,7 @@ docker-compose-runtest: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh
|
||||
|
||||
docker-compose-build-swagger: awx/projects
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports awx /start_tests.sh swagger
|
||||
cd tools && CURRENT_UID=$(shell id -u) TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose run --rm --service-ports --no-deps awx /start_tests.sh swagger
|
||||
|
||||
detect-schema-change: genschema
|
||||
curl https://s3.amazonaws.com/awx-public-ci-files/schema.json -o reference-schema.json
|
||||
@@ -650,17 +658,16 @@ detect-schema-change: genschema
|
||||
docker-compose-clean: awx/projects
|
||||
cd tools && TAG=$(COMPOSE_TAG) DEV_DOCKER_TAG_BASE=$(DEV_DOCKER_TAG_BASE) docker-compose rm -sf
|
||||
|
||||
docker-compose-build: awx-devel-build
|
||||
|
||||
# Base development image build
|
||||
awx-devel-build:
|
||||
docker-compose-build:
|
||||
ansible localhost -m template -a "src=installer/roles/image_build/templates/Dockerfile.j2 dest=tools/docker-compose/Dockerfile" -e build_dev=True
|
||||
docker build -t ansible/awx_devel -f tools/docker-compose/Dockerfile \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
docker tag ansible/awx_devel $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
#docker push $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
|
||||
# For use when developing on "isolated" AWX deployments
|
||||
docker-compose-isolated-build: awx-devel-build
|
||||
docker-compose-isolated-build: docker-compose-build
|
||||
docker build -t ansible/awx_isolated -f tools/docker-isolated/Dockerfile .
|
||||
docker tag ansible/awx_isolated $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
|
||||
#docker push $(DEV_DOCKER_TAG_BASE)/awx_isolated:$(COMPOSE_TAG)
|
||||
|
||||
@@ -30,6 +30,7 @@ except ImportError:
|
||||
HAS_DJANGO = False
|
||||
else:
|
||||
from django.db.backends.base import schema
|
||||
from django.db.models import indexes
|
||||
from django.db.backends.utils import names_digest
|
||||
|
||||
|
||||
@@ -50,6 +51,7 @@ if HAS_DJANGO is True:
|
||||
return h.hexdigest()[:length]
|
||||
|
||||
schema.names_digest = names_digest
|
||||
indexes.names_digest = names_digest
|
||||
|
||||
|
||||
def find_commands(management_dir):
|
||||
|
||||
@@ -806,7 +806,9 @@ class UnifiedJobSerializer(BaseSerializer):
|
||||
td = now() - obj.started
|
||||
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
|
||||
ret['elapsed'] = float(ret['elapsed'])
|
||||
|
||||
# Because this string is saved in the db in the source language,
|
||||
# it must be marked for translation after it is pulled from the db, not when set
|
||||
ret['job_explanation'] = _(obj.job_explanation)
|
||||
return ret
|
||||
|
||||
|
||||
@@ -2306,6 +2308,7 @@ class RoleSerializer(BaseSerializer):
|
||||
content_model = obj.content_type.model_class()
|
||||
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
|
||||
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
|
||||
ret['summary_fields']['resource_id'] = obj.object_id
|
||||
|
||||
return ret
|
||||
|
||||
@@ -2756,16 +2759,11 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
|
||||
if obj.organization_id:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization_id})
|
||||
if isinstance(obj, UnifiedJobTemplate):
|
||||
res['extra_credentials'] = self.reverse(
|
||||
'api:job_template_extra_credentials_list',
|
||||
kwargs={'pk': obj.pk}
|
||||
)
|
||||
res['credentials'] = self.reverse(
|
||||
'api:job_template_credentials_list',
|
||||
kwargs={'pk': obj.pk}
|
||||
)
|
||||
elif isinstance(obj, UnifiedJob):
|
||||
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
|
||||
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
|
||||
|
||||
return res
|
||||
@@ -2934,7 +2932,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
|
||||
all_creds = []
|
||||
# Organize credential data into multitude of deprecated fields
|
||||
extra_creds = []
|
||||
if obj.pk:
|
||||
for cred in obj.credentials.all():
|
||||
summarized_cred = {
|
||||
@@ -2945,10 +2942,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
'cloud': cred.credential_type.kind == 'cloud'
|
||||
}
|
||||
all_creds.append(summarized_cred)
|
||||
if cred.credential_type.kind in ('cloud', 'net'):
|
||||
extra_creds.append(summarized_cred)
|
||||
if self.is_detail_view:
|
||||
summary_fields['extra_credentials'] = extra_creds
|
||||
summary_fields['credentials'] = all_creds
|
||||
return summary_fields
|
||||
|
||||
@@ -3023,7 +3016,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
|
||||
all_creds = []
|
||||
# Organize credential data into multitude of deprecated fields
|
||||
extra_creds = []
|
||||
if obj.pk:
|
||||
for cred in obj.credentials.all():
|
||||
summarized_cred = {
|
||||
@@ -3034,10 +3026,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
|
||||
'cloud': cred.credential_type.kind == 'cloud'
|
||||
}
|
||||
all_creds.append(summarized_cred)
|
||||
if cred.credential_type.kind in ('cloud', 'net'):
|
||||
extra_creds.append(summarized_cred)
|
||||
if self.is_detail_view:
|
||||
summary_fields['extra_credentials'] = extra_creds
|
||||
summary_fields['credentials'] = all_creds
|
||||
return summary_fields
|
||||
|
||||
@@ -3612,7 +3600,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
|
||||
ujt = self.instance.unified_job_template
|
||||
if ujt is None:
|
||||
ret = {}
|
||||
for fd in ('workflow_job_template', 'identifier'):
|
||||
for fd in ('workflow_job_template', 'identifier', 'all_parents_must_converge'):
|
||||
if fd in attrs:
|
||||
ret[fd] = attrs[fd]
|
||||
return ret
|
||||
@@ -3899,15 +3887,23 @@ class ProjectUpdateEventSerializer(JobEventSerializer):
|
||||
return UriCleaner.remove_sensitive(obj.stdout)
|
||||
|
||||
def get_event_data(self, obj):
|
||||
try:
|
||||
return json.loads(
|
||||
UriCleaner.remove_sensitive(
|
||||
json.dumps(obj.event_data)
|
||||
# the project update playbook uses the git, hg, or svn modules
|
||||
# to clone repositories, and those modules are prone to printing
|
||||
# raw SCM URLs in their stdout (which *could* contain passwords)
|
||||
# attempt to detect and filter HTTP basic auth passwords in the stdout
|
||||
# of these types of events
|
||||
if obj.event_data.get('task_action') in ('git', 'hg', 'svn'):
|
||||
try:
|
||||
return json.loads(
|
||||
UriCleaner.remove_sensitive(
|
||||
json.dumps(obj.event_data)
|
||||
)
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to sanitize event_data")
|
||||
return {}
|
||||
except Exception:
|
||||
logger.exception("Failed to sanitize event_data")
|
||||
return {}
|
||||
else:
|
||||
return obj.event_data
|
||||
|
||||
|
||||
class AdHocCommandEventSerializer(BaseSerializer):
|
||||
|
||||
@@ -23,9 +23,7 @@ from awx.api.views import (
|
||||
UnifiedJobList,
|
||||
HostAnsibleFactsDetail,
|
||||
JobCredentialsList,
|
||||
JobExtraCredentialsList,
|
||||
JobTemplateCredentialsList,
|
||||
JobTemplateExtraCredentialsList,
|
||||
SchedulePreview,
|
||||
ScheduleZoneInfo,
|
||||
OAuth2ApplicationList,
|
||||
@@ -83,9 +81,7 @@ v2_urls = [
|
||||
url(r'^credential_types/', include(credential_type_urls)),
|
||||
url(r'^credential_input_sources/', include(credential_input_source_urls)),
|
||||
url(r'^hosts/(?P<pk>[0-9]+)/ansible_facts/$', HostAnsibleFactsDetail.as_view(), name='host_ansible_facts_detail'),
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/extra_credentials/$', JobExtraCredentialsList.as_view(), name='job_extra_credentials_list'),
|
||||
url(r'^jobs/(?P<pk>[0-9]+)/credentials/$', JobCredentialsList.as_view(), name='job_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/extra_credentials/$', JobTemplateExtraCredentialsList.as_view(), name='job_template_extra_credentials_list'),
|
||||
url(r'^job_templates/(?P<pk>[0-9]+)/credentials/$', JobTemplateCredentialsList.as_view(), name='job_template_credentials_list'),
|
||||
url(r'^schedules/preview/$', SchedulePreview.as_view(), name='schedule_rrule'),
|
||||
url(r'^schedules/zoneinfo/$', ScheduleZoneInfo.as_view(), name='schedule_zoneinfo'),
|
||||
|
||||
@@ -12,7 +12,7 @@ import socket
|
||||
import sys
|
||||
import time
|
||||
from base64 import b64encode
|
||||
from collections import OrderedDict, Iterable
|
||||
from collections import OrderedDict
|
||||
|
||||
|
||||
# Django
|
||||
@@ -2337,70 +2337,24 @@ class JobTemplateLaunch(RetrieveAPIView):
|
||||
old field structure to launch endpoint
|
||||
TODO: delete this method with future API version changes
|
||||
'''
|
||||
ignored_fields = {}
|
||||
modern_data = data.copy()
|
||||
|
||||
id_fd = '{}_id'.format('inventory')
|
||||
if 'inventory' not in modern_data and id_fd in modern_data:
|
||||
modern_data['inventory'] = modern_data[id_fd]
|
||||
|
||||
# Automatically convert legacy launch credential arguments into a list of `.credentials`
|
||||
if 'credentials' in modern_data and 'extra_credentials' in modern_data:
|
||||
raise ParseError({"error": _(
|
||||
"'credentials' cannot be used in combination with 'extra_credentials'."
|
||||
)})
|
||||
|
||||
if 'extra_credentials' in modern_data:
|
||||
# make a list of the current credentials
|
||||
existing_credentials = obj.credentials.all()
|
||||
template_credentials = list(existing_credentials) # save copy of existing
|
||||
new_credentials = []
|
||||
if 'extra_credentials' in modern_data:
|
||||
existing_credentials = [
|
||||
cred for cred in existing_credentials
|
||||
if cred.credential_type.kind not in ('cloud', 'net')
|
||||
]
|
||||
prompted_value = modern_data.pop('extra_credentials')
|
||||
|
||||
# validate type, since these are not covered by a serializer
|
||||
if not isinstance(prompted_value, Iterable):
|
||||
msg = _(
|
||||
"Incorrect type. Expected a list received {}."
|
||||
).format(prompted_value.__class__.__name__)
|
||||
raise ParseError({'extra_credentials': [msg], 'credentials': [msg]})
|
||||
|
||||
# add the deprecated credential specified in the request
|
||||
if not isinstance(prompted_value, Iterable) or isinstance(prompted_value, str):
|
||||
prompted_value = [prompted_value]
|
||||
|
||||
# If user gave extra_credentials, special case to use exactly
|
||||
# the given list without merging with JT credentials
|
||||
if prompted_value:
|
||||
obj._deprecated_credential_launch = True # signal to not merge credentials
|
||||
new_credentials.extend(prompted_value)
|
||||
|
||||
# combine the list of "new" and the filtered list of "old"
|
||||
new_credentials.extend([cred.pk for cred in existing_credentials])
|
||||
if new_credentials:
|
||||
# If provided list doesn't contain the pre-existing credentials
|
||||
# defined on the template, add them back here
|
||||
for cred_obj in template_credentials:
|
||||
if cred_obj.pk not in new_credentials:
|
||||
new_credentials.append(cred_obj.pk)
|
||||
modern_data['credentials'] = new_credentials
|
||||
|
||||
# credential passwords were historically provided as top-level attributes
|
||||
if 'credential_passwords' not in modern_data:
|
||||
modern_data['credential_passwords'] = data.copy()
|
||||
|
||||
return (modern_data, ignored_fields)
|
||||
return modern_data
|
||||
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
|
||||
try:
|
||||
modern_data, ignored_fields = self.modernize_launch_payload(
|
||||
modern_data = self.modernize_launch_payload(
|
||||
data=request.data, obj=obj
|
||||
)
|
||||
except ParseError as exc:
|
||||
@@ -2410,8 +2364,6 @@ class JobTemplateLaunch(RetrieveAPIView):
|
||||
if not serializer.is_valid():
|
||||
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
ignored_fields.update(serializer._ignored_fields)
|
||||
|
||||
if not request.user.can_access(models.JobLaunchConfig, 'add', serializer.validated_data, template=obj):
|
||||
raise PermissionDenied()
|
||||
|
||||
@@ -2427,11 +2379,11 @@ class JobTemplateLaunch(RetrieveAPIView):
|
||||
data = OrderedDict()
|
||||
if isinstance(new_job, models.WorkflowJob):
|
||||
data['workflow_job'] = new_job.id
|
||||
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
|
||||
data['ignored_fields'] = self.sanitize_for_response(serializer._ignored_fields)
|
||||
data.update(serializers.WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
|
||||
else:
|
||||
data['job'] = new_job.id
|
||||
data['ignored_fields'] = self.sanitize_for_response(ignored_fields)
|
||||
data['ignored_fields'] = self.sanitize_for_response(serializer._ignored_fields)
|
||||
data.update(serializers.JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job))
|
||||
headers = {'Location': new_job.get_absolute_url(request)}
|
||||
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
|
||||
@@ -2711,22 +2663,6 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
|
||||
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
|
||||
|
||||
|
||||
class JobTemplateExtraCredentialsList(JobTemplateCredentialsList):
|
||||
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
sublist_qs = super(JobTemplateExtraCredentialsList, self).get_queryset()
|
||||
sublist_qs = sublist_qs.filter(credential_type__kind__in=['cloud', 'net'])
|
||||
return sublist_qs
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
valid = super(JobTemplateExtraCredentialsList, self).is_valid_relation(parent, sub, created)
|
||||
if sub.credential_type.kind not in ('cloud', 'net'):
|
||||
return {'error': _('Extra credentials must be network or cloud.')}
|
||||
return valid
|
||||
|
||||
|
||||
class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView):
|
||||
|
||||
model = models.Label
|
||||
@@ -3543,16 +3479,6 @@ class JobCredentialsList(SubListAPIView):
|
||||
relationship = 'credentials'
|
||||
|
||||
|
||||
class JobExtraCredentialsList(JobCredentialsList):
|
||||
|
||||
deprecated = True
|
||||
|
||||
def get_queryset(self):
|
||||
sublist_qs = super(JobExtraCredentialsList, self).get_queryset()
|
||||
sublist_qs = sublist_qs.filter(credential_type__kind__in=['cloud', 'net'])
|
||||
return sublist_qs
|
||||
|
||||
|
||||
class JobLabelList(SubListAPIView):
|
||||
|
||||
model = models.Label
|
||||
|
||||
@@ -4,12 +4,11 @@ import os
|
||||
import logging
|
||||
import django
|
||||
from awx import __version__ as tower_version
|
||||
|
||||
# Prepare the AWX environment.
|
||||
from awx import prepare_env, MODE
|
||||
from channels.routing import get_default_application # noqa
|
||||
prepare_env() # NOQA
|
||||
|
||||
from channels.routing import get_default_application
|
||||
|
||||
|
||||
"""
|
||||
|
||||
@@ -11,7 +11,7 @@ from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db import transaction, connection
|
||||
from django.db.utils import Error as DBError
|
||||
from django.db.utils import Error as DBError, ProgrammingError
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# Django REST Framework
|
||||
@@ -74,10 +74,19 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
logger.debug('Obtaining database settings in spite of broken transaction.')
|
||||
transaction.set_rollback(False)
|
||||
yield
|
||||
except DBError:
|
||||
except DBError as exc:
|
||||
if trans_safe:
|
||||
if 'migrate' not in sys.argv and 'check_migrations' not in sys.argv:
|
||||
logger.exception('Database settings are not available, using defaults.')
|
||||
level = logger.exception
|
||||
if isinstance(exc, ProgrammingError):
|
||||
if 'relation' in str(exc) and 'does not exist' in str(exc):
|
||||
# this generally means we can't fetch Tower configuration
|
||||
# because the database hasn't actually finished migrating yet;
|
||||
# this is usually a sign that a service in a container (such as ws_broadcast)
|
||||
# has come up *before* the database has finished migrating, and
|
||||
# especially that the conf.settings table doesn't exist yet
|
||||
level = logger.debug
|
||||
level('Database settings are not available, using defaults.')
|
||||
else:
|
||||
logger.exception('Error modifying something related to database settings.')
|
||||
finally:
|
||||
|
||||
@@ -10,6 +10,7 @@ import socket
|
||||
from socket import SHUT_RDWR
|
||||
|
||||
# Django
|
||||
from django.db import connection
|
||||
from django.conf import settings
|
||||
from django.http import Http404
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
@@ -130,7 +131,8 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.save(update_fields=['value'])
|
||||
settings_change_list.append(key)
|
||||
if settings_change_list:
|
||||
handle_setting_changes.delay(settings_change_list)
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
@@ -145,7 +147,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.delete()
|
||||
settings_change_list.append(setting.key)
|
||||
if settings_change_list:
|
||||
handle_setting_changes.delay(settings_change_list)
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
|
||||
# When TOWER_URL_BASE is deleted from the API, reset it to the hostname
|
||||
# used to make the request as a default.
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -495,7 +495,7 @@ class NotificationAttachMixin(BaseAccess):
|
||||
# due to this special case, we use symmetrical logic with attach permission
|
||||
return self._can_attach(notification_template=sub_obj, resource_obj=obj)
|
||||
return super(NotificationAttachMixin, self).can_unattach(
|
||||
obj, sub_obj, relationship, relationship, data=data
|
||||
obj, sub_obj, relationship, data=data
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -246,18 +246,6 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_ISOLATED_VERBOSITY',
|
||||
field_class=fields.IntegerField,
|
||||
min_value=0,
|
||||
max_value=5,
|
||||
label=_('Verbosity level for isolated node management tasks'),
|
||||
help_text=_('This can be raised to aid in debugging connection issues for isolated task execution'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
default=0
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_ISOLATED_CHECK_INTERVAL',
|
||||
field_class=fields.IntegerField,
|
||||
@@ -435,6 +423,19 @@ register(
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_SHOW_PLAYBOOK_LINKS',
|
||||
field_class=fields.BooleanField,
|
||||
default=False,
|
||||
label=_('Follow symlinks'),
|
||||
help_text=_(
|
||||
'Follow symbolic links when scanning for playbooks. Be aware that setting this to True can lead '
|
||||
'to infinite recursion if a link points to a parent directory of itself.'
|
||||
),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'PRIMARY_GALAXY_URL',
|
||||
field_class=fields.URLField,
|
||||
@@ -777,16 +778,6 @@ register(
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_AUDIT',
|
||||
field_class=fields.BooleanField,
|
||||
allow_null=True,
|
||||
default=False,
|
||||
label=_('Enabled external log aggregation auditing'),
|
||||
help_text=_('When enabled, all external logs emitted by Tower will also be written to /var/log/tower/external.log. This is an experimental setting intended to be used for debugging external log aggregation issues (and may be subject to change in the future).'), # noqa
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
|
||||
field_class=fields.IntegerField,
|
||||
@@ -822,15 +813,6 @@ register(
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'BROKER_DURABILITY',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Message Durability'),
|
||||
help_text=_('When set (the default), underlying queues will be persisted to disk. Disable this to enable higher message bus throughput.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_GATHER',
|
||||
|
||||
@@ -104,7 +104,7 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
|
||||
logger.info(f"client '{self.channel_name}' joined the broadcast group.")
|
||||
|
||||
async def disconnect(self, code):
|
||||
logger.info("client '{self.channel_name}' disconnected from the broadcast group.")
|
||||
logger.info(f"client '{self.channel_name}' disconnected from the broadcast group.")
|
||||
await self.channel_layer.group_discard(settings.BROADCAST_WEBSOCKET_GROUP_NAME, self.channel_name)
|
||||
|
||||
async def internal_message(self, event):
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
from .plugin import CredentialPlugin
|
||||
from .plugin import CredentialPlugin, CertFiles
|
||||
|
||||
from urllib.parse import quote, urlencode, urljoin
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import requests
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
create_temporary_fifo,
|
||||
)
|
||||
|
||||
aim_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
@@ -81,21 +76,13 @@ def aim_backend(**kwargs):
|
||||
request_qs = '?' + urlencode(query_params, quote_via=quote)
|
||||
request_url = urljoin(url, '/'.join(['AIMWebService', 'api', 'Accounts']))
|
||||
|
||||
cert = None
|
||||
if client_cert and client_key:
|
||||
cert = (
|
||||
create_temporary_fifo(client_cert.encode()),
|
||||
create_temporary_fifo(client_key.encode())
|
||||
with CertFiles(client_cert, client_key) as cert:
|
||||
res = requests.get(
|
||||
request_url + request_qs,
|
||||
timeout=30,
|
||||
cert=cert,
|
||||
verify=verify,
|
||||
)
|
||||
elif client_cert:
|
||||
cert = create_temporary_fifo(client_cert.encode())
|
||||
|
||||
res = requests.get(
|
||||
request_url + request_qs,
|
||||
timeout=30,
|
||||
cert=cert,
|
||||
verify=verify,
|
||||
)
|
||||
res.raise_for_status()
|
||||
return res.json()['Content']
|
||||
|
||||
|
||||
@@ -1,16 +1,11 @@
|
||||
from .plugin import CredentialPlugin
|
||||
from .plugin import CredentialPlugin, CertFiles
|
||||
|
||||
import base64
|
||||
from urllib.parse import urljoin, quote_plus
|
||||
from urllib.parse import urljoin, quote
|
||||
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
import requests
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
create_temporary_fifo,
|
||||
)
|
||||
|
||||
|
||||
conjur_inputs = {
|
||||
'fields': [{
|
||||
@@ -55,9 +50,9 @@ conjur_inputs = {
|
||||
def conjur_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
api_key = kwargs['api_key']
|
||||
account = quote_plus(kwargs['account'])
|
||||
username = quote_plus(kwargs['username'])
|
||||
secret_path = quote_plus(kwargs['secret_path'])
|
||||
account = quote(kwargs['account'], safe='')
|
||||
username = quote(kwargs['username'], safe='')
|
||||
secret_path = quote(kwargs['secret_path'], safe='')
|
||||
version = kwargs.get('secret_version')
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
@@ -65,22 +60,20 @@ def conjur_backend(**kwargs):
|
||||
'headers': {'Content-Type': 'text/plain'},
|
||||
'data': api_key
|
||||
}
|
||||
if cacert:
|
||||
auth_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# https://www.conjur.org/api.html#authentication-authenticate-post
|
||||
resp = requests.post(
|
||||
urljoin(url, '/'.join(['authn', account, username, 'authenticate'])),
|
||||
**auth_kwargs
|
||||
)
|
||||
with CertFiles(cacert) as cert:
|
||||
# https://www.conjur.org/api.html#authentication-authenticate-post
|
||||
auth_kwargs['verify'] = cert
|
||||
resp = requests.post(
|
||||
urljoin(url, '/'.join(['authn', account, username, 'authenticate'])),
|
||||
**auth_kwargs
|
||||
)
|
||||
resp.raise_for_status()
|
||||
token = base64.b64encode(resp.content).decode('utf-8')
|
||||
|
||||
lookup_kwargs = {
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token)},
|
||||
}
|
||||
if cacert:
|
||||
lookup_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
|
||||
path = urljoin(url, '/'.join([
|
||||
@@ -92,7 +85,9 @@ def conjur_backend(**kwargs):
|
||||
if version:
|
||||
path = '?'.join([path, version])
|
||||
|
||||
resp = requests.get(path, timeout=30, **lookup_kwargs)
|
||||
with CertFiles(cacert) as cert:
|
||||
lookup_kwargs['verify'] = cert
|
||||
resp = requests.get(path, timeout=30, **lookup_kwargs)
|
||||
resp.raise_for_status()
|
||||
return resp.text
|
||||
|
||||
|
||||
@@ -3,16 +3,11 @@ import os
|
||||
import pathlib
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
from .plugin import CredentialPlugin, CertFiles
|
||||
|
||||
import requests
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
create_temporary_fifo,
|
||||
)
|
||||
|
||||
base_inputs = {
|
||||
'fields': [{
|
||||
'id': 'url',
|
||||
@@ -129,14 +124,13 @@ def approle_auth(**kwargs):
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
request_kwargs = {'timeout': 30}
|
||||
if cacert:
|
||||
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
# AppRole Login
|
||||
request_kwargs['json'] = {'role_id': role_id, 'secret_id': secret_id}
|
||||
sess = requests.Session()
|
||||
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
resp.raise_for_status()
|
||||
token = resp.json()['auth']['client_token']
|
||||
return token
|
||||
@@ -152,8 +146,6 @@ def kv_backend(**kwargs):
|
||||
api_version = kwargs['api_version']
|
||||
|
||||
request_kwargs = {'timeout': 30}
|
||||
if cacert:
|
||||
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
sess = requests.Session()
|
||||
sess.headers['Authorization'] = 'Bearer {}'.format(token)
|
||||
@@ -180,7 +172,9 @@ def kv_backend(**kwargs):
|
||||
path_segments = [secret_path]
|
||||
|
||||
request_url = urljoin(url, '/'.join(['v1'] + path_segments)).rstrip('/')
|
||||
response = sess.get(request_url, **request_kwargs)
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
response = sess.get(request_url, **request_kwargs)
|
||||
response.raise_for_status()
|
||||
|
||||
json = response.json()
|
||||
@@ -205,8 +199,6 @@ def ssh_backend(**kwargs):
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
request_kwargs = {'timeout': 30}
|
||||
if cacert:
|
||||
request_kwargs['verify'] = create_temporary_fifo(cacert.encode())
|
||||
|
||||
request_kwargs['json'] = {'public_key': kwargs['public_key']}
|
||||
if kwargs.get('valid_principals'):
|
||||
@@ -218,7 +210,10 @@ def ssh_backend(**kwargs):
|
||||
sess.headers['X-Vault-Token'] = token
|
||||
# https://www.vaultproject.io/api/secret/ssh/index.html#sign-ssh-key
|
||||
request_url = '/'.join([url, secret_path, 'sign', role]).rstrip('/')
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
|
||||
resp.raise_for_status()
|
||||
return resp.json()['data']['signed_key']
|
||||
|
||||
@@ -1,3 +1,45 @@
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
CredentialPlugin = namedtuple('CredentialPlugin', ['name', 'inputs', 'backend'])
|
||||
|
||||
|
||||
class CertFiles():
|
||||
"""
|
||||
A context manager used for writing a certificate and (optional) key
|
||||
to $TMPDIR, and cleaning up afterwards.
|
||||
|
||||
This is particularly useful as a shared resource for credential plugins
|
||||
that want to pull cert/key data out of the database and persist it
|
||||
temporarily to the file system so that it can loaded into the openssl
|
||||
certificate chain (generally, for HTTPS requests plugins make via the
|
||||
Python requests library)
|
||||
|
||||
with CertFiles(cert_data, key_data) as cert:
|
||||
# cert is string representing a path to the cert or pemfile
|
||||
# temporarily written to disk
|
||||
requests.post(..., cert=cert)
|
||||
"""
|
||||
|
||||
certfile = None
|
||||
|
||||
def __init__(self, cert, key=None):
|
||||
self.cert = cert
|
||||
self.key = key
|
||||
|
||||
def __enter__(self):
|
||||
if not self.cert:
|
||||
return None
|
||||
self.certfile = tempfile.NamedTemporaryFile('wb', delete=False)
|
||||
self.certfile.write(self.cert.encode())
|
||||
if self.key:
|
||||
self.certfile.write(b'\n')
|
||||
self.certfile.write(self.key.encode())
|
||||
self.certfile.flush()
|
||||
return str(self.certfile.name)
|
||||
|
||||
def __exit__(self, *args):
|
||||
if self.certfile and os.path.exists(self.certfile.name):
|
||||
os.remove(self.certfile.name)
|
||||
|
||||
@@ -8,6 +8,7 @@ import sys
|
||||
import redis
|
||||
import json
|
||||
import psycopg2
|
||||
import time
|
||||
from uuid import UUID
|
||||
from queue import Empty as QueueEmpty
|
||||
|
||||
@@ -34,6 +35,7 @@ class WorkerSignalHandler:
|
||||
|
||||
def __init__(self):
|
||||
self.kill_now = False
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGINT, self.exit_gracefully)
|
||||
|
||||
def exit_gracefully(self, *args, **kwargs):
|
||||
@@ -116,18 +118,23 @@ class AWXConsumerRedis(AWXConsumerBase):
|
||||
super(AWXConsumerRedis, self).run(*args, **kwargs)
|
||||
self.worker.on_start()
|
||||
|
||||
queue = redis.Redis.from_url(settings.BROKER_URL)
|
||||
time_to_sleep = 1
|
||||
while True:
|
||||
try:
|
||||
res = queue.blpop(self.queues)
|
||||
res = json.loads(res[1])
|
||||
self.process_task(res)
|
||||
except redis.exceptions.RedisError:
|
||||
logger.exception("encountered an error communicating with redis")
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
logger.exception("failed to decode JSON message from redis")
|
||||
if self.should_stop:
|
||||
return
|
||||
queue = redis.Redis.from_url(settings.BROKER_URL)
|
||||
while True:
|
||||
try:
|
||||
res = queue.blpop(self.queues)
|
||||
time_to_sleep = 1
|
||||
res = json.loads(res[1])
|
||||
self.process_task(res)
|
||||
except redis.exceptions.RedisError:
|
||||
time_to_sleep = min(time_to_sleep * 2, 30)
|
||||
logger.exception(f"encountered an error communicating with redis. Reconnect attempt in {time_to_sleep} seconds")
|
||||
time.sleep(time_to_sleep)
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
logger.exception("failed to decode JSON message from redis")
|
||||
if self.should_stop:
|
||||
return
|
||||
|
||||
|
||||
class AWXConsumerPG(AWXConsumerBase):
|
||||
|
||||
@@ -58,7 +58,7 @@ class IsolatedManager(object):
|
||||
os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
||||
for host in hosts:
|
||||
inventory['all']['hosts'][host] = {
|
||||
"ansible_connection": "kubectl",
|
||||
"ansible_connection": "community.kubernetes.kubectl",
|
||||
"ansible_kubectl_config": path,
|
||||
}
|
||||
else:
|
||||
@@ -74,6 +74,7 @@ class IsolatedManager(object):
|
||||
env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
|
||||
env['ANSIBLE_HOST_KEY_CHECKING'] = str(settings.AWX_ISOLATED_HOST_KEY_CHECKING)
|
||||
env['ANSIBLE_LIBRARY'] = os.path.join(os.path.dirname(awx.__file__), 'plugins', 'isolated')
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
|
||||
set_pythonpath(os.path.join(settings.ANSIBLE_VENV_PATH, 'lib'), env)
|
||||
|
||||
def finished_callback(runner_obj):
|
||||
@@ -109,7 +110,6 @@ class IsolatedManager(object):
|
||||
'cancel_callback': self.canceled_callback,
|
||||
'settings': {
|
||||
'job_timeout': settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
|
||||
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
'suppress_ansible_output': True,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ class Command(BaseCommand):
|
||||
if clear:
|
||||
for i in range(12):
|
||||
sys.stdout.write('\x1b[1A\x1b[2K')
|
||||
for l in lines:
|
||||
print(l)
|
||||
for line in lines:
|
||||
print(line)
|
||||
clear = True
|
||||
time.sleep(.25)
|
||||
|
||||
@@ -169,7 +169,7 @@ class AnsibleInventoryLoader(object):
|
||||
self.tmp_private_dir = build_proot_temp_dir()
|
||||
logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
|
||||
kwargs['proot_temp_dir'] = self.tmp_private_dir
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.INVENTORY_COLLECTIONS_ROOT]
|
||||
kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.AWX_ANSIBLE_COLLECTIONS_PATHS]
|
||||
logger.debug("Running from `{}` working directory.".format(cwd))
|
||||
|
||||
if self.venv_path != settings.ANSIBLE_VENV_PATH:
|
||||
@@ -649,11 +649,12 @@ class Command(BaseCommand):
|
||||
if group_name in existing_group_names:
|
||||
continue
|
||||
mem_group = self.all_group.all_groups[group_name]
|
||||
group_desc = mem_group.variables.pop('_awx_description', 'imported')
|
||||
group = self.inventory.groups.update_or_create(
|
||||
name=group_name,
|
||||
defaults={
|
||||
'variables':json.dumps(mem_group.variables),
|
||||
'description':'imported'
|
||||
'description':group_desc
|
||||
}
|
||||
)[0]
|
||||
logger.debug('Group "%s" added', group.name)
|
||||
@@ -776,8 +777,9 @@ class Command(BaseCommand):
|
||||
# Create any new hosts.
|
||||
for mem_host_name in sorted(mem_host_names_to_update):
|
||||
mem_host = self.all_group.all_hosts[mem_host_name]
|
||||
host_attrs = dict(variables=json.dumps(mem_host.variables),
|
||||
description='imported')
|
||||
import_vars = mem_host.variables
|
||||
host_desc = import_vars.pop('_awx_description', 'imported')
|
||||
host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
|
||||
enabled = self._get_enabled(mem_host.variables)
|
||||
if enabled is not None:
|
||||
host_attrs['enabled'] = enabled
|
||||
@@ -1078,7 +1080,7 @@ class Command(BaseCommand):
|
||||
if settings.SQL_DEBUG:
|
||||
logger.warning('update computed fields took %d queries',
|
||||
len(connection.queries) - queries_before2)
|
||||
# Check if the license is valid.
|
||||
# Check if the license is valid.
|
||||
# If the license is not valid, a CommandError will be thrown,
|
||||
# and inventory update will be marked as invalid.
|
||||
# with transaction.atomic() will roll back the changes.
|
||||
|
||||
@@ -16,31 +16,24 @@ class InstanceNotFound(Exception):
|
||||
super(InstanceNotFound, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
class RegisterQueue:
|
||||
def __init__(self, queuename, controller, instance_percent, inst_min, hostname_list):
|
||||
self.instance_not_found_err = None
|
||||
self.queuename = queuename
|
||||
self.controller = controller
|
||||
self.instance_percent = instance_percent
|
||||
self.instance_min = inst_min
|
||||
self.hostname_list = hostname_list
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--queuename', dest='queuename', type=str,
|
||||
help='Queue to create/update')
|
||||
parser.add_argument('--hostnames', dest='hostnames', type=str,
|
||||
help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)')
|
||||
parser.add_argument('--controller', dest='controller', type=str,
|
||||
default='', help='The controlling group (makes this an isolated group)')
|
||||
parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0,
|
||||
help='The percentage of active instances that will be assigned to this group'),
|
||||
parser.add_argument('--instance_minimum', dest='instance_minimum', type=int, default=0,
|
||||
help='The minimum number of instance that will be retained for this group from available instances')
|
||||
|
||||
|
||||
def get_create_update_instance_group(self, queuename, instance_percent, instance_min):
|
||||
def get_create_update_instance_group(self):
|
||||
created = False
|
||||
changed = False
|
||||
|
||||
(ig, created) = InstanceGroup.objects.get_or_create(name=queuename)
|
||||
if ig.policy_instance_percentage != instance_percent:
|
||||
ig.policy_instance_percentage = instance_percent
|
||||
(ig, created) = InstanceGroup.objects.get_or_create(name=self.queuename)
|
||||
if ig.policy_instance_percentage != self.instance_percent:
|
||||
ig.policy_instance_percentage = self.instance_percent
|
||||
changed = True
|
||||
if ig.policy_instance_minimum != instance_min:
|
||||
ig.policy_instance_minimum = instance_min
|
||||
if ig.policy_instance_minimum != self.instance_min:
|
||||
ig.policy_instance_minimum = self.instance_min
|
||||
changed = True
|
||||
|
||||
if changed:
|
||||
@@ -48,12 +41,12 @@ class Command(BaseCommand):
|
||||
|
||||
return (ig, created, changed)
|
||||
|
||||
def update_instance_group_controller(self, ig, controller):
|
||||
def update_instance_group_controller(self, ig):
|
||||
changed = False
|
||||
control_ig = None
|
||||
|
||||
if controller:
|
||||
control_ig = InstanceGroup.objects.filter(name=controller).first()
|
||||
if self.controller:
|
||||
control_ig = InstanceGroup.objects.filter(name=self.controller).first()
|
||||
|
||||
if control_ig and ig.controller_id != control_ig.pk:
|
||||
ig.controller = control_ig
|
||||
@@ -62,10 +55,10 @@ class Command(BaseCommand):
|
||||
|
||||
return (control_ig, changed)
|
||||
|
||||
def add_instances_to_group(self, ig, hostname_list):
|
||||
def add_instances_to_group(self, ig):
|
||||
changed = False
|
||||
|
||||
instance_list_unique = set([x.strip() for x in hostname_list if x])
|
||||
instance_list_unique = set([x.strip() for x in self.hostname_list if x])
|
||||
instances = []
|
||||
for inst_name in instance_list_unique:
|
||||
instance = Instance.objects.filter(hostname=inst_name)
|
||||
@@ -86,43 +79,61 @@ class Command(BaseCommand):
|
||||
|
||||
return (instances, changed)
|
||||
|
||||
def handle(self, **options):
|
||||
instance_not_found_err = None
|
||||
queuename = options.get('queuename')
|
||||
if not queuename:
|
||||
raise CommandError("Specify `--queuename` to use this command.")
|
||||
ctrl = options.get('controller')
|
||||
inst_per = options.get('instance_percent')
|
||||
inst_min = options.get('instance_minimum')
|
||||
hostname_list = []
|
||||
if options.get('hostnames'):
|
||||
hostname_list = options.get('hostnames').split(",")
|
||||
|
||||
def register(self):
|
||||
with advisory_lock('cluster_policy_lock'):
|
||||
with transaction.atomic():
|
||||
changed2 = False
|
||||
changed3 = False
|
||||
(ig, created, changed1) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
|
||||
(ig, created, changed1) = self.get_create_update_instance_group()
|
||||
if created:
|
||||
print("Creating instance group {}".format(ig.name))
|
||||
elif not created:
|
||||
print("Instance Group already registered {}".format(ig.name))
|
||||
|
||||
if ctrl:
|
||||
(ig_ctrl, changed2) = self.update_instance_group_controller(ig, ctrl)
|
||||
if self.controller:
|
||||
(ig_ctrl, changed2) = self.update_instance_group_controller(ig)
|
||||
if changed2:
|
||||
print("Set controller group {} on {}.".format(ctrl, queuename))
|
||||
print("Set controller group {} on {}.".format(self.controller, self.queuename))
|
||||
|
||||
try:
|
||||
(instances, changed3) = self.add_instances_to_group(ig, hostname_list)
|
||||
(instances, changed3) = self.add_instances_to_group(ig)
|
||||
for i in instances:
|
||||
print("Added instance {} to {}".format(i.hostname, ig.name))
|
||||
except InstanceNotFound as e:
|
||||
instance_not_found_err = e
|
||||
self.instance_not_found_err = e
|
||||
|
||||
if any([changed1, changed2, changed3]):
|
||||
print('(changed: True)')
|
||||
|
||||
if instance_not_found_err:
|
||||
print(instance_not_found_err.message)
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--queuename', dest='queuename', type=str,
|
||||
help='Queue to create/update')
|
||||
parser.add_argument('--hostnames', dest='hostnames', type=str,
|
||||
help='Comma-Delimited Hosts to add to the Queue (will not remove already assigned instances)')
|
||||
parser.add_argument('--controller', dest='controller', type=str,
|
||||
default='', help='The controlling group (makes this an isolated group)')
|
||||
parser.add_argument('--instance_percent', dest='instance_percent', type=int, default=0,
|
||||
help='The percentage of active instances that will be assigned to this group'),
|
||||
parser.add_argument('--instance_minimum', dest='instance_minimum', type=int, default=0,
|
||||
help='The minimum number of instance that will be retained for this group from available instances')
|
||||
|
||||
|
||||
def handle(self, **options):
|
||||
queuename = options.get('queuename')
|
||||
if not queuename:
|
||||
raise CommandError("Specify `--queuename` to use this command.")
|
||||
ctrl = options.get('controller')
|
||||
inst_per = options.get('instance_percent')
|
||||
instance_min = options.get('instance_minimum')
|
||||
hostname_list = []
|
||||
if options.get('hostnames'):
|
||||
hostname_list = options.get('hostnames').split(",")
|
||||
|
||||
rq = RegisterQueue(queuename, ctrl, inst_per, instance_min, hostname_list)
|
||||
rq.register()
|
||||
if rq.instance_not_found_err:
|
||||
print(rq.instance_not_found_err.message)
|
||||
sys.exit(1)
|
||||
|
||||
@@ -5,17 +5,19 @@ import asyncio
|
||||
import datetime
|
||||
import re
|
||||
import redis
|
||||
import time
|
||||
from datetime import datetime as dt
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection
|
||||
from django.db.models import Q
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
BroadcastWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.wsbroadcast import BroadcastWebsocketManager
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
@@ -91,6 +93,36 @@ class Command(BaseCommand):
|
||||
return host_stats
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
# it's necessary to delay this import in case
|
||||
# database migrations are still running
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
executor = MigrationExecutor(connection)
|
||||
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
|
||||
registered = False
|
||||
|
||||
if not migrating:
|
||||
try:
|
||||
Instance.objects.me()
|
||||
registered = True
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
if migrating or not registered:
|
||||
# In containerized deployments, migrations happen in the task container,
|
||||
# and the services running there don't start until migrations are
|
||||
# finished.
|
||||
# *This* service runs in the web container, and it's possible that it can
|
||||
# start _before_ migrations are finished, thus causing issues with the ORM
|
||||
# queries it makes (specifically, conf.settings queries).
|
||||
# This block is meant to serve as a sort of bail-out for the situation
|
||||
# where migrations aren't yet finished (similar to the migration
|
||||
# detection middleware that the uwsgi processes have) or when instance
|
||||
# registration isn't done yet
|
||||
logger.error('AWX is currently installing/upgrading. Trying again in 5s...')
|
||||
time.sleep(5)
|
||||
return
|
||||
|
||||
if options.get('status'):
|
||||
try:
|
||||
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
|
||||
@@ -107,6 +139,7 @@ class Command(BaseCommand):
|
||||
break
|
||||
else:
|
||||
data[family.name] = family.samples[0].value
|
||||
|
||||
me = Instance.objects.me()
|
||||
hostnames = [i.hostname for i in Instance.objects.exclude(Q(hostname=me.hostname) | Q(rampart_groups__controller__isnull=False))]
|
||||
|
||||
|
||||
@@ -149,8 +149,11 @@ class InstanceManager(models.Manager):
|
||||
|
||||
def get_or_register(self):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
pod_ip = os.environ.get('MY_POD_IP')
|
||||
return self.register(ip_address=pod_ip)
|
||||
registered = self.register(ip_address=pod_ip)
|
||||
RegisterQueue('tower', None, 100, 0, []).register()
|
||||
return registered
|
||||
else:
|
||||
return (False, self.me())
|
||||
|
||||
|
||||
24
awx/main/migrations/0115_v370_schedule_set_null.py
Normal file
24
awx/main/migrations/0115_v370_schedule_set_null.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# Generated by Django 2.2.11 on 2020-05-04 02:26
|
||||
|
||||
import awx.main.utils.polymorphic
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0114_v370_remove_deprecated_manual_inventory_sources'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjob',
|
||||
name='schedule',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, to='main.Schedule'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='next_schedule',
|
||||
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplate_as_next_schedule+', to='main.Schedule'),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,34 @@
|
||||
# Generated by Django 2.2.11 on 2020-05-19 02:27
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
def remove_hipchat_notifications(apps, schema_editor):
|
||||
'''
|
||||
HipChat notifications are no longer in service, remove any that are found.
|
||||
'''
|
||||
Notification = apps.get_model('main', 'Notification')
|
||||
Notification.objects.filter(notification_type='hipchat').delete()
|
||||
NotificationTemplate = apps.get_model('main', 'NotificationTemplate')
|
||||
NotificationTemplate.objects.filter(notification_type='hipchat').delete()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0115_v370_schedule_set_null'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(remove_hipchat_notifications),
|
||||
migrations.AlterField(
|
||||
model_name='notification',
|
||||
name='notification_type',
|
||||
field=models.CharField(choices=[('email', 'Email'), ('grafana', 'Grafana'), ('irc', 'IRC'), ('mattermost', 'Mattermost'), ('pagerduty', 'Pagerduty'), ('rocketchat', 'Rocket.Chat'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('webhook', 'Webhook')], max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='notificationtemplate',
|
||||
name='notification_type',
|
||||
field=models.CharField(choices=[('email', 'Email'), ('grafana', 'Grafana'), ('irc', 'IRC'), ('mattermost', 'Mattermost'), ('pagerduty', 'Pagerduty'), ('rocketchat', 'Rocket.Chat'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('webhook', 'Webhook')], max_length=32),
|
||||
),
|
||||
]
|
||||
@@ -7,7 +7,7 @@ from collections import defaultdict
|
||||
from django.db import models, DatabaseError, connection
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc
|
||||
from django.utils.timezone import utc, now
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.encoding import force_text
|
||||
|
||||
@@ -407,11 +407,14 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
host_map = kwargs.pop('host_map', {})
|
||||
|
||||
sanitize_event_keys(kwargs, cls.VALID_KEYS)
|
||||
workflow_job_id = kwargs.pop('workflow_job_id', None)
|
||||
event = cls(**kwargs)
|
||||
if workflow_job_id:
|
||||
setattr(event, 'workflow_job_id', workflow_job_id)
|
||||
setattr(event, 'host_map', host_map)
|
||||
event._update_from_event_data()
|
||||
return event
|
||||
|
||||
@@ -484,29 +487,45 @@ class JobEvent(BasePlaybookEvent):
|
||||
if not self.job or not self.job.inventory:
|
||||
logger.info('Event {} missing job or inventory, host summaries not updated'.format(self.pk))
|
||||
return
|
||||
qs = self.job.inventory.hosts.filter(name__in=hostnames)
|
||||
job = self.job
|
||||
|
||||
from awx.main.models import Host, JobHostSummary # circular import
|
||||
all_hosts = Host.objects.filter(
|
||||
pk__in=self.host_map.values()
|
||||
).only('id')
|
||||
existing_host_ids = set(h.id for h in all_hosts)
|
||||
|
||||
summaries = dict()
|
||||
for host in hostnames:
|
||||
host_id = self.host_map.get(host, None)
|
||||
if host_id not in existing_host_ids:
|
||||
host_id = None
|
||||
host_stats = {}
|
||||
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
|
||||
try:
|
||||
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
|
||||
except AttributeError: # in case event_data[stat] isn't a dict.
|
||||
pass
|
||||
if qs.filter(name=host).exists():
|
||||
host_actual = qs.get(name=host)
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host=host_actual, host_name=host_actual.name, defaults=host_stats)
|
||||
else:
|
||||
host_summary, created = job.job_host_summaries.get_or_create(host_name=host, defaults=host_stats)
|
||||
summary = JobHostSummary(
|
||||
created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats
|
||||
)
|
||||
summary.failed = bool(summary.dark or summary.failures)
|
||||
summaries[(host_id, host)] = summary
|
||||
|
||||
JobHostSummary.objects.bulk_create(summaries.values())
|
||||
|
||||
# update the last_job_id and last_job_host_summary_id
|
||||
# in single queries
|
||||
host_mapping = dict(
|
||||
(summary['host_id'], summary['id'])
|
||||
for summary in JobHostSummary.objects.filter(job_id=job.id).values('id', 'host_id')
|
||||
)
|
||||
for h in all_hosts:
|
||||
h.last_job_id = job.id
|
||||
if h.id in host_mapping:
|
||||
h.last_job_host_summary_id = host_mapping[h.id]
|
||||
Host.objects.bulk_update(all_hosts, ['last_job_id', 'last_job_host_summary_id'])
|
||||
|
||||
if not created:
|
||||
update_fields = []
|
||||
for stat, value in host_stats.items():
|
||||
if getattr(host_summary, stat) != value:
|
||||
setattr(host_summary, stat, value)
|
||||
update_fields.append(stat)
|
||||
if update_fields:
|
||||
host_summary.save(update_fields=update_fields)
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# Python
|
||||
import datetime
|
||||
import time
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import copy
|
||||
@@ -1704,7 +1705,7 @@ class PluginFileInjector(object):
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
|
||||
if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.INVENTORY_COLLECTIONS_ROOT
|
||||
env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
|
||||
return env
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
@@ -2274,7 +2275,7 @@ class vmware(PluginFileInjector):
|
||||
"customValue", # optional
|
||||
"datastore",
|
||||
"effectiveRole",
|
||||
"guestHeartbeatStatus", # optonal
|
||||
"guestHeartbeatStatus", # optional
|
||||
"layout", # optional
|
||||
"layoutEx", # optional
|
||||
"name",
|
||||
@@ -2286,7 +2287,6 @@ class vmware(PluginFileInjector):
|
||||
"resourcePool",
|
||||
"rootSnapshot",
|
||||
"snapshot", # optional
|
||||
"tag",
|
||||
"triggeredAlarmState",
|
||||
"value"
|
||||
]
|
||||
@@ -2355,7 +2355,7 @@ class vmware(PluginFileInjector):
|
||||
})
|
||||
else:
|
||||
# default groups from script
|
||||
for entry in ('guest.guestId', '"templates" if config.template else "guests"'):
|
||||
for entry in ('config.guestId', '"templates" if config.template else "guests"'):
|
||||
ret['keyed_groups'].append({
|
||||
'prefix': '', 'separator': '',
|
||||
'key': entry
|
||||
@@ -2491,13 +2491,33 @@ class rhv(PluginFileInjector):
|
||||
"""
|
||||
plugin_name = 'ovirt'
|
||||
base_injector = 'template'
|
||||
initial_version = '2.9'
|
||||
namespace = 'ovirt'
|
||||
collection = 'ovirt_collection'
|
||||
collection = 'ovirt'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'ovirt4.py' # exception
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(rhv, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['ovirt_insecure'] = False # Default changed from script
|
||||
# TODO: process strict option upstream
|
||||
ret['compose'] = {
|
||||
'ansible_host': '(devices.values() | list)[0][0] if devices else None'
|
||||
}
|
||||
ret['keyed_groups'] = []
|
||||
for key in ('cluster', 'status'):
|
||||
ret['keyed_groups'].append({'prefix': key, 'separator': '_', 'key': key})
|
||||
ret['keyed_groups'].append({'prefix': 'tag', 'separator': '_', 'key': 'tags'})
|
||||
ret['ovirt_hostname_preference'] = ['name', 'fqdn']
|
||||
source_vars = inventory_update.source_vars_dict
|
||||
for key, value in source_vars.items():
|
||||
if key == 'plugin':
|
||||
continue
|
||||
ret[key] = value
|
||||
return ret
|
||||
|
||||
|
||||
class satellite6(PluginFileInjector):
|
||||
plugin_name = 'foreman'
|
||||
@@ -2577,40 +2597,103 @@ class satellite6(PluginFileInjector):
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
|
||||
group_patterns = '[]'
|
||||
group_prefix = 'foreman_'
|
||||
want_hostcollections = False
|
||||
want_ansible_ssh_host = False
|
||||
want_facts = True
|
||||
|
||||
foreman_opts = inventory_update.source_vars_dict.copy()
|
||||
for k, v in foreman_opts.items():
|
||||
if k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
if k == 'satellite6_group_patterns' and isinstance(v, str):
|
||||
group_patterns = v
|
||||
elif k == 'satellite6_group_prefix' and isinstance(v, str):
|
||||
group_prefix = v
|
||||
elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
|
||||
want_hostcollections = v
|
||||
elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
want_ansible_ssh_host = v
|
||||
elif k == 'satellite6_want_facts' and isinstance(v, bool):
|
||||
want_facts = v
|
||||
else:
|
||||
ret[k] = str(v)
|
||||
|
||||
# Compatibility content
|
||||
group_by_hostvar = {
|
||||
"environment": {"prefix": "foreman_environment_",
|
||||
"environment": {"prefix": "{}environment_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | "
|
||||
"regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')"}, # NOQA: W605
|
||||
"location": {"prefix": "foreman_location_",
|
||||
"regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')"},
|
||||
"location": {"prefix": "{}location_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"organization": {"prefix": "foreman_organization_",
|
||||
"key": "foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"},
|
||||
"organization": {"prefix": "{}organization_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"lifecycle_environment": {"prefix": "foreman_lifecycle_environment_",
|
||||
"key": "foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"},
|
||||
"lifecycle_environment": {"prefix": "{}lifecycle_environment_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['lifecycle_environment_name'] | "
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
|
||||
"content_view": {"prefix": "foreman_content_view_",
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"},
|
||||
"content_view": {"prefix": "{}content_view_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['content_view_name'] | "
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"}
|
||||
}
|
||||
ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by_hostvar]
|
||||
ret['legacy_hostvars'] = True
|
||||
ret['want_facts'] = True
|
||||
"lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')"}
|
||||
}
|
||||
|
||||
ret['legacy_hostvars'] = True # convert hostvar structure to the form used by the script
|
||||
ret['want_params'] = True
|
||||
ret['group_prefix'] = group_prefix
|
||||
ret['want_hostcollections'] = want_hostcollections
|
||||
ret['want_facts'] = want_facts
|
||||
|
||||
if want_ansible_ssh_host:
|
||||
ret['compose'] = {'ansible_ssh_host': "foreman['ip6'] | default(foreman['ip'], true)"}
|
||||
ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by_hostvar]
|
||||
|
||||
def form_keyed_group(group_pattern):
|
||||
"""
|
||||
Converts foreman group_pattern to
|
||||
inventory plugin keyed_group
|
||||
|
||||
e.g. {app_param}-{tier_param}-{dc_param}
|
||||
becomes
|
||||
"%s-%s-%s" | format(app_param, tier_param, dc_param)
|
||||
"""
|
||||
if type(group_pattern) is not str:
|
||||
return None
|
||||
params = re.findall('{[^}]*}', group_pattern)
|
||||
if len(params) == 0:
|
||||
return None
|
||||
|
||||
param_names = []
|
||||
for p in params:
|
||||
param_names.append(p[1:-1].strip()) # strip braces and space
|
||||
|
||||
# form keyed_group key by
|
||||
# replacing curly braces with '%s'
|
||||
# (for use with jinja's format filter)
|
||||
key = group_pattern
|
||||
for p in params:
|
||||
key = key.replace(p, '%s', 1)
|
||||
|
||||
# apply jinja filter to key
|
||||
key = '"{}" | format({})'.format(key, ', '.join(param_names))
|
||||
|
||||
keyed_group = {'key': key,
|
||||
'separator': ''}
|
||||
return keyed_group
|
||||
|
||||
try:
|
||||
group_patterns = json.loads(group_patterns)
|
||||
|
||||
if type(group_patterns) is list:
|
||||
for group_pattern in group_patterns:
|
||||
keyed_group = form_keyed_group(group_pattern)
|
||||
if keyed_group:
|
||||
ret['keyed_groups'].append(keyed_group)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning('Could not parse group_patterns. Expected JSON-formatted string, found: {}'
|
||||
.format(group_patterns))
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
@@ -439,13 +439,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
field = self._meta.get_field(field_name)
|
||||
if isinstance(field, models.ManyToManyField):
|
||||
old_value = set(old_value.all())
|
||||
if getattr(self, '_deprecated_credential_launch', False):
|
||||
# TODO: remove this code branch when support for `extra_credentials` goes away
|
||||
new_value = set(kwargs[field_name])
|
||||
else:
|
||||
new_value = set(kwargs[field_name]) - old_value
|
||||
if not new_value:
|
||||
continue
|
||||
new_value = set(kwargs[field_name]) - old_value
|
||||
if not new_value:
|
||||
continue
|
||||
|
||||
if new_value == old_value:
|
||||
# no-op case: Fields the same as template's value
|
||||
@@ -1133,20 +1129,6 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
self.failed = bool(self.dark or self.failures)
|
||||
update_fields.append('failed')
|
||||
super(JobHostSummary, self).save(*args, **kwargs)
|
||||
self.update_host_last_job_summary()
|
||||
|
||||
def update_host_last_job_summary(self):
|
||||
update_fields = []
|
||||
if self.host is None:
|
||||
return
|
||||
if self.host.last_job_id != self.job_id:
|
||||
self.host.last_job_id = self.job_id
|
||||
update_fields.append('last_job_id')
|
||||
if self.host.last_job_host_summary_id != self.id:
|
||||
self.host.last_job_host_summary_id = self.id
|
||||
update_fields.append('last_job_host_summary_id')
|
||||
if update_fields:
|
||||
self.host.save(update_fields=update_fields)
|
||||
|
||||
|
||||
class SystemJobOptions(BaseModel):
|
||||
|
||||
@@ -23,7 +23,6 @@ from awx.main.notifications.email_backend import CustomEmailBackend
|
||||
from awx.main.notifications.slack_backend import SlackBackend
|
||||
from awx.main.notifications.twilio_backend import TwilioBackend
|
||||
from awx.main.notifications.pagerduty_backend import PagerDutyBackend
|
||||
from awx.main.notifications.hipchat_backend import HipChatBackend
|
||||
from awx.main.notifications.webhook_backend import WebhookBackend
|
||||
from awx.main.notifications.mattermost_backend import MattermostBackend
|
||||
from awx.main.notifications.grafana_backend import GrafanaBackend
|
||||
@@ -44,7 +43,6 @@ class NotificationTemplate(CommonModelNameNotUnique):
|
||||
('twilio', _('Twilio'), TwilioBackend),
|
||||
('pagerduty', _('Pagerduty'), PagerDutyBackend),
|
||||
('grafana', _('Grafana'), GrafanaBackend),
|
||||
('hipchat', _('HipChat'), HipChatBackend),
|
||||
('webhook', _('Webhook'), WebhookBackend),
|
||||
('mattermost', _('Mattermost'), MattermostBackend),
|
||||
('rocketchat', _('Rocket.Chat'), RocketChatBackend),
|
||||
|
||||
@@ -199,7 +199,7 @@ class ProjectOptions(models.Model):
|
||||
results = []
|
||||
project_path = self.get_project_path()
|
||||
if project_path:
|
||||
for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=True):
|
||||
for dirpath, dirnames, filenames in os.walk(smart_str(project_path), followlinks=settings.AWX_SHOW_PLAYBOOK_LINKS):
|
||||
if skip_directory(dirpath):
|
||||
continue
|
||||
for filename in filenames:
|
||||
|
||||
@@ -150,7 +150,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
default=None,
|
||||
editable=False,
|
||||
related_name='%(class)s_as_next_schedule+',
|
||||
on_delete=models.SET_NULL,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
)
|
||||
status = models.CharField(
|
||||
max_length=32,
|
||||
@@ -413,9 +413,8 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, Notificatio
|
||||
if 'extra_vars' in validated_kwargs:
|
||||
unified_job.handle_extra_data(validated_kwargs['extra_vars'])
|
||||
|
||||
if not getattr(self, '_deprecated_credential_launch', False):
|
||||
# Create record of provided prompts for relaunch and rescheduling
|
||||
unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||
# Create record of provided prompts for relaunch and rescheduling
|
||||
unified_job.create_config_from_prompts(kwargs, parent=self)
|
||||
|
||||
# manually issue the create activity stream entry _after_ M2M relations
|
||||
# have been associated to the UJ
|
||||
@@ -587,7 +586,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique
|
||||
null=True,
|
||||
default=None,
|
||||
editable=False,
|
||||
on_delete=models.SET_NULL,
|
||||
on_delete=polymorphic.SET_NULL,
|
||||
)
|
||||
dependent_jobs = models.ManyToManyField(
|
||||
'self',
|
||||
|
||||
@@ -13,6 +13,19 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
|
||||
|
||||
DEFAULT_APPROVAL_RUNNING_MSG = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG
|
||||
DEFAULT_APPROVAL_RUNNING_BODY = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_BODY
|
||||
|
||||
DEFAULT_APPROVAL_APPROVED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG
|
||||
DEFAULT_APPROVAL_APPROVED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_BODY
|
||||
|
||||
DEFAULT_APPROVAL_TIMEOUT_MSG = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG
|
||||
DEFAULT_APPROVAL_TIMEOUT_BODY = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_BODY
|
||||
|
||||
DEFAULT_APPROVAL_DENIED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG
|
||||
DEFAULT_APPROVAL_DENIED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_BODY
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.grafana_backend')
|
||||
|
||||
@@ -25,31 +38,13 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
sender_parameter = None
|
||||
|
||||
DEFAULT_BODY = "{{ job_metadata }}"
|
||||
default_messages = {
|
||||
"started": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"success": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"error": {
|
||||
"body": DEFAULT_BODY, "message": CustomNotificationBase.DEFAULT_MSG
|
||||
},
|
||||
"workflow_approval": {
|
||||
"running": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG, "body": None
|
||||
},
|
||||
"approved": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG, "body": None
|
||||
},
|
||||
"timed_out": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG, "body": None
|
||||
},
|
||||
"denied": {
|
||||
"message": CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG, "body": None
|
||||
}
|
||||
}
|
||||
}
|
||||
default_messages = {"started": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
|
||||
"success": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
|
||||
"error": {"body": DEFAULT_BODY, "message": DEFAULT_MSG},
|
||||
"workflow_approval": {"running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": DEFAULT_APPROVAL_RUNNING_BODY},
|
||||
"approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG,"body": DEFAULT_APPROVAL_APPROVED_BODY},
|
||||
"timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": DEFAULT_APPROVAL_TIMEOUT_BODY},
|
||||
"denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": DEFAULT_APPROVAL_DENIED_BODY}}}
|
||||
|
||||
def __init__(self, grafana_key,dashboardId=None, panelId=None, annotation_tags=None, grafana_no_verify_ssl=False, isRegion=True,
|
||||
fail_silently=False, **kwargs):
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
# Copyright (c) 2016 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
import logging
|
||||
|
||||
import requests
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.hipchat_backend')
|
||||
|
||||
|
||||
class HipChatBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
|
||||
init_parameters = {"token": {"label": "Token", "type": "password"},
|
||||
"rooms": {"label": "Destination Rooms", "type": "list"},
|
||||
"color": {"label": "Notification Color", "type": "string"},
|
||||
"api_url": {"label": "API Url (e.g: https://mycompany.hipchat.com)", "type": "string"},
|
||||
"notify": {"label": "Notify room", "type": "bool"},
|
||||
"message_from": {"label": "Label to be shown with notification", "type": "string"}}
|
||||
recipient_parameter = "rooms"
|
||||
sender_parameter = "message_from"
|
||||
|
||||
def __init__(self, token, color, api_url, notify, fail_silently=False, **kwargs):
|
||||
super(HipChatBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.token = token
|
||||
if color is not None:
|
||||
self.color = color.lower()
|
||||
self.api_url = api_url
|
||||
self.notify = notify
|
||||
|
||||
def send_messages(self, messages):
|
||||
sent_messages = 0
|
||||
|
||||
for m in messages:
|
||||
for rcp in m.recipients():
|
||||
r = requests.post("{}/v2/room/{}/notification".format(self.api_url, rcp),
|
||||
params={"auth_token": self.token},
|
||||
verify=False,
|
||||
json={"color": self.color,
|
||||
"message": m.subject,
|
||||
"notify": self.notify,
|
||||
"from": m.from_email,
|
||||
"message_format": "text"})
|
||||
if r.status_code != 204:
|
||||
logger.error(smart_text(_("Error sending messages: {}").format(r.text)))
|
||||
if not self.fail_silently:
|
||||
raise Exception(smart_text(_("Error sending message to hipchat: {}").format(r.text)))
|
||||
sent_messages += 1
|
||||
return sent_messages
|
||||
@@ -3,7 +3,6 @@
|
||||
|
||||
import logging
|
||||
import requests
|
||||
import json
|
||||
|
||||
from django.utils.encoding import smart_text
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
@@ -45,7 +44,7 @@ class MattermostBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
payload['text'] = m.subject
|
||||
|
||||
r = requests.post("{}".format(m.recipients()[0]),
|
||||
data=json.dumps(payload), verify=(not self.mattermost_no_verify_ssl))
|
||||
json=payload, verify=(not self.mattermost_no_verify_ssl))
|
||||
if r.status_code >= 400:
|
||||
logger.error(smart_text(_("Error sending notification mattermost: {}").format(r.text)))
|
||||
if not self.fail_silently:
|
||||
|
||||
@@ -11,9 +11,20 @@ from django.utils.translation import ugettext_lazy as _
|
||||
from awx.main.notifications.base import AWXBaseEmailBackend
|
||||
from awx.main.notifications.custom_notification_base import CustomNotificationBase
|
||||
|
||||
DEFAULT_BODY = CustomNotificationBase.DEFAULT_BODY
|
||||
DEFAULT_MSG = CustomNotificationBase.DEFAULT_MSG
|
||||
|
||||
DEFAULT_APPROVAL_RUNNING_MSG = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_MSG
|
||||
DEFAULT_APPROVAL_RUNNING_BODY = CustomNotificationBase.DEFAULT_APPROVAL_RUNNING_BODY
|
||||
|
||||
DEFAULT_APPROVAL_APPROVED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_MSG
|
||||
DEFAULT_APPROVAL_APPROVED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_APPROVED_BODY
|
||||
|
||||
DEFAULT_APPROVAL_TIMEOUT_MSG = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_MSG
|
||||
DEFAULT_APPROVAL_TIMEOUT_BODY = CustomNotificationBase.DEFAULT_APPROVAL_TIMEOUT_BODY
|
||||
|
||||
DEFAULT_APPROVAL_DENIED_MSG = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_MSG
|
||||
DEFAULT_APPROVAL_DENIED_BODY = CustomNotificationBase.DEFAULT_APPROVAL_DENIED_BODY
|
||||
|
||||
logger = logging.getLogger('awx.main.notifications.pagerduty_backend')
|
||||
|
||||
|
||||
@@ -30,10 +41,10 @@ class PagerDutyBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
default_messages = {"started": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"success": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"error": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"workflow_approval": {"running": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"approved": {"message": DEFAULT_MSG,"body": DEFAULT_BODY},
|
||||
"timed_out": {"message": DEFAULT_MSG, "body": DEFAULT_BODY},
|
||||
"denied": {"message": DEFAULT_MSG, "body": DEFAULT_BODY}}}
|
||||
"workflow_approval": {"running": {"message": DEFAULT_APPROVAL_RUNNING_MSG, "body": DEFAULT_APPROVAL_RUNNING_BODY},
|
||||
"approved": {"message": DEFAULT_APPROVAL_APPROVED_MSG,"body": DEFAULT_APPROVAL_APPROVED_BODY},
|
||||
"timed_out": {"message": DEFAULT_APPROVAL_TIMEOUT_MSG, "body": DEFAULT_APPROVAL_TIMEOUT_BODY},
|
||||
"denied": {"message": DEFAULT_APPROVAL_DENIED_MSG, "body": DEFAULT_APPROVAL_DENIED_BODY}}}
|
||||
|
||||
def __init__(self, subdomain, token, fail_silently=False, **kwargs):
|
||||
super(PagerDutyBackend, self).__init__(fail_silently=fail_silently)
|
||||
|
||||
@@ -1,14 +1,37 @@
|
||||
import redis
|
||||
import logging
|
||||
|
||||
from django.conf.urls import url
|
||||
from django.conf import settings
|
||||
|
||||
from channels.auth import AuthMiddlewareStack
|
||||
from channels.routing import ProtocolTypeRouter, URLRouter
|
||||
|
||||
from . import consumers
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.routing')
|
||||
|
||||
|
||||
class AWXProtocolTypeRouter(ProtocolTypeRouter):
|
||||
def __init__(self, *args, **kwargs):
|
||||
try:
|
||||
r = redis.Redis.from_url(settings.BROKER_URL)
|
||||
for k in r.scan_iter('asgi:*', 500):
|
||||
logger.debug(f"cleaning up Redis key {k}")
|
||||
r.delete(k)
|
||||
except redis.exceptions.RedisError as e:
|
||||
logger.warn("encountered an error communicating with redis.")
|
||||
raise e
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
|
||||
websocket_urlpatterns = [
|
||||
url(r'websocket/$', consumers.EventConsumer),
|
||||
url(r'websocket/broadcast/$', consumers.BroadcastConsumer),
|
||||
]
|
||||
|
||||
application = ProtocolTypeRouter({
|
||||
application = AWXProtocolTypeRouter({
|
||||
'websocket': AuthMiddlewareStack(
|
||||
URLRouter(websocket_urlpatterns)
|
||||
),
|
||||
|
||||
@@ -152,8 +152,8 @@ class SimpleDAG(object):
|
||||
return self._get_children_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
for l in self.node_from_edges_by_label.keys():
|
||||
nodes.extend(self._get_children_by_label(this_ord, l))
|
||||
for label_obj in self.node_from_edges_by_label.keys():
|
||||
nodes.extend(self._get_children_by_label(this_ord, label_obj))
|
||||
return nodes
|
||||
|
||||
def _get_parents_by_label(self, node_index, label):
|
||||
@@ -168,8 +168,8 @@ class SimpleDAG(object):
|
||||
return self._get_parents_by_label(this_ord, label)
|
||||
else:
|
||||
nodes = []
|
||||
for l in self.node_to_edges_by_label.keys():
|
||||
nodes.extend(self._get_parents_by_label(this_ord, l))
|
||||
for label_obj in self.node_to_edges_by_label.keys():
|
||||
nodes.extend(self._get_parents_by_label(this_ord, label_obj))
|
||||
return nodes
|
||||
|
||||
def get_root_nodes(self):
|
||||
|
||||
@@ -10,7 +10,7 @@ import random
|
||||
|
||||
# Django
|
||||
from django.db import transaction, connection
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.translation import ugettext_lazy as _, gettext_noop
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
# AWX
|
||||
@@ -114,7 +114,7 @@ class TaskManager():
|
||||
logger.info('Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'.format(
|
||||
job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))
|
||||
display_list = [spawn_node.unified_job_template] + workflow_ancestors
|
||||
job.job_explanation = _(
|
||||
job.job_explanation = gettext_noop(
|
||||
"Workflow Job spawned from workflow could not start because it "
|
||||
"would result in recursion (spawn order, most recent first: {})"
|
||||
).format(', '.join(['<{}>'.format(tmp) for tmp in display_list]))
|
||||
@@ -123,8 +123,8 @@ class TaskManager():
|
||||
job.id, spawn_node.unified_job_template.pk, [wa.pk for wa in workflow_ancestors]))
|
||||
if not job._resources_sufficient_for_launch():
|
||||
can_start = False
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
job.job_explanation = gettext_noop("Job spawned from workflow could not start because it "
|
||||
"was missing a related resource such as project or inventory")
|
||||
if can_start:
|
||||
if workflow_job.start_args:
|
||||
start_args = json.loads(decrypt_field(workflow_job, 'start_args'))
|
||||
@@ -132,8 +132,8 @@ class TaskManager():
|
||||
start_args = {}
|
||||
can_start = job.signal_start(**start_args)
|
||||
if not can_start:
|
||||
job.job_explanation = _("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
job.job_explanation = gettext_noop("Job spawned from workflow could not start because it "
|
||||
"was not in the right state or required manual credentials")
|
||||
if not can_start:
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
@@ -173,7 +173,7 @@ class TaskManager():
|
||||
workflow_job.status = new_status
|
||||
if reason:
|
||||
logger.info(reason)
|
||||
workflow_job.job_explanation = _("No error handling paths found, marking workflow as failed")
|
||||
workflow_job.job_explanation = gettext_noop("No error handling paths found, marking workflow as failed")
|
||||
update_fields.append('job_explanation')
|
||||
workflow_job.start_args = '' # blank field to remove encrypted passwords
|
||||
workflow_job.save(update_fields=update_fields)
|
||||
@@ -581,3 +581,4 @@ class TaskManager():
|
||||
logger.debug("Starting Scheduler")
|
||||
with task_manager_bulk_reschedule():
|
||||
self._schedule()
|
||||
logger.debug("Finishing Scheduler")
|
||||
|
||||
@@ -150,9 +150,9 @@ def rbac_activity_stream(instance, sender, **kwargs):
|
||||
|
||||
|
||||
def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
|
||||
for l in instance.labels.all():
|
||||
if l.is_candidate_for_detach():
|
||||
l.delete()
|
||||
for label in instance.labels.all():
|
||||
if label.is_candidate_for_detach():
|
||||
label.delete()
|
||||
|
||||
|
||||
def save_related_job_templates(sender, instance, **kwargs):
|
||||
@@ -393,7 +393,7 @@ def activity_stream_create(sender, instance, created, **kwargs):
|
||||
'{} ({})'.format(c.name, c.id)
|
||||
for c in instance.credentials.iterator()
|
||||
]
|
||||
changes['labels'] = [l.name for l in instance.labels.iterator()]
|
||||
changes['labels'] = [label.name for label in instance.labels.iterator()]
|
||||
if 'extra_vars' in changes:
|
||||
changes['extra_vars'] = instance.display_extra_vars()
|
||||
if type(instance) == OAuth2AccessToken:
|
||||
|
||||
@@ -31,7 +31,7 @@ from django.db.models.fields.related import ForeignKey
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.encoding import smart_str
|
||||
from django.contrib.auth.models import User
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
from django.utils.translation import ugettext_lazy as _, gettext_noop
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
|
||||
@@ -67,7 +67,7 @@ from awx.main.queue import CallbackQueueDispatcher
|
||||
from awx.main.isolated import manager as isolated_manager
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.utils import (get_ssh_version, update_scm_url,
|
||||
from awx.main.utils import (update_scm_url,
|
||||
ignore_inventory_computed_fields,
|
||||
ignore_inventory_group_removal, extract_ansible_vars, schedule_task_manager,
|
||||
get_awx_version)
|
||||
@@ -141,7 +141,7 @@ def dispatch_startup():
|
||||
# and Tower fall out of use/support, we can probably just _assume_ that
|
||||
# everybody has moved to bigint, and remove this code entirely
|
||||
enforce_bigint_pk_migration()
|
||||
|
||||
|
||||
# Update Tower's rsyslog.conf file based on loggins settings in the db
|
||||
reconfigure_rsyslog()
|
||||
|
||||
@@ -288,7 +288,7 @@ def handle_setting_changes(setting_keys):
|
||||
setting.startswith('LOG_AGGREGATOR')
|
||||
for setting in setting_keys
|
||||
]):
|
||||
connection.on_commit(reconfigure_rsyslog)
|
||||
reconfigure_rsyslog()
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
@@ -358,6 +358,9 @@ def gather_analytics():
|
||||
from rest_framework.fields import DateTimeField
|
||||
if not settings.INSIGHTS_TRACKING_STATE:
|
||||
return
|
||||
if not (settings.AUTOMATION_ANALYTICS_URL and settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD):
|
||||
logger.debug('Not gathering analytics, configuration is invalid')
|
||||
return
|
||||
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
|
||||
if last_gather:
|
||||
last_time = DateTimeField().to_internal_value(last_gather.value)
|
||||
@@ -558,7 +561,8 @@ def awx_periodic_scheduler():
|
||||
continue
|
||||
if not can_start:
|
||||
new_unified_job.status = 'failed'
|
||||
new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
|
||||
new_unified_job.job_explanation = gettext_noop("Scheduled job could not start because it \
|
||||
was not in the right state or required manual credentials")
|
||||
new_unified_job.save(update_fields=['status', 'job_explanation'])
|
||||
new_unified_job.websocket_emit_status("failed")
|
||||
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
||||
@@ -897,21 +901,14 @@ class BaseTask(object):
|
||||
private_data = self.build_private_data(instance, private_data_dir)
|
||||
private_data_files = {'credentials': {}}
|
||||
if private_data is not None:
|
||||
ssh_ver = get_ssh_version()
|
||||
ssh_too_old = True if ssh_ver == "unknown" else Version(ssh_ver) < Version("6.0")
|
||||
openssh_keys_supported = ssh_ver != "unknown" and Version(ssh_ver) >= Version("6.5")
|
||||
for credential, data in private_data.get('credentials', {}).items():
|
||||
# Bail out now if a private key was provided in OpenSSH format
|
||||
# and we're running an earlier version (<6.5).
|
||||
if 'OPENSSH PRIVATE KEY' in data and not openssh_keys_supported:
|
||||
raise RuntimeError(OPENSSH_KEY_ERROR)
|
||||
# OpenSSH formatted keys must have a trailing newline to be
|
||||
# accepted by ssh-add.
|
||||
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
|
||||
data += '\n'
|
||||
# For credentials used with ssh-add, write to a named pipe which
|
||||
# will be read then closed, instead of leaving the SSH key on disk.
|
||||
if credential and credential.credential_type.namespace in ('ssh', 'scm') and not ssh_too_old:
|
||||
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
|
||||
try:
|
||||
os.mkdir(os.path.join(private_data_dir, 'env'))
|
||||
except OSError as e:
|
||||
@@ -1222,6 +1219,8 @@ class BaseTask(object):
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
if event_data.get('event') == 'playbook_on_stats':
|
||||
event_data['host_map'] = self.host_map
|
||||
|
||||
if isinstance(self, RunProjectUpdate):
|
||||
# it's common for Ansible's SCM modules to print
|
||||
@@ -1232,10 +1231,12 @@ class BaseTask(object):
|
||||
# this is a _little_ expensive to filter
|
||||
# with regex, but project updates don't have many events,
|
||||
# so it *should* have a negligible performance impact
|
||||
task = event_data.get('event_data', {}).get('task_action')
|
||||
try:
|
||||
event_data_json = json.dumps(event_data)
|
||||
event_data_json = UriCleaner.remove_sensitive(event_data_json)
|
||||
event_data = json.loads(event_data_json)
|
||||
if task in ('git', 'hg', 'svn'):
|
||||
event_data_json = json.dumps(event_data)
|
||||
event_data_json = UriCleaner.remove_sensitive(event_data_json)
|
||||
event_data = json.loads(event_data_json)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
@@ -1421,7 +1422,6 @@ class BaseTask(object):
|
||||
'status_handler': self.status_handler,
|
||||
'settings': {
|
||||
'job_timeout': self.get_instance_timeout(self.instance),
|
||||
'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5),
|
||||
'suppress_ansible_output': True,
|
||||
**process_isolation_params,
|
||||
**resource_profiling_params,
|
||||
@@ -2412,7 +2412,7 @@ class RunInventoryUpdate(BaseTask):
|
||||
|
||||
@property
|
||||
def proot_show_paths(self):
|
||||
return [self.get_path_to('..', 'plugins', 'inventory'), settings.INVENTORY_COLLECTIONS_ROOT]
|
||||
return [self.get_path_to('..', 'plugins', 'inventory'), settings.AWX_ANSIBLE_COLLECTIONS_PATHS]
|
||||
|
||||
def build_private_data(self, inventory_update, private_data_dir):
|
||||
"""
|
||||
|
||||
@@ -107,11 +107,6 @@ def workflow_job_template_factory():
|
||||
return create_workflow_job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def get_ssh_version(mocker):
|
||||
return mocker.patch('awx.main.tasks.get_ssh_version', return_value='OpenSSH_6.9p1, LibreSSL 2.1.8')
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory):
|
||||
return job_template_with_survey_passwords_factory(persisted=False)
|
||||
|
||||
@@ -1 +1,20 @@
|
||||
plugin: ovirt.ovirt_collection.ovirt
|
||||
base_source_var: value_of_var
|
||||
compose:
|
||||
ansible_host: (devices.values() | list)[0][0] if devices else None
|
||||
groups:
|
||||
dev: '"dev" in tags'
|
||||
keyed_groups:
|
||||
- key: cluster
|
||||
prefix: cluster
|
||||
separator: _
|
||||
- key: status
|
||||
prefix: status
|
||||
separator: _
|
||||
- key: tags
|
||||
prefix: tag
|
||||
separator: _
|
||||
ovirt_hostname_preference:
|
||||
- name
|
||||
- fqdn
|
||||
ovirt_insecure: false
|
||||
plugin: ovirt.ovirt.ovirt
|
||||
|
||||
@@ -1,22 +1,29 @@
|
||||
base_source_var: value_of_var
|
||||
compose:
|
||||
ansible_ssh_host: foreman['ip6'] | default(foreman['ip'], true)
|
||||
group_prefix: foo_group_prefix
|
||||
keyed_groups:
|
||||
- key: foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')
|
||||
prefix: foreman_environment_
|
||||
- key: foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')
|
||||
prefix: foo_group_prefixenvironment_
|
||||
separator: ''
|
||||
- key: foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_location_
|
||||
- key: foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixlocation_
|
||||
separator: ''
|
||||
- key: foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_organization_
|
||||
- key: foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixorganization_
|
||||
separator: ''
|
||||
- key: foreman['content_facet_attributes']['lifecycle_environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_lifecycle_environment_
|
||||
- key: foreman['content_facet_attributes']['lifecycle_environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixlifecycle_environment_
|
||||
separator: ''
|
||||
- key: foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
|
||||
prefix: foreman_content_view_
|
||||
- key: foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')
|
||||
prefix: foo_group_prefixcontent_view_
|
||||
separator: ''
|
||||
- key: '"%s-%s-%s" | format(app, tier, color)'
|
||||
separator: ''
|
||||
- key: '"%s-%s" | format(app, color)'
|
||||
separator: ''
|
||||
legacy_hostvars: true
|
||||
plugin: theforeman.foreman.foreman
|
||||
want_facts: true
|
||||
want_hostcollections: true
|
||||
want_params: true
|
||||
|
||||
@@ -43,7 +43,6 @@ properties:
|
||||
- resourcePool
|
||||
- rootSnapshot
|
||||
- snapshot
|
||||
- tag
|
||||
- triggeredAlarmState
|
||||
- value
|
||||
- capability
|
||||
|
||||
@@ -6,12 +6,12 @@ user = fooo
|
||||
password = fooo
|
||||
|
||||
[ansible]
|
||||
group_patterns = foo_group_patterns
|
||||
group_patterns = ["{app}-{tier}-{color}", "{app}-{color}"]
|
||||
want_facts = True
|
||||
want_hostcollections = True
|
||||
group_prefix = foo_group_prefix
|
||||
want_ansible_ssh_host = True
|
||||
rich_params = True
|
||||
rich_params = False
|
||||
|
||||
[cache]
|
||||
path = /tmp
|
||||
|
||||
@@ -105,9 +105,6 @@ class TestSwaggerGeneration():
|
||||
'get', 'put', 'patch', 'delete'
|
||||
]
|
||||
|
||||
# Test deprecated paths
|
||||
assert paths['/api/v2/jobs/{id}/extra_credentials/']['get']['deprecated'] is True
|
||||
|
||||
@pytest.mark.parametrize('path', [
|
||||
'/api/',
|
||||
'/api/v2/',
|
||||
@@ -177,7 +174,7 @@ class TestSwaggerGeneration():
|
||||
data
|
||||
)
|
||||
data = re.sub(
|
||||
r'"action_node": "awx-[^"]+"',
|
||||
r'"action_node": "[^"]+"',
|
||||
'"action_node": "awx"',
|
||||
data
|
||||
)
|
||||
|
||||
@@ -319,11 +319,11 @@ def create_organization(name, roles=None, persisted=True, **kwargs):
|
||||
users = generate_users(org, teams, False, persisted, users=kwargs.get('users'))
|
||||
|
||||
if 'labels' in kwargs:
|
||||
for l in kwargs['labels']:
|
||||
if type(l) is Label:
|
||||
labels[l.name] = l
|
||||
for label_obj in kwargs['labels']:
|
||||
if type(label_obj) is Label:
|
||||
labels[label_obj.name] = label_obj
|
||||
else:
|
||||
labels[l] = mk_label(l, organization=org, persisted=persisted)
|
||||
labels[label_obj] = mk_label(label_obj, organization=org, persisted=persisted)
|
||||
|
||||
if 'notification_templates' in kwargs:
|
||||
for nt in kwargs['notification_templates']:
|
||||
|
||||
@@ -88,7 +88,7 @@ def test_copy_tables_unified_job_query(
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
collectors.copy_tables(time_start, tmpdir, subset="unified_jobs")
|
||||
with open(os.path.join(tmpdir, "unified_jobs_table.csv")) as f:
|
||||
lines = "".join([l for l in f])
|
||||
lines = "".join([line for line in f])
|
||||
|
||||
assert project_update_name in lines
|
||||
assert inventory_update_name in lines
|
||||
@@ -139,9 +139,9 @@ def test_copy_tables_workflow_job_node_query(sqlite_copy_expert, workflow_job):
|
||||
reader = csv.reader(f)
|
||||
# Pop the headers
|
||||
next(reader)
|
||||
lines = [l for l in reader]
|
||||
lines = [line for line in reader]
|
||||
|
||||
ids = [int(l[0]) for l in lines]
|
||||
ids = [int(line[0]) for line in lines]
|
||||
|
||||
assert ids == list(
|
||||
workflow_job.workflow_nodes.all().values_list("id", flat=True)
|
||||
|
||||
@@ -24,41 +24,6 @@ def job_template(job_template, project, inventory):
|
||||
return job_template
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credentials_filtering(get, job_template, admin,
|
||||
machine_credential, vault_credential, credential):
|
||||
job_template.credentials.add(machine_credential)
|
||||
job_template.credentials.add(vault_credential)
|
||||
job_template.credentials.add(credential)
|
||||
url = reverse(
|
||||
'api:job_template_extra_credentials_list',
|
||||
kwargs={'pk': job_template.pk}
|
||||
)
|
||||
resp = get(url, admin, expect=200)
|
||||
assert resp.data['count'] == 1
|
||||
assert resp.data['results'][0]['id'] == credential.pk
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credentials_requires_cloud_or_net(get, post, job_template, admin,
|
||||
machine_credential, vault_credential, credential,
|
||||
net_credential):
|
||||
url = reverse(
|
||||
'api:job_template_extra_credentials_list',
|
||||
kwargs={'pk': job_template.pk}
|
||||
)
|
||||
|
||||
for cred in (machine_credential, vault_credential):
|
||||
resp = post(url, {'associate': True, 'id': cred.pk}, admin, expect=400)
|
||||
assert 'Extra credentials must be network or cloud.' in smart_str(resp.content)
|
||||
|
||||
post(url, {'associate': True, 'id': credential.pk}, admin, expect=204)
|
||||
assert get(url, admin).data['count'] == 1
|
||||
|
||||
post(url, {'associate': True, 'id': net_credential.pk}, admin, expect=204)
|
||||
assert get(url, admin).data['count'] == 2
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_prevent_multiple_machine_creds(get, post, job_template, admin, machine_credential):
|
||||
url = reverse(
|
||||
@@ -115,52 +80,6 @@ def test_prevent_multiple_machine_creds_at_launch(get, post, job_template, admin
|
||||
assert 'Cannot assign multiple Machine credentials.' in smart_str(resp.content)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credentials_unique_by_kind(get, post, job_template, admin,
|
||||
credentialtype_aws):
|
||||
url = reverse(
|
||||
'api:job_template_extra_credentials_list',
|
||||
kwargs={'pk': job_template.pk}
|
||||
)
|
||||
|
||||
def _new_cred(name):
|
||||
return {
|
||||
'name': name,
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': 'bob',
|
||||
'password': 'secret',
|
||||
}
|
||||
}
|
||||
|
||||
post(url, _new_cred('First Cred'), admin, expect=201)
|
||||
assert get(url, admin).data['count'] == 1
|
||||
|
||||
resp = post(url, _new_cred('Second Cred'), admin, expect=400)
|
||||
assert 'Cannot assign multiple Amazon Web Services credentials.' in smart_str(resp.content)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credentials_at_launch(get, post, job_template, admin, credential):
|
||||
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
|
||||
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']
|
||||
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
|
||||
|
||||
assert len(summary_fields['credentials']) == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_modify_extra_credentials_at_launch(get, post, job_template, admin,
|
||||
machine_credential, vault_credential, credential):
|
||||
job_template.credentials.add(machine_credential)
|
||||
job_template.credentials.add(vault_credential)
|
||||
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
|
||||
pk = post(url, {'extra_credentials': [credential.pk]}, admin, expect=201).data['job']
|
||||
|
||||
summary_fields = get(reverse('api:job_detail', kwargs={'pk': pk}), admin).data['summary_fields']
|
||||
assert len(summary_fields['credentials']) == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_ssh_password_prompted_at_launch(get, post, job_template, admin, machine_credential):
|
||||
job_template.credentials.add(machine_credential)
|
||||
@@ -229,25 +148,6 @@ def test_vault_credential_with_password_at_launch(get, post, job_template, admin
|
||||
signal_start.assert_called_with(vault_password='testing123')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_creds_prompted_at_launch(get, post, job_template, admin, net_credential):
|
||||
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
|
||||
resp = post(url, {'extra_credentials': [net_credential.pk]}, admin, expect=201)
|
||||
|
||||
summary_fields = get(
|
||||
reverse('api:job_detail', kwargs={'pk': resp.data['job']}),
|
||||
admin
|
||||
).data['summary_fields']
|
||||
assert len(summary_fields['credentials']) == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_invalid_mixed_credentials_specification(get, post, job_template, admin, net_credential):
|
||||
url = reverse('api:job_template_launch', kwargs={'pk': job_template.pk})
|
||||
post(url=url, data={'credentials': [net_credential.pk], 'extra_credentials': [net_credential.pk]},
|
||||
user=admin, expect=400)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_deprecated_credential_activity_stream(patch, admin_user, machine_credential, job_template):
|
||||
job_template.credentials.add(machine_credential)
|
||||
|
||||
@@ -22,20 +22,6 @@ from awx.main.models import (
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credentials(get, organization_factory, job_template_factory, credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
jt.credentials.add(credential)
|
||||
jt.save()
|
||||
job = jt.create_unified_job()
|
||||
|
||||
url = reverse('api:job_extra_credentials_list', kwargs={'pk': job.pk})
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_relaunch_permission_denied_response(
|
||||
post, get, inventory, project, credential, net_credential, machine_credential):
|
||||
@@ -50,7 +36,7 @@ def test_job_relaunch_permission_denied_response(
|
||||
r = get(job.get_absolute_url(), jt_user, expect=200)
|
||||
assert r.data['summary_fields']['user_capabilities']['start']
|
||||
|
||||
# Job has prompted extra_credential, launch denied w/ message
|
||||
# Job has prompted credential, launch denied w/ message
|
||||
job.launch_config.credentials.add(net_credential)
|
||||
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
|
||||
assert 'launched with prompted fields you do not have access to' in r.data['detail']
|
||||
@@ -70,7 +56,7 @@ def test_job_relaunch_prompts_not_accepted_response(
|
||||
r = get(job.get_absolute_url(), jt_user, expect=200)
|
||||
assert r.data['summary_fields']['user_capabilities']['start']
|
||||
|
||||
# Job has prompted extra_credential, launch denied w/ message
|
||||
# Job has prompted credential, launch denied w/ message
|
||||
job.launch_config.credentials.add(net_credential)
|
||||
r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
|
||||
|
||||
|
||||
@@ -304,7 +304,7 @@ def test_job_launch_with_default_creds(machine_credential, vault_credential, dep
|
||||
@pytest.mark.django_db
|
||||
def test_job_launch_JT_enforces_unique_credentials_kinds(machine_credential, credentialtype_aws, deploy_jobtemplate):
|
||||
"""
|
||||
JT launching should require that extra_credentials have distinct CredentialTypes
|
||||
JT launching should require that credentials have distinct CredentialTypes
|
||||
"""
|
||||
creds = []
|
||||
for i in range(2):
|
||||
|
||||
@@ -45,27 +45,6 @@ def test_create(post, project, machine_credential, inventory, alice, grant_proje
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credential_creation(get, post, organization_factory, job_template_factory, credentialtype_aws):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
|
||||
url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
|
||||
response = post(url, {
|
||||
'name': 'My Cred',
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': 'bob',
|
||||
'password': 'secret',
|
||||
}
|
||||
}, objs.superusers.admin)
|
||||
assert response.status_code == 201
|
||||
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('kind', ['scm', 'insights'])
|
||||
def test_invalid_credential_kind_xfail(get, post, organization_factory, job_template_factory, kind):
|
||||
@@ -87,42 +66,6 @@ def test_invalid_credential_kind_xfail(get, post, organization_factory, job_temp
|
||||
assert 'Cannot assign a Credential of kind `{}`.'.format(kind) in response.data.values()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_extra_credential_unique_type_xfail(get, post, organization_factory, job_template_factory, credentialtype_aws):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
|
||||
url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
|
||||
response = post(url, {
|
||||
'name': 'My Cred',
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': 'bob',
|
||||
'password': 'secret',
|
||||
}
|
||||
}, objs.superusers.admin)
|
||||
assert response.status_code == 201
|
||||
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
# this request should fail because you can't assign the same type (aws)
|
||||
# twice
|
||||
response = post(url, {
|
||||
'name': 'My Cred',
|
||||
'credential_type': credentialtype_aws.pk,
|
||||
'inputs': {
|
||||
'username': 'joe',
|
||||
'password': 'another-secret',
|
||||
}
|
||||
}, objs.superusers.admin)
|
||||
assert response.status_code == 400
|
||||
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, inventory, settings):
|
||||
project.use_role.members.add(alice)
|
||||
@@ -143,60 +86,6 @@ def test_create_with_forks_exceeding_maximum_xfail(alice, post, project, invento
|
||||
assert 'Maximum number of forks (10) exceeded' in str(response.data)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_attach_extra_credential(get, post, organization_factory, job_template_factory, credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
|
||||
url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
|
||||
response = post(url, {
|
||||
'associate': True,
|
||||
'id': credential.id,
|
||||
}, objs.superusers.admin)
|
||||
assert response.status_code == 204
|
||||
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_detach_extra_credential(get, post, organization_factory, job_template_factory, credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
jt.credentials.add(credential)
|
||||
jt.save()
|
||||
|
||||
url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
|
||||
response = post(url, {
|
||||
'disassociate': True,
|
||||
'id': credential.id,
|
||||
}, objs.superusers.admin)
|
||||
assert response.status_code == 204
|
||||
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_attach_extra_credential_wrong_kind_xfail(get, post, organization_factory, job_template_factory, machine_credential):
|
||||
"""Extra credentials only allow net + cloud credentials"""
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
|
||||
url = reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk})
|
||||
response = post(url, {
|
||||
'associate': True,
|
||||
'id': machine_credential.id,
|
||||
}, objs.superusers.admin)
|
||||
assert response.status_code == 400
|
||||
|
||||
response = get(url, user=objs.superusers.admin)
|
||||
assert response.data.get('count') == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"grant_project, grant_inventory, expect", [
|
||||
@@ -368,57 +257,6 @@ def test_launch_with_pending_deletion_inventory_workflow(get, post, organization
|
||||
assert resp.data['inventory'] == ['The inventory associated with this Workflow is being deleted.']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_launch_with_extra_credentials(get, post, organization_factory,
|
||||
job_template_factory, machine_credential,
|
||||
credential, net_credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
jt.ask_credential_on_launch = True
|
||||
jt.save()
|
||||
|
||||
resp = post(
|
||||
reverse('api:job_template_launch', kwargs={'pk': jt.pk}),
|
||||
dict(
|
||||
credentials=[machine_credential.pk, credential.pk, net_credential.pk]
|
||||
),
|
||||
objs.superusers.admin, expect=201
|
||||
)
|
||||
job_pk = resp.data.get('id')
|
||||
|
||||
resp = get(reverse('api:job_extra_credentials_list', kwargs={'pk': job_pk}), objs.superusers.admin)
|
||||
assert resp.data.get('count') == 2
|
||||
|
||||
resp = get(reverse('api:job_template_extra_credentials_list', kwargs={'pk': jt.pk}), objs.superusers.admin)
|
||||
assert resp.data.get('count') == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_launch_with_extra_credentials_not_allowed(get, post, organization_factory,
|
||||
job_template_factory, machine_credential,
|
||||
credential, net_credential):
|
||||
objs = organization_factory("org", superusers=['admin'])
|
||||
jt = job_template_factory("jt", organization=objs.organization,
|
||||
inventory='test_inv', project='test_proj').job_template
|
||||
jt.credentials.add(machine_credential)
|
||||
jt.ask_credential_on_launch = False
|
||||
jt.save()
|
||||
|
||||
resp = post(
|
||||
reverse('api:job_template_launch', kwargs={'pk': jt.pk}),
|
||||
dict(
|
||||
credentials=[machine_credential.pk, credential.pk, net_credential.pk]
|
||||
),
|
||||
objs.superusers.admin
|
||||
)
|
||||
assert 'credentials' in resp.data['ignored_fields'].keys()
|
||||
job_pk = resp.data.get('id')
|
||||
|
||||
resp = get(reverse('api:job_extra_credentials_list', kwargs={'pk': job_pk}), objs.superusers.admin)
|
||||
assert resp.data.get('count') == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_jt_without_project(inventory):
|
||||
data = dict(name="Test", job_type="run",
|
||||
|
||||
@@ -71,6 +71,18 @@ def test_node_accepts_prompted_fields(inventory, project, workflow_job_template,
|
||||
user=admin_user, expect=201)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize("field_name, field_value", [
|
||||
('all_parents_must_converge', True),
|
||||
('all_parents_must_converge', False),
|
||||
])
|
||||
def test_create_node_with_field(field_name, field_value, workflow_job_template, post, admin_user):
|
||||
url = reverse('api:workflow_job_template_workflow_nodes_list',
|
||||
kwargs={'pk': workflow_job_template.pk})
|
||||
res = post(url, {field_name: field_value}, user=admin_user, expect=201)
|
||||
assert res.data[field_name] == field_value
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestApprovalNodes():
|
||||
def test_approval_node_creation(self, post, approval_node, admin_user):
|
||||
|
||||
@@ -65,6 +65,7 @@ class TestKeyRegeneration:
|
||||
assert nc['token'].startswith(PREFIX)
|
||||
|
||||
Slack = nt.CLASS_FOR_NOTIFICATION_TYPE[nt.notification_type]
|
||||
|
||||
class TestBackend(Slack):
|
||||
|
||||
def __init__(self, *args, **kw):
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
from unittest import mock
|
||||
import pytest
|
||||
|
||||
from awx.main.models import Job, JobEvent
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.main.models import Job, JobEvent, Inventory, Host
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -61,3 +63,93 @@ def test_parent_failed(emit, event):
|
||||
assert events.count() == 2
|
||||
for e in events.all():
|
||||
assert e.failed is True
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_host_summary_generation():
|
||||
hostnames = [f'Host {i}' for i in range(100)]
|
||||
inv = Inventory()
|
||||
inv.save()
|
||||
Host.objects.bulk_create([
|
||||
Host(created=now(), modified=now(), name=h, inventory_id=inv.id)
|
||||
for h in hostnames
|
||||
])
|
||||
j = Job(inventory=inv)
|
||||
j.save()
|
||||
host_map = dict((host.name, host.id) for host in inv.hosts.all())
|
||||
JobEvent.create_from_data(
|
||||
job_id=j.pk,
|
||||
parent_uuid='abc123',
|
||||
event='playbook_on_stats',
|
||||
event_data={
|
||||
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
|
||||
'changed': {},
|
||||
'dark': {},
|
||||
'failures': {},
|
||||
'ignored': {},
|
||||
'processed': {},
|
||||
'rescued': {},
|
||||
'skipped': {},
|
||||
},
|
||||
host_map=host_map
|
||||
).save()
|
||||
|
||||
assert j.job_host_summaries.count() == len(hostnames)
|
||||
assert sorted([s.host_name for s in j.job_host_summaries.all()]) == sorted(hostnames)
|
||||
|
||||
for s in j.job_host_summaries.all():
|
||||
assert host_map[s.host_name] == s.host_id
|
||||
assert s.ok == len(s.host_name)
|
||||
assert s.changed == 0
|
||||
assert s.dark == 0
|
||||
assert s.failures == 0
|
||||
assert s.ignored == 0
|
||||
assert s.processed == 0
|
||||
assert s.rescued == 0
|
||||
assert s.skipped == 0
|
||||
|
||||
for host in Host.objects.all():
|
||||
assert host.last_job_id == j.id
|
||||
assert host.last_job_host_summary.host == host
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_host_summary_generation_with_deleted_hosts():
|
||||
hostnames = [f'Host {i}' for i in range(10)]
|
||||
inv = Inventory()
|
||||
inv.save()
|
||||
Host.objects.bulk_create([
|
||||
Host(created=now(), modified=now(), name=h, inventory_id=inv.id)
|
||||
for h in hostnames
|
||||
])
|
||||
j = Job(inventory=inv)
|
||||
j.save()
|
||||
host_map = dict((host.name, host.id) for host in inv.hosts.all())
|
||||
|
||||
# delete half of the hosts during the playbook run
|
||||
for h in inv.hosts.all()[:5]:
|
||||
h.delete()
|
||||
|
||||
JobEvent.create_from_data(
|
||||
job_id=j.pk,
|
||||
parent_uuid='abc123',
|
||||
event='playbook_on_stats',
|
||||
event_data={
|
||||
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
|
||||
'changed': {},
|
||||
'dark': {},
|
||||
'failures': {},
|
||||
'ignored': {},
|
||||
'processed': {},
|
||||
'rescued': {},
|
||||
'skipped': {},
|
||||
},
|
||||
host_map=host_map
|
||||
).save()
|
||||
|
||||
|
||||
ids = sorted([s.host_id or -1 for s in j.job_host_summaries.order_by('id').all()])
|
||||
names = sorted([s.host_name for s in j.job_host_summaries.all()])
|
||||
assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
|
||||
assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5',
|
||||
'Host 6', 'Host 7', 'Host 8', 'Host 9']
|
||||
|
||||
@@ -105,7 +105,10 @@ class TestJobNotificationMixin(object):
|
||||
assert isinstance(obj[key], dict)
|
||||
check_structure(expected_structure[key], obj[key])
|
||||
else:
|
||||
assert isinstance(obj[key], expected_structure[key])
|
||||
if key == 'job_explanation':
|
||||
assert isinstance(str(obj[key]), expected_structure[key])
|
||||
else:
|
||||
assert isinstance(obj[key], expected_structure[key])
|
||||
kwargs = {}
|
||||
if JobClass is InventoryUpdate:
|
||||
kwargs['inventory_source'] = inventory_source
|
||||
|
||||
@@ -18,6 +18,8 @@ from awx.main.dispatch.worker import BaseWorker, TaskWorker
|
||||
'''
|
||||
Prevent logger.<warn, debug, error> calls from triggering database operations
|
||||
'''
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _disable_database_settings(mocker):
|
||||
m = mocker.patch('awx.conf.settings.SettingsWrapper.all_supported_settings', new_callable=mock.PropertyMock)
|
||||
|
||||
@@ -50,7 +50,6 @@ INI_TEST_VARS = {
|
||||
'expand_hostvars': True,
|
||||
'fail_on_errors': True
|
||||
},
|
||||
'rhv': {}, # there are none
|
||||
'tower': {}, # there are none
|
||||
'vmware': {
|
||||
'alias_pattern': "{{ config.foo }}",
|
||||
@@ -64,11 +63,10 @@ INI_TEST_VARS = {
|
||||
'tags': 'Creator:jmarshall, peanutbutter:jelly'
|
||||
},
|
||||
'satellite6': {
|
||||
'satellite6_group_patterns': 'foo_group_patterns',
|
||||
'satellite6_group_patterns': '["{app}-{tier}-{color}", "{app}-{color}"]',
|
||||
'satellite6_group_prefix': 'foo_group_prefix',
|
||||
'satellite6_want_hostcollections': True,
|
||||
'satellite6_want_ansible_ssh_host': True,
|
||||
'satellite6_rich_params': True,
|
||||
'satellite6_want_facts': True
|
||||
|
||||
},
|
||||
@@ -79,6 +77,12 @@ INI_TEST_VARS = {
|
||||
'nest_tags': 'yes',
|
||||
'suffix': '.ppt',
|
||||
'prefer_ipv4': 'yes'
|
||||
},
|
||||
'rhv': { # options specific to the plugin
|
||||
'ovirt_insecure': False,
|
||||
'groups': {
|
||||
'dev': '"dev" in tags'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
37
awx/main/tests/functional/test_labels.py
Normal file
37
awx/main/tests/functional/test_labels.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import pytest
|
||||
|
||||
# awx
|
||||
from awx.main.models import WorkflowJobTemplate
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_workflow_can_add_label(org_admin,organization, post):
|
||||
# create workflow
|
||||
wfjt = WorkflowJobTemplate.objects.create(name='test-wfjt')
|
||||
wfjt.organization = organization
|
||||
# create label
|
||||
wfjt.admin_role.members.add(org_admin)
|
||||
url = reverse('api:workflow_job_template_label_list', kwargs={'pk': wfjt.pk})
|
||||
data = {'name': 'dev-label', 'organization': organization.id}
|
||||
label = post(url, user=org_admin, data=data, expect=201)
|
||||
assert label.data['name'] == 'dev-label'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_workflow_can_remove_label(org_admin, organization, post, get):
|
||||
# create workflow
|
||||
wfjt = WorkflowJobTemplate.objects.create(name='test-wfjt')
|
||||
wfjt.organization = organization
|
||||
# create label
|
||||
wfjt.admin_role.members.add(org_admin)
|
||||
label = wfjt.labels.create(name='dev-label', organization=organization)
|
||||
# delete label
|
||||
url = reverse('api:workflow_job_template_label_list', kwargs={'pk': wfjt.pk})
|
||||
data = {
|
||||
"id": label.pk,
|
||||
"disassociate": True
|
||||
}
|
||||
post(url, data, org_admin, expect=204)
|
||||
results = get(url, org_admin, expect=200)
|
||||
assert results.data['count'] == 0
|
||||
@@ -128,7 +128,7 @@ def test_job_template_access_admin(role_names, jt_linked, rando):
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_template_extra_credentials_prompts_access(
|
||||
def test_job_template_credentials_prompts_access(
|
||||
rando, post, inventory, project, machine_credential, vault_credential):
|
||||
jt = JobTemplate.objects.create(
|
||||
name = 'test-jt',
|
||||
@@ -149,14 +149,14 @@ def test_job_template_extra_credentials_prompts_access(
|
||||
@pytest.mark.django_db
|
||||
class TestJobTemplateCredentials:
|
||||
|
||||
def test_job_template_cannot_add_extra_credentials(self, job_template, credential, rando):
|
||||
def test_job_template_cannot_add_credentials(self, job_template, credential, rando):
|
||||
job_template.admin_role.members.add(rando)
|
||||
credential.read_role.members.add(rando)
|
||||
# without permission to credential, user can not attach it
|
||||
assert not JobTemplateAccess(rando).can_attach(
|
||||
job_template, credential, 'credentials', {})
|
||||
|
||||
def test_job_template_can_add_extra_credentials(self, job_template, credential, rando):
|
||||
def test_job_template_can_add_credentials(self, job_template, credential, rando):
|
||||
job_template.admin_role.members.add(rando)
|
||||
credential.use_role.members.add(rando)
|
||||
# user has permission to apply credential
|
||||
|
||||
@@ -35,7 +35,7 @@ data_loggly = {
|
||||
# Test reconfigure logging settings function
|
||||
# name this whatever you want
|
||||
@pytest.mark.parametrize(
|
||||
'enabled, type, host, port, protocol, expected_config', [
|
||||
'enabled, log_type, host, port, protocol, expected_config', [
|
||||
(
|
||||
True,
|
||||
'loggly',
|
||||
@@ -135,9 +135,20 @@ data_loggly = {
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="services/collector/event")', # noqa
|
||||
])
|
||||
),
|
||||
(
|
||||
True, # valid sumologic config
|
||||
'sumologic',
|
||||
'https://endpoint5.collection.us2.sumologic.com/receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==', # noqa
|
||||
None,
|
||||
'https',
|
||||
'\n'.join([
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" errorfile="/var/log/tower/rsyslog.err" action.resumeInterval="5" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
|
||||
])
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_rsyslog_conf_template(enabled, type, host, port, protocol, expected_config):
|
||||
def test_rsyslog_conf_template(enabled, log_type, host, port, protocol, expected_config):
|
||||
|
||||
mock_settings, _ = _mock_logging_defaults()
|
||||
|
||||
@@ -146,7 +157,7 @@ def test_rsyslog_conf_template(enabled, type, host, port, protocol, expected_con
|
||||
setattr(mock_settings, 'LOGGING', logging_defaults)
|
||||
setattr(mock_settings, 'LOGGING["handlers"]["external_logger"]["address"]', '/var/run/awx-rsyslog/rsyslog.sock')
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_ENABLED', enabled)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_TYPE', type)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_TYPE', log_type)
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_HOST', host)
|
||||
if port:
|
||||
setattr(mock_settings, 'LOG_AGGREGATOR_PORT', port)
|
||||
|
||||
@@ -64,7 +64,6 @@ def could_be_playbook(project_path, dir_path, filename):
|
||||
matched = True
|
||||
break
|
||||
except IOError:
|
||||
logger.exception(f'failed to open {playbook_path}')
|
||||
return None
|
||||
if not matched:
|
||||
return None
|
||||
|
||||
@@ -43,7 +43,7 @@ logger = logging.getLogger('awx.main.utils')
|
||||
|
||||
__all__ = [
|
||||
'get_object_or_400', 'camelcase_to_underscore', 'underscore_to_camelcase', 'memoize',
|
||||
'memoize_delete', 'get_ansible_version', 'get_ssh_version', 'get_licenser', 'get_awx_http_client_headers',
|
||||
'memoize_delete', 'get_ansible_version', 'get_licenser', 'get_awx_http_client_headers',
|
||||
'get_awx_version', 'update_scm_url', 'get_type_for_model', 'get_model_for_type',
|
||||
'copy_model_by_class', 'region_sorting', 'copy_m2m_relationships',
|
||||
'prefetch_page_capabilities', 'to_python_boolean', 'ignore_inventory_computed_fields',
|
||||
@@ -190,20 +190,6 @@ def get_ansible_version():
|
||||
return _get_ansible_version('ansible')
|
||||
|
||||
|
||||
@memoize()
|
||||
def get_ssh_version():
|
||||
'''
|
||||
Return SSH version installed.
|
||||
'''
|
||||
try:
|
||||
proc = subprocess.Popen(['ssh', '-V'],
|
||||
stderr=subprocess.PIPE)
|
||||
result = smart_str(proc.communicate()[1])
|
||||
return result.split(" ")[0].split("_")[1]
|
||||
except Exception:
|
||||
return 'unknown'
|
||||
|
||||
|
||||
def get_awx_version():
|
||||
'''
|
||||
Return AWX version as reported by setuptools.
|
||||
@@ -1024,14 +1010,17 @@ def get_custom_venv_choices(custom_paths=None):
|
||||
custom_venv_choices = []
|
||||
|
||||
for custom_venv_path in all_venv_paths:
|
||||
if os.path.exists(custom_venv_path):
|
||||
custom_venv_choices.extend([
|
||||
os.path.join(custom_venv_path, x, '')
|
||||
for x in os.listdir(custom_venv_path)
|
||||
if x != 'awx' and
|
||||
os.path.isdir(os.path.join(custom_venv_path, x)) and
|
||||
os.path.exists(os.path.join(custom_venv_path, x, 'bin', 'activate'))
|
||||
])
|
||||
try:
|
||||
if os.path.exists(custom_venv_path):
|
||||
custom_venv_choices.extend([
|
||||
os.path.join(custom_venv_path, x, '')
|
||||
for x in os.listdir(custom_venv_path)
|
||||
if x != 'awx' and
|
||||
os.path.isdir(os.path.join(custom_venv_path, x)) and
|
||||
os.path.exists(os.path.join(custom_venv_path, x, 'bin', 'activate'))
|
||||
])
|
||||
except Exception:
|
||||
logger.exception("Encountered an error while discovering custom virtual environments.")
|
||||
return custom_venv_choices
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import os
|
||||
|
||||
import shutil
|
||||
import tempfile
|
||||
import urllib.parse as urlparse
|
||||
|
||||
from django.conf import settings
|
||||
@@ -77,7 +78,7 @@ def construct_rsyslog_conf_template(settings=settings):
|
||||
f'action.resumeInterval="{timeout}"'
|
||||
]
|
||||
if parsed.path:
|
||||
path = urlparse.quote(parsed.path[1:])
|
||||
path = urlparse.quote(parsed.path[1:], safe='/=')
|
||||
if parsed.query:
|
||||
path = f'{path}?{urlparse.quote(parsed.query)}'
|
||||
params.append(f'restpath="{path}"')
|
||||
@@ -112,6 +113,10 @@ def construct_rsyslog_conf_template(settings=settings):
|
||||
|
||||
def reconfigure_rsyslog():
|
||||
tmpl = construct_rsyslog_conf_template()
|
||||
with open('/var/lib/awx/rsyslog/rsyslog.conf', 'w') as f:
|
||||
f.write(tmpl + '\n')
|
||||
# Write config to a temp file then move it to preserve atomicity
|
||||
with tempfile.TemporaryDirectory(prefix='rsyslog-conf-') as temp_dir:
|
||||
path = temp_dir + '/rsyslog.conf.temp'
|
||||
with open(path, 'w') as f:
|
||||
f.write(tmpl + '\n')
|
||||
shutil.move(path, '/var/lib/awx/rsyslog/rsyslog.conf')
|
||||
supervisor_service_command(command='restart', service='awx-rsyslogd')
|
||||
|
||||
@@ -13,6 +13,10 @@ class RSysLogHandler(logging.handlers.SysLogHandler):
|
||||
|
||||
append_nul = False
|
||||
|
||||
def _connect_unixsocket(self, address):
|
||||
super(RSysLogHandler, self)._connect_unixsocket(address)
|
||||
self.socket.setblocking(False)
|
||||
|
||||
def emit(self, msg):
|
||||
if not settings.LOG_AGGREGATOR_ENABLED:
|
||||
return
|
||||
@@ -26,6 +30,14 @@ class RSysLogHandler(logging.handlers.SysLogHandler):
|
||||
# unfortunately, we can't log that because...rsyslogd is down (and
|
||||
# would just us back ddown this code path)
|
||||
pass
|
||||
except BlockingIOError:
|
||||
# for <some reason>, rsyslogd is no longer reading from the domain socket, and
|
||||
# we're unable to write any more to it without blocking (we've seen this behavior
|
||||
# from time to time when logging is totally misconfigured;
|
||||
# in this scenario, it also makes more sense to just drop the messages,
|
||||
# because the alternative is blocking the socket.send() in the
|
||||
# Python process, which we definitely don't want to do)
|
||||
pass
|
||||
|
||||
|
||||
ColorHandler = logging.StreamHandler
|
||||
|
||||
@@ -37,7 +37,7 @@ def get_broadcast_hosts():
|
||||
.order_by('hostname') \
|
||||
.values('hostname', 'ip_address') \
|
||||
.distinct()
|
||||
return [i['ip_address'] or i['hostname'] for i in instances]
|
||||
return {i['hostname']: i['ip_address'] or i['hostname'] for i in instances}
|
||||
|
||||
|
||||
def get_local_host():
|
||||
@@ -149,19 +149,33 @@ class BroadcastWebsocketTask(WebsocketTask):
|
||||
class BroadcastWebsocketManager(object):
|
||||
def __init__(self):
|
||||
self.event_loop = asyncio.get_event_loop()
|
||||
'''
|
||||
{
|
||||
'hostname1': BroadcastWebsocketTask(),
|
||||
'hostname2': BroadcastWebsocketTask(),
|
||||
'hostname3': BroadcastWebsocketTask(),
|
||||
}
|
||||
'''
|
||||
self.broadcast_tasks = dict()
|
||||
# parallel dict to broadcast_tasks that tracks stats
|
||||
self.local_hostname = get_local_host()
|
||||
self.stats_mgr = BroadcastWebsocketStatsManager(self.event_loop, self.local_hostname)
|
||||
|
||||
async def run_per_host_websocket(self):
|
||||
|
||||
while True:
|
||||
future_remote_hosts = get_broadcast_hosts()
|
||||
known_hosts = get_broadcast_hosts()
|
||||
future_remote_hosts = known_hosts.keys()
|
||||
current_remote_hosts = self.broadcast_tasks.keys()
|
||||
deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
|
||||
new_remote_hosts = set(future_remote_hosts) - set(current_remote_hosts)
|
||||
|
||||
remote_addresses = {k: v.remote_host for k, v in self.broadcast_tasks.items()}
|
||||
for hostname, address in known_hosts.items():
|
||||
if hostname in self.broadcast_tasks and \
|
||||
address != remote_addresses[hostname]:
|
||||
deleted_remote_hosts.add(hostname)
|
||||
new_remote_hosts.add(hostname)
|
||||
|
||||
if deleted_remote_hosts:
|
||||
logger.warn(f"Removing {deleted_remote_hosts} from websocket broadcast list")
|
||||
if new_remote_hosts:
|
||||
@@ -177,7 +191,7 @@ class BroadcastWebsocketManager(object):
|
||||
broadcast_task = BroadcastWebsocketTask(name=self.local_hostname,
|
||||
event_loop=self.event_loop,
|
||||
stats=stats,
|
||||
remote_host=h)
|
||||
remote_host=known_hosts[h])
|
||||
broadcast_task.start()
|
||||
self.broadcast_tasks[h] = broadcast_task
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
- name: Poll for status of active job.
|
||||
hosts: all
|
||||
gather_facts: false
|
||||
collections:
|
||||
- ansible.posix
|
||||
|
||||
tasks:
|
||||
|
||||
|
||||
@@ -48,22 +48,8 @@
|
||||
- update_git
|
||||
|
||||
- block:
|
||||
- name: update project using hg
|
||||
hg:
|
||||
dest: "{{project_path|quote}}"
|
||||
repo: "{{scm_url|quote}}"
|
||||
revision: "{{scm_branch|quote}}"
|
||||
force: "{{scm_clean}}"
|
||||
register: hg_result
|
||||
|
||||
- name: Set the hg repository version
|
||||
set_fact:
|
||||
scm_version: "{{ hg_result['after'] }}"
|
||||
when: "'after' in hg_result"
|
||||
|
||||
- name: parse hg version string properly
|
||||
set_fact:
|
||||
scm_version: "{{scm_version|regex_replace('^([A-Za-z0-9]+).*$', '\\1')}}"
|
||||
- name: include hg tasks
|
||||
include_tasks: project_update_hg_tasks.yml
|
||||
tags:
|
||||
- update_hg
|
||||
|
||||
@@ -136,7 +122,7 @@
|
||||
register: doesRequirementsExist
|
||||
|
||||
- name: fetch galaxy roles from requirements.yml
|
||||
command: ansible-galaxy install -r roles/requirements.yml -p {{roles_destination|quote}}{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
|
||||
command: ansible-galaxy role install -r roles/requirements.yml -p {{roles_destination|quote}}{{ ' -' + 'v' * ansible_verbosity if ansible_verbosity else '' }}
|
||||
args:
|
||||
chdir: "{{project_path|quote}}"
|
||||
register: galaxy_result
|
||||
|
||||
17
awx/playbooks/project_update_hg_tasks.yml
Normal file
17
awx/playbooks/project_update_hg_tasks.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
- name: update project using hg
|
||||
hg:
|
||||
dest: "{{project_path|quote}}"
|
||||
repo: "{{scm_url|quote}}"
|
||||
revision: "{{scm_branch|quote}}"
|
||||
force: "{{scm_clean}}"
|
||||
register: hg_result
|
||||
|
||||
- name: Set the hg repository version
|
||||
set_fact:
|
||||
scm_version: "{{ hg_result['after'] }}"
|
||||
when: "'after' in hg_result"
|
||||
|
||||
- name: parse hg version string properly
|
||||
set_fact:
|
||||
scm_version: "{{scm_version|regex_replace('^([A-Za-z0-9]+).*$', '\\1')}}"
|
||||
@@ -9,6 +9,8 @@
|
||||
gather_facts: false
|
||||
vars:
|
||||
secret: "{{ lookup('pipe', 'cat ' + src + '/env/ssh_key') }}"
|
||||
collections:
|
||||
- ansible.posix
|
||||
|
||||
tasks:
|
||||
- name: synchronize job environment with isolated host
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# This file is a utility Ansible plugin that is not part of the AWX or Ansible
|
||||
# packages. It does not import any code from either package, nor does its
|
||||
# license apply to Ansible or AWX.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# Neither the name of the <ORGANIZATION> nor the names of its contributors
|
||||
# may be used to endorse or promote products derived from this software
|
||||
# without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import os
|
||||
import memcache
|
||||
import json
|
||||
import datetime
|
||||
import base64
|
||||
from dateutil import parser
|
||||
from dateutil.tz import tzutc
|
||||
|
||||
from ansible import constants as C
|
||||
|
||||
try:
|
||||
from ansible.cache.base import BaseCacheModule
|
||||
except Exception:
|
||||
from ansible.plugins.cache.base import BaseCacheModule
|
||||
|
||||
|
||||
class CacheModule(BaseCacheModule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.mc = memcache.Client([C.CACHE_PLUGIN_CONNECTION], debug=0)
|
||||
self._timeout = int(C.CACHE_PLUGIN_TIMEOUT)
|
||||
self._inventory_id = os.environ['INVENTORY_ID']
|
||||
|
||||
@property
|
||||
def host_names_key(self):
|
||||
return '{}'.format(self._inventory_id)
|
||||
|
||||
def translate_host_key(self, host_name):
|
||||
return '{}-{}'.format(self._inventory_id, base64.b64encode(host_name.encode('utf-8')))
|
||||
|
||||
def translate_modified_key(self, host_name):
|
||||
return '{}-{}-modified'.format(self._inventory_id, base64.b64encode(host_name.encode('utf-8')))
|
||||
|
||||
def get(self, key):
|
||||
host_key = self.translate_host_key(key)
|
||||
modified_key = self.translate_modified_key(key)
|
||||
|
||||
'''
|
||||
Cache entry expired
|
||||
'''
|
||||
modified = self.mc.get(modified_key)
|
||||
if modified is None:
|
||||
raise KeyError
|
||||
modified = parser.parse(modified).replace(tzinfo=tzutc())
|
||||
now_utc = datetime.datetime.now(tzutc())
|
||||
if self._timeout != 0 and (modified + datetime.timedelta(seconds=self._timeout)) < now_utc:
|
||||
raise KeyError
|
||||
|
||||
value_json = self.mc.get(host_key)
|
||||
if value_json is None:
|
||||
raise KeyError
|
||||
try:
|
||||
return json.loads(value_json)
|
||||
# If cache entry is corrupt or bad, fail gracefully.
|
||||
except (TypeError, ValueError):
|
||||
self.delete(key)
|
||||
raise KeyError
|
||||
|
||||
def set(self, key, value):
|
||||
host_key = self.translate_host_key(key)
|
||||
modified_key = self.translate_modified_key(key)
|
||||
|
||||
self.mc.set(host_key, json.dumps(value))
|
||||
value = json.dumps(value)
|
||||
rc = self.mc.set(host_key, value)
|
||||
if rc == 0 and len(value) > self.mc.server_max_value_length:
|
||||
self._display.error(
|
||||
"memcache.set('{}', '?') failed, value > server_max_value_length ({} bytes)".format(
|
||||
key, len(value)
|
||||
)
|
||||
)
|
||||
self.mc.set(modified_key, datetime.datetime.now(tzutc()).isoformat())
|
||||
|
||||
def keys(self):
    """Return the stored host-name index for this inventory.

    Yields whatever memcached holds under the index key (None on miss).
    """
    index_key = self.host_names_key
    return self.mc.get(index_key)
def contains(self, key):
    """Return True if a live (non-expired, readable) entry exists for key."""
    try:
        self.get(key)
    except KeyError:
        return False
    return True
def delete(self, key):
    """"Delete" a host entry by overwriting it with an empty dict.

    The memcached entries are not removed; set() replaces the stored
    value and refreshes the modified timestamp.
    """
    empty = {}
    self.set(key, empty)
def flush(self):
    """Remove every cached entry for this inventory.

    Deletes each host's fact and modified-timestamp entries, then the
    host-name index itself so keys()/copy() no longer report hosts
    whose entries are gone (previously the index was left behind).
    """
    host_names = self.mc.get(self.host_names_key)
    if not host_names:
        return

    for name in host_names:
        self.mc.delete(self.translate_host_key(name))
        self.mc.delete(self.translate_modified_key(name))
    self.mc.delete(self.host_names_key)
def copy(self):
    """Return a dict mapping each indexed host name to its raw cached value.

    Values are returned exactly as stored in memcached (not JSON-decoded
    here). Returns an empty dict when no host names are recorded —
    previously this returned None, breaking dict-style callers.
    """
    ret = dict()
    host_names = self.mc.get(self.host_names_key)
    if not host_names:
        return ret

    for name in host_names:
        ret[name] = self.mc.get(self.translate_host_key(name))
    return ret
@@ -121,8 +121,8 @@ LOGIN_URL = '/api/login/'
|
||||
PROJECTS_ROOT = os.path.join(BASE_DIR, 'projects')
|
||||
|
||||
# Absolute filesystem path to the directory to host collections for
|
||||
# running inventory imports
|
||||
INVENTORY_COLLECTIONS_ROOT = os.path.join(BASE_DIR, 'vendor', 'inventory_collections')
|
||||
# running inventory imports, isolated playbooks
|
||||
AWX_ANSIBLE_COLLECTIONS_PATHS = os.path.join(BASE_DIR, 'vendor', 'awx_ansible_collections')
|
||||
|
||||
# Absolute filesystem path to the directory for job status stdout (default for
|
||||
# development and tests, default for production defined in production.py). This
|
||||
@@ -247,7 +247,7 @@ TEMPLATES = [
|
||||
'loaders': [(
|
||||
'django.template.loaders.cached.Loader',
|
||||
('django.template.loaders.filesystem.Loader',
|
||||
'django.template.loaders.app_directories.Loader',),
|
||||
'django.template.loaders.app_directories.Loader',),
|
||||
)],
|
||||
'builtins': ['awx.main.templatetags.swagger'],
|
||||
},
|
||||
@@ -386,9 +386,6 @@ AUTH_BASIC_ENABLED = True
|
||||
# when trying to access a UI page that requries authentication.
|
||||
LOGIN_REDIRECT_OVERRIDE = ''
|
||||
|
||||
# If set, serve only minified JS for UI.
|
||||
USE_MINIFIED_JS = False
|
||||
|
||||
# Default to skipping isolated host key checking (the initial connection will
|
||||
# hang on an interactive "The authenticity of host example.org can't be
|
||||
# established" message)
|
||||
@@ -406,19 +403,13 @@ AWX_ISOLATED_CONNECTION_TIMEOUT = 10
|
||||
# The time (in seconds) between the periodic isolated heartbeat status check
|
||||
AWX_ISOLATED_PERIODIC_CHECK = 600
|
||||
|
||||
# Verbosity level for isolated node management tasks
|
||||
AWX_ISOLATED_VERBOSITY = 0
|
||||
|
||||
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
|
||||
DEVSERVER_DEFAULT_PORT = '8013'
|
||||
|
||||
# Set default ports for live server tests.
|
||||
os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
|
||||
|
||||
BROKER_DURABILITY = True
|
||||
BROKER_POOL_LIMIT = None
|
||||
BROKER_URL = 'unix:///var/run/redis/redis.sock'
|
||||
BROKER_TRANSPORT_OPTIONS = {}
|
||||
CELERYBEAT_SCHEDULE = {
|
||||
'tower_scheduler': {
|
||||
'task': 'awx.main.tasks.awx_periodic_scheduler',
|
||||
@@ -448,10 +439,11 @@ CELERYBEAT_SCHEDULE = {
|
||||
}
|
||||
|
||||
# Django Caching Configuration
|
||||
DJANGO_REDIS_IGNORE_EXCEPTIONS = True
|
||||
CACHES = {
|
||||
'default': {
|
||||
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
|
||||
'LOCATION': 'unix:/var/run/memcached/memcached.sock'
|
||||
'BACKEND': 'django_redis.cache.RedisCache',
|
||||
'LOCATION': 'unix:/var/run/redis/redis.sock?db=1'
|
||||
},
|
||||
}
|
||||
|
||||
@@ -572,6 +564,9 @@ AWX_ROLES_ENABLED = True
|
||||
# Note: This setting may be overridden by database settings.
|
||||
AWX_COLLECTIONS_ENABLED = True
|
||||
|
||||
# Follow symlinks when scanning for playbooks
|
||||
AWX_SHOW_PLAYBOOK_LINKS = False
|
||||
|
||||
# Settings for primary galaxy server, should be set in the UI
|
||||
PRIMARY_GALAXY_URL = ''
|
||||
PRIMARY_GALAXY_USERNAME = ''
|
||||
@@ -644,7 +639,6 @@ INSIGHTS_TRACKING_STATE = False
|
||||
|
||||
# Last gather date for Analytics
|
||||
AUTOMATION_ANALYTICS_LAST_GATHER = None
|
||||
AUTOMATION_ANALYTICS_INTERVAL = 14400
|
||||
|
||||
# Default list of modules allowed for ad hoc commands.
|
||||
# Note: This setting may be overridden by database settings.
|
||||
@@ -909,20 +903,8 @@ SCM_EXCLUDE_EMPTY_GROUPS = False
|
||||
ACTIVITY_STREAM_ENABLED = True
|
||||
ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False
|
||||
|
||||
# Internal API URL for use by inventory scripts and callback plugin.
|
||||
INTERNAL_API_URL = 'http://127.0.0.1:%s' % DEVSERVER_DEFAULT_PORT
|
||||
|
||||
CALLBACK_QUEUE = "callback_tasks"
|
||||
|
||||
SCHEDULER_QUEUE = "scheduler"
|
||||
|
||||
TASK_COMMAND_PORT = 6559
|
||||
|
||||
SOCKETIO_NOTIFICATION_PORT = 6557
|
||||
SOCKETIO_LISTEN_PORT = 8080
|
||||
|
||||
FACT_CACHE_PORT = 6564
|
||||
|
||||
# Note: This setting may be overridden by database settings.
|
||||
ORG_ADMINS_CAN_SEE_ALL_USERS = True
|
||||
MANAGE_ORGANIZATION_AUTH = True
|
||||
@@ -957,6 +939,7 @@ CHANNEL_LAYERS = {
|
||||
"CONFIG": {
|
||||
"hosts": [BROKER_URL],
|
||||
"capacity": 10000,
|
||||
"group_expiry": 157784760, # 5 years
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -1201,7 +1184,6 @@ LOGGING = {
|
||||
},
|
||||
}
|
||||
}
|
||||
LOG_AGGREGATOR_AUDIT = False
|
||||
|
||||
# Apply coloring to messages logged to the console
|
||||
COLOR_LOGS = False
|
||||
|
||||
@@ -148,6 +148,9 @@ for setting in dir(this_module):
|
||||
include(optional('/etc/tower/settings.py'), scope=locals())
|
||||
include(optional('/etc/tower/conf.d/*.py'), scope=locals())
|
||||
|
||||
# Installed differently in Dockerfile compared to production versions
|
||||
AWX_ANSIBLE_COLLECTIONS_PATHS = '/vendor/awx_ansible_collections'
|
||||
|
||||
BASE_VENV_PATH = "/venv/"
|
||||
ANSIBLE_VENV_PATH = os.path.join(BASE_VENV_PATH, "ansible")
|
||||
AWX_VENV_PATH = os.path.join(BASE_VENV_PATH, "awx")
|
||||
|
||||
@@ -53,7 +53,7 @@ if "pytest" in sys.modules:
|
||||
PROJECTS_ROOT = '/var/lib/awx/projects/'
|
||||
|
||||
# Location for cross-development of inventory plugins
|
||||
# INVENTORY_COLLECTIONS_ROOT = '/awx_devel/awx/plugins/collections'
|
||||
AWX_ANSIBLE_COLLECTIONS_PATHS = '/vendor/awx_ansible_collections'
|
||||
|
||||
# Absolute filesystem path to the directory for job status stdout
|
||||
# This directory should not be web-accessible
|
||||
|
||||
@@ -30,12 +30,6 @@ SECRET_KEY = None
|
||||
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
|
||||
ALLOWED_HOSTS = []
|
||||
|
||||
# Production should only use minified JS for UI.
|
||||
USE_MINIFIED_JS = True
|
||||
|
||||
# URL used by inventory script and callback plugin to access API.
|
||||
INTERNAL_API_URL = 'http://127.0.0.1:80'
|
||||
|
||||
# Absolute filesystem path to the directory for job status stdout
|
||||
# This directory should not be web-accessible
|
||||
JOBOUTPUT_ROOT = '/var/lib/awx/job_status/'
|
||||
|
||||
@@ -740,7 +740,9 @@ class SAMLOrgAttrField(HybridDictField):
|
||||
class SAMLTeamAttrTeamOrgMapField(HybridDictField):
|
||||
|
||||
team = fields.CharField(required=True, allow_null=False)
|
||||
team_alias = fields.CharField(required=False, allow_null=True)
|
||||
organization = fields.CharField(required=True, allow_null=False)
|
||||
organization_alias = fields.CharField(required=False, allow_null=True)
|
||||
|
||||
child = _Forbidden()
|
||||
|
||||
|
||||
@@ -187,13 +187,22 @@ def update_user_teams_by_saml_attr(backend, details, user=None, *args, **kwargs)
|
||||
|
||||
team_ids = []
|
||||
for team_name_map in team_map.get('team_org_map', []):
|
||||
team_name = team_name_map.get('team', '')
|
||||
team_name = team_name_map.get('team', None)
|
||||
team_alias = team_name_map.get('team_alias', None)
|
||||
organization_name = team_name_map.get('organization', None)
|
||||
organization_alias = team_name_map.get('organization_alias', None)
|
||||
if team_name in saml_team_names:
|
||||
if not team_name_map.get('organization', ''):
|
||||
if not organization_name:
|
||||
# Settings field validation should prevent this.
|
||||
logger.error("organization name invalid for team {}".format(team_name))
|
||||
continue
|
||||
org = Organization.objects.get_or_create(name=team_name_map['organization'])[0]
|
||||
|
||||
if organization_alias:
|
||||
organization_name = organization_alias
|
||||
org = Organization.objects.get_or_create(name=organization_name)[0]
|
||||
|
||||
if team_alias:
|
||||
team_name = team_alias
|
||||
team = Team.objects.get_or_create(name=team_name, organization=org)[0]
|
||||
|
||||
team_ids.append(team.id)
|
||||
|
||||
@@ -193,6 +193,10 @@ class TestSAMLAttr():
|
||||
{'team': 'Red', 'organization': 'Default1'},
|
||||
{'team': 'Green', 'organization': 'Default1'},
|
||||
{'team': 'Green', 'organization': 'Default3'},
|
||||
{
|
||||
'team': 'Yellow', 'team_alias': 'Yellow_Alias',
|
||||
'organization': 'Default4', 'organization_alias': 'Default4_Alias'
|
||||
},
|
||||
]
|
||||
}
|
||||
return MockSettings()
|
||||
@@ -285,3 +289,18 @@ class TestSAMLAttr():
|
||||
assert Team.objects.get(name='Green', organization__name='Default1').member_role.members.count() == 3
|
||||
assert Team.objects.get(name='Green', organization__name='Default3').member_role.members.count() == 3
|
||||
|
||||
def test_update_user_teams_alias_by_saml_attr(self, orgs, users, kwargs, mock_settings):
|
||||
with mock.patch('django.conf.settings', mock_settings):
|
||||
u1 = users[0]
|
||||
|
||||
# Test getting teams from attribute with team->org mapping
|
||||
kwargs['response']['attributes']['groups'] = ['Yellow']
|
||||
|
||||
# Ensure team and org will be created
|
||||
update_user_teams_by_saml_attr(None, None, u1, **kwargs)
|
||||
|
||||
assert Team.objects.filter(name='Yellow', organization__name='Default4').count() == 0
|
||||
assert Team.objects.filter(name='Yellow_Alias', organization__name='Default4_Alias').count() == 1
|
||||
assert Team.objects.get(
|
||||
name='Yellow_Alias', organization__name='Default4_Alias').member_role.members.count() == 1
|
||||
|
||||
|
||||
@@ -71,6 +71,14 @@ class TestSAMLTeamAttrField():
|
||||
{'team': 'Engineering', 'organization': 'Ansible2'},
|
||||
{'team': 'Engineering2', 'organization': 'Ansible'},
|
||||
]},
|
||||
{'remove': True, 'saml_attr': 'foobar', 'team_org_map': [
|
||||
{
|
||||
'team': 'Engineering', 'team_alias': 'Engineering Team',
|
||||
'organization': 'Ansible', 'organization_alias': 'Awesome Org'
|
||||
},
|
||||
{'team': 'Engineering', 'organization': 'Ansible2'},
|
||||
{'team': 'Engineering2', 'organization': 'Ansible'},
|
||||
]},
|
||||
])
|
||||
def test_internal_value_valid(self, data):
|
||||
field = SAMLTeamAttrField()
|
||||
|
||||
5
awx/ui/build/zuul_download_chromium.sh
Executable file
5
awx/ui/build/zuul_download_chromium.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
REVISION=588429
|
||||
CHROMIUM_URL="https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/${REVISION}/chrome-linux.zip"
|
||||
|
||||
wget ${CHROMIUM_URL} -w 30 -t 6 -O /tmp/chrome-linux.zip
|
||||
unzip -o -d /tmp /tmp/chrome-linux.zip
|
||||
@@ -71,6 +71,9 @@ export default ['i18n', function(i18n) {
|
||||
AWX_COLLECTIONS_ENABLED: {
|
||||
type: 'toggleSwitch',
|
||||
},
|
||||
AWX_SHOW_PLAYBOOK_LINKS: {
|
||||
type: 'toggleSwitch',
|
||||
},
|
||||
PRIMARY_GALAXY_URL: {
|
||||
type: 'text',
|
||||
reset: 'PRIMARY_GALAXY_URL',
|
||||
|
||||
@@ -263,8 +263,8 @@ export default ['NotificationsList', 'i18n', function(NotificationsList, i18n){
|
||||
dataTitle: i18n._("Source Variables"),
|
||||
dataPlacement: 'right',
|
||||
awPopOver: i18n._(`Override variables found in openstack.yml and used by the inventory update script. For an example variable configuration
|
||||
<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/openstack.yml\" target=\"_blank\">
|
||||
view openstack.yml in the Ansible github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
|
||||
<a href=\"https://github.com/openstack/ansible-collections-openstack/blob/master/scripts/inventory/openstack.yml\" target=\"_blank\">
|
||||
view openstack.yml in the Openstack github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
|
||||
dataContainer: 'body',
|
||||
subForm: 'sourceSubForm'
|
||||
},
|
||||
@@ -280,8 +280,8 @@ export default ['NotificationsList', 'i18n', function(NotificationsList, i18n){
|
||||
dataTitle: i18n._("Source Variables"),
|
||||
dataPlacement: 'right',
|
||||
awPopOver: i18n._(`Override variables found in cloudforms.ini and used by the inventory update script. For an example variable configuration
|
||||
<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/cloudforms.ini\" target=\"_blank\">
|
||||
view cloudforms.ini in the Ansible github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
|
||||
<a href=\"https://github.com/ansible-collections/community.general/blob/master/scripts/inventory/cloudforms.ini\" target=\"_blank\">
|
||||
view cloudforms.ini in the Ansible Collections github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
|
||||
dataContainer: 'body',
|
||||
subForm: 'sourceSubForm'
|
||||
},
|
||||
@@ -297,8 +297,8 @@ export default ['NotificationsList', 'i18n', function(NotificationsList, i18n){
|
||||
dataTitle: i18n._("Source Variables"),
|
||||
dataPlacement: 'right',
|
||||
awPopOver: i18n._(`Override variables found in foreman.ini and used by the inventory update script. For an example variable configuration
|
||||
<a href=\"https://github.com/ansible/ansible/blob/devel/contrib/inventory/foreman.ini\" target=\"_blank\">
|
||||
view foreman.ini in the Ansible github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
|
||||
<a href=\"https://github.com/ansible-collections/community.general/blob/master/scripts/inventory/foreman.ini\" target=\"_blank\">
|
||||
view foreman.ini in the Ansible Collections github repo.</a> Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Tower documentation for example syntax.`),
|
||||
dataContainer: 'body',
|
||||
subForm: 'sourceSubForm'
|
||||
},
|
||||
|
||||
@@ -123,9 +123,9 @@ export default
|
||||
if(!Empty(scope.selected_credentials.machine)) {
|
||||
job_launch_data.credential_id = scope.selected_credentials.machine.id;
|
||||
}
|
||||
job_launch_data.extra_credentials = [];
|
||||
job_launch_data.credentials = [];
|
||||
scope.selected_credentials.extra.forEach((extraCredential) => {
|
||||
job_launch_data.extra_credentials.push(extraCredential.id);
|
||||
job_launch_data.credentials.push(extraCredential.id);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -133,7 +133,7 @@ export default
|
||||
}
|
||||
else {
|
||||
var buttons = [{
|
||||
"label": "Continue",
|
||||
"label": i18n._("Continue"),
|
||||
"onClick": function() {
|
||||
// make a rest call here to force the API to
|
||||
// move the session time forward
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
|
||||
export default
|
||||
[ 'NotificationsList', 'GetBasePath', 'ToggleNotification', 'NotificationsListInit',
|
||||
'$stateParams', 'Dataset', '$scope',
|
||||
'$stateParams', 'Dataset', '$scope', 'isAdmin',
|
||||
function(
|
||||
NotificationsList, GetBasePath, ToggleNotification, NotificationsListInit,
|
||||
$stateParams, Dataset, $scope) {
|
||||
$stateParams, Dataset, $scope, isAdmin) {
|
||||
var defaultUrl = GetBasePath('system_job_templates'),
|
||||
list = NotificationsList,
|
||||
id = $stateParams.management_id;
|
||||
@@ -19,6 +19,8 @@ export default
|
||||
$scope[`${list.iterator}_dataset`] = Dataset.data;
|
||||
$scope[list.name] = $scope[`${list.iterator}_dataset`].results;
|
||||
|
||||
$scope.sufficientRoleForNotifToggle = isAdmin;
|
||||
|
||||
NotificationsListInit({
|
||||
scope: $scope,
|
||||
url: defaultUrl,
|
||||
|
||||
@@ -39,6 +39,19 @@ export default {
|
||||
let path = `${GetBasePath('system_job_templates')}${$stateParams.management_id}`;
|
||||
Rest.setUrl(path);
|
||||
return Rest.get(path).then((res) => res.data);
|
||||
}],
|
||||
isAdmin: ['Rest', 'GetBasePath', function(Rest, GetBasePath) {
|
||||
Rest.setUrl(GetBasePath('me'));
|
||||
return Rest.get()
|
||||
.then((res) => {
|
||||
if (res.data && res.data.results && res.data.count && res.data.results[0] && res.data.results[0].is_superuser) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
})
|
||||
.catch(() => {
|
||||
return false;
|
||||
});
|
||||
}]
|
||||
},
|
||||
ncyBreadcrumb: {
|
||||
|
||||
@@ -168,22 +168,6 @@ export default ['i18n', function(i18n) {
|
||||
subForm: 'typeSubForm',
|
||||
ngDisabled: '!(notification_template.summary_fields.user_capabilities.edit || canAdd)'
|
||||
},
|
||||
rooms: {
|
||||
label: i18n._('Destination Channels'),
|
||||
type: 'textarea',
|
||||
rows: 3,
|
||||
awPopOver: i18n._('Enter one HipChat channel per line. The pound symbol (#) is not required.'),
|
||||
dataTitle: i18n._('Destination Channels'),
|
||||
dataPlacement: 'right',
|
||||
dataContainer: "body",
|
||||
awRequiredWhen: {
|
||||
reqExpression: "room_required",
|
||||
init: "false"
|
||||
},
|
||||
ngShow: "notification_type.value == 'hipchat'",
|
||||
subForm: 'typeSubForm',
|
||||
ngDisabled: '!(notification_template.summary_fields.user_capabilities.edit || canAdd)'
|
||||
},
|
||||
token: {
|
||||
labelBind: 'tokenLabel',
|
||||
type: 'sensitive',
|
||||
@@ -344,18 +328,6 @@ export default ['i18n', function(i18n) {
|
||||
subForm: 'typeSubForm',
|
||||
ngDisabled: '!(notification_template.summary_fields.user_capabilities.edit || canAdd)'
|
||||
},
|
||||
api_url: {
|
||||
label: i18n._('API URL'),
|
||||
type: 'text',
|
||||
placeholder: 'https://mycompany.hipchat.com',
|
||||
awRequiredWhen: {
|
||||
reqExpression: "hipchat_required",
|
||||
init: "false"
|
||||
},
|
||||
ngShow: "notification_type.value == 'hipchat' ",
|
||||
subForm: 'typeSubForm',
|
||||
ngDisabled: '!(notification_template.summary_fields.user_capabilities.edit || canAdd)'
|
||||
},
|
||||
message_from: {
|
||||
label: i18n._('Notification Label'),
|
||||
type: 'text',
|
||||
|
||||
@@ -148,11 +148,14 @@ export default ['$scope', 'TemplatesService',
|
||||
|
||||
Object.keys(nodeRef).map((workflowMakerNodeId) => {
|
||||
const node = nodeRef[workflowMakerNodeId];
|
||||
const all_parents_must_converge = _.get(node, 'all_parents_must_converge', false);
|
||||
if (node.isNew) {
|
||||
if (node.unifiedJobTemplate && node.unifiedJobTemplate.unified_job_type === "workflow_approval") {
|
||||
addPromises.push(TemplatesService.addWorkflowNode({
|
||||
url: $scope.workflowJobTemplateObj.related.workflow_nodes,
|
||||
data: {}
|
||||
data: {
|
||||
all_parents_must_converge
|
||||
}
|
||||
}).then(({data: newNodeData}) => {
|
||||
Rest.setUrl(newNodeData.related.create_approval_template);
|
||||
approvalTemplatePromises.push(Rest.post({
|
||||
@@ -234,6 +237,14 @@ export default ['$scope', 'TemplatesService',
|
||||
});
|
||||
}));
|
||||
}
|
||||
if (node.originalNodeObject.all_parents_must_converge !== all_parents_must_converge) {
|
||||
editPromises.push(TemplatesService.editWorkflowNode({
|
||||
id: node.originalNodeObject.id,
|
||||
data: {
|
||||
all_parents_must_converge
|
||||
}
|
||||
}));
|
||||
}
|
||||
} else {
|
||||
editPromises.push(TemplatesService.editWorkflowNode({
|
||||
id: node.originalNodeObject.id,
|
||||
|
||||
6
awx/ui/package-lock.json
generated
6
awx/ui/package-lock.json
generated
@@ -14435,9 +14435,9 @@
|
||||
}
|
||||
},
|
||||
"websocket-extensions": {
|
||||
"version": "0.1.3",
|
||||
"resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.3.tgz",
|
||||
"integrity": "sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==",
|
||||
"version": "0.1.4",
|
||||
"resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
|
||||
"integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==",
|
||||
"dev": true
|
||||
},
|
||||
"whet.extend": {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user