Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 11:34:43 -03:30)

Compare commits: 521 commits
.github/ISSUE_TEMPLATE/bug_report.yml (3 changes, vendored)

@@ -19,6 +19,8 @@ body:
          required: true
        - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
          required: true
        - label: I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `security@ansible.com` instead.)
          required: true

  - type: textarea
    id: summary
@@ -42,6 +44,7 @@ body:
      label: Select the relevant components
      options:
        - label: UI
        - label: UI (tech preview)
        - label: API
        - label: Docs
        - label: Collection
.github/actions/awx_devel_image/action.yml (new file, 28 lines, vendored)

@@ -0,0 +1,28 @@
name: Setup images for AWX
description: Builds new awx_devel image
inputs:
  github-token:
    description: GitHub Token for registry access
    required: true
runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

    - name: Log in to registry
      shell: bash
      run: |
        echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

    - name: Pre-pull latest devel image to warm cache
      shell: bash
      run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}

    - name: Build image for current source checkout
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        make docker-compose-build
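For context, here is a minimal sketch, not part of the diff itself, of how a workflow job might consume this composite action: build the devel image, then run a check inside it with the repository's docker-runner make target. The job name and the check command are illustrative only.

jobs:
  example-check:                  # illustrative job name, not from the diff
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      # Build (or reuse from cache) the awx_devel image for this checkout.
      - name: Build awx_devel image
        uses: ./.github/actions/awx_devel_image
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      # Any check can then be executed inside the freshly built image.
      - name: Run a check in the image
        run: AWX_DOCKER_CMD='/start_tests.sh' make docker-runner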
.github/actions/run_awx_devel/action.yml (new file, 73 lines, vendored)

@@ -0,0 +1,73 @@
name: Run AWX docker-compose
description: Runs AWX with `make docker-compose`
inputs:
  github-token:
    description: GitHub Token to pass to awx_devel_image
    required: true
  build-ui:
    description: Should the UI be built?
    required: false
    default: false
    type: boolean
outputs:
  ip:
    description: The IP of the tools_awx_1 container
    value: ${{ steps.data.outputs.ip }}
  admin-token:
    description: OAuth token for admin user
    value: ${{ steps.data.outputs.admin_token }}
runs:
  using: composite
  steps:
    - name: Build awx_devel image for running checks
      uses: ./.github/actions/awx_devel_image
      with:
        github-token: ${{ inputs.github-token }}

    - name: Upgrade ansible-core
      shell: bash
      run: python3 -m pip install --upgrade ansible-core

    - name: Install system deps
      shell: bash
      run: sudo apt-get install -y gettext

    - name: Start AWX
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        COMPOSE_UP_OPTS="-d" \
        make docker-compose

    - name: Update default AWX password
      shell: bash
      run: |
        while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
        do
          echo "Waiting for AWX..."
          sleep 5
        done
        echo "AWX is up, updating the password..."
        docker exec -i tools_awx_1 sh <<-EOSH
          awx-manage update_password --username=admin --password=password
        EOSH

    - name: Build UI
      # This must be a string comparison in composite actions:
      # https://github.com/actions/runner/issues/2238
      if: ${{ inputs.build-ui == 'true' }}
      shell: bash
      run: |
        docker exec -i tools_awx_1 sh <<-EOSH
          make ui-devel
        EOSH

    - name: Get instance data
      id: data
      shell: bash
      run: |
        AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
        ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
        echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
        echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
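As a usage sketch (again, not part of the diff), a step inside a job such as the one sketched above can run this action under an id and let later steps read its ip and admin-token outputs; the final curl call below is only an illustration of consuming those outputs against the API ping endpoint.

      - uses: ./.github/actions/run_awx_devel
        id: awx
        with:
          build-ui: false
          github-token: ${{ secrets.GITHUB_TOKEN }}

      # The action's outputs are exposed through the step id chosen above.
      - name: Ping the API as admin
        run: |
          curl -k -H "Authorization: Bearer ${{ steps.awx.outputs.admin-token }}" \
            https://${{ steps.awx.outputs.ip }}:8043/api/v2/ping/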
.github/actions/upload_awx_devel_logs/action.yml (new file, 19 lines, vendored)

@@ -0,0 +1,19 @@
name: Upload logs
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
inputs:
  log-filename:
    description: "*Unique* name of the log file"
    required: true
runs:
  using: composite
  steps:
    - name: Get AWX logs
      shell: bash
      run: |
        docker logs tools_awx_1 > ${{ inputs.log-filename }}

    - name: Upload AWX logs as artifact
      uses: actions/upload-artifact@v3
      with:
        name: docker-compose-logs
        path: ${{ inputs.log-filename }}
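A typical caller, sketched here for illustration only, invokes the action at the end of a job with if: always() so logs are still collected when earlier steps fail, and passes a log file name that is unique per job (the name below is made up):

      - uses: ./.github/actions/upload_awx_devel_logs
        if: always()                    # collect logs even when earlier steps fail
        with:
          log-filename: smoke-test.log  # illustrative; must be unique per job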
.github/dependabot.yml (19 changes, vendored; file removed)

@@ -1,19 +0,0 @@
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/awx/ui"
    schedule:
      interval: "monthly"
    open-pull-requests-limit: 5
    allow:
      - dependency-type: "production"
    reviewers:
      - "AlexSCorey"
      - "keithjgrant"
      - "kialam"
      - "mabashian"
      - "marshmalien"
    labels:
      - "component:ui"
      - "dependencies"
    target-branch: "devel"
.github/issue_labeler.yml (2 changes, vendored)

@@ -6,6 +6,8 @@ needs_triage:
  - "Feature Summary"
"component:ui":
  - "\\[X\\] UI"
"component:ui_next":
  - "\\[X\\] UI \\(tech preview\\)"
"component:api":
  - "\\[X\\] API"
"component:docs":
.github/pr_labeler.yml (4 changes, vendored)

@@ -15,5 +15,5 @@

"dependencies":
  - any: ["awx/ui/package.json"]
  - any: ["awx/requirements/*.txt"]
  - any: ["awx/requirements/requirements.in"]
  - any: ["requirements/*.txt"]
  - any: ["requirements/requirements.in"]
.github/triage_replies.md (4 changes, vendored)

@@ -7,8 +7,8 @@

## PRs/Issues

### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
### Visit the Forum or Matrix
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)?

### Denied Submission
165
.github/workflows/ci.yml
vendored
165
.github/workflows/ci.yml
vendored
@@ -3,7 +3,7 @@ name: CI
|
||||
env:
|
||||
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
|
||||
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
|
||||
DEV_DOCKER_OWNER: ${{ github.repository_owner }}
|
||||
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
|
||||
on:
|
||||
pull_request:
|
||||
@@ -20,6 +20,8 @@ jobs:
|
||||
tests:
|
||||
- name: api-test
|
||||
command: /start_tests.sh
|
||||
- name: api-migrations
|
||||
command: /start_tests.sh test_migrations
|
||||
- name: api-lint
|
||||
command: /var/lib/awx/venv/awx/bin/tox -e linters
|
||||
- name: api-swagger
|
||||
@@ -35,29 +37,40 @@ jobs:
|
||||
- name: ui-test-general
|
||||
command: make ui-test-general
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build awx_devel image for running checks
|
||||
uses: ./.github/actions/awx_devel_image
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run check ${{ matrix.tests.name }}
|
||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
|
||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner
|
||||
|
||||
dev-env:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
build-ui: false
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run smoke test
|
||||
run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||
run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||
|
||||
awx-operator:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: awx
|
||||
|
||||
- name: Checkout awx-operator
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ansible/awx-operator
|
||||
path: awx-operator
|
||||
@@ -67,7 +80,7 @@ jobs:
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -102,7 +115,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
|
||||
- name: Upgrade ansible-core
|
||||
@@ -114,3 +127,137 @@ jobs:
|
||||
# needed due to cgroupsv2. This is fixed, but a stable release
|
||||
# with the fix has not been made yet.
|
||||
ANSIBLE_TEST_PREFER_PODMAN: 1
|
||||
|
||||
collection-integration:
|
||||
name: awx_collection integration
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target-regex:
|
||||
- name: a-h
|
||||
regex: ^[a-h]
|
||||
- name: i-p
|
||||
regex: ^[i-p]
|
||||
- name: r-z0-9
|
||||
regex: ^[r-z0-9]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
build-ui: false
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install dependencies for running tests
|
||||
run: |
|
||||
python3 -m pip install -e ./awxkit/
|
||||
python3 -m pip install -r awx_collection/requirements.txt
|
||||
|
||||
- name: Run integration tests
|
||||
run: |
|
||||
echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
|
||||
echo '[general]' > ~/.tower_cli.cfg
|
||||
echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
|
||||
echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
|
||||
echo 'verify_ssl = false' >> ~/.tower_cli.cfg
|
||||
TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
|
||||
make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
|
||||
env:
|
||||
ANSIBLE_TEST_PREFER_PODMAN: 1
|
||||
|
||||
# Upload coverage report as artifact
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: coverage-${{ matrix.target-regex.name }}
|
||||
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
log-filename: collection-integration-${{ matrix.target-regex.name }}.log
|
||||
|
||||
collection-integration-coverage-combine:
|
||||
name: combine awx_collection integration coverage
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- collection-integration
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Upgrade ansible-core
|
||||
run: python3 -m pip install --upgrade ansible-core
|
||||
|
||||
- name: Download coverage artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: coverage
|
||||
|
||||
- name: Combine coverage
|
||||
run: |
|
||||
make COLLECTION_VERSION=100.100.100-git install_collection
|
||||
mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
|
||||
cd coverage
|
||||
for i in coverage-*; do
|
||||
cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||
done
|
||||
cd ~/.ansible/collections/ansible_collections/awx/awx
|
||||
ansible-test coverage combine --requirements
|
||||
ansible-test coverage html
|
||||
echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
ansible-test coverage report >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
echo >> $GITHUB_STEP_SUMMARY
|
||||
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
|
||||
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
# This is a huge hack, there's no official action for removing artifacts currently.
|
||||
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
|
||||
# steps, so we have to use github-script to get them.
|
||||
#
|
||||
# The advantage of doing this, though, is that we save on artifact storage space.
|
||||
|
||||
- name: Get secret artifact runtime URL
|
||||
uses: actions/github-script@v6
|
||||
id: get-runtime-url
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
const { ACTIONS_RUNTIME_URL } = process.env;
|
||||
return ACTIONS_RUNTIME_URL;
|
||||
|
||||
- name: Get secret artifact runtime token
|
||||
uses: actions/github-script@v6
|
||||
id: get-runtime-token
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
const { ACTIONS_RUNTIME_TOKEN } = process.env;
|
||||
return ACTIONS_RUNTIME_TOKEN;
|
||||
|
||||
- name: Remove intermediary artifacts
|
||||
env:
|
||||
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
|
||||
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
|
||||
run: |
|
||||
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
|
||||
artifacts=$(
|
||||
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
|
||||
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
|
||||
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
|
||||
)
|
||||
|
||||
for artifact in $artifacts; do
|
||||
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
|
||||
done
|
||||
|
||||
- name: Upload coverage report as artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: awx-collection-integration-coverage-html
|
||||
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage
|
||||
|
||||
.github/workflows/devel_images.yml (11 changes, vendored)

@@ -16,7 +16,7 @@ jobs:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
@@ -28,7 +28,7 @@ jobs:
          OWNER: '${{ github.repository_owner }}'

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

@@ -48,8 +48,11 @@ jobs:
          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build

      - name: Push image
      - name: Push development images
        run: |
          docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
          docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
          docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}

      - name: Push AWX k8s image, only for upstream and feature branches
        run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
        if: endsWith(github.repository, '/awx')
.github/workflows/docs.yml (new file, 16 lines, vendored)

@@ -0,0 +1,16 @@
---
name: Docsite CI
on:
  pull_request:
jobs:
  docsite-build:
    name: docsite test build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: install tox
        run: pip install tox

      - name: Assure docs can be built
        run: tox -e docs
54
.github/workflows/e2e_test.yml
vendored
54
.github/workflows/e2e_test.yml
vendored
@@ -19,41 +19,20 @@ jobs:
|
||||
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Install system deps
|
||||
run: sudo apt-get install -y gettext
|
||||
|
||||
- name: Log in to registry
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull image to warm build cache
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||
|
||||
- name: Build UI
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
|
||||
|
||||
- name: Start AWX
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
|
||||
build-ui: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Pull awx_cypress_base image
|
||||
run: |
|
||||
docker pull quay.io/awx/awx_cypress_base:latest
|
||||
|
||||
- name: Checkout test project
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/tower-qa
|
||||
ssh-key: ${{ secrets.QA_REPO_KEY }}
|
||||
@@ -65,18 +44,6 @@ jobs:
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
docker build -t awx-pf-tests .
|
||||
|
||||
- name: Update default AWX password
|
||||
run: |
|
||||
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
|
||||
do
|
||||
echo "Waiting for AWX..."
|
||||
sleep 5;
|
||||
done
|
||||
echo "AWX is up, updating the password..."
|
||||
docker exec -i tools_awx_1 sh <<-EOSH
|
||||
awx-manage update_password --username=admin --password=password
|
||||
EOSH
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
|
||||
@@ -86,7 +53,7 @@ jobs:
|
||||
export COMMIT_INFO_SHA=$GITHUB_SHA
|
||||
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
|
||||
AWX_IP=${{ steps.awx.outputs.ip }}
|
||||
printenv > .env
|
||||
echo "Executing tests:"
|
||||
docker run \
|
||||
@@ -102,8 +69,7 @@ jobs:
|
||||
-w /e2e \
|
||||
awx-pf-tests run --project .
|
||||
|
||||
- name: Save AWX logs
|
||||
uses: actions/upload-artifact@v2
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
name: AWX-logs-${{ matrix.job }}
|
||||
path: make-docker-compose-output.log
|
||||
log-filename: e2e-${{ matrix.job }}.log
|
||||
|
||||
.github/workflows/label_issue.yml (8 changes, vendored)

@@ -6,6 +6,10 @@ on:
    - opened
    - reopened

permissions:
  contents: write # to fetch code
  issues: write # to label issues

jobs:
  triage:
    runs-on: ubuntu-latest
@@ -13,7 +17,7 @@ jobs:

    steps:
      - name: Label Issue
        uses: github/issue-labeler@v2.4.1
        uses: github/issue-labeler@v3.1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          not-before: 2021-12-07T07:00:00Z
@@ -24,7 +28,7 @@ jobs:
    runs-on: ubuntu-latest
    name: Label Issue - Community
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      - name: Install python requests
        run: pip install requests
.github/workflows/label_pr.yml (6 changes, vendored)

@@ -7,6 +7,10 @@ on:
    - reopened
    - synchronize

permissions:
  contents: write # to determine modified files (actions/labeler)
  pull-requests: write # to add labels to PRs (actions/labeler)

jobs:
  triage:
    runs-on: ubuntu-latest
@@ -23,7 +27,7 @@ jobs:
    runs-on: ubuntu-latest
    name: Label PR - Community
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      - name: Install python requests
        run: pip install requests
.github/workflows/pr_body_check.yml (1 change, vendored)

@@ -7,6 +7,7 @@ on:
    types: [opened, edited, reopened, synchronize]
jobs:
  pr-check:
    if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
    name: Scan PR description for semantic versioning keywords
    runs-on: ubuntu-latest
    permissions:
.github/workflows/promote.yml (13 changes, vendored)

@@ -8,19 +8,22 @@ on:
  release:
    types: [published]

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  promote:
    if: endsWith(github.repository, '/awx')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout awx
        uses: actions/checkout@v2
        uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

@@ -37,8 +40,12 @@ jobs:
        if: ${{ github.repository_owner != 'ansible' }}

      - name: Build collection and publish to galaxy
        env:
          COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
          COLLECTION_VERSION: ${{ github.event.release.tag_name }}
          COLLECTION_TEMPLATE_VERSION: true
        run: |
          COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
          make build_collection
          if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
            echo "Galaxy release already done"; \
          else \
.github/workflows/stage.yml (8 changes, vendored)

@@ -44,7 +44,7 @@ jobs:
          exit 0

      - name: Checkout awx
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          path: awx

@@ -52,18 +52,18 @@ jobs:
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

      - name: Checkout awx-logos
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          repository: ansible/awx-logos
          path: awx-logos

      - name: Checkout awx-operator
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          repository: ${{ github.repository_owner }}/awx-operator
          path: awx-operator
.github/workflows/upload_schema.yml (4 changes, vendored)

@@ -17,13 +17,13 @@ jobs:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
.gitignore (7 changes, vendored)

@@ -157,10 +157,15 @@ use_dev_supervisor.txt
*.unison.tmp
*.#
/awx/ui/.ui-built
/Dockerfile
/_build/
/_build_kube_dev/
/Dockerfile
/Dockerfile.dev
/Dockerfile.kube-dev

awx/ui_next/src
awx/ui_next/build

# Docs build stuff
docs/docsite/build/
_readthedocs/
.gitleaks.toml (new file, 5 lines)

@@ -0,0 +1,5 @@
[allowlist]
description = "Documentation contains example secrets and passwords"
paths = [
  "docs/docsite/rst/administration/oauth2_token_auth.rst",
]
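The file above is a standard gitleaks allowlist. As a rough sketch only, assuming the gitleaks v8 CLI (which this diff does not pin or install anywhere), a workflow step honoring the allowlist could look like this:

      # Hypothetical step; assumes the gitleaks binary is already available on the runner.
      - name: Scan for leaked secrets
        run: gitleaks detect --source . --config .gitleaks.toml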
.pip-tools.toml (new file, 5 lines)

@@ -0,0 +1,5 @@
[tool.pip-tools]
resolver = "backtracking"
allow-unsafe = true
strip-extras = true
quiet = true
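These keys mirror pip-compile's command-line flags (--resolver, --allow-unsafe, --strip-extras, --quiet). As an illustrative sketch, and assuming a pip-tools release recent enough to auto-discover .pip-tools.toml (roughly 7.1 or later), recompiling the pinned requirements would pick the options up without extra flags:

      # Hypothetical step; the paths follow the requirements layout referenced elsewhere in this compare.
      - name: Recompile requirements
        run: |
          python3 -m pip install pip-tools
          pip-compile requirements/requirements.in -o requirements/requirements.txt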
.readthedocs.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: ubuntu-22.04
  tools:
    python: >-
      3.11
  commands:
    - pip install --user tox
    - python3 -m tox -e docs
    - mkdir -p _readthedocs/html/
    - mv docs/docsite/build/html/* _readthedocs/html/
@@ -10,6 +10,7 @@ ignore: |
  tools/docker-compose/_sources
  # django template files
  awx/api/templates/instance_install_bundle/**
  .readthedocs.yaml

extends: default
@@ -4,6 +4,6 @@

Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.

Users who wish to upgrade modern AWX installations should follow the instructions at:
As of version 18.0, `awx-operator` is the preferred install/upgrade method. Users who wish to upgrade modern AWX installations should follow the instructions at:

https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
https://github.com/ansible/awx-operator/blob/devel/docs/upgrade/upgrading.md
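For orientation only: the awx-operator documentation linked above describes a kustomize-based install; a rough sketch of such a kustomization follows, with the namespace and version ref being illustrative values rather than anything taken from this diff.

# kustomization.yaml (illustrative sketch)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: awx
resources:
  # Replace <tag> with a released awx-operator version.
  - github.com/ansible/awx-operator/config/default?ref=<tag>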
@@ -31,7 +31,7 @@ If your issue isn't considered high priority, then please be patient as it may t

`state:needs_info` The issue needs more information. This could be more debug output, more specifics out the system such as version information. Any detail that is currently preventing this issue from moving forward. This should be considered a blocked state.

`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familar with an area of the code base the issue is for.
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familiar with an area of the code base the issue is for.

`state:needs_revision` More commonly used on pull requests, this state represents that there are changes that are being waited on.
129
Makefile
129
Makefile
@@ -1,14 +1,16 @@
|
||||
-include awx/ui_next/Makefile
|
||||
|
||||
PYTHON ?= python3.9
|
||||
PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
|
||||
SHELL := bash
|
||||
DOCKER_COMPOSE ?= docker-compose
|
||||
OFFICIAL ?= no
|
||||
NODE ?= node
|
||||
NPM_BIN ?= npm
|
||||
KIND_BIN ?= $(shell which kind)
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
|
||||
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
|
||||
|
||||
# ansible-test requires a semver compatible version, so we allow overrides to hack it
|
||||
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
|
||||
@@ -27,6 +29,8 @@ COLLECTION_TEMPLATE_VERSION ?= false
|
||||
# NOTE: This defaults the container image version to the branch that's active
|
||||
COMPOSE_TAG ?= $(GIT_BRANCH)
|
||||
MAIN_NODE_TYPE ?= hybrid
|
||||
# If set to true docker-compose will also start a pgbouncer instance and use it
|
||||
PGBOUNCER ?= false
|
||||
# If set to true docker-compose will also start a keycloak instance
|
||||
KEYCLOAK ?= false
|
||||
# If set to true docker-compose will also start an ldap instance
|
||||
@@ -37,17 +41,24 @@ SPLUNK ?= false
|
||||
PROMETHEUS ?= false
|
||||
# If set to true docker-compose will also start a grafana instance
|
||||
GRAFANA ?= false
|
||||
# If set to true docker-compose will also start a hashicorp vault instance
|
||||
VAULT ?= false
|
||||
# If set to true docker-compose will also start a tacacs+ instance
|
||||
TACACS ?= false
|
||||
|
||||
VENV_BASE ?= /var/lib/awx/venv
|
||||
|
||||
DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
|
||||
DEV_DOCKER_OWNER ?= ansible
|
||||
# Docker will only accept lowercase, so github names like Paul need to be paul
|
||||
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
|
||||
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
|
||||
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
|
||||
RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
|
||||
|
||||
# Python packages to install only from source (not from binary wheels)
|
||||
# Comma separated list
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
|
||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||
# to install the actual requirements
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
|
||||
@@ -68,7 +79,7 @@ I18N_FLAG_FILE = .i18n_built
|
||||
sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION PYTHON_VERSION docker-compose-sources \
|
||||
.git/hooks/pre-commit github_ci_setup github_ci_runner
|
||||
.git/hooks/pre-commit
|
||||
|
||||
clean-tmp:
|
||||
rm -rf tmp/
|
||||
@@ -262,11 +273,11 @@ run-wsrelay:
|
||||
$(PYTHON) manage.py run_wsrelay
|
||||
|
||||
## Start the heartbeat process in background in development environment.
|
||||
run-heartbeet:
|
||||
run-ws-heartbeat:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_heartbeet
|
||||
$(PYTHON) manage.py run_ws_heartbeat
|
||||
|
||||
reports:
|
||||
mkdir -p $@
|
||||
@@ -294,13 +305,13 @@ swagger: reports
|
||||
check: black
|
||||
|
||||
api-lint:
|
||||
BLACK_ARGS="--check" make black
|
||||
BLACK_ARGS="--check" $(MAKE) black
|
||||
flake8 awx
|
||||
yamllint -s .
|
||||
|
||||
## Run egg_info_dev to generate awx.egg-info for development.
|
||||
awx-link:
|
||||
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
|
||||
cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
PYTEST_ARGS ?= -n auto
|
||||
@@ -313,21 +324,16 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||
|
||||
## Login to Github container image registry, pull image, then build image.
|
||||
github_ci_setup:
|
||||
# GITHUB_ACTOR is automatic github actions env var
|
||||
# CI_GITHUB_TOKEN is defined in .github files
|
||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
||||
make docker-compose-build
|
||||
test_migrations:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS)
|
||||
|
||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||
docker-runner:
|
||||
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
||||
|
||||
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
|
||||
github_ci_runner: github_ci_setup docker-runner
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -369,11 +375,11 @@ test_collection_sanity:
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
|
||||
ansible --version
|
||||
COLLECTION_VERSION=1.0.0 make install_collection
|
||||
COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||
|
||||
test_collection_integration: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)
|
||||
|
||||
test_unit:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -515,13 +521,20 @@ docker-compose-sources: .git/hooks/pre-commit
|
||||
-e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \
|
||||
-e execution_node_count=$(EXECUTION_NODE_COUNT) \
|
||||
-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
|
||||
-e enable_pgbouncer=$(PGBOUNCER) \
|
||||
-e enable_keycloak=$(KEYCLOAK) \
|
||||
-e enable_ldap=$(LDAP) \
|
||||
-e enable_splunk=$(SPLUNK) \
|
||||
-e enable_prometheus=$(PROMETHEUS) \
|
||||
-e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
-e enable_grafana=$(GRAFANA) \
|
||||
-e enable_vault=$(VAULT) \
|
||||
-e enable_tacacs=$(TACACS) \
|
||||
$(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
|
||||
docker-compose: awx/projects docker-compose-sources
|
||||
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
|
||||
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
|
||||
-e enable_vault=$(VAULT);
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
|
||||
|
||||
docker-compose-credential-plugins: awx/projects docker-compose-sources
|
||||
@@ -552,19 +565,28 @@ docker-compose-container-group-clean:
|
||||
fi
|
||||
rm -rf tools/docker-compose-minikube/_sources/
|
||||
|
||||
## Base development image build
|
||||
docker-compose-build:
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
|
||||
DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
.PHONY: Dockerfile.dev
|
||||
## Generate Dockerfile.dev for awx_devel image
|
||||
Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.dev \
|
||||
-e build_dev=True \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE)
|
||||
|
||||
## Build awx_devel image for docker compose development environment
|
||||
docker-compose-build: Dockerfile.dev
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
-f Dockerfile.dev \
|
||||
-t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
|
||||
docker-clean:
|
||||
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
|
||||
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
|
||||
|
||||
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
|
||||
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
|
||||
docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
|
||||
|
||||
docker-refresh: docker-clean docker-compose
|
||||
|
||||
@@ -576,7 +598,7 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-container-group:
|
||||
MINIKUBE_CONTAINER_GROUP=true make docker-compose
|
||||
MINIKUBE_CONTAINER_GROUP=true $(MAKE) docker-compose
|
||||
|
||||
clean-elk:
|
||||
docker stop tools_kibana_1
|
||||
@@ -593,12 +615,36 @@ VERSION:
|
||||
@echo "awx: $(VERSION)"
|
||||
|
||||
PYTHON_VERSION:
|
||||
@echo "$(PYTHON)" | sed 's:python::'
|
||||
@echo "$(subst python,,$(PYTHON))"
|
||||
|
||||
.PHONY: version-for-buildyml
|
||||
version-for-buildyml:
|
||||
@echo $(firstword $(subst +, ,$(VERSION)))
|
||||
# version-for-buildyml prints a special version string for build.yml,
|
||||
# chopping off the sha after the '+' sign.
|
||||
# tools/ansible/build.yml was doing this: make print-VERSION | cut -d + -f -1
|
||||
# This does the same thing in native make without
|
||||
# the pipe or the extra processes, and now the playbook does `make version-for-buildyml`
|
||||
# Example:
|
||||
# 22.1.1.dev38+g523c0d9781 becomes 22.1.1.dev38
|
||||
|
||||
.PHONY: Dockerfile
|
||||
## Generate Dockerfile for awx image
|
||||
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE) \
|
||||
-e headless=$(HEADLESS)
|
||||
|
||||
## Build awx image for deployment on Kubernetes environment.
|
||||
awx-kube-build: Dockerfile
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
|
||||
|
||||
.PHONY: Dockerfile.kube-dev
|
||||
## Generate Docker.kube-dev for awx_kube_devel image
|
||||
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.kube-dev \
|
||||
@@ -613,13 +659,9 @@ awx-kube-dev-build: Dockerfile.kube-dev
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
|
||||
## Build awx image for deployment on Kubernetes environment.
|
||||
awx-kube-build: Dockerfile
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
|
||||
|
||||
kind-dev-load: awx-kube-dev-build
|
||||
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
@@ -627,10 +669,12 @@ awx-kube-build: Dockerfile
|
||||
## generate UI .pot file, an empty template of strings yet to be translated
|
||||
pot: $(UI_BUILD_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean
|
||||
|
||||
## generate UI .po files for each locale (will update translated strings for `en`)
|
||||
po: $(UI_BUILD_FLAG_FILE)
|
||||
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
|
||||
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean
|
||||
|
||||
## generate API django .pot .po
|
||||
messages:
|
||||
@@ -639,6 +683,7 @@ messages:
|
||||
fi; \
|
||||
$(PYTHON) manage.py makemessages -l en_us --keep-pot
|
||||
|
||||
.PHONY: print-%
|
||||
print-%:
|
||||
@echo $($*)
|
||||
|
||||
@@ -650,12 +695,12 @@ HELP_FILTER=.PHONY
|
||||
## Display help targets
|
||||
help:
|
||||
@printf "Available targets:\n"
|
||||
@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
|
||||
@$(MAKE) -s help/generate | grep -vE "\w($(HELP_FILTER))"
|
||||
|
||||
## Display help for all targets
|
||||
help/all:
|
||||
@printf "Available targets:\n"
|
||||
@make -s help/generate
|
||||
@$(MAKE) -s help/generate
|
||||
|
||||
## Generate help output from MAKEFILE_LIST
|
||||
help/generate:
|
||||
@@ -679,4 +724,4 @@ help/generate:
|
||||
|
||||
## Display help for ui-next targets
|
||||
help/ui-next:
|
||||
@make -s help MAKEFILE_LIST="awx/ui_next/Makefile"
|
||||
@$(MAKE) -s help MAKEFILE_LIST="awx/ui_next/Makefile"
|
||||
|
||||
@@ -1,5 +1,5 @@
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
[](https://libera.chat)
[](https://chat.ansible.im/#/welcome) [](https://forum.ansible.com)

<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
Code of Conduct
---------------

We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)

Get Involved
------------

We welcome your feedback and ideas. Here's how to reach us with feedback and questions:

- Join the `#ansible-awx` channel on irc.libera.chat
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
- Join the [Ansible Community Forum](https://forum.ansible.com)
@@ -52,39 +52,14 @@ try:
except ImportError:  # pragma: no cover
    MODE = 'production'

import hashlib

try:
    import django  # noqa: F401

    HAS_DJANGO = True
except ImportError:
    HAS_DJANGO = False
    pass
else:
    from django.db.backends.base import schema
    from django.db.models import indexes
    from django.db.backends.utils import names_digest
    from django.db import connection

if HAS_DJANGO is True:
    # See upgrade blocker note in requirements/README.md
    try:
        names_digest('foo', 'bar', 'baz', length=8)
    except ValueError:

        def names_digest(*args, length):
            """
            Generate a 32-bit digest of a set of arguments that can be used to shorten
            identifying names. Support for use in FIPS environments.
            """
            h = hashlib.md5(usedforsecurity=False)
            for arg in args:
                h.update(arg.encode())
            return h.hexdigest()[:length]

        schema.names_digest = names_digest
        indexes.names_digest = names_digest


def find_commands(management_dir):
    # Modified version of function from django/core/management/__init__.py.
@@ -347,7 +347,7 @@ class FieldLookupBackend(BaseFilterBackend):
                args.append(Q(**{k: v}))
            for role_name in role_filters:
                if not hasattr(queryset.model, 'accessible_pk_qs'):
                    raise ParseError(_('Cannot apply role_level filter to this list because its model ' 'does not use roles for access control.'))
                    raise ParseError(_('Cannot apply role_level filter to this list because its model does not use roles for access control.'))
                args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
            if or_filters:
                q = Q()
@@ -5,13 +5,11 @@
|
||||
import inspect
|
||||
import logging
|
||||
import time
|
||||
import uuid
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.contrib.auth import views as auth_views
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import FieldDoesNotExist
|
||||
from django.db import connection, transaction
|
||||
from django.db.models.fields.related import OneToOneRel
|
||||
@@ -35,7 +33,7 @@ from rest_framework.negotiation import DefaultContentNegotiation
|
||||
# AWX
|
||||
from awx.api.filters import FieldLookupBackend
|
||||
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.access import optimize_queryset
|
||||
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
@@ -171,7 +169,7 @@ class APIView(views.APIView):
|
||||
self.__init_request_error__ = exc
|
||||
except UnsupportedMediaType as exc:
|
||||
exc.detail = _(
|
||||
'You did not use correct Content-Type in your HTTP request. ' 'If you are using our REST API, the Content-Type must be application/json'
|
||||
'You did not use correct Content-Type in your HTTP request. If you are using our REST API, the Content-Type must be application/json'
|
||||
)
|
||||
self.__init_request_error__ = exc
|
||||
return drf_request
|
||||
@@ -234,7 +232,8 @@ class APIView(views.APIView):
|
||||
|
||||
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
|
||||
time_started = getattr(self, 'time_started', None)
|
||||
response['X-API-Product-Version'] = get_awx_version()
|
||||
if request.user.is_authenticated:
|
||||
response['X-API-Product-Version'] = get_awx_version()
|
||||
response['X-API-Product-Name'] = server_product_name()
|
||||
|
||||
response['X-API-Node'] = settings.CLUSTER_HOST_ID
|
||||
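With this change the `X-API-Product-Version` and `X-API-Product-Name` headers are only attached when the request is authenticated, while `X-API-Node` is still always set. A hypothetical client-side check (host and credentials are placeholders):

```python
import requests

base = "https://awx.example.org/api/v2"  # placeholder host

anon = requests.get(f"{base}/ping/")
auth = requests.get(f"{base}/me/", auth=("admin", "password"))

print(anon.headers.get("X-API-Product-Version"))  # expected: None after this change
print(auth.headers.get("X-API-Product-Version"))  # e.g. the AWX version string
print(auth.headers.get("X-API-Node"))             # always present
```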
@@ -364,12 +363,7 @@ class GenericAPIView(generics.GenericAPIView, APIView):
|
||||
return self.queryset._clone()
|
||||
elif self.model is not None:
|
||||
qs = self.model._default_manager
|
||||
if self.model in access_registry:
|
||||
access_class = access_registry[self.model]
|
||||
if access_class.select_related:
|
||||
qs = qs.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
qs = qs.prefetch_related(*access_class.prefetch_related)
|
||||
qs = optimize_queryset(qs)
|
||||
return qs
|
||||
else:
|
||||
return super(GenericAPIView, self).get_queryset()
|
||||
@@ -512,6 +506,9 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
# And optionally (user must have given access permission on parent object
|
||||
# to view sublist):
|
||||
# parent_access = 'read'
|
||||
# filter_read_permission sets whether or not to override the default intersection behavior
|
||||
# implemented here
|
||||
filter_read_permission = True
|
||||
|
||||
def get_description_context(self):
|
||||
d = super(SubListAPIView, self).get_description_context()
|
||||
@@ -526,12 +523,16 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model).distinct()
|
||||
sublist_qs = self.get_sublist_queryset(parent)
|
||||
return qs & sublist_qs
|
||||
if not self.filter_read_permission:
|
||||
return optimize_queryset(self.get_sublist_queryset(parent))
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
if hasattr(self, 'parent_key'):
|
||||
# This is vastly preferable for ReverseForeignKey relationships
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return qs.distinct() & self.get_sublist_queryset(parent).distinct()
|
||||
|
||||
def get_sublist_queryset(self, parent):
|
||||
return getattrd(parent, self.relationship).distinct()
|
||||
return getattrd(parent, self.relationship)
|
||||
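The reworked `get_queryset()` above gives sub-list views three paths: skip the read-permission intersection entirely when `filter_read_permission = False`, use a cheap `parent_key` filter for reverse foreign keys, or fall back to the distinct-queryset intersection. A hypothetical sub-list view opting out of the intersection (the models and relationship are examples, not a specific AWX view):

```python
from awx.api.generics import SubListAPIView
from awx.api import serializers
from awx.main import models


class ExampleProjectActivityStreamList(SubListAPIView):
    model = models.ActivityStream
    serializer_class = serializers.ActivityStreamSerializer
    parent_model = models.Project
    relationship = 'activitystream_set'
    # Being able to read the parent project is treated as sufficient, so the
    # relationship queryset is served directly (and run through optimize_queryset)
    # instead of being intersected with request.user.get_queryset(model).
    filter_read_permission = False
```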
|
||||
|
||||
class DestroyAPIView(generics.DestroyAPIView):
|
||||
@@ -580,15 +581,6 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
|
||||
d.update({'parent_key': getattr(self, 'parent_key', None)})
|
||||
return d
|
||||
|
||||
def get_queryset(self):
|
||||
if hasattr(self, 'parent_key'):
|
||||
# Prefer this filtering because ForeignKey allows us more assumptions
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return super(SubListCreateAPIView, self).get_queryset()
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
# If the object ID was not specified, it probably doesn't exist in the
|
||||
# DB yet. We want to see if we can create it. The URL may choose to
|
||||
@@ -967,16 +959,11 @@ class CopyAPIView(GenericAPIView):
|
||||
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
|
||||
new_obj.admin_role.members.add(request.user)
|
||||
if sub_objs:
|
||||
# store the copied object dict into cache, because it's
|
||||
# often too large for postgres' notification bus
|
||||
# (which has a default maximum message size of 8k)
|
||||
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
|
||||
cache.set(key, sub_objs, timeout=3600)
|
||||
permission_check_func = None
|
||||
if hasattr(type(self), 'deep_copy_permission_check_func'):
|
||||
permission_check_func = (type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func')
|
||||
trigger_delayed_deep_copy(
|
||||
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, key, permission_check_func=permission_check_func
|
||||
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, permission_check_func=permission_check_func
|
||||
)
|
||||
serializer = self._get_copy_return_serializer(new_obj)
|
||||
headers = {'Location': new_obj.get_absolute_url(request=request)}
|
||||
|
||||
@@ -71,7 +71,7 @@ class Metadata(metadata.SimpleMetadata):
|
||||
'url': _('URL for this {}.'),
|
||||
'related': _('Data structure with URLs of related resources.'),
|
||||
'summary_fields': _(
|
||||
'Data structure with name/description for related resources. ' 'The output for some objects may be limited for performance reasons.'
|
||||
'Data structure with name/description for related resources. The output for some objects may be limited for performance reasons.'
|
||||
),
|
||||
'created': _('Timestamp when this {} was created.'),
|
||||
'modified': _('Timestamp when this {} was last modified.'),
|
||||
|
||||
@@ -25,6 +25,7 @@ __all__ = [
|
||||
'UserPermission',
|
||||
'IsSystemAdminOrAuditor',
|
||||
'WorkflowApprovalPermission',
|
||||
'AnalyticsPermission',
|
||||
]
|
||||
|
||||
|
||||
@@ -250,3 +251,16 @@ class IsSystemAdminOrAuditor(permissions.BasePermission):
|
||||
class WebhookKeyPermission(permissions.BasePermission):
|
||||
def has_object_permission(self, request, view, obj):
|
||||
return request.user.can_access(view.model, 'admin', obj, request.data)
|
||||
|
||||
|
||||
class AnalyticsPermission(permissions.BasePermission):
|
||||
"""
|
||||
Allows GET/POST/OPTIONS to system admins and system auditors.
|
||||
"""
|
||||
|
||||
def has_permission(self, request, view):
|
||||
if not (request.user and request.user.is_authenticated):
|
||||
return False
|
||||
if request.method in ["GET", "POST", "OPTIONS"]:
|
||||
return request.user.is_superuser or request.user.is_system_auditor
|
||||
return request.user.is_superuser
|
||||
|
||||
@@ -220,7 +220,7 @@ class CopySerializer(serializers.Serializer):
|
||||
view = self.context.get('view', None)
|
||||
obj = view.get_object()
|
||||
if name == obj.name:
|
||||
raise serializers.ValidationError(_('The original object is already named {}, a copy from' ' it cannot have the same name.'.format(name)))
|
||||
raise serializers.ValidationError(_('The original object is already named {}, a copy from it cannot have the same name.'.format(name)))
|
||||
return attrs
|
||||
|
||||
|
||||
@@ -760,7 +760,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||
class UnifiedJobSerializer(BaseSerializer):
|
||||
show_capabilities = ['start', 'delete']
|
||||
event_processing_finished = serializers.BooleanField(
|
||||
help_text=_('Indicates whether all of the events generated by this ' 'unified job have been saved to the database.'), read_only=True
|
||||
help_text=_('Indicates whether all of the events generated by this unified job have been saved to the database.'), read_only=True
|
||||
)
|
||||
|
||||
class Meta:
|
||||
@@ -954,7 +954,7 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
|
||||
|
||||
|
||||
class UserSerializer(BaseSerializer):
|
||||
password = serializers.CharField(required=False, default='', write_only=True, help_text=_('Write-only field used to change the password.'))
|
||||
password = serializers.CharField(required=False, default='', help_text=_('Field used to change the password.'))
|
||||
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
|
||||
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
|
||||
is_system_auditor = serializers.BooleanField(default=False)
|
||||
@@ -981,7 +981,12 @@ class UserSerializer(BaseSerializer):
|
||||
|
||||
def to_representation(self, obj):
|
||||
ret = super(UserSerializer, self).to_representation(obj)
|
||||
ret.pop('password', None)
|
||||
if self.get_external_account(obj):
|
||||
# If this is an external account it shouldn't have a password field
|
||||
ret.pop('password', None)
|
||||
else:
|
||||
# If its an internal account lets assume there is a password and return $encrypted$ to the user
|
||||
ret['password'] = '$encrypted$'
|
||||
if obj and type(self) is UserSerializer:
|
||||
ret['auth'] = obj.social_auth.values('provider', 'uid')
|
||||
return ret
|
||||
@@ -995,13 +1000,31 @@ class UserSerializer(BaseSerializer):
|
||||
django_validate_password(value)
|
||||
if not self.instance and value in (None, ''):
|
||||
raise serializers.ValidationError(_('Password required for new User.'))
|
||||
|
||||
# Check if a password is too long
|
||||
password_max_length = User._meta.get_field('password').max_length
|
||||
if len(value) > password_max_length:
|
||||
raise serializers.ValidationError(_('Password max length is {}'.format(password_max_length)))
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH', 0) and len(value) < getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'):
|
||||
raise serializers.ValidationError(_('Password must be at least {} characters long.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'))))
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS', 0) and sum(c.isdigit() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'):
|
||||
raise serializers.ValidationError(_('Password must contain at least {} digits.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'))))
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER', 0) and sum(c.isupper() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER'):
|
||||
raise serializers.ValidationError(
|
||||
_('Password must contain at least {} uppercase characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER')))
|
||||
)
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL', 0) and sum(not c.isalnum() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL'):
|
||||
raise serializers.ValidationError(
|
||||
_('Password must contain at least {} special characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL')))
|
||||
)
|
||||
|
||||
return value
|
||||
|
||||
def _update_password(self, obj, new_password):
|
||||
# For now we're not raising an error, just not saving password for
|
||||
# users managed by LDAP who already have an unusable password set.
|
||||
# get_external_account() returns something like 'ldap' or 'enterprise', or None if the user isn't external. We only want to allow a password update when it returns None.
|
||||
if new_password and not self.get_external_account(obj):
|
||||
if new_password and new_password != '$encrypted$' and not self.get_external_account(obj):
|
||||
obj.set_password(new_password)
|
||||
obj.save(update_fields=['password'])
|
||||
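The added checks in `validate_password()` consult optional password-policy settings and are skipped when a setting is absent or zero. A sketch of the settings involved (only the setting names come from the change above; the values are examples):

```python
# Example local password policy; each rule only applies when its setting is non-zero.
LOCAL_PASSWORD_MIN_LENGTH = 12   # minimum overall length
LOCAL_PASSWORD_MIN_DIGITS = 1    # minimum number of digit characters
LOCAL_PASSWORD_MIN_UPPER = 1     # minimum number of uppercase characters
LOCAL_PASSWORD_MIN_SPECIAL = 1   # minimum number of non-alphanumeric characters
```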
|
||||
@@ -1556,7 +1579,7 @@ class ProjectPlaybooksSerializer(ProjectSerializer):
|
||||
|
||||
|
||||
class ProjectInventoriesSerializer(ProjectSerializer):
|
||||
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, ' 'not comprehensive.'))
|
||||
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, not comprehensive.'))
|
||||
|
||||
class Meta:
|
||||
model = Project
|
||||
@@ -1606,8 +1629,8 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
|
||||
fields = ('*', 'host_status_counts', 'playbook_counts')
|
||||
|
||||
def get_playbook_counts(self, obj):
|
||||
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
|
||||
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
|
||||
task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
|
||||
play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()
|
||||
|
||||
data = {'play_count': play_count, 'task_count': task_count}
|
||||
|
||||
@@ -2167,7 +2190,7 @@ class BulkHostCreateSerializer(serializers.Serializer):
|
||||
host_data = []
|
||||
for r in result:
|
||||
item = {k: getattr(r, k) for k in return_keys}
|
||||
if not settings.IS_TESTING_MODE:
|
||||
if settings.DATABASES and ('sqlite3' not in settings.DATABASES.get('default', {}).get('ENGINE')):
|
||||
# sqlite acts different with bulk_create -- it doesn't return the id of the objects
|
||||
# to get it, you have to do an additional query, which is not useful for our tests
|
||||
item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
|
||||
@@ -2882,7 +2905,7 @@ class CredentialSerializer(BaseSerializer):
|
||||
):
|
||||
if getattr(self.instance, related_objects).count() > 0:
|
||||
raise ValidationError(
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality' ' of the resources using it.')
|
||||
_('You cannot change the credential type of the credential, as it may break the functionality of the resources using it.')
|
||||
)
|
||||
|
||||
return credential_type
|
||||
@@ -2902,7 +2925,7 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
default=None,
|
||||
write_only=True,
|
||||
allow_null=True,
|
||||
help_text=_('Write-only field used to add user to owner role. If provided, ' 'do not give either team or organization. Only valid for creation.'),
|
||||
help_text=_('Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.'),
|
||||
)
|
||||
team = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Team.objects.all(),
|
||||
@@ -2910,14 +2933,14 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
default=None,
|
||||
write_only=True,
|
||||
allow_null=True,
|
||||
help_text=_('Write-only field used to add team to owner role. If provided, ' 'do not give either user or organization. Only valid for creation.'),
|
||||
help_text=_('Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.'),
|
||||
)
|
||||
organization = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Organization.objects.all(),
|
||||
required=False,
|
||||
default=None,
|
||||
allow_null=True,
|
||||
help_text=_('Inherit permissions from organization roles. If provided on creation, ' 'do not give either user or team.'),
|
||||
help_text=_('Inherit permissions from organization roles. If provided on creation, do not give either user or team.'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
@@ -2939,7 +2962,7 @@ class CredentialSerializerCreate(CredentialSerializer):
|
||||
if len(owner_fields) > 1:
|
||||
received = ", ".join(sorted(owner_fields))
|
||||
raise serializers.ValidationError(
|
||||
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, " "received {} fields.".format(received))}
|
||||
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, received {} fields.".format(received))}
|
||||
)
|
||||
|
||||
if attrs.get('team'):
|
||||
@@ -3210,7 +3233,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
if get_field_from_model_or_attrs('host_config_key') and not inventory:
|
||||
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
|
||||
|
||||
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
|
||||
prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
|
||||
if project is None:
|
||||
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
|
||||
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
|
||||
@@ -3599,7 +3622,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
|
||||
try:
|
||||
return obj.result_stdout
|
||||
except StdoutMaxBytesExceeded as e:
|
||||
return _("Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes.").format(
|
||||
return _("Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes.").format(
|
||||
text_size=e.total, supported_size=e.supported
|
||||
)
|
||||
|
||||
@@ -4513,7 +4536,7 @@ class JobLaunchSerializer(BaseSerializer):
|
||||
if cred.unique_hash() in provided_mapping.keys():
|
||||
continue # User replaced credential with new of same type
|
||||
errors.setdefault('credentials', []).append(
|
||||
_('Removing {} credential at launch time without replacement is not supported. ' 'Provided list lacked credential(s): {}.').format(
|
||||
_('Removing {} credential at launch time without replacement is not supported. Provided list lacked credential(s): {}.').format(
|
||||
cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])
|
||||
)
|
||||
)
|
||||
@@ -4663,12 +4686,11 @@ class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
|
||||
# many-to-many fields
|
||||
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
# TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
|
||||
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobNode
|
||||
fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
|
||||
fields = ('*', 'credentials', 'labels', 'instance_groups') # m2m fields are not canonical for WJ nodes
|
||||
|
||||
def validate(self, attrs):
|
||||
return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
|
||||
@@ -4728,21 +4750,21 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
|
||||
requested_use_credentials = set()
|
||||
requested_use_labels = set()
|
||||
# requested_use_instance_groups = set()
|
||||
requested_use_instance_groups = set()
|
||||
for job in attrs['jobs']:
|
||||
for cred in job.get('credentials', []):
|
||||
requested_use_credentials.add(cred)
|
||||
for label in job.get('labels', []):
|
||||
requested_use_labels.add(label)
|
||||
# for instance_group in job.get('instance_groups', []):
|
||||
# requested_use_instance_groups.add(instance_group)
|
||||
for instance_group in job.get('instance_groups', []):
|
||||
requested_use_instance_groups.add(instance_group)
|
||||
|
||||
key_to_obj_map = {
|
||||
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
|
||||
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
|
||||
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
|
||||
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
|
||||
# "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
|
||||
"instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
|
||||
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
|
||||
}
|
||||
|
||||
@@ -4769,7 +4791,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
|
||||
self.check_list_permission(Credential, requested_use_credentials, 'use_role')
|
||||
self.check_list_permission(Label, requested_use_labels)
|
||||
# self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
|
||||
self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
|
||||
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
|
||||
|
||||
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
|
||||
@@ -4816,7 +4838,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
node_m2m_object_types_to_through_model = {
|
||||
'credentials': WorkflowJobNode.credentials.through,
|
||||
'labels': WorkflowJobNode.labels.through,
|
||||
# 'instance_groups': WorkflowJobNode.instance_groups.through,
|
||||
'instance_groups': WorkflowJobNode.instance_groups.through,
|
||||
}
|
||||
node_deferred_attr_names = (
|
||||
'limit',
|
||||
@@ -4869,9 +4891,9 @@ class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
|
||||
for label in node_m2m_objects[node_identifier][field_name]:
|
||||
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
# if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
|
||||
# for instance_group in node_m2m_objects[node_identifier][obj_type]:
|
||||
# through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
if field_name in node_m2m_objects[node_identifier] and field_name == 'instance_groups':
|
||||
for instance_group in node_m2m_objects[node_identifier][field_name]:
|
||||
through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
if through_model_objects:
|
||||
through_model.objects.bulk_create(through_model_objects)
|
||||
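With `instance_groups` now accepted on bulk job nodes, a bulk launch request can pin individual jobs to an instance group alongside credentials and labels. A hypothetical request body (all IDs are placeholders):

```python
# Illustrative body for the bulk job launch endpoint; field names come from the
# serializers above, and every ID below is a placeholder.
bulk_launch = {
    "name": "Nightly smoke tests",
    "jobs": [
        {"unified_job_template": 7, "instance_groups": [2], "labels": [5]},
        {"unified_job_template": 9, "credentials": [3]},
    ],
}
```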
|
||||
@@ -4996,7 +5018,7 @@ class NotificationTemplateSerializer(BaseSerializer):
|
||||
for subevent in event_messages:
|
||||
if subevent not in ('running', 'approved', 'timed_out', 'denied'):
|
||||
error_list.append(
|
||||
_("Workflow Approval event '{}' invalid, must be one of " "'running', 'approved', 'timed_out', or 'denied'").format(subevent)
|
||||
_("Workflow Approval event '{}' invalid, must be one of 'running', 'approved', 'timed_out', or 'denied'").format(subevent)
|
||||
)
|
||||
continue
|
||||
subevent_messages = event_messages[subevent]
|
||||
@@ -5334,10 +5356,16 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
|
||||
class InstanceLinkSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = InstanceLink
|
||||
fields = ('source', 'target', 'link_state')
|
||||
fields = ('id', 'url', 'related', 'source', 'target', 'link_state')
|
||||
|
||||
source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
|
||||
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
|
||||
source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
|
||||
target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InstanceLinkSerializer, self).get_related(obj)
|
||||
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
|
||||
res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
|
||||
return res
|
||||
|
||||
|
||||
class InstanceNodeSerializer(BaseSerializer):
|
||||
@@ -5354,6 +5382,7 @@ class InstanceSerializer(BaseSerializer):
|
||||
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
|
||||
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
|
||||
health_check_pending = serializers.SerializerMethodField()
|
||||
peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())
|
||||
|
||||
class Meta:
|
||||
model = Instance
|
||||
@@ -5390,6 +5419,8 @@ class InstanceSerializer(BaseSerializer):
|
||||
'node_state',
|
||||
'ip_address',
|
||||
'listener_port',
|
||||
'peers',
|
||||
'peers_from_control_nodes',
|
||||
)
|
||||
extra_kwargs = {
|
||||
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
|
||||
@@ -5413,7 +5444,7 @@ class InstanceSerializer(BaseSerializer):
|
||||
res = super(InstanceSerializer, self).get_related(obj)
|
||||
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
|
||||
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
|
||||
if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
|
||||
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]:
|
||||
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
|
||||
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
|
||||
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
|
||||
@@ -5442,22 +5473,57 @@ class InstanceSerializer(BaseSerializer):
|
||||
def get_health_check_pending(self, obj):
|
||||
return obj.health_check_pending
|
||||
|
||||
def validate(self, data):
|
||||
if self.instance:
|
||||
if self.instance.node_type == Instance.Types.HOP:
|
||||
raise serializers.ValidationError("Hop node instances may not be changed.")
|
||||
else:
|
||||
if not settings.IS_K8S:
|
||||
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
|
||||
return data
|
||||
def validate(self, attrs):
|
||||
def get_field_from_model_or_attrs(fd):
|
||||
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
|
||||
|
||||
def check_peers_changed():
|
||||
'''
|
||||
return True if
|
||||
- 'peers' in attrs
|
||||
- the instance's existing peers differ from the peers given in attrs
|
||||
'''
|
||||
return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
|
||||
|
||||
if not self.instance and not settings.IS_K8S:
|
||||
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))
|
||||
|
||||
node_type = get_field_from_model_or_attrs("node_type")
|
||||
peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
|
||||
listener_port = get_field_from_model_or_attrs("listener_port")
|
||||
peers = attrs.get('peers', [])
|
||||
|
||||
if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
|
||||
raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))
|
||||
|
||||
if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
|
||||
if check_peers_changed():
|
||||
raise serializers.ValidationError(
|
||||
_("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
|
||||
)
|
||||
|
||||
if not listener_port and peers_from_control_nodes:
|
||||
raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))
|
||||
|
||||
if not listener_port and self.instance and self.instance.peers_from.exists():
|
||||
raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))
|
||||
|
||||
for peer in peers:
|
||||
if peer.listener_port is None:
|
||||
raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")
|
||||
|
||||
if not settings.IS_K8S:
|
||||
if check_peers_changed():
|
||||
raise serializers.ValidationError(_("Cannot change peers."))
|
||||
|
||||
return super().validate(attrs)
|
||||
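Taken together, the rewritten `validate()` allows peering only on hop and execution nodes, requires a `listener_port` on anything that is peered to (including via `peers_from_control_nodes`), and rejects peer changes outside Kubernetes/OpenShift deployments. A hypothetical request body that would pass these checks when registering a hop node (hostname and port are placeholders):

```python
# Illustrative payload for creating a hop node under the new validation rules.
new_hop_node = {
    "hostname": "hop1.example.org",    # placeholder
    "node_type": "hop",                # only hop/execution nodes may be created
    "node_state": "installed",         # only 'installed' is accepted on create
    "listener_port": 27199,            # required since control nodes will peer to it
    "peers_from_control_nodes": True,  # only valid on K8S/OpenShift deployments
}
```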
|
||||
def validate_node_type(self, value):
|
||||
if not self.instance:
|
||||
if value not in (Instance.Types.EXECUTION,):
|
||||
raise serializers.ValidationError("Can only create execution nodes.")
|
||||
else:
|
||||
if self.instance.node_type != value:
|
||||
raise serializers.ValidationError("Cannot change node type.")
|
||||
if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]:
|
||||
raise serializers.ValidationError(_("Can only create execution or hop nodes."))
|
||||
|
||||
if self.instance and self.instance.node_type != value:
|
||||
raise serializers.ValidationError(_("Cannot change node type."))
|
||||
|
||||
return value
|
||||
|
||||
@@ -5465,30 +5531,41 @@ class InstanceSerializer(BaseSerializer):
|
||||
if self.instance:
|
||||
if value != self.instance.node_state:
|
||||
if not settings.IS_K8S:
|
||||
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
|
||||
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
|
||||
if value != Instance.States.DEPROVISIONING:
|
||||
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
|
||||
if self.instance.node_type not in (Instance.Types.EXECUTION,):
|
||||
raise serializers.ValidationError("Can only deprovision execution nodes.")
|
||||
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
|
||||
if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
|
||||
raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
|
||||
else:
|
||||
if value and value != Instance.States.INSTALLED:
|
||||
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
|
||||
raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))
|
||||
|
||||
return value
|
||||
|
||||
def validate_hostname(self, value):
|
||||
"""
|
||||
- Hostname cannot be "localhost" - but can be something like localhost.domain
|
||||
- Cannot change the hostname of an-already instantiated & initialized Instance object
|
||||
Cannot change the hostname
|
||||
"""
|
||||
if self.instance and self.instance.hostname != value:
|
||||
raise serializers.ValidationError("Cannot change hostname.")
|
||||
raise serializers.ValidationError(_("Cannot change hostname."))
|
||||
|
||||
return value
|
||||
|
||||
def validate_listener_port(self, value):
|
||||
if self.instance and self.instance.listener_port != value:
|
||||
raise serializers.ValidationError("Cannot change listener port.")
|
||||
"""
|
||||
Cannot change listener port, unless going from none to integer, and vice versa
|
||||
"""
|
||||
if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
|
||||
raise serializers.ValidationError(_("Cannot change listener port."))
|
||||
|
||||
return value
|
||||
|
||||
def validate_peers_from_control_nodes(self, value):
|
||||
"""
|
||||
Can only enable for K8S based deployments
|
||||
"""
|
||||
if value and not settings.IS_K8S:
|
||||
raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
|
||||
|
||||
return value
|
||||
|
||||
@@ -5496,7 +5573,19 @@ class InstanceSerializer(BaseSerializer):
|
||||
class InstanceHealthCheckSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = Instance
|
||||
read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
|
||||
read_only_fields = (
|
||||
'uuid',
|
||||
'hostname',
|
||||
'ip_address',
|
||||
'version',
|
||||
'last_health_check',
|
||||
'errors',
|
||||
'cpu',
|
||||
'memory',
|
||||
'cpu_capacity',
|
||||
'mem_capacity',
|
||||
'capacity',
|
||||
)
|
||||
fields = read_only_fields
|
||||
|
||||
|
||||
@@ -5536,7 +5625,7 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
instances = serializers.SerializerMethodField()
|
||||
is_container_group = serializers.BooleanField(
|
||||
required=False,
|
||||
help_text=_('Indicates whether instances in this group are containerized.' 'Containerized groups have a designated Openshift or Kubernetes cluster.'),
|
||||
help_text=_('Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster.'),
|
||||
)
|
||||
# NOTE: help_text is duplicated from field definitions, no obvious way of
|
||||
# both defining field details here and also getting the field's help_text
|
||||
@@ -5547,7 +5636,7 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
required=False,
|
||||
initial=0,
|
||||
label=_('Policy Instance Percentage'),
|
||||
help_text=_("Minimum percentage of all instances that will be automatically assigned to " "this group when new instances come online."),
|
||||
help_text=_("Minimum percentage of all instances that will be automatically assigned to this group when new instances come online."),
|
||||
)
|
||||
policy_instance_minimum = serializers.IntegerField(
|
||||
default=0,
|
||||
@@ -5555,7 +5644,7 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
required=False,
|
||||
initial=0,
|
||||
label=_('Policy Instance Minimum'),
|
||||
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
|
||||
help_text=_("Static minimum number of Instances that will be automatically assign to this group when new instances come online."),
|
||||
)
|
||||
max_concurrent_jobs = serializers.IntegerField(
|
||||
default=0,
|
||||
|
||||
@@ -1,16 +1,10 @@
|
||||
import json
|
||||
import warnings
|
||||
|
||||
from coreapi.document import Object, Link
|
||||
|
||||
from rest_framework import exceptions
|
||||
from rest_framework.permissions import AllowAny
|
||||
from rest_framework.renderers import CoreJSONRenderer
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.schemas import SchemaGenerator, AutoSchema as DRFAuthSchema
|
||||
from rest_framework.views import APIView
|
||||
|
||||
from rest_framework_swagger import renderers
|
||||
from drf_yasg.views import get_schema_view
|
||||
from drf_yasg import openapi
|
||||
|
||||
|
||||
class SuperUserSchemaGenerator(SchemaGenerator):
|
||||
@@ -55,43 +49,15 @@ class AutoSchema(DRFAuthSchema):
|
||||
return description
|
||||
|
||||
|
||||
class SwaggerSchemaView(APIView):
|
||||
_ignore_model_permissions = True
|
||||
exclude_from_schema = True
|
||||
permission_classes = [AllowAny]
|
||||
renderer_classes = [CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]
|
||||
|
||||
def get(self, request):
|
||||
generator = SuperUserSchemaGenerator(title='Ansible Automation Platform controller API', patterns=None, urlconf=None)
|
||||
schema = generator.get_schema(request=request)
|
||||
# python core-api doesn't support the deprecation yet, so track it
|
||||
# ourselves and return it in a response header
|
||||
_deprecated = []
|
||||
|
||||
# By default, DRF OpenAPI serialization places all endpoints in
|
||||
# a single node based on their root path (/api). Instead, we want to
|
||||
# group them by topic/tag so that they're categorized in the rendered
|
||||
# output
|
||||
document = schema._data.pop('api')
|
||||
for path, node in document.items():
|
||||
if isinstance(node, Object):
|
||||
for action in node.values():
|
||||
topic = getattr(action, 'topic', None)
|
||||
if topic:
|
||||
schema._data.setdefault(topic, Object())
|
||||
schema._data[topic]._data[path] = node
|
||||
|
||||
if isinstance(action, Object):
|
||||
for link in action.links.values():
|
||||
if link.deprecated:
|
||||
_deprecated.append(link.url)
|
||||
elif isinstance(node, Link):
|
||||
topic = getattr(node, 'topic', None)
|
||||
if topic:
|
||||
schema._data.setdefault(topic, Object())
|
||||
schema._data[topic]._data[path] = node
|
||||
|
||||
if not schema:
|
||||
raise exceptions.ValidationError('The schema generator did not return a schema Document')
|
||||
|
||||
return Response(schema, headers={'X-Deprecated-Paths': json.dumps(_deprecated)})
|
||||
schema_view = get_schema_view(
|
||||
openapi.Info(
|
||||
title="Snippets API",
|
||||
default_version='v1',
|
||||
description="Test description",
|
||||
terms_of_service="https://www.google.com/policies/terms/",
|
||||
contact=openapi.Contact(email="contact@snippets.local"),
|
||||
license=openapi.License(name="BSD License"),
|
||||
),
|
||||
public=True,
|
||||
permission_classes=[AllowAny],
|
||||
)
|
||||
|
||||
@@ -3,21 +3,35 @@ receptor_group: awx
|
||||
receptor_verify: true
|
||||
receptor_tls: true
|
||||
receptor_mintls13: false
|
||||
{% if instance.node_type == "execution" %}
|
||||
receptor_work_commands:
|
||||
ansible-runner:
|
||||
command: ansible-runner
|
||||
params: worker
|
||||
allowruntimeparams: true
|
||||
verifysignature: true
|
||||
custom_worksign_public_keyfile: receptor/work-public-key.pem
|
||||
additional_python_packages:
|
||||
- ansible-runner
|
||||
{% endif %}
|
||||
custom_worksign_public_keyfile: receptor/work_public_key.pem
|
||||
custom_tls_certfile: receptor/tls/receptor.crt
|
||||
custom_tls_keyfile: receptor/tls/receptor.key
|
||||
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
|
||||
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
|
||||
receptor_protocol: 'tcp'
|
||||
{% if instance.listener_port %}
|
||||
receptor_listener: true
|
||||
receptor_port: {{ instance.listener_port }}
|
||||
receptor_dependencies:
|
||||
- python39-pip
|
||||
{% else %}
|
||||
receptor_listener: false
|
||||
{% endif %}
|
||||
{% if peers %}
|
||||
receptor_peers:
|
||||
{% for peer in peers %}
|
||||
- host: {{ peer.host }}
|
||||
port: {{ peer.port }}
|
||||
protocol: tcp
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% verbatim %}
|
||||
podman_user: "{{ receptor_user }}"
|
||||
podman_group: "{{ receptor_group }}"
|
||||
|
||||
@@ -1,20 +1,16 @@
|
||||
{% verbatim %}
|
||||
---
|
||||
- hosts: all
|
||||
become: yes
|
||||
tasks:
|
||||
- name: Create the receptor user
|
||||
user:
|
||||
{% verbatim %}
|
||||
name: "{{ receptor_user }}"
|
||||
{% endverbatim %}
|
||||
shell: /bin/bash
|
||||
- name: Enable Copr repo for Receptor
|
||||
command: dnf copr enable ansible-awx/receptor -y
|
||||
{% if instance.node_type == "execution" %}
|
||||
- import_role:
|
||||
name: ansible.receptor.podman
|
||||
{% endif %}
|
||||
- import_role:
|
||||
name: ansible.receptor.setup
|
||||
- name: Install ansible-runner
|
||||
pip:
|
||||
name: ansible-runner
|
||||
executable: pip3.9
|
||||
{% endverbatim %}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
collections:
|
||||
- name: ansible.receptor
|
||||
version: 1.1.0
|
||||
version: 2.0.2
|
||||
|
||||
awx/api/urls/analytics.py (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
# Copyright (c) 2017 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from django.urls import re_path
|
||||
|
||||
import awx.api.views.analytics as analytics
|
||||
|
||||
|
||||
urls = [
|
||||
re_path(r'^$', analytics.AnalyticsRootView.as_view(), name='analytics_root_view'),
|
||||
re_path(r'^authorized/$', analytics.AnalyticsAuthorizedView.as_view(), name='analytics_authorized'),
|
||||
re_path(r'^reports/$', analytics.AnalyticsReportsList.as_view(), name='analytics_reports_list'),
|
||||
re_path(r'^report/(?P<slug>[\w-]+)/$', analytics.AnalyticsReportDetail.as_view(), name='analytics_report_detail'),
|
||||
re_path(r'^report_options/$', analytics.AnalyticsReportOptionsList.as_view(), name='analytics_report_options_list'),
|
||||
re_path(r'^adoption_rate/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate'),
|
||||
re_path(r'^adoption_rate_options/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate_options'),
|
||||
re_path(r'^event_explorer/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer'),
|
||||
re_path(r'^event_explorer_options/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer_options'),
|
||||
re_path(r'^host_explorer/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer'),
|
||||
re_path(r'^host_explorer_options/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer_options'),
|
||||
re_path(r'^job_explorer/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer'),
|
||||
re_path(r'^job_explorer_options/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer_options'),
|
||||
re_path(r'^probe_templates/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_explorer'),
|
||||
re_path(r'^probe_templates_options/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_options'),
|
||||
re_path(r'^probe_template_for_hosts/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_explorer'),
|
||||
re_path(r'^probe_template_for_hosts_options/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_options'),
|
||||
re_path(r'^roi_templates/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_explorer'),
|
||||
re_path(r'^roi_templates_options/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_options'),
|
||||
]
|
||||
|
||||
__all__ = ['urls']
|
||||
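These routes are mounted under the analytics prefix of the v2 API (see the `AWX_ANALYTICS_API_PREFIX` wiring further down). A minimal client-side sketch listing what the new root view exposes (URL and credentials are placeholders; the caller must be a system admin or auditor per `AnalyticsPermission`):

```python
import requests

base_url = "https://awx.example.org/api/v2/analytics/"  # placeholder host

resp = requests.get(base_url, auth=("admin", "password"))
resp.raise_for_status()

# The root view returns a simple name -> URL index of the analytics endpoints.
for name, url in resp.json().items():
    print(f"{name}: {url}")
```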
@@ -30,7 +30,7 @@ from awx.api.views import (
|
||||
OAuth2TokenList,
|
||||
ApplicationOAuth2TokenList,
|
||||
OAuth2ApplicationDetail,
|
||||
# HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
|
||||
HostMetricSummaryMonthlyList,
|
||||
)
|
||||
|
||||
from awx.api.views.bulk import (
|
||||
@@ -42,6 +42,7 @@ from awx.api.views.bulk import (
|
||||
from awx.api.views.mesh_visualizer import MeshVisualizer
|
||||
|
||||
from awx.api.views.metrics import MetricsView
|
||||
from awx.api.views.analytics import AWX_ANALYTICS_API_PREFIX
|
||||
|
||||
from .organization import urls as organization_urls
|
||||
from .user import urls as user_urls
|
||||
@@ -82,7 +83,7 @@ from .oauth2 import urls as oauth2_urls
|
||||
from .oauth2_root import urls as oauth2_root_urls
|
||||
from .workflow_approval_template import urls as workflow_approval_template_urls
|
||||
from .workflow_approval import urls as workflow_approval_urls
|
||||
|
||||
from .analytics import urls as analytics_urls
|
||||
|
||||
v2_urls = [
|
||||
re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
|
||||
@@ -122,8 +123,7 @@ v2_urls = [
|
||||
re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
|
||||
re_path(r'^hosts/', include(host_urls)),
|
||||
re_path(r'^host_metrics/', include(host_metric_urls)),
|
||||
# It will be enabled in future version of the AWX
|
||||
# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
|
||||
re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
|
||||
re_path(r'^groups/', include(group_urls)),
|
||||
re_path(r'^inventory_sources/', include(inventory_source_urls)),
|
||||
re_path(r'^inventory_updates/', include(inventory_update_urls)),
|
||||
@@ -147,6 +147,7 @@ v2_urls = [
|
||||
re_path(r'^unified_job_templates/$', UnifiedJobTemplateList.as_view(), name='unified_job_template_list'),
|
||||
re_path(r'^unified_jobs/$', UnifiedJobList.as_view(), name='unified_job_list'),
|
||||
re_path(r'^activity_stream/', include(activity_stream_urls)),
|
||||
re_path(rf'^{AWX_ANALYTICS_API_PREFIX}/', include(analytics_urls)),
|
||||
re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
|
||||
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
|
||||
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
|
||||
@@ -165,10 +166,13 @@ urlpatterns = [
|
||||
]
|
||||
if MODE == 'development':
|
||||
# Only include these if we are in the development environment
|
||||
from awx.api.swagger import SwaggerSchemaView
|
||||
|
||||
urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
|
||||
from awx.api.swagger import schema_view
|
||||
|
||||
from awx.api.urls.debug import urls as debug_urls
|
||||
|
||||
urlpatterns += [re_path(r'^debug/', include(debug_urls))]
|
||||
urlpatterns += [
|
||||
re_path(r'^swagger(?P<format>\.json|\.yaml)/$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
|
||||
re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
|
||||
re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
|
||||
]
|
||||
|
||||
@@ -62,7 +62,7 @@ from wsgiref.util import FileWrapper
|
||||
|
||||
# AWX
|
||||
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
|
||||
from awx.main.access import get_user_queryset, HostAccess
|
||||
from awx.main.access import get_user_queryset
|
||||
from awx.api.generics import (
|
||||
APIView,
|
||||
BaseUsersList,
|
||||
@@ -128,6 +128,10 @@ logger = logging.getLogger('awx.api.views')
|
||||
|
||||
|
||||
def unpartitioned_event_horizon(cls):
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
|
||||
if not cursor.fetchone():
|
||||
return 0
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
|
||||
@@ -341,17 +345,18 @@ class InstanceDetail(RetrieveUpdateAPIView):
|
||||
|
||||
def update_raw_data(self, data):
|
||||
# these fields are only valid on creation of an instance, so they are unwanted on the detail view
|
||||
data.pop('listener_port', None)
|
||||
data.pop('node_type', None)
|
||||
data.pop('hostname', None)
|
||||
data.pop('ip_address', None)
|
||||
return super(InstanceDetail, self).update_raw_data(data)
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
r = super(InstanceDetail, self).update(request, *args, **kwargs)
|
||||
if status.is_success(r.status_code):
|
||||
obj = self.get_object()
|
||||
obj.set_capacity_value()
|
||||
obj.save(update_fields=['capacity'])
|
||||
capacity_changed = obj.set_capacity_value()
|
||||
if capacity_changed:
|
||||
obj.save(update_fields=['capacity'])
|
||||
r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
|
||||
return r
|
||||
|
||||
@@ -565,7 +570,7 @@ class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
|
||||
if self.relationship not in ask_mapping:
|
||||
return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
|
||||
elif sub.passwords_needed:
|
||||
return {"msg": _("Credential that requires user input on launch " "cannot be used in saved launch configuration.")}
|
||||
return {"msg": _("Credential that requires user input on launch cannot be used in saved launch configuration.")}
|
||||
|
||||
ask_field_name = ask_mapping[self.relationship]
|
||||
|
||||
@@ -794,13 +799,7 @@ class ExecutionEnvironmentActivityStreamList(SubListAPIView):
|
||||
parent_model = models.ExecutionEnvironment
|
||||
relationship = 'activitystream_set'
|
||||
search_fields = ('changes',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(execution_environment=parent)
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class ProjectList(ListCreateAPIView):
|
||||
@@ -1570,16 +1569,15 @@ class HostMetricDetail(RetrieveDestroyAPIView):
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
# It will be enabled in future version of the AWX
|
||||
# class HostMetricSummaryMonthlyList(ListAPIView):
|
||||
# name = _("Host Metrics Summary Monthly")
|
||||
# model = models.HostMetricSummaryMonthly
|
||||
# serializer_class = serializers.HostMetricSummaryMonthlySerializer
|
||||
# permission_classes = (IsSystemAdminOrAuditor,)
|
||||
# search_fields = ('date',)
|
||||
#
|
||||
# def get_queryset(self):
|
||||
# return self.model.objects.all()
|
||||
class HostMetricSummaryMonthlyList(ListAPIView):
|
||||
name = _("Host Metrics Summary Monthly")
|
||||
model = models.HostMetricSummaryMonthly
|
||||
serializer_class = serializers.HostMetricSummaryMonthlySerializer
|
||||
permission_classes = (IsSystemAdminOrAuditor,)
|
||||
search_fields = ('date',)
|
||||
|
||||
def get_queryset(self):
|
||||
return self.model.objects.all()
|
||||
|
||||
|
||||
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
@@ -1634,13 +1632,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
|
||||
parent_model = models.Inventory
|
||||
relationship = 'hosts'
|
||||
parent_key = 'inventory'
|
||||
|
||||
def get_queryset(self):
|
||||
inventory = self.get_parent_object()
|
||||
qs = getattrd(inventory, self.relationship).all()
|
||||
# Apply queryset optimizations
|
||||
qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
|
||||
return qs
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class HostGroupsList(SubListCreateAttachDetachAPIView):
|
||||
@@ -2513,7 +2505,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
return Response(
|
||||
dict(
|
||||
error=_(
|
||||
"$encrypted$ is a reserved keyword for password question defaults, " "survey question {idx} is type {survey_item[type]}."
|
||||
"$encrypted$ is a reserved keyword for password question defaults, survey question {idx} is type {survey_item[type]}."
|
||||
).format(**context)
|
||||
),
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
@@ -2581,16 +2573,7 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
|
||||
serializer_class = serializers.CredentialSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'credentials'
|
||||
|
||||
def get_queryset(self):
|
||||
# Return the full list of credentials
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
sublist_qs = getattrd(parent, self.relationship)
|
||||
sublist_qs = sublist_qs.prefetch_related(
|
||||
'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
|
||||
)
|
||||
return sublist_qs
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
|
||||
@@ -2692,7 +2675,10 @@ class JobTemplateCallback(GenericAPIView):
|
||||
# Permission class should have already validated host_config_key.
|
||||
job_template = self.get_object()
|
||||
# Attempt to find matching hosts based on remote address.
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
if job_template.inventory:
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
else:
|
||||
return Response({"msg": _("Cannot start automatically, an inventory is required.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
# If the host is not found, update the inventory before trying to
|
||||
# match again.
|
||||
inventory_sources_already_updated = []
|
||||
@@ -2777,6 +2763,7 @@ class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = serializers.InstanceGroupSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class JobTemplateAccessList(ResourceAccessList):
|
||||
@@ -2867,16 +2854,7 @@ class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, Su
|
||||
relationship = ''
|
||||
enforce_parent_relationship = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
'''
|
||||
Limit the set of WorkflowJobTemplateNodes to the related nodes of specified by
|
||||
'relationship'
|
||||
'''
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if created:
|
||||
@@ -2951,14 +2929,7 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
|
||||
parent_model = models.WorkflowJobNode
|
||||
relationship = ''
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
#
|
||||
# Limit the set of WorkflowJobNodes to the related nodes of specified by self.relationship
|
||||
#
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
|
||||
@@ -3137,11 +3108,8 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
|
||||
relationship = 'workflow_job_template_nodes'
|
||||
parent_key = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
ordering = ('id',) # assure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobTemplateJobsList(SubListAPIView):
|
||||
@@ -3233,11 +3201,8 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
|
||||
relationship = 'workflow_job_nodes'
|
||||
parent_key = 'workflow_job'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
ordering = ('id',) # assure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobCancel(GenericCancelView):
|
||||
@@ -3372,7 +3337,6 @@ class JobLabelList(SubListAPIView):
|
||||
serializer_class = serializers.LabelSerializer
|
||||
parent_model = models.Job
|
||||
relationship = 'labels'
|
||||
parent_key = 'job'
|
||||
|
||||
|
||||
class WorkflowJobLabelList(JobLabelList):
|
||||
@@ -3551,11 +3515,7 @@ class BaseJobHostSummariesList(SubListAPIView):
|
||||
relationship = 'job_host_summaries'
|
||||
name = _('Job Host Summaries List')
|
||||
search_fields = ('host_name',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class HostJobHostSummariesList(BaseJobHostSummariesList):
|
||||
@@ -4099,7 +4059,7 @@ class UnifiedJobStdout(RetrieveAPIView):
|
||||
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
|
||||
except models.StdoutMaxBytesExceeded as e:
|
||||
response_message = _(
|
||||
"Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes."
|
||||
"Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes."
|
||||
).format(text_size=e.total, supported_size=e.supported)
|
||||
if request.accepted_renderer.format == 'json':
|
||||
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
|
||||
|
||||
296
awx/api/views/analytics.py
Normal file
296
awx/api/views/analytics.py
Normal file
@@ -0,0 +1,296 @@
|
||||
import requests
|
||||
import logging
|
||||
import urllib.parse as urlparse
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils import translation
|
||||
|
||||
from awx.api.generics import APIView, Response
|
||||
from awx.api.permissions import AnalyticsPermission
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import get_awx_version
|
||||
from rest_framework import status
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
|
||||
AWX_ANALYTICS_API_PREFIX = 'analytics'
|
||||
|
||||
ERROR_UPLOAD_NOT_ENABLED = "analytics-upload-not-enabled"
|
||||
ERROR_MISSING_URL = "missing-url"
|
||||
ERROR_MISSING_USER = "missing-user"
|
||||
ERROR_MISSING_PASSWORD = "missing-password"
|
||||
ERROR_NO_DATA_OR_ENTITLEMENT = "no-data-or-entitlement"
|
||||
ERROR_NOT_FOUND = "not-found"
|
||||
ERROR_UNAUTHORIZED = "unauthorized"
|
||||
ERROR_UNKNOWN = "unknown"
|
||||
ERROR_UNSUPPORTED_METHOD = "unsupported-method"
|
||||
|
||||
logger = logging.getLogger('awx.api.views.analytics')
|
||||
|
||||
|
||||
class MissingSettings(Exception):
|
||||
"""Settings are not correct Exception"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class GetNotAllowedMixin(object):
|
||||
def get(self, request, format=None):
|
||||
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
||||
|
||||
|
||||
class AnalyticsRootView(APIView):
|
||||
permission_classes = (AnalyticsPermission,)
|
||||
name = _('Automation Analytics')
|
||||
swagger_topic = 'Automation Analytics'
|
||||
|
||||
def get(self, request, format=None):
|
||||
data = OrderedDict()
|
||||
data['authorized'] = reverse('api:analytics_authorized')
|
||||
data['reports'] = reverse('api:analytics_reports_list')
|
||||
data['report_options'] = reverse('api:analytics_report_options_list')
|
||||
data['adoption_rate'] = reverse('api:analytics_adoption_rate')
|
||||
data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
|
||||
data['event_explorer'] = reverse('api:analytics_event_explorer')
|
||||
data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
|
||||
data['host_explorer'] = reverse('api:analytics_host_explorer')
|
||||
data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
|
||||
data['job_explorer'] = reverse('api:analytics_job_explorer')
|
||||
data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
|
||||
data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
|
||||
data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
|
||||
data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
|
||||
data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
|
||||
data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
|
||||
data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
|
||||
return Response(data)
|
||||
|
||||
|
||||
class AnalyticsGenericView(APIView):
|
||||
"""
|
||||
Example:
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
params = {
|
||||
'limit': '20',
|
||||
'offset': '0',
|
||||
'sort_by': 'name:asc',
|
||||
}
|
||||
|
||||
json_data = {
|
||||
'limit': '20',
|
||||
'offset': '0',
|
||||
'sort_options': 'name',
|
||||
'sort_order': 'asc',
|
||||
'tags': [],
|
||||
'slug': [],
|
||||
'name': [],
|
||||
'description': '',
|
||||
}
|
||||
|
||||
response = requests.post(f'{AUTOMATION_ANALYTICS_API_URL}/reports/', params=params,
|
||||
headers=headers, json=json_data)
|
||||
|
||||
return Response(response.json(), status=response.status_code)
|
||||
"""
|
||||
|
||||
permission_classes = (AnalyticsPermission,)
|
||||
|
||||
@staticmethod
|
||||
def _request_headers(request):
|
||||
headers = {}
|
||||
for header in ['Content-Type', 'Content-Length', 'Accept-Encoding', 'User-Agent', 'Accept']:
|
||||
if request.headers.get(header, None):
|
||||
headers[header] = request.headers.get(header)
|
||||
headers['X-Rh-Analytics-Source'] = 'controller'
|
||||
headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
|
||||
headers['Accept-Language'] = translation.get_language()
|
||||
|
||||
return headers
|
||||
|
||||
@staticmethod
|
||||
def _get_analytics_path(request_path):
|
||||
parts = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')
|
||||
path_specific = parts[-1]
|
||||
return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"
|
||||
|
||||
def _get_analytics_url(self, request_path):
|
||||
analytics_path = self._get_analytics_path(request_path)
|
||||
url = getattr(settings, 'AUTOMATION_ANALYTICS_URL', None)
|
||||
if not url:
|
||||
raise MissingSettings(ERROR_MISSING_URL)
|
||||
url_parts = urlparse.urlsplit(url)
|
||||
analytics_url = urlparse.urlunsplit([url_parts.scheme, url_parts.netloc, analytics_path, url_parts.query, url_parts.fragment])
|
||||
return analytics_url
|
||||
|
||||
@staticmethod
|
||||
def _get_setting(setting_name, default, error_message):
|
||||
setting = getattr(settings, setting_name, default)
|
||||
if not setting:
|
||||
raise MissingSettings(error_message)
|
||||
return setting
|
||||
|
||||
@staticmethod
|
||||
def _error_response(keyword, message=None, remote=True, remote_status_code=None, status_code=status.HTTP_403_FORBIDDEN):
|
||||
text = {"error": {"remote": remote, "remote_status": remote_status_code, "keyword": keyword}}
|
||||
if message:
|
||||
text["error"]["message"] = message
|
||||
return Response(text, status=status_code)
|
||||
|
||||
def _error_response_404(self, response):
|
||||
try:
|
||||
json_response = response.json()
|
||||
# Subscription/entitlement problem or missing tenant data in AA db => HTTP 403
|
||||
message = json_response.get('error', None)
|
||||
if message:
|
||||
return self._error_response(ERROR_NO_DATA_OR_ENTITLEMENT, message, remote=True, remote_status_code=response.status_code)
|
||||
|
||||
# Standard 404 problem => HTTP 404
|
||||
message = json_response.get('detail', None) or response.text
|
||||
except requests.exceptions.JSONDecodeError:
|
||||
# Unexpected text => still HTTP 404
|
||||
message = response.text
|
||||
|
||||
return self._error_response(ERROR_NOT_FOUND, message, remote=True, remote_status_code=status.HTTP_404_NOT_FOUND, status_code=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
@staticmethod
|
||||
def _update_response_links(json_response):
|
||||
if not json_response.get('links', None):
|
||||
return
|
||||
|
||||
for key, value in json_response['links'].items():
|
||||
if value:
|
||||
json_response['links'][key] = value.replace(AUTOMATION_ANALYTICS_API_URL_PATH, f"/api/v2/{AWX_ANALYTICS_API_PREFIX}")
|
||||
|
||||
def _forward_response(self, response):
|
||||
try:
|
||||
content_type = response.headers.get('content-type', '')
|
||||
if content_type.find('application/json') != -1:
|
||||
json_response = response.json()
|
||||
self._update_response_links(json_response)
|
||||
|
||||
return Response(json_response, status=response.status_code)
|
||||
except Exception as e:
|
||||
logger.error(f"Analytics API: Response error: {e}")
|
||||
|
||||
return Response(response.content, status=response.status_code)
|
||||
|
||||
def _send_to_analytics(self, request, method):
|
||||
try:
|
||||
headers = self._request_headers(request)
|
||||
|
||||
self._get_setting('INSIGHTS_TRACKING_STATE', False, ERROR_UPLOAD_NOT_ENABLED)
|
||||
url = self._get_analytics_url(request.path)
|
||||
rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
|
||||
rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
|
||||
|
||||
if method not in ["GET", "POST", "OPTIONS"]:
|
||||
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
else:
|
||||
response = requests.request(
|
||||
method,
|
||||
url,
|
||||
auth=(rh_user, rh_password),
|
||||
verify=settings.INSIGHTS_CERT_PATH,
|
||||
params=request.query_params,
|
||||
headers=headers,
|
||||
json=request.data,
|
||||
timeout=(31, 31),
|
||||
)
|
||||
#
|
||||
# Missing or wrong user/pass
|
||||
#
|
||||
if response.status_code == status.HTTP_401_UNAUTHORIZED:
|
||||
text = (response.text or '').rstrip("\n")
|
||||
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
|
||||
#
|
||||
# Not found, No entitlement or No data in Analytics
|
||||
#
|
||||
elif response.status_code == status.HTTP_404_NOT_FOUND:
|
||||
return self._error_response_404(response)
|
||||
#
|
||||
# Success or not a 401/404 errors are just forwarded
|
||||
#
|
||||
else:
|
||||
return self._forward_response(response)
|
||||
|
||||
except MissingSettings as e:
|
||||
logger.warning(f"Analytics API: Setting missing: {e.args[0]}")
|
||||
return self._error_response(e.args[0], remote=False)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Analytics API: Request error: {e}")
|
||||
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
except Exception as e:
|
||||
logger.error(f"Analytics API: Error: {e}")
|
||||
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
class AnalyticsGenericListView(AnalyticsGenericView):
|
||||
def get(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="GET")
|
||||
|
||||
def post(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="POST")
|
||||
|
||||
def options(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="OPTIONS")
|
||||
|
||||
|
||||
class AnalyticsGenericDetailView(AnalyticsGenericView):
|
||||
def get(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="GET")
|
||||
|
||||
def post(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="POST")
|
||||
|
||||
def options(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="OPTIONS")
|
||||
|
||||
|
||||
class AnalyticsAuthorizedView(AnalyticsGenericListView):
|
||||
name = _("Authorized")
|
||||
|
||||
|
||||
class AnalyticsReportsList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Reports")
|
||||
swagger_topic = "Automation Analytics"
|
||||
|
||||
|
||||
class AnalyticsReportDetail(AnalyticsGenericDetailView):
|
||||
name = _("Report")
|
||||
|
||||
|
||||
class AnalyticsReportOptionsList(AnalyticsGenericListView):
|
||||
name = _("Report Options")
|
||||
|
||||
|
||||
class AnalyticsAdoptionRateList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Adoption Rate")
|
||||
|
||||
|
||||
class AnalyticsEventExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Event Explorer")
|
||||
|
||||
|
||||
class AnalyticsHostExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Host Explorer")
|
||||
|
||||
|
||||
class AnalyticsJobExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Job Explorer")
|
||||
|
||||
|
||||
class AnalyticsProbeTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Probe Templates")
|
||||
|
||||
|
||||
class AnalyticsProbeTemplateForHostsList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Probe Template For Hosts")
|
||||
|
||||
|
||||
class AnalyticsRoiTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("ROI Templates")
|
||||
@@ -1,5 +1,7 @@
|
||||
from collections import OrderedDict
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from rest_framework.permissions import IsAuthenticated
|
||||
from rest_framework.renderers import JSONRenderer
|
||||
from rest_framework.reverse import reverse
|
||||
@@ -18,6 +20,9 @@ from awx.api import (
|
||||
|
||||
|
||||
class BulkView(APIView):
|
||||
name = _('Bulk')
|
||||
swagger_topic = 'Bulk'
|
||||
|
||||
permission_classes = [IsAuthenticated]
|
||||
renderer_classes = [
|
||||
renderers.BrowsableAPIRenderer,
|
||||
|
||||
@@ -6,6 +6,8 @@ import io
|
||||
import ipaddress
|
||||
import os
|
||||
import tarfile
|
||||
import time
|
||||
import re
|
||||
|
||||
import asn1
|
||||
from awx.api import serializers
|
||||
@@ -40,6 +42,8 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
|
||||
# │ │ └── receptor.key
|
||||
# │ └── work-public-key.pem
|
||||
# └── requirements.yml
|
||||
|
||||
|
||||
class InstanceInstallBundle(GenericAPIView):
|
||||
name = _('Install Bundle')
|
||||
model = models.Instance
|
||||
@@ -49,56 +53,54 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
def get(self, request, *args, **kwargs):
|
||||
instance_obj = self.get_object()
|
||||
|
||||
if instance_obj.node_type not in ('execution',):
|
||||
if instance_obj.node_type not in ('execution', 'hop'):
|
||||
return Response(
|
||||
data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
|
||||
data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')),
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
with io.BytesIO() as f:
|
||||
with tarfile.open(fileobj=f, mode='w:gz') as tar:
|
||||
# copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
|
||||
tar.add(
|
||||
os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
|
||||
)
|
||||
# copy /etc/receptor/tls/ca/mesh-CA.crt to receptor/tls/ca in the tar file
|
||||
tar.add(os.path.realpath('/etc/receptor/tls/ca/mesh-CA.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/mesh-CA.crt")
|
||||
|
||||
# copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
|
||||
tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")
|
||||
# copy /etc/receptor/work_public_key.pem to receptor/work_public_key.pem
|
||||
tar.add('/etc/receptor/work_public_key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work_public_key.pem")
|
||||
|
||||
# generate and write the receptor key to receptor/tls/receptor.key in the tar file
|
||||
key, cert = generate_receptor_tls(instance_obj)
|
||||
|
||||
def tar_addfile(tarinfo, filecontent):
|
||||
tarinfo.mtime = time.time()
|
||||
tarinfo.size = len(filecontent)
|
||||
tar.addfile(tarinfo, io.BytesIO(filecontent))
|
||||
|
||||
key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
|
||||
key_tarinfo.size = len(key)
|
||||
tar.addfile(key_tarinfo, io.BytesIO(key))
|
||||
tar_addfile(key_tarinfo, key)
|
||||
|
||||
cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
|
||||
cert_tarinfo.size = len(cert)
|
||||
tar.addfile(cert_tarinfo, io.BytesIO(cert))
|
||||
tar_addfile(cert_tarinfo, cert)
|
||||
|
||||
# generate and write install_receptor.yml to the tar file
|
||||
playbook = generate_playbook().encode('utf-8')
|
||||
playbook = generate_playbook(instance_obj).encode('utf-8')
|
||||
playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
|
||||
playbook_tarinfo.size = len(playbook)
|
||||
tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
|
||||
tar_addfile(playbook_tarinfo, playbook)
|
||||
|
||||
# generate and write inventory.yml to the tar file
|
||||
inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
|
||||
inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
|
||||
inventory_yml_tarinfo.size = len(inventory_yml)
|
||||
tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
|
||||
tar_addfile(inventory_yml_tarinfo, inventory_yml)
|
||||
|
||||
# generate and write group_vars/all.yml to the tar file
|
||||
group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
|
||||
group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
|
||||
group_vars_tarinfo.size = len(group_vars)
|
||||
tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
|
||||
tar_addfile(group_vars_tarinfo, group_vars)
|
||||
|
||||
# generate and write requirements.yml to the tar file
|
||||
requirements_yml = generate_requirements_yml().encode('utf-8')
|
||||
requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
|
||||
requirements_yml_tarinfo.size = len(requirements_yml)
|
||||
tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
|
||||
tar_addfile(requirements_yml_tarinfo, requirements_yml)
|
||||
|
||||
# respond with the tarfile
|
||||
f.seek(0)
|
||||
@@ -107,8 +109,10 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
return response
|
||||
|
||||
|
||||
def generate_playbook():
|
||||
return render_to_string("instance_install_bundle/install_receptor.yml")
|
||||
def generate_playbook(instance_obj):
|
||||
playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj))
|
||||
# convert consecutive newlines with a single newline
|
||||
return re.sub(r'\n+', '\n', playbook_yaml)
|
||||
|
||||
|
||||
def generate_requirements_yml():
|
||||
@@ -120,7 +124,12 @@ def generate_inventory_yml(instance_obj):
|
||||
|
||||
|
||||
def generate_group_vars_all_yml(instance_obj):
|
||||
return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
|
||||
peers = []
|
||||
for instance in instance_obj.peers.all():
|
||||
peers.append(dict(host=instance.hostname, port=instance.listener_port))
|
||||
all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
|
||||
# convert consecutive newlines with a single newline
|
||||
return re.sub(r'\n+', '\n', all_yaml)
|
||||
|
||||
|
||||
def generate_receptor_tls(instance_obj):
|
||||
@@ -161,14 +170,14 @@ def generate_receptor_tls(instance_obj):
|
||||
.sign(key, hashes.SHA256())
|
||||
)
|
||||
|
||||
# sign csr with the receptor ca key from /etc/receptor/ca/receptor-ca.key
|
||||
with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
|
||||
# sign csr with the receptor ca key from /etc/receptor/ca/mesh-CA.key
|
||||
with open('/etc/receptor/tls/ca/mesh-CA.key', 'rb') as f:
|
||||
ca_key = serialization.load_pem_private_key(
|
||||
f.read(),
|
||||
password=None,
|
||||
)
|
||||
|
||||
with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
|
||||
with open('/etc/receptor/tls/ca/mesh-CA.crt', 'rb') as f:
|
||||
ca_cert = x509.load_pem_x509_certificate(f.read())
|
||||
|
||||
cert = (
|
||||
|
||||
@@ -50,7 +50,7 @@ class UnifiedJobDeletionMixin(object):
|
||||
return Response({"error": _("Job has not finished processing events.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
|
||||
logger.warning('Allowing deletion of {} through the API without all events processed.'.format(obj.log_format))
|
||||
|
||||
# Manually cascade delete events if unpartitioned job
|
||||
if obj.has_unpartitioned_events:
|
||||
|
||||
@@ -61,12 +61,6 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||
model = Organization
|
||||
serializer_class = OrganizationSerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = Organization.accessible_objects(self.request.user, 'read_role')
|
||||
qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
|
||||
qs = qs.prefetch_related('created_by', 'modified_by')
|
||||
return qs
|
||||
|
||||
|
||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
model = Organization
|
||||
@@ -207,6 +201,7 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = InstanceGroupSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
@@ -214,6 +209,7 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
serializer_class = CredentialSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'galaxy_credentials'
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.kind != 'galaxy_api_token':
|
||||
|
||||
@@ -20,6 +20,7 @@ from rest_framework import status
|
||||
|
||||
import requests
|
||||
|
||||
from awx import MODE
|
||||
from awx.api.generics import APIView
|
||||
from awx.conf.registry import settings_registry
|
||||
from awx.main.analytics import all_collectors
|
||||
@@ -54,6 +55,8 @@ class ApiRootView(APIView):
|
||||
data['custom_logo'] = settings.CUSTOM_LOGO
|
||||
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
|
||||
data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
|
||||
if MODE == 'development':
|
||||
data['swagger'] = drf_reverse('api:schema-swagger-ui')
|
||||
return Response(data)
|
||||
|
||||
|
||||
@@ -104,8 +107,7 @@ class ApiVersionRootView(APIView):
|
||||
data['groups'] = reverse('api:group_list', request=request)
|
||||
data['hosts'] = reverse('api:host_list', request=request)
|
||||
data['host_metrics'] = reverse('api:host_metric_list', request=request)
|
||||
# It will be enabled in future version of the AWX
|
||||
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
|
||||
data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
|
||||
data['job_templates'] = reverse('api:job_template_list', request=request)
|
||||
data['jobs'] = reverse('api:job_list', request=request)
|
||||
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
|
||||
@@ -126,6 +128,7 @@ class ApiVersionRootView(APIView):
|
||||
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
|
||||
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
|
||||
data['bulk'] = reverse('api:bulk', request=request)
|
||||
data['analytics'] = reverse('api:analytics_root_view', request=request)
|
||||
return Response(data)
|
||||
|
||||
|
||||
|
||||
@@ -114,7 +114,7 @@ class WebhookReceiverBase(APIView):
|
||||
# Ensure that the full contents of the request are captured for multiple uses.
|
||||
request.body
|
||||
|
||||
logger.debug("headers: {}\n" "data: {}\n".format(request.headers, request.data))
|
||||
logger.debug("headers: {}\ndata: {}\n".format(request.headers, request.data))
|
||||
obj = self.get_object()
|
||||
self.check_signature(obj)
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ class ConfConfig(AppConfig):
|
||||
def ready(self):
|
||||
self.module.autodiscover()
|
||||
|
||||
if not set(sys.argv) & {'migrate', 'check_migrations'}:
|
||||
if not set(sys.argv) & {'migrate', 'check_migrations', 'showmigrations'}:
|
||||
from .settings import SettingsWrapper
|
||||
|
||||
SettingsWrapper.initialize()
|
||||
|
||||
17
awx/conf/migrations/0010_change_to_JSONField.py
Normal file
17
awx/conf/migrations/0010_change_to_JSONField.py
Normal file
@@ -0,0 +1,17 @@
|
||||
# Generated by Django 4.2 on 2023-06-09 19:51
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('conf', '0009_rename_proot_settings'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='setting',
|
||||
name='value',
|
||||
field=models.JSONField(null=True),
|
||||
),
|
||||
]
|
||||
@@ -8,7 +8,6 @@ import json
|
||||
from django.db import models
|
||||
|
||||
# AWX
|
||||
from awx.main.fields import JSONBlob
|
||||
from awx.main.models.base import CreatedModifiedModel, prevent_search
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.conf import settings_registry
|
||||
@@ -18,7 +17,7 @@ __all__ = ['Setting']
|
||||
|
||||
class Setting(CreatedModifiedModel):
|
||||
key = models.CharField(max_length=255)
|
||||
value = JSONBlob(null=True)
|
||||
value = models.JSONField(null=True)
|
||||
user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE))
|
||||
|
||||
def __str__(self):
|
||||
|
||||
@@ -418,6 +418,10 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
"""Get value while accepting the in-memory cache if key is available"""
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
return self._get_local(name)
|
||||
# If the last line did not return, that means we hit a database error
|
||||
# in that case, we should not have a local cache value
|
||||
# thus, return empty as a signal to use the default
|
||||
return empty
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
|
||||
@@ -35,7 +35,7 @@ class TestStringListBooleanField:
|
||||
field = StringListBooleanField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
|
||||
|
||||
@pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
|
||||
def test_to_representation_valid(self, value_in, value_known):
|
||||
@@ -48,7 +48,7 @@ class TestStringListBooleanField:
|
||||
field = StringListBooleanField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_representation(value)
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
|
||||
|
||||
|
||||
class TestListTuplesField:
|
||||
@@ -67,7 +67,7 @@ class TestListTuplesField:
|
||||
field = ListTuplesField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 " "but got {} instead.".format(t)
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 but got {} instead.".format(t)
|
||||
|
||||
|
||||
class TestStringListPathField:
|
||||
|
||||
@@ -13,6 +13,7 @@ from unittest import mock
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db.utils import Error as DBError
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import pytest
|
||||
|
||||
@@ -331,3 +332,18 @@ def test_in_memory_cache_works(settings):
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR=[])
|
||||
def test_getattr_with_database_error(settings):
|
||||
"""
|
||||
If a setting is defined via the registry and has a null-ish default which is not None
|
||||
then referencing that setting during a database outage should give that default
|
||||
this is regression testing for a bug where it would return None
|
||||
"""
|
||||
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
|
||||
mock_ensure.side_effect = DBError('for test')
|
||||
assert settings.AWX_VAR == []
|
||||
|
||||
@@ -79,7 +79,6 @@ __all__ = [
|
||||
'get_user_queryset',
|
||||
'check_user_access',
|
||||
'check_user_access_with_errors',
|
||||
'user_accessible_objects',
|
||||
'consumer_access',
|
||||
]
|
||||
|
||||
@@ -136,10 +135,6 @@ def register_access(model_class, access_class):
|
||||
access_registry[model_class] = access_class
|
||||
|
||||
|
||||
def user_accessible_objects(user, role_name):
|
||||
return ResourceMixin._accessible_objects(User, user, role_name)
|
||||
|
||||
|
||||
def get_user_queryset(user, model_class):
|
||||
"""
|
||||
Return a queryset for the given model_class containing only the instances
|
||||
@@ -366,9 +361,9 @@ class BaseAccess(object):
|
||||
report_violation = lambda message: None
|
||||
else:
|
||||
report_violation = lambda message: logger.warning(message)
|
||||
if validation_info.get('trial', False) is True or validation_info['instance_count'] == 10: # basic 10 license
|
||||
if validation_info.get('trial', False) is True:
|
||||
|
||||
def report_violation(message):
|
||||
def report_violation(message): # noqa
|
||||
raise PermissionDenied(message)
|
||||
|
||||
if check_expiration and validation_info.get('time_remaining', None) is None:
|
||||
@@ -2234,7 +2229,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
if not node_access.can_add({'reference_obj': node}):
|
||||
wj_add_perm = False
|
||||
if not wj_add_perm and self.save_messages:
|
||||
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job ' 'resources required for relaunch.')
|
||||
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job resources required for relaunch.')
|
||||
return wj_add_perm
|
||||
|
||||
def can_cancel(self, obj):
|
||||
@@ -2952,3 +2947,19 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
|
||||
for cls in BaseAccess.__subclasses__():
|
||||
access_registry[cls.model] = cls
|
||||
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
|
||||
|
||||
|
||||
def optimize_queryset(queryset):
|
||||
"""
|
||||
A utility method in case you already have a queryset and just want to
|
||||
apply the standard optimizations for that model.
|
||||
In other words, use if you do not want to start from filtered_queryset for some reason.
|
||||
"""
|
||||
if not queryset.model or queryset.model not in access_registry:
|
||||
return queryset
|
||||
access_class = access_registry[queryset.model]
|
||||
if access_class.select_related:
|
||||
queryset = queryset.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
queryset = queryset.prefetch_related(*access_class.prefetch_related)
|
||||
return queryset
|
||||
|
||||
@@ -6,7 +6,7 @@ import platform
|
||||
import distro
|
||||
|
||||
from django.db import connection
|
||||
from django.db.models import Count
|
||||
from django.db.models import Count, Min
|
||||
from django.conf import settings
|
||||
from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now, timedelta
|
||||
@@ -35,7 +35,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
|
||||
"""
|
||||
|
||||
|
||||
def trivial_slicing(key, since, until, last_gather):
|
||||
def trivial_slicing(key, since, until, last_gather, **kwargs):
|
||||
if since is not None:
|
||||
return [(since, until)]
|
||||
|
||||
@@ -48,7 +48,7 @@ def trivial_slicing(key, since, until, last_gather):
|
||||
return [(last_entry, until)]
|
||||
|
||||
|
||||
def four_hour_slicing(key, since, until, last_gather):
|
||||
def four_hour_slicing(key, since, until, last_gather, **kwargs):
|
||||
if since is not None:
|
||||
last_entry = since
|
||||
else:
|
||||
@@ -69,6 +69,54 @@ def four_hour_slicing(key, since, until, last_gather):
|
||||
start = end
|
||||
|
||||
|
||||
def host_metric_slicing(key, since, until, last_gather, **kwargs):
|
||||
"""
|
||||
Slicing doesn't start 4 weeks ago, but sends whole table monthly or first time
|
||||
"""
|
||||
from awx.main.models.inventory import HostMetric
|
||||
|
||||
if since is not None:
|
||||
return [(since, until)]
|
||||
|
||||
from awx.conf.models import Setting
|
||||
|
||||
# Check if full sync should be done
|
||||
full_sync_enabled = kwargs.get('full_sync_enabled', False)
|
||||
last_entry = None
|
||||
if not full_sync_enabled:
|
||||
#
|
||||
# If not, try incremental sync first
|
||||
#
|
||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
|
||||
last_entry = last_entries.get(key)
|
||||
if not last_entry:
|
||||
#
|
||||
# If not done before, switch to full sync
|
||||
#
|
||||
full_sync_enabled = True
|
||||
|
||||
if full_sync_enabled:
|
||||
#
|
||||
# Find the lowest date for full sync
|
||||
#
|
||||
min_dates = HostMetric.objects.aggregate(min_last_automation=Min('last_automation'), min_last_deleted=Min('last_deleted'))
|
||||
if min_dates['min_last_automation'] and min_dates['min_last_deleted']:
|
||||
last_entry = min(min_dates['min_last_automation'], min_dates['min_last_deleted'])
|
||||
elif min_dates['min_last_automation'] or min_dates['min_last_deleted']:
|
||||
last_entry = min_dates['min_last_automation'] or min_dates['min_last_deleted']
|
||||
|
||||
if not last_entry:
|
||||
# empty table
|
||||
return []
|
||||
|
||||
start, end = last_entry, None
|
||||
while start < until:
|
||||
end = min(start + timedelta(days=30), until)
|
||||
yield (start, end)
|
||||
start = end
|
||||
|
||||
|
||||
def _identify_lower(key, since, until, last_gather):
|
||||
from awx.conf.models import Setting
|
||||
|
||||
@@ -83,7 +131,7 @@ def _identify_lower(key, since, until, last_gather):
|
||||
return lower, last_entries
|
||||
|
||||
|
||||
@register('config', '1.5', description=_('General platform configuration.'))
|
||||
@register('config', '1.6', description=_('General platform configuration.'))
|
||||
def config(since, **kwargs):
|
||||
license_info = get_license()
|
||||
install_type = 'traditional'
|
||||
@@ -107,10 +155,13 @@ def config(since, **kwargs):
|
||||
'subscription_name': license_info.get('subscription_name'),
|
||||
'sku': license_info.get('sku'),
|
||||
'support_level': license_info.get('support_level'),
|
||||
'usage': license_info.get('usage'),
|
||||
'product_name': license_info.get('product_name'),
|
||||
'valid_key': license_info.get('valid_key'),
|
||||
'satellite': license_info.get('satellite'),
|
||||
'pool_id': license_info.get('pool_id'),
|
||||
'subscription_id': license_info.get('subscription_id'),
|
||||
'account_number': license_info.get('account_number'),
|
||||
'current_instances': license_info.get('current_instances'),
|
||||
'automated_instances': license_info.get('automated_instances'),
|
||||
'automated_since': license_info.get('automated_since'),
|
||||
@@ -348,7 +399,10 @@ def _copy_table(table, query, path):
|
||||
file_path = os.path.join(path, table + '_table.csv')
|
||||
file = FileSplitter(filespec=file_path)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.copy_expert(query, file)
|
||||
with cursor.copy(query) as copy:
|
||||
while data := copy.read():
|
||||
byte_data = bytes(data)
|
||||
file.write(byte_data.decode())
|
||||
return file.file_list()
|
||||
|
||||
|
||||
@@ -537,3 +591,42 @@ def workflow_job_template_node_table(since, full_path, **kwargs):
|
||||
) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
|
||||
ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
|
||||
return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
|
||||
|
||||
|
||||
@register(
|
||||
'host_metric_table', '1.0', format='csv', description=_('Host Metric data, incremental/full sync'), expensive=host_metric_slicing, full_sync_interval=30
|
||||
)
|
||||
def host_metric_table(since, full_path, until, **kwargs):
|
||||
host_metric_query = '''COPY (SELECT main_hostmetric.id,
|
||||
main_hostmetric.hostname,
|
||||
main_hostmetric.first_automation,
|
||||
main_hostmetric.last_automation,
|
||||
main_hostmetric.last_deleted,
|
||||
main_hostmetric.deleted,
|
||||
main_hostmetric.automated_counter,
|
||||
main_hostmetric.deleted_counter,
|
||||
main_hostmetric.used_in_inventories
|
||||
FROM main_hostmetric
|
||||
WHERE (main_hostmetric.last_automation > '{}' AND main_hostmetric.last_automation <= '{}') OR
|
||||
(main_hostmetric.last_deleted > '{}' AND main_hostmetric.last_deleted <= '{}')
|
||||
ORDER BY main_hostmetric.id ASC) TO STDOUT WITH CSV HEADER'''.format(
|
||||
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
|
||||
)
|
||||
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
|
||||
|
||||
|
||||
@register('host_metric_summary_monthly_table', '1.0', format='csv', description=_('HostMetricSummaryMonthly export, full sync'), expensive=trivial_slicing)
|
||||
def host_metric_summary_monthly_table(since, full_path, **kwargs):
|
||||
query = '''
|
||||
COPY (SELECT main_hostmetricsummarymonthly.id,
|
||||
main_hostmetricsummarymonthly.date,
|
||||
main_hostmetricsummarymonthly.license_capacity,
|
||||
main_hostmetricsummarymonthly.license_consumed,
|
||||
main_hostmetricsummarymonthly.hosts_added,
|
||||
main_hostmetricsummarymonthly.hosts_deleted,
|
||||
main_hostmetricsummarymonthly.indirectly_managed_hosts
|
||||
FROM main_hostmetricsummarymonthly
|
||||
ORDER BY main_hostmetricsummarymonthly.id ASC) TO STDOUT WITH CSV HEADER
|
||||
'''
|
||||
|
||||
return _copy_table(table='host_metric_summary_monthly', query=query, path=full_path)
|
||||
|
||||
@@ -52,7 +52,7 @@ def all_collectors():
|
||||
}
|
||||
|
||||
|
||||
def register(key, version, description=None, format='json', expensive=None):
|
||||
def register(key, version, description=None, format='json', expensive=None, full_sync_interval=None):
|
||||
"""
|
||||
A decorator used to register a function as a metric collector.
|
||||
|
||||
@@ -71,6 +71,7 @@ def register(key, version, description=None, format='json', expensive=None):
|
||||
f.__awx_analytics_description__ = description
|
||||
f.__awx_analytics_type__ = format
|
||||
f.__awx_expensive__ = expensive
|
||||
f.__awx_full_sync_interval__ = full_sync_interval
|
||||
return f
|
||||
|
||||
return decorate
|
||||
@@ -259,10 +260,19 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
# These slicer functions may return a generator. The `since` parameter is
|
||||
# allowed to be None, and will fall back to LAST_ENTRIES[key] or to
|
||||
# LAST_GATHER (truncated appropriately to match the 4-week limit).
|
||||
#
|
||||
# Or it can force full table sync if interval is given
|
||||
kwargs = dict()
|
||||
full_sync_enabled = False
|
||||
if func.__awx_full_sync_interval__:
|
||||
last_full_sync = last_entries.get(f"{key}_full")
|
||||
full_sync_enabled = not last_full_sync or last_full_sync < now() - timedelta(days=func.__awx_full_sync_interval__)
|
||||
|
||||
kwargs['full_sync_enabled'] = full_sync_enabled
|
||||
if func.__awx_expensive__:
|
||||
slices = func.__awx_expensive__(key, since, until, last_gather)
|
||||
slices = func.__awx_expensive__(key, since, until, last_gather, **kwargs)
|
||||
else:
|
||||
slices = collectors.trivial_slicing(key, since, until, last_gather)
|
||||
slices = collectors.trivial_slicing(key, since, until, last_gather, **kwargs)
|
||||
|
||||
for start, end in slices:
|
||||
files = func(start, full_path=gather_dir, until=end)
|
||||
@@ -301,6 +311,12 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
succeeded = False
|
||||
logger.exception("Could not generate metric {}".format(filename))
|
||||
|
||||
# update full sync timestamp if successfully shipped
|
||||
if full_sync_enabled and collection_type != 'dry-run' and succeeded:
|
||||
with disable_activity_stream():
|
||||
last_entries[f"{key}_full"] = now()
|
||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||
|
||||
if collection_type != 'dry-run':
|
||||
if succeeded:
|
||||
for fpath in tarfiles:
|
||||
@@ -359,9 +375,7 @@ def ship(path):
|
||||
s.headers = get_awx_http_client_headers()
|
||||
s.headers.pop('Content-Type')
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
response = s.post(
|
||||
url, files=files, verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)
|
||||
)
|
||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||
# Accept 2XX status_codes
|
||||
if response.status_code >= 300:
|
||||
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
|
||||
|
||||
@@ -209,6 +209,11 @@ class Metrics:
|
||||
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
|
||||
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
|
||||
# dispatcher subsystem metrics
|
||||
SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
|
||||
SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
|
||||
SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
|
||||
SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
|
||||
]
|
||||
# turn metric list into dictionary with the metric name as a key
|
||||
self.METRICS = {}
|
||||
@@ -298,11 +303,13 @@ class Metrics:
|
||||
try:
|
||||
current_time = time.time()
|
||||
if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval:
|
||||
serialized_metrics = self.serialize_local_metrics()
|
||||
payload = {
|
||||
'instance': self.instance_name,
|
||||
'metrics': self.serialize_local_metrics(),
|
||||
'metrics': serialized_metrics,
|
||||
}
|
||||
|
||||
# store the serialized data locally as well, so that load_other_metrics will read it
|
||||
self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
|
||||
emit_channel_notification("metrics", payload)
|
||||
|
||||
self.previous_send_metrics.set(current_time)
|
||||
|
||||
87
awx/main/cache.py
Normal file
87
awx/main/cache.py
Normal file
@@ -0,0 +1,87 @@
|
||||
import functools
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.cache.backends.base import DEFAULT_TIMEOUT
|
||||
from django.core.cache.backends.redis import RedisCache
|
||||
|
||||
from redis.exceptions import ConnectionError, ResponseError, TimeoutError
|
||||
import socket
|
||||
|
||||
# This list comes from what django-redis ignores and the behavior we are trying
|
||||
# to retain while dropping the dependency on django-redis.
|
||||
IGNORED_EXCEPTIONS = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
|
||||
|
||||
CONNECTION_INTERRUPTED_SENTINEL = object()
|
||||
|
||||
|
||||
def optionally_ignore_exceptions(func=None, return_value=None):
|
||||
if func is None:
|
||||
return functools.partial(optionally_ignore_exceptions, return_value=return_value)
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except IGNORED_EXCEPTIONS as e:
|
||||
if settings.DJANGO_REDIS_IGNORE_EXCEPTIONS:
|
||||
return return_value
|
||||
raise e.__cause__ or e
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class AWXRedisCache(RedisCache):
|
||||
"""
|
||||
We just want to wrap the upstream RedisCache class so that we can ignore
|
||||
the exceptions that it raises when the cache is unavailable.
|
||||
"""
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().add(key, value, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions(return_value=CONNECTION_INTERRUPTED_SENTINEL)
|
||||
def _get(self, key, default=None, version=None):
|
||||
return super().get(key, default, version)
|
||||
|
||||
def get(self, key, default=None, version=None):
|
||||
value = self._get(key, default, version)
|
||||
if value is CONNECTION_INTERRUPTED_SENTINEL:
|
||||
return default
|
||||
return value
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().set(key, value, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().touch(key, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def delete(self, key, version=None):
|
||||
return super().delete(key, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def get_many(self, keys, version=None):
|
||||
return super().get_many(keys, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def has_key(self, key, version=None):
|
||||
return super().has_key(key, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def incr(self, key, delta=1, version=None):
|
||||
return super().incr(key, delta, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
|
||||
return super().set_many(data, timeout, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def delete_many(self, keys, version=None):
|
||||
return super().delete_many(keys, version)
|
||||
|
||||
@optionally_ignore_exceptions
|
||||
def clear(self):
|
||||
return super().clear()
|
||||
100
awx/main/conf.py
100
awx/main/conf.py
@@ -94,6 +94,20 @@ register(
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'CSRF_TRUSTED_ORIGINS',
|
||||
default=[],
|
||||
field_class=fields.StringListField,
|
||||
label=_('CSRF Trusted Origins List'),
|
||||
help_text=_(
|
||||
"If the service is behind a reverse proxy/load balancer, use this setting "
|
||||
"to configure the schema://addresses from which the service should trust "
|
||||
"Origin header values. "
|
||||
),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'LICENSE',
|
||||
field_class=fields.DictField,
|
||||
@@ -680,15 +694,33 @@ register(
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
|
||||
'LOG_AGGREGATOR_ACTION_QUEUE_SIZE',
|
||||
field_class=fields.IntegerField,
|
||||
default=131072,
|
||||
min_value=1,
|
||||
label=_('Maximum number of messages that can be stored in the log action queue'),
|
||||
help_text=_(
|
||||
'Defines how large the rsyslog action queue can grow in number of messages '
|
||||
'stored. This can have an impact on memory utilization. When the queue '
|
||||
'reaches 75% of this number, the queue will start writing to disk '
|
||||
'(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
|
||||
'DEBUG messages will start to be discarded (queue.discardMark with '
|
||||
'queue.discardSeverity=5).'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB',
|
||||
field_class=fields.IntegerField,
|
||||
default=1,
|
||||
min_value=1,
|
||||
label=_('Maximum disk persistance for external log aggregation (in GB)'),
|
||||
label=_('Maximum disk persistence for rsyslogd action queuing (in GB)'),
|
||||
help_text=_(
|
||||
'Amount of data to store (in gigabytes) during an outage of '
|
||||
'the external log aggregator (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting.'
|
||||
'Amount of data to store (in gigabytes) if an rsyslog action takes time '
|
||||
'to process an incoming message (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). '
|
||||
'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
@@ -822,6 +854,64 @@ register(
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'CLEANUP_HOST_METRICS_LAST_TS',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last cleanup date for HostMetrics'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'HOST_METRIC_SUMMARY_TASK_LAST_TS',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last computing date of HostMetricSummaryMonthly'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_CLEANUP_PATHS',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Enable or Disable tmp dir cleanup'),
|
||||
default=True,
|
||||
help_text=_('Enable or Disable TMP Dir cleanup'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_REQUEST_PROFILE',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Debug Web Requests'),
|
||||
default=False,
|
||||
help_text=_('Debug web request python timing'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
register(
|
||||
'DEFAULT_CONTAINER_RUN_OPTIONS',
|
||||
field_class=fields.StringListField,
|
||||
label=_('Container Run Options'),
|
||||
default=['--network', 'slirp4netns:enable_ipv6=true'],
|
||||
help_text=_("List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug']"),
|
||||
category=('Jobs'),
|
||||
category_slug='jobs',
|
||||
)
|
||||
|
||||
register(
|
||||
'RECEPTOR_RELEASE_WORK',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Release Receptor Work'),
|
||||
default=True,
|
||||
help_text=_('Release receptor work'),
|
||||
category=('Debug'),
|
||||
category_slug='debug',
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
|
||||
|
||||
@@ -54,6 +54,12 @@ aim_inputs = {
|
||||
'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
|
||||
},
|
||||
{'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
|
||||
{
|
||||
'id': 'object_property',
|
||||
'label': _('Object Property'),
|
||||
'type': 'string',
|
||||
'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
|
||||
},
|
||||
{
|
||||
'id': 'reason',
|
||||
'label': _('Reason'),
|
||||
@@ -74,6 +80,7 @@ def aim_backend(**kwargs):
|
||||
app_id = kwargs['app_id']
|
||||
object_query = kwargs['object_query']
|
||||
object_query_format = kwargs['object_query_format']
|
||||
object_property = kwargs.get('object_property', '')
|
||||
reason = kwargs.get('reason', None)
|
||||
if webservice_id == '':
|
||||
webservice_id = 'AIMWebService'
|
||||
@@ -98,7 +105,18 @@ def aim_backend(**kwargs):
|
||||
allow_redirects=False,
|
||||
)
|
||||
raise_for_status(res)
|
||||
return res.json()['Content']
|
||||
# CCP returns the property name capitalized, username is camel case
|
||||
# so we need to handle that case
|
||||
if object_property == '':
|
||||
object_property = 'Content'
|
||||
elif object_property.lower() == 'username':
|
||||
object_property = 'UserName'
|
||||
elif object_property not in res:
|
||||
raise KeyError('Property {} not found in object'.format(object_property))
|
||||
else:
|
||||
object_property = object_property.capitalize()
|
||||
|
||||
return res.json()[object_property]
|
||||
|
||||
|
||||
aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
|
||||
|
||||
65
awx/main/credential_plugins/aws_secretsmanager.py
Normal file
65
awx/main/credential_plugins/aws_secretsmanager.py
Normal file
@@ -0,0 +1,65 @@
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
secrets_manager_inputs = {
|
||||
'fields': [
|
||||
{
|
||||
'id': 'aws_access_key',
|
||||
'label': _('AWS Access Key'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'aws_secret_key',
|
||||
'label': _('AWS Secret Key'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
},
|
||||
],
|
||||
'metadata': [
|
||||
{
|
||||
'id': 'region_name',
|
||||
'label': _('AWS Secrets Manager Region'),
|
||||
'type': 'string',
|
||||
'help_text': _('Region which the secrets manager is located'),
|
||||
},
|
||||
{
|
||||
'id': 'secret_name',
|
||||
'label': _('AWS Secret Name'),
|
||||
'type': 'string',
|
||||
},
|
||||
],
|
||||
'required': ['aws_access_key', 'aws_secret_key', 'region_name', 'secret_name'],
|
||||
}
|
||||
|
||||
|
||||
def aws_secretsmanager_backend(**kwargs):
|
||||
secret_name = kwargs['secret_name']
|
||||
region_name = kwargs['region_name']
|
||||
aws_secret_access_key = kwargs['aws_secret_key']
|
||||
aws_access_key_id = kwargs['aws_access_key']
|
||||
|
||||
session = boto3.session.Session()
|
||||
client = session.client(
|
||||
service_name='secretsmanager', region_name=region_name, aws_secret_access_key=aws_secret_access_key, aws_access_key_id=aws_access_key_id
|
||||
)
|
||||
|
||||
try:
|
||||
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
|
||||
except ClientError as e:
|
||||
raise e
|
||||
# Secrets Manager decrypts the secret value using the associated KMS CMK
|
||||
# Depending on whether the secret was a string or binary, only one of these fields will be populated
|
||||
if 'SecretString' in get_secret_value_response:
|
||||
secret = get_secret_value_response['SecretString']
|
||||
|
||||
else:
|
||||
secret = get_secret_value_response['SecretBinary']
|
||||
|
||||
return secret
|
||||
|
||||
|
||||
aws_secretmanager_plugin = CredentialPlugin('AWS Secrets Manager lookup', inputs=secrets_manager_inputs, backend=aws_secretsmanager_backend)
|
||||
@@ -4,6 +4,8 @@ from urllib.parse import urljoin, quote

from django.utils.translation import gettext_lazy as _
import requests
import base64
import binascii


conjur_inputs = {
@@ -50,6 +52,13 @@ conjur_inputs = {
}


def _is_base64(s: str) -> bool:
try:
return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
except binascii.Error:
return False


def conjur_backend(**kwargs):
url = kwargs['url']
api_key = kwargs['api_key']
@@ -77,7 +86,7 @@ def conjur_backend(**kwargs):
token = resp.content.decode('utf-8')

lookup_kwargs = {
'headers': {'Authorization': 'Token token="{}"'.format(token)},
'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
'allow_redirects': False,
}
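A standalone illustration of the round-trip base64 check introduced here: tokens that are already base64 are used as-is, while raw tokens get encoded before they go into the Authorization header. The sample token strings are made up.

import base64
import binascii

def _is_base64(s: str) -> bool:
    # True only if decoding and re-encoding reproduces the original string
    try:
        return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
    except binascii.Error:
        return False

raw_token = '{"protected":"...","payload":"...","signature":"..."}'
encoded_token = base64.b64encode(raw_token.encode('utf-8')).decode('utf-8')

print(_is_base64(raw_token))      # False -> backend base64-encodes it before use
print(_is_base64(encoded_token))  # True  -> used directly in the header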
@@ -2,25 +2,29 @@ from .plugin import CredentialPlugin

from django.conf import settings
from django.utils.translation import gettext_lazy as _
from thycotic.secrets.vault import SecretsVault

from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault
from base64 import b64decode

dsv_inputs = {
'fields': [
{
'id': 'tenant',
'label': _('Tenant'),
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'),
'type': 'string',
},
{
'id': 'tld',
'label': _('Top-level Domain (TLD)'),
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'),
'choices': ['ca', 'com', 'com.au', 'eu'],
'default': 'com',
},
{'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
{
'id': 'client_id',
'label': _('Client ID'),
'type': 'string',
},
{
'id': 'client_secret',
'label': _('Client Secret'),
@@ -35,8 +39,22 @@ dsv_inputs = {
'type': 'string',
'help_text': _('The secret path e.g. /test/secret1'),
},
{
'id': 'secret_field',
'label': _('Secret Field'),
'help_text': _('The field to extract from the secret'),
'type': 'string',
},
{
'id': 'secret_decoding',
'label': _('Should the secret be base64 decoded?'),
'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
'choices': ['No Decoding', 'Decode Base64'],
'type': 'string',
'default': 'No Decoding',
},
],
'required': ['tenant', 'client_id', 'client_secret', 'path'],
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'],
}

if settings.DEBUG:
@@ -45,12 +63,32 @@ if settings.DEBUG:
'id': 'url_template',
'label': _('URL template'),
'type': 'string',
'default': 'https://{}.secretsvaultcloud.{}/v1',
'default': 'https://{}.secretsvaultcloud.{}',
}
)

dsv_plugin = CredentialPlugin(
'Thycotic DevOps Secrets Vault',
dsv_inputs,
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
)

def dsv_backend(**kwargs):
tenant_name = kwargs['tenant']
tenant_tld = kwargs.get('tld', 'com')
tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}')
client_id = kwargs['client_id']
client_secret = kwargs['client_secret']
secret_path = kwargs['path']
secret_field = kwargs['secret_field']
# providing a default value to remain backward compatible for secrets that have not specified this option
secret_decoding = kwargs.get('secret_decoding', 'No Decoding')

tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))

authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)

# files can be uploaded base64 decoded to DSV and thus decoding it only, when asked for
if secret_decoding == 'Decode Base64':
return b64decode(dsv_secret['data'][secret_field]).decode()

return dsv_secret['data'][secret_field]


dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)
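A rough sketch of calling the dsv_backend defined above directly; it assumes the plugin module is importable with the delinea/thycotic SDK installed, and the tenant and client values are placeholders.

ssh_key = dsv_backend(
    tenant='ex',                      # -> https://ex.secretsvaultcloud.com
    tld='com',
    client_id='xxxxxxxx',             # placeholder client credentials
    client_secret='xxxxxxxx',
    path='/infra/ssh',                # secret path in DSV
    secret_field='private_key',       # field inside the secret's data blob
    secret_decoding='Decode Base64',  # stored base64-encoded, return decoded text
)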
@@ -265,6 +265,8 @@ def kv_backend(**kwargs):

if secret_key:
try:
if (secret_key != 'data') and (secret_key not in json['data']) and ('data' in json['data']):
return json['data']['data'][secret_key]
return json['data'][secret_key]
except KeyError:
raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path))
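The added branch appears to account for the nested envelope that HashiCorp Vault KV v2 wraps around secret data; a small standalone illustration of that shape (values are made up):

kv_v2_response = {
    'data': {                            # outer envelope added by KV v2
        'data': {'password': 's3cret'},  # the actual key/value pairs
        'metadata': {'version': 3},
    }
}

secret_key = 'password'
json_body = kv_v2_response
if (secret_key != 'data') and (secret_key not in json_body['data']) and ('data' in json_body['data']):
    value = json_body['data']['data'][secret_key]
else:
    value = json_body['data'][secret_key]
print(value)  # s3cret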
@@ -1,7 +1,10 @@
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _

from thycotic.secrets.server import PasswordGrantAuthorizer, SecretServer, ServerSecret
try:
from delinea.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
except ImportError:
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret

tss_inputs = {
'fields': [
@@ -17,6 +20,12 @@ tss_inputs = {
'help_text': _('The (Application) user username'),
'type': 'string',
},
{
'id': 'domain',
'label': _('Domain'),
'help_text': _('The (Application) user domain'),
'type': 'string',
},
{
'id': 'password',
'label': _('Password'),
@@ -44,7 +53,12 @@ tss_inputs = {


def tss_backend(**kwargs):
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
if kwargs.get("domain"):
authorizer = DomainPasswordGrantAuthorizer(
base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
)
else:
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
secret_server = SecretServer(kwargs['server_url'], authorizer)
secret_dict = secret_server.get_secret(kwargs['secret_id'])
secret = ServerSecret(**secret_dict)
@@ -87,7 +87,7 @@ class RecordedQueryLog(object):

)
log.commit()
log.execute(
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) ' 'VALUES (?, ?, ?, ?, ?, ?, ?);',
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) VALUES (?, ?, ?, ?, ?, ?, ?);',
(os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt),
)
log.commit()
@@ -1,9 +1,11 @@
import os
import psycopg2
import psycopg
import select

from contextlib import contextmanager

from awx.settings.application_name import get_application_name

from django.conf import settings
from django.db import connection as pg_connection

@@ -38,8 +40,12 @@ def get_task_queuename():


class PubSub(object):
def __init__(self, conn):
def __init__(self, conn, select_timeout=None):
self.conn = conn
if select_timeout is None:
self.select_timeout = 5
else:
self.select_timeout = select_timeout

def listen(self, channel):
with self.conn.cursor() as cur:
@@ -53,25 +59,42 @@ class PubSub(object):
with self.conn.cursor() as cur:
cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

def events(self, select_timeout=5, yield_timeouts=False):
@staticmethod
def current_notifies(conn):
"""
Altered version of .notifies method from psycopg library
This removes the outer while True loop so that we only process
queued notifications
"""
with conn.lock:
try:
ns = conn.wait(psycopg.generators.notifies(conn.pgconn))
except psycopg.errors._NO_TRACEBACK as ex:
raise ex.with_traceback(None)
enc = psycopg._encodings.pgconn_encoding(conn.pgconn)
for pgn in ns:
n = psycopg.connection.Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
yield n

def events(self, yield_timeouts=False):
if not self.conn.autocommit:
raise RuntimeError('Listening for events can only be done in autocommit mode')

while True:
if select.select([self.conn], [], [], select_timeout) == NOT_READY:
if select.select([self.conn], [], [], self.select_timeout) == NOT_READY:
if yield_timeouts:
yield None
else:
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
notification_generator = self.current_notifies(self.conn)
for notification in notification_generator:
yield notification

def close(self):
self.conn.close()


@contextmanager
def pg_bus_conn(new_connection=False):
def pg_bus_conn(new_connection=False, select_timeout=None):
'''
Any listeners probably want to establish a new database connection,
separate from the Django connection used for queries, because that will prevent
@@ -83,12 +106,12 @@ def pg_bus_conn(new_connection=False):
'''

if new_connection:
conf = settings.DATABASES['default']
conn = psycopg2.connect(
dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
)
# Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True)
conf = settings.DATABASES['default'].copy()
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
conn = psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
else:
if pg_connection.connection is None:
pg_connection.connect()
@@ -96,7 +119,7 @@ def pg_bus_conn(new_connection=False):
raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
conn = pg_connection.connection

pubsub = PubSub(conn)
pubsub = PubSub(conn, select_timeout=select_timeout)
yield pubsub
if new_connection:
conn.close()
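A rough sketch of how a listener might use the reworked pg_bus_conn API; the channel name is made up and this assumes the AWX Django settings are already loaded.

from awx.main.dispatch import pg_bus_conn

with pg_bus_conn(new_connection=True, select_timeout=10) as conn:
    conn.listen('example_channel')
    for event in conn.events(yield_timeouts=True):
        if event is None:
            # select timed out after conn.select_timeout seconds; a natural
            # spot for periodic housekeeping before listening again
            continue
        print(event.channel, event.payload)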
@@ -37,8 +37,14 @@ class Control(object):

def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs)

def cancel(self, task_ids, *args, **kwargs):
return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
def cancel(self, task_ids, with_reply=True):
if with_reply:
return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
else:
self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})

def schedule(self, *args, **kwargs):
return self.control_with_reply('schedule', *args, **kwargs)

@classmethod
def generate_reply_queue_name(cls):
@@ -52,14 +58,14 @@ class Control(object):

if not connection.get_autocommit():
raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')

with pg_bus_conn() as conn:
with pg_bus_conn(select_timeout=timeout) as conn:
conn.listen(reply_queue)
send_data = {'control': command, 'reply_to': reply_queue}
if extra_data:
send_data.update(extra_data)
conn.notify(self.queuename, json.dumps(send_data))

for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
for reply in conn.events(yield_timeouts=True):
if reply is None:
logger.error(f'{self.service} did not reply within {timeout}s')
raise RuntimeError(f"{self.service} did not reply within {timeout}s")
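A sketch of both cancel modes added above; the queue name follows the dispatcher convention used elsewhere in these commits and the task UUID is a placeholder.

from awx.main.dispatch.control import Control

ctl = Control('dispatcher')

# Wait for the dispatcher to report which of the tasks it actually canceled
canceled = ctl.cancel(['6f9c1c5e-0000-0000-0000-example-uuid'])

# Fire-and-forget: send the cancel message without listening for a reply
ctl.cancel(['6f9c1c5e-0000-0000-0000-example-uuid'], with_reply=False)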
@@ -1,53 +1,142 @@
import logging
import os
import time
from multiprocessing import Process
import yaml
from datetime import datetime

from django.conf import settings
from django.db import connections
from schedule import Scheduler
from django_guid import set_guid
from django_guid.utils import generate_guid

from awx.main.dispatch.worker import TaskWorker

logger = logging.getLogger('awx.main.dispatch.periodic')


class Scheduler(Scheduler):
def run_continuously(self):
idle_seconds = max(1, min(self.jobs).period.total_seconds() / 2)
class ScheduledTask:
"""
Class representing schedules, very loosely modeled after python schedule library Job
the idea of this class is to:
- only deal in relative times (time since the scheduler global start)
- only deal in integer math for target runtimes, but float for current relative time

def run():
ppid = os.getppid()
logger.warning('periodic beat started')
while True:
if os.getppid() != ppid:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
pid = os.getpid()
logger.warning(f'periodic beat exiting gracefully pid:{pid}')
raise SystemExit()
try:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
set_guid(generate_guid())
self.run_pending()
except Exception:
logger.exception('encountered an error while scheduling periodic tasks')
time.sleep(idle_seconds)
Missed schedule policy:
Invariant target times are maintained, meaning that if interval=10s offset=0
and it runs at t=7s, then it calls for next run in 3s.
However, if a complete interval has passed, that is counted as a missed run,
and missed runs are abandoned (no catch-up runs).
"""

process = Process(target=run)
process.daemon = True
process.start()
def __init__(self, name: str, data: dict):
# parameters needed for schedule computation
self.interval = int(data['schedule'].total_seconds())
self.offset = 0 # offset relative to start time this schedule begins
self.index = 0 # number of periods of the schedule that has passed

# parameters that do not affect scheduling logic
self.last_run = None # time of last run, only used for debug
self.completed_runs = 0 # number of times schedule is known to run
self.name = name
self.data = data # used by caller to know what to run

@property
def next_run(self):
"Time until the next run with t=0 being the global_start of the scheduler class"
return (self.index + 1) * self.interval + self.offset

def due_to_run(self, relative_time):
return bool(self.next_run <= relative_time)

def expected_runs(self, relative_time):
return int((relative_time - self.offset) / self.interval)

def mark_run(self, relative_time):
self.last_run = relative_time
self.completed_runs += 1
new_index = self.expected_runs(relative_time)
if new_index > self.index + 1:
logger.warning(f'Missed {new_index - self.index - 1} schedules of {self.name}')
self.index = new_index

def missed_runs(self, relative_time):
"Number of times job was supposed to run but failed to, only used for debug"
missed_ct = self.expected_runs(relative_time) - self.completed_runs
# if this is currently due to run do not count that as a missed run
if missed_ct and self.due_to_run(relative_time):
missed_ct -= 1
return missed_ct


def run_continuously():
scheduler = Scheduler()
for task in settings.CELERYBEAT_SCHEDULE.values():
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
total_seconds = task['schedule'].total_seconds()
scheduler.every(total_seconds).seconds.do(apply_async)
scheduler.run_continuously()
class Scheduler:
def __init__(self, schedule):
"""
Expects schedule in the form of a dictionary like
{
'job1': {'schedule': timedelta(seconds=50), 'other': 'stuff'}
}
Only the schedule nearest-second value is used for scheduling,
the rest of the data is for use by the caller to know what to run.
"""
self.jobs = [ScheduledTask(name, data) for name, data in schedule.items()]
min_interval = min(job.interval for job in self.jobs)
num_jobs = len(self.jobs)

# this is intentionally opinionated against spammy schedules
# a core goal is to spread out the scheduled tasks (for worker management)
# and high-frequency schedules just do not work with that
if num_jobs > min_interval:
raise RuntimeError(f'Number of schedules ({num_jobs}) is more than the shortest schedule interval ({min_interval} seconds).')

# evenly space out jobs over the base interval
for i, job in enumerate(self.jobs):
job.offset = (i * min_interval) // num_jobs

# internally times are all referenced relative to startup time, add grace period
self.global_start = time.time() + 2.0

def get_and_mark_pending(self):
relative_time = time.time() - self.global_start
to_run = []
for job in self.jobs:
if job.due_to_run(relative_time):
to_run.append(job)
logger.debug(f'scheduler found {job.name} to run, {relative_time - job.next_run} seconds after target')
job.mark_run(relative_time)
return to_run

def time_until_next_run(self):
relative_time = time.time() - self.global_start
next_job = min(self.jobs, key=lambda j: j.next_run)
delta = next_job.next_run - relative_time
if delta <= 0.1:
# careful not to give 0 or negative values to the select timeout, which has unclear interpretation
logger.warning(f'Scheduler next run of {next_job.name} is {-delta} seconds in the past')
return 0.1
elif delta > 20.0:
logger.warning(f'Scheduler next run unexpectedly over 20 seconds in future: {delta}')
return 20.0
logger.debug(f'Scheduler next run is {next_job.name} in {delta} seconds')
return delta

def debug(self, *args, **kwargs):
data = dict()
data['title'] = 'Scheduler status'

now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
relative_time = time.time() - self.global_start
data['started_time'] = start_time
data['current_time'] = now
data['current_time_relative'] = round(relative_time, 3)
data['total_schedules'] = len(self.jobs)

data['schedule_list'] = dict(
[
(
job.name,
dict(
last_run_seconds_ago=round(relative_time - job.last_run, 3) if job.last_run else None,
next_run_in_seconds=round(job.next_run - relative_time, 3),
offset_in_seconds=job.offset,
completed_runs=job.completed_runs,
missed_runs=job.missed_runs(relative_time),
),
)
for job in sorted(self.jobs, key=lambda job: job.interval)
]
)
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
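A standalone illustration of the offset-spreading and next-run arithmetic the new Scheduler/ScheduledTask classes rely on; plain numbers, no AWX imports, schedule names made up.

from datetime import timedelta

schedule = {
    'cleanup': {'schedule': timedelta(seconds=60)},
    'heartbeat': {'schedule': timedelta(seconds=20)},
    'metrics': {'schedule': timedelta(seconds=20)},
}

intervals = {name: int(data['schedule'].total_seconds()) for name, data in schedule.items()}
min_interval = min(intervals.values())  # 20
num_jobs = len(intervals)               # 3

# evenly space the jobs over the shortest interval, as Scheduler.__init__ does
offsets = {name: (i * min_interval) // num_jobs for i, name in enumerate(intervals)}
print(offsets)  # {'cleanup': 0, 'heartbeat': 6, 'metrics': 13}

# next target time for a job that has completed `index` full periods
def next_run(name, index):
    return (index + 1) * intervals[name] + offsets[name]

print(next_run('heartbeat', 0))  # 26 seconds after global_start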
@@ -339,6 +339,17 @@ class AutoscalePool(WorkerPool):

# but if the task takes longer than the time defined here, we will force it to stop here
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD

# initialize some things for subsystem metrics periodic gathering
# the AutoscalePool class does not save these to redis directly, but reports via produce_subsystem_metrics
self.scale_up_ct = 0
self.worker_count_max = 0

def produce_subsystem_metrics(self, metrics_object):
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
metrics_object.set('dispatcher_pool_max_worker_count', self.worker_count_max)
self.worker_count_max = len(self.workers)

@property
def should_grow(self):
if len(self.workers) < self.min_workers:
@@ -406,16 +417,16 @@ class AutoscalePool(WorkerPool):
# the task manager to never do more work
current_task = w.current_task
if current_task and isinstance(current_task, dict):
endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')
current_task_name = current_task.get('task', '')
if any(current_task_name.endswith(e) for e in endings):
if current_task_name.endswith(endings):
if 'started' not in current_task:
w.managed_tasks[current_task['uuid']]['started'] = time.time()
age = time.time() - current_task['started']
w.managed_tasks[current_task['uuid']]['age'] = age
if age > self.task_manager_timeout:
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
os.kill(w.pid, signal.SIGTERM)
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGUSR1 to {w.pid}')
os.kill(w.pid, signal.SIGUSR1)

for m in orphaned:
# if all the workers are dead, spawn at least one
@@ -443,7 +454,12 @@ class AutoscalePool(WorkerPool):
idx = random.choice(range(len(self.workers)))
return idx, self.workers[idx]
else:
return super(AutoscalePool, self).up()
self.scale_up_ct += 1
ret = super(AutoscalePool, self).up()
new_worker_ct = len(self.workers)
if new_worker_ct > self.worker_count_max:
self.worker_count_max = new_worker_ct
return ret

def write(self, preferred_queue, body):
if 'guid' in body:
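The simplified check works because str.endswith accepts a tuple of suffixes; a quick illustration with made-up task names:

endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')

print('awx.main.scheduler.tasks.task_manager'.endswith(endings))         # True
print('awx.main.tasks.system.cluster_node_heartbeat'.endswith(endings))  # False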
@@ -73,15 +73,15 @@ class task:

return cls.apply_async(args, kwargs)

@classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
def get_async_body(cls, args=None, kwargs=None, uuid=None, **kw):
"""
Get the python dict to become JSON data in the pg_notify message
This same message gets passed over the dispatcher IPC queue to workers
If a task is submitted to a multiprocessing pool, skipping pg_notify, this might be used directly
"""
task_id = uuid or str(uuid4())
args = args or []
kwargs = kwargs or {}
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg)
raise ValueError(msg)
obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
guid = get_guid()
if guid:
@@ -89,6 +89,16 @@ class task:
if bind_kwargs:
obj['bind_kwargs'] = bind_kwargs
obj.update(**kw)
return obj

@classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg)
raise ValueError(msg)
obj = cls.get_async_body(args=args, kwargs=kwargs, uuid=uuid, **kw)
if callable(queue):
queue = queue()
if not is_testing():
@@ -116,4 +126,5 @@ class task:
setattr(fn, 'name', cls.name)
setattr(fn, 'apply_async', cls.apply_async)
setattr(fn, 'delay', cls.delay)
setattr(fn, 'get_async_body', cls.get_async_body)
return fn
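For orientation, roughly the shape of the dict get_async_body() builds for a decorated task; the uuid, timestamp, and task name below are illustrative placeholders.

body = {
    'uuid': '0b5f5a1c-0000-0000-0000-example-uuid',
    'args': [],
    'kwargs': {},
    'task': 'awx.main.tasks.system.cluster_node_heartbeat',  # any registered task name
    'time_pub': 1700000000.0,
}
# apply_async() JSON-encodes a dict like this and sends it via pg_notify; the
# dispatcher's scheduler can instead hand the same dict straight to a worker.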
@@ -7,17 +7,21 @@ import signal
import sys
import redis
import json
import psycopg2
import psycopg
import time
from uuid import UUID
from queue import Empty as QueueEmpty
from datetime import timedelta

from django import db
from django.conf import settings

from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch.periodic import Scheduler
from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime
from awx.main.utils.db import set_connection_name
import awx.main.analytics.subsystem_metrics as s_metrics

if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -62,10 +66,12 @@ class AWXConsumerBase(object):

def control(self, body):
logger.warning(f'Received control signal:\n{body}')
control = body.get('control')
if control in ('status', 'running', 'cancel'):
if control in ('status', 'schedule', 'running', 'cancel'):
reply_queue = body['reply_to']
if control == 'status':
msg = '\n'.join([self.listening_on, self.pool.debug()])
if control == 'schedule':
msg = self.scheduler.debug()
elif control == 'running':
msg = []
for worker in self.pool.workers:
@@ -83,24 +89,20 @@ class AWXConsumerBase(object):
if task_ids and not msg:
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')

with pg_bus_conn() as conn:
conn.notify(reply_queue, json.dumps(msg))
if reply_queue is not None:
with pg_bus_conn() as conn:
conn.notify(reply_queue, json.dumps(msg))
elif control == 'reload':
for worker in self.pool.workers:
worker.quit()
else:
logger.error('unrecognized control message: {}'.format(control))

def process_task(self, body):
def dispatch_task(self, body):
"""This will place the given body into a worker queue to run method decorated as a task"""
if isinstance(body, dict):
body['time_ack'] = time.time()

if 'control' in body:
try:
return self.control(body)
except Exception:
logger.exception(f"Exception handling control message: {body}")
return
if len(self.pool):
if "uuid" in body and body['uuid']:
try:
@@ -114,15 +116,24 @@ class AWXConsumerBase(object):
self.pool.write(queue, body)
self.total_messages += 1

def process_task(self, body):
"""Routes the task details in body as either a control task or a task-task"""
if 'control' in body:
try:
return self.control(body)
except Exception:
logger.exception(f"Exception handling control message: {body}")
return
self.dispatch_task(body)

@log_excess_runtime(logger)
def record_statistics(self):
if time.time() - self.last_stats > 1: # buffer stat recording to once per second
try:
self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
self.last_stats = time.time()
except Exception:
logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
self.last_stats = time.time()
self.last_stats = time.time()

def run(self, *args, **kwargs):
signal.signal(signal.SIGINT, self.stop)
@@ -141,29 +152,72 @@ class AWXConsumerRedis(AWXConsumerBase):

def run(self, *args, **kwargs):
super(AWXConsumerRedis, self).run(*args, **kwargs)
self.worker.on_start()
logger.info(f'Callback receiver started with pid={os.getpid()}')
db.connection.close() # logs use database, so close connection

while True:
logger.debug(f'{os.getpid()} is alive')
time.sleep(60)


class AWXConsumerPG(AWXConsumerBase):
def __init__(self, *args, **kwargs):
def __init__(self, *args, schedule=None, **kwargs):
super().__init__(*args, **kwargs)
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
# if no successful loops have run since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup
self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period
self.last_cleanup = time.time()
init_time = time.time()
self.pg_down_time = init_time - self.pg_max_wait # allow no grace period
self.last_cleanup = init_time
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
self.last_metrics_gather = init_time
self.listen_cumulative_time = 0.0
if schedule:
schedule = schedule.copy()
else:
schedule = {}
# add control tasks to be run at regular schedules
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
schedule['pool_cleanup'] = {'control': self.pool.cleanup, 'schedule': timedelta(seconds=60)}
# record subsystem metrics for the dispatcher
schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)}
self.scheduler = Scheduler(schedule)

def record_metrics(self):
current_time = time.time()
self.pool.produce_subsystem_metrics(self.subsystem_metrics)
self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
self.subsystem_metrics.pipe_execute()
self.listen_cumulative_time = 0.0
self.last_metrics_gather = current_time

def run_periodic_tasks(self):
self.record_statistics() # maintains time buffer in method
"""
Run general periodic logic, and return maximum time in seconds before
the next requested run
This may be called more often than that when events are consumed
so this should be very efficient in that case
"""
try:
self.record_statistics() # maintains time buffer in method
except Exception as exc:
logger.warning(f'Failed to save dispatcher statistics {exc}')

if time.time() - self.last_cleanup > 60: # same as cluster_node_heartbeat
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
self.pool.cleanup()
self.last_cleanup = time.time()
for job in self.scheduler.get_and_mark_pending():
if 'control' in job.data:
try:
job.data['control']()
except Exception:
logger.exception(f'Error running control task {job.data}')
elif 'task' in job.data:
body = self.worker.resolve_callable(job.data['task']).get_async_body()
# bypasses pg_notify for scheduled tasks
self.dispatch_task(body)

self.pg_is_down = False
self.listen_start = time.time()

return self.scheduler.time_until_next_run()

def run(self, *args, **kwargs):
super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -179,17 +233,21 @@ class AWXConsumerPG(AWXConsumerBase):

if init is False:
self.worker.on_start()
init = True
# run_periodic_tasks runs scheduled actions and gives time until next scheduled action
# this is saved to the conn (PubSub) object in order to modify read timeout in-loop
conn.select_timeout = self.run_periodic_tasks()
# this is the main operational loop for awx-manage run_dispatcher
for e in conn.events(yield_timeouts=True):
self.listen_cumulative_time += time.time() - self.listen_start # for metrics
if e is not None:
self.process_task(json.loads(e.payload))
self.run_periodic_tasks()
self.pg_is_down = False
conn.select_timeout = self.run_periodic_tasks()
if self.should_stop:
return
except psycopg2.InterfaceError:
except psycopg.InterfaceError:
logger.warning("Stale Postgres message bus connection, reconnecting")
continue
except (db.DatabaseError, psycopg2.OperationalError):
except (db.DatabaseError, psycopg.OperationalError):
# If we have attained steady state operation, tolerate short-term database hiccups
if not self.pg_is_down:
logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
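A sketch of the schedule mapping AWXConsumerPG now accepts: each entry either names a registered task (dispatched through the worker pool) or carries a 'control' callable run inline in the main loop. The task path and interval below are illustrative; control entries such as 'pool_cleanup' and 'metrics_gather' are appended automatically in __init__.

from datetime import timedelta

schedule = {
    'cluster_node_heartbeat': {
        'task': 'awx.main.tasks.system.cluster_node_heartbeat',  # resolved via the worker
        'schedule': timedelta(seconds=60),
    },
}
# AWXConsumerPG(..., schedule=schedule) would then periodically dispatch this task
# without going through pg_notify.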
@@ -219,6 +277,7 @@ class BaseWorker(object):

def work_loop(self, queue, finished, idx, *args):
ppid = os.getppid()
signal_handler = WorkerSignalHandler()
set_connection_name('worker') # set application_name to distinguish from other dispatcher processes
while not signal_handler.kill_now:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
@@ -230,8 +289,8 @@ class BaseWorker(object):
break
except QueueEmpty:
continue
except Exception as e:
logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
except Exception:
logger.exception("Exception on worker {}, reconnecting: ".format(idx))
continue
try:
for conn in db.connections.all():
@@ -191,7 +191,9 @@ class CallbackBrokerWorker(BaseWorker):

e._retry_count = retry_count

# special sanitization logic for postgres treatment of NUL 0x00 char
if (retry_count == 1) and isinstance(exc_indv, ValueError) and ("\x00" in e.stdout):
# This used to check the class of the exception but on the postgres3 upgrade it could appear
# as either DataError or ValueError, so now let's just check if it's there.
if (retry_count == 1) and ("\x00" in e.stdout):
e.stdout = e.stdout.replace("\x00", "")

if retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
@@ -67,10 +67,60 @@ def __enum_validate__(validator, enums, instance, schema):

Draft4Validator.VALIDATORS['enum'] = __enum_validate__


import logging

logger = logging.getLogger('awx.main.fields')


class JSONBlob(JSONField):
# Cringe... a JSONField that is back ended with a TextField.
# This field was a legacy custom field type that tl;dr; was a TextField
# Over the years, with Django upgrades, we were able to go to a JSONField instead of the custom field
# However, we didn't want to have large customers with millions of events to update from text to json during an upgrade
# So we keep this field type as backended with TextField.
def get_internal_type(self):
return "TextField"

# postgres uses a Jsonb field as the default backend
# with psycopg2 it was using a psycopg2._json.Json class internally
# with psycopg3 it uses a psycopg.types.json.Jsonb class internally
# The binary class was not compatible with a text field, so we are going to override these next two methods and ensure we are using a string

def from_db_value(self, value, expression, connection):
if value is None:
return value

if isinstance(value, str):
try:
return json.loads(value)
except Exception as e:
logger.error(f"Failed to load JSONField {self.name}: {e}")

return value

def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
try:
# Null characters are not allowed in text fields and JSONBlobs are JSON data but saved as text
# So we want to make sure we strip out any null characters. Also note, these "should" be escaped by the dumps process:
# >>> my_obj = { 'test': '\x00' }
# >>> import json
# >>> json.dumps(my_obj)
# '{"test": "\\u0000"}'
# But just to be safe, let's remove them if they are there. \x00 and \u0000 are the same:
# >>> string = "\x00"
# >>> "\u0000" in string
# True
dumped_value = json.dumps(value)
if "\x00" in dumped_value:
dumped_value = dumped_value.replace("\x00", '')
return dumped_value
except Exception as e:
logger.error(f"Failed to dump JSONField {self.name}: {e} value: {value}")

return value


# Based on AutoOneToOneField from django-annoying:
# https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py
@@ -800,7 +850,7 @@ class CredentialTypeInjectorField(JSONSchemaField):

def validate_env_var_allowed(self, env_var):
if env_var.startswith('ANSIBLE_'):
raise django_exceptions.ValidationError(
_('Environment variable {} may affect Ansible configuration so its ' 'use is not allowed in credentials.').format(env_var),
_('Environment variable {} may affect Ansible configuration so its use is not allowed in credentials.').format(env_var),
code='invalid',
params={'value': env_var},
)
@@ -23,7 +23,10 @@ class Command(BaseCommand):

def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batch of X events. Defaults to 500.'
)

def init_logging(self):
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
@@ -48,7 +51,7 @@ class Command(BaseCommand):
else:
pks_to_delete.add(asobj.pk)
# Cleanup objects in batches instead of deleting each one individually.
if len(pks_to_delete) >= 500:
if len(pks_to_delete) >= self.batch_size:
ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
n_deleted_items += len(pks_to_delete)
pks_to_delete.clear()
@@ -63,4 +66,5 @@ class Command(BaseCommand):
self.days = int(options.get('days', 30))
self.cutoff = now() - datetime.timedelta(days=self.days)
self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 500))
self.cleanup_activitystream()
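One way to exercise the new --batch-size option from Python; this mirrors `awx-manage cleanup_activitystream --days 90 --batch-size 1000 --dry-run` and assumes AWX's Django environment is set up.

from django.core.management import call_command

call_command('cleanup_activitystream', days=90, batch_size=1000, dry_run=True)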
22 awx/main/management/commands/cleanup_host_metrics.py Normal file
@@ -0,0 +1,22 @@
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.main.tasks.host_metrics import HostMetricTask


class Command(BaseCommand):
"""
This command provides a cleanup task for the HostMetric model.
There are two modes, which run in the following order:
- soft cleanup
- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
- - updates columns delete, deleted_counter and last_deleted
- hard cleanup
- - Permanently erase from the database all host metrics last automated 36 months ago or before.
This operation happens after the soft deletion has finished.
"""

help = 'Run soft and hard-deletion of HostMetrics'

def handle(self, *args, **options):
HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
@@ -9,6 +9,7 @@ import re


# Django
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, connection
from django.db.models import Min, Max
@@ -17,10 +18,7 @@ from django.utils.timezone import now

# AWX
from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification


def unified_job_class_to_event_table_name(job_class):
return f'main_{job_class().event_class.__name__.lower()}'
from awx.main.utils import unified_job_class_to_event_table_name


def partition_table_name(job_class, dt):
@@ -152,7 +150,10 @@ class Command(BaseCommand):

def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batch of X jobs. Defaults to 100000.'
)
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
@@ -198,18 +199,58 @@ class Command(BaseCommand):
delete_meta.delete_jobs()
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)

def _cascade_delete_job_events(self, model, pk_list):
def has_unpartitioned_table(self, model):
tblname = unified_job_class_to_event_table_name(model)
with connection.cursor() as cursor:
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
row = cursor.fetchone()
if row is None:
return False
return True

def _delete_unpartitioned_table(self, model):
"If the unpartitioned table is no longer necessary, it will drop the table"
tblname = unified_job_class_to_event_table_name(model)
if not self.has_unpartitioned_table(model):
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
return

with connection.cursor() as cursor:
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
row = cursor.fetchone()
last_created = row[0]

if last_created:
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
else:
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')

if (last_created is None) or (last_created < self.cutoff):
self.logger.warning(
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
)
with connection.cursor() as cursor:
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')

def _delete_unpartitioned_events(self, model, pk_list):
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
tblname = unified_job_class_to_event_table_name(model)
rel_name = model().event_parent_key

# Bail if the unpartitioned table does not exist anymore
if not self.has_unpartitioned_table(model):
return

# Table still exists, delete individual unpartitioned events
if pk_list:
with connection.cursor() as cursor:
tblname = unified_job_class_to_event_table_name(model)

self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.')
pk_list_csv = ','.join(map(str, pk_list))
rel_name = model().event_parent_key
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});")

def cleanup_jobs(self):
batch_size = 100000

# Hack to avoid doing N+1 queries as each item in the Job query set does
# an individual query to get the underlying UnifiedJob.
Job.polymorphic_super_sub_accessors_replaced = True
@@ -224,13 +265,14 @@ class Command(BaseCommand):
deleted = 0
info = qs.aggregate(min=Min('id'), max=Max('id'))
if info['min'] is not None:
for start in range(info['min'], info['max'] + 1, batch_size):
qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
for start in range(info['min'], info['max'] + 1, self.batch_size):
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size)
pk_list = qs_batch.values_list('id', flat=True)

_, results = qs_batch.delete()
deleted += results['main.Job']
self._cascade_delete_job_events(Job, pk_list)
# Avoid dropping the job event table in case we have interacted with it already
self._delete_unpartitioned_events(Job, pk_list)

return skipped, deleted

@@ -253,7 +295,7 @@ class Command(BaseCommand):
deleted += 1

if not self.dry_run:
self._cascade_delete_job_events(AdHocCommand, pk_list)
self._delete_unpartitioned_events(AdHocCommand, pk_list)

skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -281,7 +323,7 @@ class Command(BaseCommand):
deleted += 1

if not self.dry_run:
self._cascade_delete_job_events(ProjectUpdate, pk_list)
self._delete_unpartitioned_events(ProjectUpdate, pk_list)

skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -309,7 +351,7 @@ class Command(BaseCommand):
deleted += 1

if not self.dry_run:
self._cascade_delete_job_events(InventoryUpdate, pk_list)
self._delete_unpartitioned_events(InventoryUpdate, pk_list)

skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -333,7 +375,7 @@ class Command(BaseCommand):
deleted += 1

if not self.dry_run:
self._cascade_delete_job_events(SystemJob, pk_list)
self._delete_unpartitioned_events(SystemJob, pk_list)

skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -378,12 +420,12 @@ class Command(BaseCommand):
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted

@transaction.atomic
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1))
self.init_logging()
self.days = int(options.get('days', 90))
self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 100000))
try:
self.cutoff = now() - datetime.timedelta(days=self.days)
except OverflowError:
@@ -405,19 +447,29 @@ class Command(BaseCommand):
del s.receivers[:]
s.sender_receivers_cache.clear()

for m in model_names:
if m not in models_to_cleanup:
continue
with transaction.atomic():
for m in models_to_cleanup:
skipped, deleted = getattr(self, 'cleanup_%s' % m)()

skipped, deleted = getattr(self, 'cleanup_%s' % m)()
func = getattr(self, 'cleanup_%s_partition' % m, None)
if func:
skipped_partition, deleted_partition = func()
skipped += skipped_partition
deleted += deleted_partition

func = getattr(self, 'cleanup_%s_partition' % m, None)
if func:
skipped_partition, deleted_partition = func()
skipped += skipped_partition
deleted += deleted_partition
if self.dry_run:
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
else:
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)

if self.dry_run:
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
else:
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
# Deleting unpartitioned tables cannot be done in same transaction as updates to related tables
if not self.dry_run:
with transaction.atomic():
for m in models_to_cleanup:
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
unified_job_class = apps.get_model('main', unified_job_class_name)
try:
unified_job_class().event_class
except (NotImplementedError, AttributeError):
continue # no need to run this for models without events
self._delete_unpartitioned_table(unified_job_class)
@@ -44,7 +44,7 @@ class Command(BaseCommand):

'- To list all (now deprecated) custom virtual environments run:',
'awx-manage list_custom_venvs',
'',
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv',
'',
'- Run these commands with `-q` to remove tool tips.',
@@ -13,7 +13,7 @@ class Command(BaseCommand):

Deprovision a cluster node
"""

help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'
help = 'Remove instance from the database. Specify `--hostname` to use this command.'

def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')
@@ -0,0 +1,9 @@
from django.core.management.base import BaseCommand
from awx.main.tasks.host_metrics import HostMetricSummaryMonthlyTask


class Command(BaseCommand):
help = 'Computing of HostMetricSummaryMonthly'

def handle(self, *args, **options):
HostMetricSummaryMonthlyTask().execute()
@@ -22,7 +22,7 @@ class Command(BaseCommand):

'# Discovered Virtual Environments:',
'\n'.join(venvs),
'',
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv',
'',
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
@@ -44,16 +44,18 @@ class Command(BaseCommand):

for x in ig.instances.all():
color = '\033[92m'
end_color = '\033[0m'
if x.capacity == 0 and x.node_type != 'hop':
color = '\033[91m'
if not x.enabled:
color = '\033[90m[DISABLED] '
if no_color:
color = ''
end_color = ''

capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')

print()
27 awx/main/management/commands/precreate_partitions.py Normal file
@@ -0,0 +1,27 @@
from django.utils.timezone import now
from django.core.management.base import BaseCommand, CommandParser
from datetime import timedelta
from awx.main.utils.common import create_partition, unified_job_class_to_event_table_name
from awx.main.models import Job, SystemJob, ProjectUpdate, InventoryUpdate, AdHocCommand


class Command(BaseCommand):
"""Command used to precreate database partitions to avoid pg_dump locks"""

def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('--count', dest='count', action='store', help='The amount of hours of partitions to create', type=int, default=1)

def _create_partitioned_tables(self, count):
tables = list()
for model in (Job, SystemJob, ProjectUpdate, InventoryUpdate, AdHocCommand):
tables.append(unified_job_class_to_event_table_name(model))
start = now()
while count > 0:
for table in tables:
create_partition(table, start)
print(f'Created partitions for {table} {start}')
start = start + timedelta(hours=1)
count -= 1

def handle(self, **options):
self._create_partitioned_tables(count=options.get('count'))
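A minimal sketch of precreating several hourly partitions ahead of a backup window, assuming the command above is installed as `precreate_partitions` (equivalent to `awx-manage precreate_partitions --count 4`).

from django.core.management import call_command

call_command('precreate_partitions', count=4)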
@@ -25,17 +25,20 @@ class Command(BaseCommand):

def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
parser.add_argument('--uuid', type=str, help="Instance UUID")

def _register_hostname(self, hostname, node_type, uuid):
def _register_hostname(self, hostname, node_type, uuid, listener_port):
if not hostname:
if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
raise CommandError('Registering with values from settings only intended for use in K8s installs')

from awx.main.management.commands.register_queue import RegisterQueue

(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
(changed, instance) = Instance.objects.register(
ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
)
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
RegisterQueue(
settings.DEFAULT_EXECUTION_QUEUE_NAME,
@@ -48,7 +51,7 @@ class Command(BaseCommand):
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
).register()
else:
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
if changed:
print("Successfully registered instance {}".format(hostname))
else:
@@ -58,6 +61,6 @@ class Command(BaseCommand):
@transaction.atomic
def handle(self, **options):
self.changed = False
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
if self.changed:
print("(changed: True)")
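A hypothetical invocation of the updated command from Python (values are placeholders; the same flags are available through awx-manage on the CLI):

from django.core.management import call_command

# Register an example execution node; listener_port is now passed through to register().
call_command('provision_instance', hostname='exec1.example.org', node_type='execution', listener_port=27199)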
@@ -2,6 +2,7 @@ import logging
import json

from django.core.management.base import BaseCommand

from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker

@@ -18,7 +19,7 @@ class Command(BaseCommand):

def handle(self, *arg, **options):
try:
with pg_bus_conn(new_connection=True) as conn:
with pg_bus_conn() as conn:
conn.listen("tower_settings_change")
for e in conn.events(yield_timeouts=True):
if e is not None:
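pg_bus_conn is AWX's own helper around PostgreSQL LISTEN/NOTIFY; for orientation only, a bare psycopg2 loop over the same idea looks roughly like this (the DSN and timeout are placeholders):

import select
import psycopg2
import psycopg2.extensions

conn = psycopg2.connect("dbname=awx user=awx")  # placeholder DSN; AWX derives this from Django settings
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

with conn.cursor() as cur:
    cur.execute('LISTEN tower_settings_change;')

while True:
    # Wait up to 5 seconds for a notification, then drain any that arrived.
    if select.select([conn], [], [], 5) != ([], [], []):
        conn.poll()
        while conn.notifies:
            notify = conn.notifies.pop(0)
            print(notify.channel, notify.payload)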
@@ -4,28 +4,22 @@ import logging
import yaml

from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection

from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
from awx.main.dispatch import periodic

logger = logging.getLogger('awx.main.dispatch')


def construct_bcast_queue_name(common_name):
return common_name + '_' + settings.CLUSTER_HOST_ID


class Command(BaseCommand):
help = 'Launch the task dispatcher'

def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running dispatchers')
parser.add_argument('--schedule', dest='schedule', action='store_true', help='print the current status of schedules being ran by dispatcher')
parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasked managed by this dispatcher')
parser.add_argument(
'--reload',
@@ -47,6 +41,9 @@ class Command(BaseCommand):
if options.get('status'):
print(Control('dispatcher').status())
return
if options.get('schedule'):
print(Control('dispatcher').schedule())
return
if options.get('running'):
print(Control('dispatcher').running())
return
@@ -63,21 +60,11 @@ class Command(BaseCommand):
print(Control('dispatcher').cancel(cancel_data))
return

# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and cache connections (that way lies race conditions)
django_connection.close()
django_cache.close()

# spawn a daemon thread to periodically enqueues scheduled tasks
# (like the node heartbeat)
periodic.run_continuously()

consumer = None

try:
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
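The dispatcher now receives the periodic schedule directly (schedule=settings.CELERYBEAT_SCHEDULE) instead of spawning the old periodic.run_continuously() thread. As a rough, simplified illustration of what owning a schedule means, not AWX's actual scheduler: the task names and the interval-only shape below are made up for the sketch.

import time

# Toy schedule: task name -> interval in seconds (real CELERYBEAT_SCHEDULE entries carry more structure).
schedule = {'example_heartbeat': 60, 'example_cleanup': 300}
last_run = {name: 0.0 for name in schedule}

def due_tasks(now=None):
    """Return tasks whose interval has elapsed, and mark them as run."""
    now = time.monotonic() if now is None else now
    due = [name for name, interval in schedule.items() if now - last_run[name] >= interval]
    for name in due:
        last_run[name] = now
    return due

# A dispatcher main loop would call due_tasks() on each tick and enqueue the results.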
@@ -1,67 +0,0 @@
import json
import logging
import os
import time

from django.core.management.base import BaseCommand
from django.conf import settings

from awx.main.dispatch import pg_bus_conn

logger = logging.getLogger('awx.main.commands.run_heartbeet')


class Command(BaseCommand):
help = 'Launch the web server beacon (heartbeet)'

def print_banner(self):
heartbeet = r"""
********** **********
************* *************
*****************************
***********HEART***********
*************************
*******************
*************** _._
*********** /`._ `'. __
******* \ .\| \ _'` `)
*** (``_) \| ).'` /`- /
* `\ `;\_ `\\//`-'` /
\ `'.'.| / __/`
`'--v_|/`'`
__||-._
/'` `-`` `'\\
/ .'` )
\ BEET ' )
\. /
'. /'`
`) |
//
'(.
`\`.
``"""
print(heartbeet)

def construct_payload(self, action='online'):
payload = {
'hostname': settings.CLUSTER_HOST_ID,
'ip': os.environ.get('MY_POD_IP'),
'action': action,
}
return json.dumps(payload)

def do_hearbeat_loop(self):
with pg_bus_conn(new_connection=True) as conn:
while True:
logger.debug('Sending heartbeat')
conn.notify('web_heartbeet', self.construct_payload())
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)

# TODO: Send a message with action=offline if we notice a SIGTERM or SIGINT
# (wsrelay can use this to remove the node quicker)
def handle(self, *arg, **options):
self.print_banner()

# Note: We don't really try any reconnect logic to pg_notify here,
# just let supervisor restart if we fail.
self.do_hearbeat_loop()
@@ -22,7 +22,7 @@ class Command(BaseCommand):

def handle(self, *arg, **options):
try:
with pg_bus_conn(new_connection=True) as conn:
with pg_bus_conn() as conn:
conn.listen("rsyslog_configurer")
# reconfigure rsyslog on start up
reconfigure_rsyslog()
45 awx/main/management/commands/run_ws_heartbeat.py Normal file
@@ -0,0 +1,45 @@
import json
import logging
import os
import time
import signal
import sys

from django.core.management.base import BaseCommand
from django.conf import settings

from awx.main.dispatch import pg_bus_conn

logger = logging.getLogger('awx.main.commands.run_ws_heartbeat')


class Command(BaseCommand):
    help = 'Launch the web server beacon (ws_heartbeat)'

    def construct_payload(self, action='online'):
        payload = {
            'hostname': settings.CLUSTER_HOST_ID,
            'ip': os.environ.get('MY_POD_IP'),
            'action': action,
        }
        return json.dumps(payload)

    def notify_listener_and_exit(self, *args):
        with pg_bus_conn(new_connection=False) as conn:
            conn.notify('web_ws_heartbeat', self.construct_payload(action='offline'))
        sys.exit(0)

    def do_heartbeat_loop(self):
        while True:
            with pg_bus_conn() as conn:
                logger.debug('Sending heartbeat')
                conn.notify('web_ws_heartbeat', self.construct_payload())
            time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)

    def handle(self, *arg, **options):
        signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
        signal.signal(signal.SIGINT, self.notify_listener_and_exit)

        # Note: We don't really try any reconnect logic to pg_notify here,
        # just let supervisor restart if we fail.
        self.do_heartbeat_loop()
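The SIGTERM/SIGINT wiring above follows a standard "announce offline, then exit" pattern; a self-contained sketch of the same idea without any AWX imports (prints stand in for the pg_notify calls):

import signal
import sys
import time

def announce_offline_and_exit(signum, frame):
    # Stand-in for conn.notify('web_ws_heartbeat', ... action='offline' ...): tell peers we are going away.
    print('offline')
    sys.exit(0)

signal.signal(signal.SIGTERM, announce_offline_and_exit)
signal.signal(signal.SIGINT, announce_offline_and_exit)

while True:
    print('online')  # stand-in for the periodic heartbeat notification
    time.sleep(5)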
@@ -98,6 +98,7 @@ class Command(BaseCommand):
try:
executor = MigrationExecutor(connection)
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
connection.close()  # Because of async nature, main loop will use new connection, so close this
except Exception as exc:
logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...')
time.sleep(10)
@@ -2,6 +2,7 @@
# All Rights Reserved.

import logging
import uuid
from django.db import models
from django.conf import settings
from django.db.models.functions import Lower
@@ -114,25 +115,29 @@ class InstanceManager(models.Manager):
return node[0]
raise RuntimeError("No instance found with the current cluster host id")

def register(self, uuid=None, hostname=None, ip_address=None, node_type='hybrid', defaults=None):
def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
if not hostname:
hostname = settings.CLUSTER_HOST_ID

if not ip_address:
ip_address = ""

with advisory_lock('instance_registration_%s' % hostname):
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
# detect any instances with the same IP address.
# if one exists, set it to None
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
if inst_conflicting_ip.exists():
for other_inst in inst_conflicting_ip:
other_hostname = other_inst.hostname
other_inst.ip_address = None
other_inst.save(update_fields=['ip_address'])
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
# if one exists, set it to ""
if ip_address:
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
if inst_conflicting_ip.exists():
for other_inst in inst_conflicting_ip:
other_hostname = other_inst.hostname
other_inst.ip_address = ""
other_inst.save(update_fields=['ip_address'])
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))

# Return existing instance that matches hostname or UUID (default to UUID)
if uuid is not None and uuid != UUID_DEFAULT and self.filter(uuid=uuid).exists():
instance = self.filter(uuid=uuid)
if node_uuid is not None and node_uuid != UUID_DEFAULT and self.filter(uuid=node_uuid).exists():
instance = self.filter(uuid=node_uuid)
else:
# if instance was not retrieved by uuid and hostname was, use the hostname
instance = self.filter(hostname=hostname)
@@ -156,6 +161,9 @@ class InstanceManager(models.Manager):
if instance.node_type != node_type:
instance.node_type = node_type
update_fields.append('node_type')
if instance.listener_port != listener_port:
instance.listener_port = listener_port
update_fields.append('listener_port')
if update_fields:
instance.save(update_fields=update_fields)
return (True, instance)
@@ -166,14 +174,11 @@ class InstanceManager(models.Manager):
create_defaults = {
'node_state': Instance.States.INSTALLED,
'capacity': 0,
'listener_port': 27199,
}
if defaults is not None:
create_defaults.update(defaults)
uuid_option = {}
if uuid is not None:
uuid_option = {'uuid': uuid}
uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
if node_type == 'execution' and 'version' not in create_defaults:
create_defaults['version'] = RECEPTOR_PENDING
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
return (True, instance)
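A hypothetical call against the renamed signature (requires a configured AWX environment; the hostname and port values are placeholders):

from awx.main.models import Instance

changed, instance = Instance.objects.register(
    hostname='exec1.example.org',
    node_type='execution',
    listener_port=27199,
)
if changed:
    print(f'registered {instance.hostname} on port {instance.listener_port}')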
@@ -122,7 +122,7 @@ class URLModificationMiddleware(MiddlewareMixin):
field_class=fields.DictField,
read_only=True,
label=_('Formats of all available named urls'),
help_text=_('Read-only list of key-value pairs that shows the standard format of all ' 'available named URLs.'),
help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'),
category=_('Named URL'),
category_slug='named-url',
)
@@ -9,13 +9,11 @@ from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
import taggit.managers
import awx.main.fields


class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
@@ -184,12 +182,6 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ('kind', 'name'),
@@ -529,12 +521,6 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
('users', models.ManyToManyField(related_name='organizations', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
@@ -589,12 +575,6 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
),
migrations.CreateModel(
@@ -644,12 +624,6 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ['-next_run'],
@@ -687,12 +661,6 @@ class Migration(migrations.Migration):
),
),
('organization', models.ForeignKey(related_name='teams', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
('users', models.ManyToManyField(related_name='teams', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
@@ -1267,13 +1235,6 @@ class Migration(migrations.Migration):
null=True,
),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='unifiedjob',
name='created_by',
@@ -1319,13 +1280,6 @@ class Migration(migrations.Migration):
name='schedule',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to='main.Schedule', null=True),
),
migrations.AddField(
model_name='unifiedjob',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='unifiedjob',
name='unified_job_template',
@@ -1370,13 +1324,6 @@ class Migration(migrations.Migration):
help_text='Organization containing this inventory.',
),
),
migrations.AddField(
model_name='inventory',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='host',
name='inventory',
@@ -1407,13 +1354,6 @@ class Migration(migrations.Migration):
null=True,
),
),
migrations.AddField(
model_name='host',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='group',
name='hosts',
@@ -1441,13 +1381,6 @@ class Migration(migrations.Migration):
name='parents',
field=models.ManyToManyField(related_name='children', to='main.Group', blank=True),
),
migrations.AddField(
model_name='group',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='custominventoryscript',
name='organization',
@@ -1459,13 +1392,6 @@ class Migration(migrations.Migration):
null=True,
),
),
migrations.AddField(
model_name='custominventoryscript',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='credential',
name='team',

@@ -12,8 +12,6 @@ import django.db.models.deletion
from django.conf import settings
from django.utils.timezone import now

import taggit.managers


def create_system_job_templates(apps, schema_editor):
"""
@@ -125,7 +123,6 @@ class Migration(migrations.Migration):
]

dependencies = [
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
@@ -256,12 +253,6 @@ class Migration(migrations.Migration):
'organization',
models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
),
migrations.AddField(
@@ -721,12 +712,6 @@ class Migration(migrations.Migration):
help_text='Organization this label belongs to.',
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ('organization', 'name'),
@@ -2,17 +2,14 @@
# Python
from __future__ import unicode_literals

# Psycopg2
from psycopg2.extensions import AsIs

# Django
from django.db import connection, migrations, models, OperationalError, ProgrammingError
from django.conf import settings
import taggit.managers

# AWX
import awx.main.fields
from awx.main.models import Host
from ._sqlite_helper import dbawaremigrations


def replaces():
@@ -135,9 +132,11 @@ class Migration(migrations.Migration):
help_text='If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
),
),
migrations.RunSQL(
[("CREATE INDEX host_ansible_facts_default_gin ON %s USING gin" "(ansible_facts jsonb_path_ops);", [AsIs(Host._meta.db_table)])],
[('DROP INDEX host_ansible_facts_default_gin;', None)],
dbawaremigrations.RunSQL(
sql="CREATE INDEX host_ansible_facts_default_gin ON {} USING gin(ansible_facts jsonb_path_ops);".format(Host._meta.db_table),
reverse_sql='DROP INDEX host_ansible_facts_default_gin;',
sqlite_sql=dbawaremigrations.RunSQL.noop,
sqlite_reverse_sql=dbawaremigrations.RunSQL.noop,
),
# SCM file-based inventories
migrations.AddField(
@@ -320,10 +319,6 @@ class Migration(migrations.Migration):
model_name='permission',
name='project',
),
migrations.RemoveField(
model_name='permission',
name='tags',
),
migrations.RemoveField(
model_name='permission',
name='team',
@@ -513,12 +508,6 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ('kind', 'name'),
@@ -3,24 +3,27 @@ from __future__ import unicode_literals

from django.db import migrations

from ._sqlite_helper import dbawaremigrations

tables_to_drop = [
'celery_taskmeta',
'celery_tasksetmeta',
'djcelery_crontabschedule',
'djcelery_intervalschedule',
'djcelery_periodictask',
'djcelery_periodictasks',
'djcelery_taskstate',
'djcelery_workerstate',
'djkombu_message',
'djkombu_queue',
]
postgres_sql = ([("DROP TABLE IF EXISTS {} CASCADE;".format(table))] for table in tables_to_drop)
sqlite_sql = ([("DROP TABLE IF EXISTS {};".format(table))] for table in tables_to_drop)


class Migration(migrations.Migration):
dependencies = [
('main', '0049_v330_validate_instance_capacity_adjustment'),
]

operations = [
migrations.RunSQL([("DROP TABLE IF EXISTS {} CASCADE;".format(table))])
for table in (
'celery_taskmeta',
'celery_tasksetmeta',
'djcelery_crontabschedule',
'djcelery_intervalschedule',
'djcelery_periodictask',
'djcelery_periodictasks',
'djcelery_taskstate',
'djcelery_workerstate',
'djkombu_message',
'djkombu_queue',
)
]
operations = [dbawaremigrations.RunSQL(p, sqlite_sql=s) for p, s in zip(postgres_sql, sqlite_sql)]
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers

# AWX
import awx.main.fields
@@ -20,7 +19,6 @@ def setup_tower_managed_defaults(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0002_auto_20150616_2121'),
('main', '0066_v350_inventorysource_custom_virtualenv'),
]

@@ -60,12 +58,6 @@ class Migration(migrations.Migration):
'source_credential',
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_input_sources', to='main.Credential'),
),
(
'tags',
taggit.managers.TaggableManager(
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
),
),
(
'target_credential',
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='input_sources', to='main.Credential'),
@@ -2,6 +2,8 @@

from django.db import migrations, models, connection

from ._sqlite_helper import dbawaremigrations


def migrate_event_data(apps, schema_editor):
# see: https://github.com/ansible/awx/issues/6010
@@ -12,21 +14,21 @@ def migrate_event_data(apps, schema_editor):
# https://www.postgresql.org/docs/9.1/datatype-numeric.html)
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
with connection.cursor() as cursor:
# rename the current event table
cursor.execute(f'ALTER TABLE {tblname} RENAME TO _old_{tblname};')
# create a *new* table with the same schema
cursor.execute(f'CREATE TABLE {tblname} (LIKE _old_{tblname} INCLUDING ALL);')
# alter the *new* table so that the primary key is a big int
# This loop used to do roughly the following:
# Rename the table to _old_<tablename>
# Create a new table form the old table (it would have no rows)
# Drop the old sequnce and create a new on tied to the new table and set the sequence to the last number from the old table
# This used to work with postgres spitting out a NOTICE and DETAIL
# With the django 4.2 upgrade that changed to an ERROR and HINT
# By the time we hit the 4.2 upgrade, no one should be upgrading a database this old directly to this new schema
# So we no longer really care about having to do all of this work, we only need a table with a bigint ID field
# And this can be achieved by just changing the id column type...
cursor.execute(f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;')

# recreate counter for the new table's primary key to
# start where the *old* table left off (we have to do this because the
# counter changed from an int to a bigint)
cursor.execute(f'DROP SEQUENCE IF EXISTS "{tblname}_id_seq" CASCADE;')
cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
cursor.execute(f'ALTER TABLE "{tblname}" ALTER COLUMN "id" ' f"SET DEFAULT nextval('{tblname}_id_seq');")
cursor.execute(f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);")
cursor.execute(f'DROP TABLE _old_{tblname};')

def migrate_event_data_sqlite(apps, schema_editor):
# TODO: cmeyers fill this in
return


class FakeAlterField(migrations.AlterField):
@@ -42,7 +44,7 @@ class Migration(migrations.Migration):
]

operations = [
migrations.RunPython(migrate_event_data),
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
FakeAlterField(
model_name='adhoccommandevent',
name='id',
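dbawaremigrations comes from AWX's own ._sqlite_helper; with stock Django, the same "skip this on SQLite" branching can be expressed roughly like this (the app, model, and table names here are placeholders):

from django.db import migrations

def forwards(apps, schema_editor):
    # Run the PostgreSQL-only DDL, and silently skip it on other backends such as SQLite.
    if schema_editor.connection.vendor != 'postgresql':
        return
    with schema_editor.connection.cursor() as cursor:
        cursor.execute('ALTER TABLE example_table ALTER COLUMN id TYPE bigint USING id::bigint;')

class Migration(migrations.Migration):
    dependencies = [('example_app', '0001_initial')]
    operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]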
@@ -4,12 +4,10 @@ from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import taggit.managers


class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0123_drop_hg_support'),
]
@@ -69,12 +67,6 @@ class Migration(migrations.Migration):
to='main.Organization',
),
),
(
'tags',
taggit.managers.TaggableManager(
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
),
),
],
options={
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('organization_id'), nulls_first=True), 'image'),
@@ -1,5 +1,7 @@
from django.db import migrations, models, connection

from ._sqlite_helper import dbawaremigrations


def migrate_event_data(apps, schema_editor):
# see: https://github.com/ansible/awx/issues/9039
@@ -30,7 +32,7 @@ def migrate_event_data(apps, schema_editor):
# otherwise, the schema changes we would make on the old jobevents table
# (namely, dropping the primary key constraint) would cause the migration
# to suffer a serious performance degradation
cursor.execute(f'CREATE TABLE tmp_{tblname} ' f'(LIKE _unpartitioned_{tblname} INCLUDING ALL)')
cursor.execute(f'CREATE TABLE tmp_{tblname} (LIKE _unpartitioned_{tblname} INCLUDING ALL)')

# drop primary key constraint; in a partioned table
# constraints must include the partition key itself
@@ -48,7 +50,7 @@ def migrate_event_data(apps, schema_editor):
cursor.execute(f'DROP TABLE tmp_{tblname}')

# recreate primary key constraint
cursor.execute(f'ALTER TABLE ONLY {tblname} ' f'ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')
cursor.execute(f'ALTER TABLE ONLY {tblname} ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')

with connection.cursor() as cursor:
"""
@@ -59,6 +61,10 @@ def migrate_event_data(apps, schema_editor):
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')


def migrate_event_data_sqlite(apps, schema_editor):
return None


class FakeAddField(migrations.AddField):
def database_forwards(self, *args):
# this is intentionally left blank, because we're
@@ -72,7 +78,7 @@ class Migration(migrations.Migration):
]

operations = [
migrations.RunPython(migrate_event_data),
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
FakeAddField(
model_name='jobevent',
name='job_created',
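FakeAddField above records the schema change in migration state while skipping the database work already done by the raw SQL; stock Django expresses the same idea with SeparateDatabaseAndState, sketched here with placeholder app and model names:

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [('example_app', '0002_previous')]
    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[],  # the column was already added by hand-written SQL
            state_operations=[
                migrations.AddField(model_name='exampleevent', name='job_created', field=models.DateTimeField(null=True)),
            ],
        ),
    ]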
Some files were not shown because too many files have changed in this diff.