Mirror of https://github.com/ansible/awx.git
Synced 2026-02-05 03:24:50 -03:30
Compare commits
385 Commits
| SHA1 |
| --- |
| 45c13c25a4 |
| ba0e9831d2 |
| 92dce85468 |
| 77139e4138 |
| b28e14c630 |
| bf5594e338 |
| f012a69c93 |
| 0fb334e372 |
| b7c5cbac3f |
| eb7407593f |
| 287596234c |
| ee7b3470da |
| 0faa1c8a24 |
| 77175d2862 |
| 22464a5838 |
| 3919ea6270 |
| 9d9f650051 |
| 66a3cb6b09 |
| d282393035 |
| 6ea3b20912 |
| 3025ef0dfa |
| 397d58c459 |
| d739a4a90a |
| 3fe64ad101 |
| 919d1e5d40 |
| 7fda4b0675 |
| d8af19d169 |
| 1821e540f7 |
| 77be6c7495 |
| baed869d93 |
| b87ff45c07 |
| 7acc0067f5 |
| 0a13762f11 |
| 2c673c8f1f |
| 8c187c74fc |
| 2ce9440bab |
| 765487390f |
| 086722149c |
| c10ada6f44 |
| b350cd053d |
| d0acb1c53f |
| f61b73010a |
| adb89cd48f |
| 3e509b3d55 |
| f0badea9d3 |
| 6a1ec0dc89 |
| 329fb88bbb |
| 177f8cb7b2 |
| b43107a5e9 |
| 4857685e1c |
| 8ba1a2bcf7 |
| e7c80fe1e8 |
| 33f1c35292 |
| ba899324f2 |
| 9c236eb8dd |
| 36559a4539 |
| 7a4b3ed139 |
| cd5cc64d6a |
| 71a11ea3ad |
| cfbbc4cb92 |
| 592920ee51 |
| b75b84e282 |
| f4b80c70e3 |
| 9870187af5 |
| bbb436ddbb |
| abf915fafe |
| 481814991e |
| e94ee8f8d7 |
| e660f62a59 |
| a2a04002b6 |
| 93117c8264 |
| b8118ac86a |
| c08f1ddcaa |
| d57f549a4c |
| 93e6f974f6 |
| 32f7dfece1 |
| 68b32b9b4f |
| 886ba1ea7f |
| b128f05a37 |
| 36c9c9cdc4 |
| 342e9197b8 |
| 2205664fb4 |
| 7cdf471894 |
| 8719648ff5 |
| c1455ee125 |
| 11d5e5c7d4 |
| fba4e06c50 |
| 12a4c301b8 |
| 8a1cdf859e |
| 2f68317e5f |
| 0f4bac7aed |
| e42461d96f |
| 9b716235a2 |
| eb704dbaad |
| 105609ec20 |
| 9b390a624f |
| 0046ce5e69 |
| b80d0ae85b |
| 1c0142f75c |
| 1ea6d15ee3 |
| 3cd5d59d87 |
| d32a5905e8 |
| e53a5da91e |
| 1a56272eaf |
| 3975028bd4 |
| 1c51ef8a69 |
| 6b0fe8d137 |
| 4a3d437b32 |
| 23f3ab6a66 |
| ffa3cd1fff |
| 236de7e209 |
| 4e5cce8d15 |
| 184719e9f2 |
| 6c9e2502a5 |
| 0b1b866128 |
| 80ebe13841 |
| 328880609b |
| 71c307ab8a |
| 3ce68ced1e |
| 20817789bd |
| 2b63b55b34 |
| 64923e12fc |
| 6d4f92e1e8 |
| fff6fa7d7a |
| 44db4587be |
| dc0958150a |
| 9f27436c75 |
| e60869e653 |
| 51e19d9d0b |
| 0fea29ad4d |
| 0a40b758c3 |
| 1191458d80 |
| c0491a7b10 |
| 14e613bc92 |
| 98e37383c2 |
| 9e336d55e4 |
| 0e68caf0f7 |
| c9c150b5a6 |
| f97605430b |
| 454f31f6a4 |
| f62bf6a4c3 |
| a0dafbfd8c |
| b5c052b2e6 |
| 1e690fcd7f |
| 479d0c2b12 |
| ede185504c |
| 2db29e5ce2 |
| 7bb0d32be1 |
| acb22f0131 |
| 4f99a170be |
| 17f5c4b8e6 |
| 598f9e2a55 |
| d33573b29c |
| bc55bcf3a2 |
| 6c0c1f6853 |
| 0cc02d311f |
| 13b9a6c5e3 |
| ac2f2039f5 |
| c8c8ed1775 |
| 6267469709 |
| a1e39f71fc |
| 4b0acaf7a1 |
| 968267287b |
| 25303ee625 |
| 8c5e2237f4 |
| 57d009199d |
| 24cbf39a93 |
| 95f1ef70a7 |
| 680e2bcc0a |
| cd3f7666be |
| 049fb4eff5 |
| 7cef4e6db7 |
| da004da68a |
| b29f2f88d0 |
| 52a8a90c0e |
| 7cb890b603 |
| 78652bdd71 |
| 29d222be83 |
| e7fa730f81 |
| 33f070081c |
| 44463402a8 |
| 93c2c56612 |
| 91bf49cdb3 |
| 704759d29a |
| 513f433f17 |
| 5f41003fb1 |
| 2e0f25150c |
| 4f5bc992a0 |
| a9e7508e92 |
| 1c2eb22956 |
| a987249ca6 |
| ab6d56c24e |
| c4ce5d0afa |
| 43f4872fec |
| cb31973d59 |
| 9f959ca3d4 |
| 454d6d28e7 |
| 8b70fef743 |
| 026b8f05d7 |
| d8e591cd69 |
| 38cc193aea |
| 65b3e0226d |
| b5e04a4cb3 |
| c89c2892c4 |
| 5080a5530c |
| 77743ef406 |
| f792fea048 |
| 16ad27099e |
| 3f5a4cb6f1 |
| b88d9f4731 |
| 62b79b1959 |
| be5a2bbe61 |
| 84edbed5ec |
| aa631a1ba7 |
| 771b831da8 |
| ce4c1c11b3 |
| 054a70bda4 |
| ab0463bf2a |
| 2bffddb5fb |
| d576e65858 |
| e3d167dfd1 |
| ba9533f0e2 |
| e7a739c3d7 |
| ab3a9a0364 |
| 7dd1bc04c4 |
| 8c4e943af0 |
| 7112da9cdc |
| 7a74437651 |
| e22967d28d |
| df6bb5a8b8 |
| aa06940df5 |
| 3e5467b472 |
| c2fe06dd95 |
| 510f54b904 |
| 57e005b775 |
| aad260bb41 |
| e3d39a2728 |
| f59ced57bc |
| 7f085e159f |
| db2253601d |
| 32a5186eea |
| b0c416334f |
| c30c9cbdbe |
| 8ec6e556a1 |
| 382f98ceed |
| fbd5d79428 |
| 878008a9c5 |
| 132fe5e443 |
| 311cea5a4a |
| 88bb6e5a6a |
| c117ca66d5 |
| c20e8eb712 |
| 5be90fd36b |
| 32a56311e6 |
| 610f75fcb1 |
| 179868dff2 |
| 9f3c4f6240 |
| d40fdd77ad |
| 9135ff2f77 |
| 8d46d32944 |
| ae0c1730bb |
| 9badbf0b4e |
| 7285d82f00 |
| e38f87eb1d |
| e6050804f9 |
| f919178734 |
| 05f918e666 |
| b18ad77035 |
| d80759cd7a |
| ef4e77d78f |
| bf98f62654 |
| 1f9925cf51 |
| 4bf8366687 |
| 21b4755587 |
| b4163dd00f |
| 6908f415a1 |
| 746cd4bf77 |
| 39ea162aa9 |
| 5bd00adb59 |
| 7c4aedf716 |
| 28b1c62275 |
| f3cdf368df |
| 4302348e8e |
| cd6cb3352e |
| d1895bb92e |
| 8d47644659 |
| 76f03b9adc |
| 46227f14a1 |
| 2d114a4d16 |
| 7deddabea6 |
| e15f4de0dd |
| f558957538 |
| fa3920d3a3 |
| 48a04bff5a |
| c30760aaa9 |
| 3636c5e95e |
| ae0d868681 |
| edbed92c95 |
| b75b098ee9 |
| 4f2f345e23 |
| 41a4551c91 |
| 229dbe0905 |
| d137086870 |
| f53aa2d26b |
| 42c848b57b |
| 3e6e0463b9 |
| ededc61a71 |
| 3747f5b097 |
| 64b0e09e87 |
| 790ccd984c |
| 5d0849d746 |
| 7f1750324f |
| a63067da38 |
| 7a45048463 |
| 97a5e87448 |
| 11475590e7 |
| 7e88a735ad |
| 2f3e65d4ef |
| cc18c1220a |
| d2aa1b94e3 |
| a97c1b46c0 |
| 6a3282a689 |
| be27d89895 |
| 160508c907 |
| 5a3900a927 |
| f2bfaf7aca |
| d1cf7245f7 |
| 0de7551477 |
| ac99708952 |
| 47b7bbeda7 |
| bca0f2dd47 |
| 3efc7d5bc4 |
| 4b9ca3deee |
| f622d3a1e6 |
| ede1b9af92 |
| 2becc5dda9 |
| 7aad16964c |
| b1af27c4f6 |
| 7cb16ef91d |
| 9358d59f20 |
| 9e037f1a02 |
| 266ebe5501 |
| ce5270434c |
| 34834252ff |
| 861ba8a727 |
| 02e5ba5f94 |
| 81ba6c0234 |
| 5c47c24e28 |
| 752289e175 |
| a24aaba6bc |
| 349785550c |
| ab6511a833 |
| a7b4c03188 |
| a5f9506f49 |
| 8e6f4fae80 |
| a952ab0a75 |
| 7cca6c4cd9 |
| 3945db60eb |
| 252b0dda9f |
| 0a2f1622f6 |
| 00817d6b89 |
| 06808ef4c4 |
| 3aba5b5a04 |
| 5c19efdc32 |
| 217dc57c24 |
| 90f54b98cd |
| b143df3183 |
| 6fa22f5be2 |
| d5de1f9d11 |
| 7cca39d069 |
| a6a9d3427c |
| af6549ffcd |
| f0c91bb1f3 |
| b1dceefac3 |
| bb65945b4f |
| 1b8f6630bf |
| 5157838d83 |
| ebabea54e1 |
| 0eaa7816e9 |
| 83149519f8 |
| b3bda415da |
| 21291b53fd |
| 3eb748ff1f |
| 6d2c10ad02 |
| ede9d961da |
.github/workflows/ci.yml (vendored, 2 changes)

```diff
@@ -3,7 +3,7 @@ name: CI
 env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
   CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-  DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
+  DEV_DOCKER_OWNER: ${{ github.repository_owner }}
   COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
 on:
   pull_request:
```
.github/workflows/devel_images.yml (vendored, 22 changes)

```diff
@@ -7,6 +7,7 @@ on:
     branches:
       - devel
       - release_*
+      - feature_*
 jobs:
   push:
     if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
@@ -20,6 +21,12 @@ jobs:
       - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
 
+      - name: Set lower case owner name
+        run: |
+          echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
+        env:
+          OWNER: '${{ github.repository_owner }}'
+
       - name: Install python ${{ env.py_version }}
         uses: actions/setup-python@v2
         with:
@@ -31,15 +38,18 @@ jobs:
 
       - name: Pre-pull image to warm build cache
         run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/} || :
+          docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
+          docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
+          docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
 
       - name: Build images
         run: |
-          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
-          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
+          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
+          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
+          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
 
       - name: Push image
         run: |
-          docker push ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/}
-          docker push ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/}
+          docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
+          docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
+          docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
```
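The `${OWNER,,}` expansion above is bash's built-in lowercasing. It matters because container registries reject repository paths containing uppercase characters, while a GitHub owner name may contain them. A minimal Python sketch of the same normalization (the `MyOrg` owner is a made-up example, not from this diff):

```python
# Sketch of the owner normalization done by bash's ${OWNER,,} in the
# workflow: OCI registries only accept lowercase repository names, so a
# GitHub owner like "MyOrg" must become "myorg" before tagging images.
def image_ref(owner: str, image: str, tag: str) -> str:
    # lowercase only the owner; tags are left untouched
    return f"ghcr.io/{owner.lower()}/{image}:{tag}"

assert image_ref("MyOrg", "awx_devel", "devel") == "ghcr.io/myorg/awx_devel:devel"
```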
.github/workflows/label_issue.yml (vendored, 4 changes)

```diff
@@ -6,6 +6,10 @@ on:
       - opened
       - reopened
 
+permissions:
+  contents: read # to fetch code
+  issues: write # to label issues
+
 jobs:
   triage:
     runs-on: ubuntu-latest
```
.github/workflows/label_pr.yml (vendored, 4 changes)

```diff
@@ -7,6 +7,10 @@ on:
       - reopened
       - synchronize
 
+permissions:
+  contents: read # to determine modified files (actions/labeler)
+  pull-requests: write # to add labels to PRs (actions/labeler)
+
 jobs:
   triage:
     runs-on: ubuntu-latest
```
.github/workflows/promote.yml (vendored, 3 changes)

```diff
@@ -8,6 +8,9 @@ on:
   release:
     types: [published]
 
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
 jobs:
   promote:
     if: endsWith(github.repository, '/awx')
```
.gitignore (vendored, 6 changes)

```diff
@@ -157,7 +157,11 @@ use_dev_supervisor.txt
 *.unison.tmp
 *.#
 /awx/ui/.ui-built
-/Dockerfile
 /_build/
+/_build_kube_dev/
+/Dockerfile
+/Dockerfile.dev
+/Dockerfile.kube-dev
 
 awx/ui_next/src
 awx/ui_next/build
```
```diff
@@ -6,6 +6,7 @@ recursive-include awx/templates *.html
 recursive-include awx/api/templates *.md *.html *.yml
 recursive-include awx/ui/build *.html
 recursive-include awx/ui/build *
+recursive-include awx/ui_next/build *
 recursive-include awx/playbooks *.yml
 recursive-include awx/lib/site-packages *
 recursive-include awx/plugins *.ps1
```
Makefile (153 changes)

```diff
@@ -1,4 +1,6 @@
-PYTHON ?= python3.9
+-include awx/ui_next/Makefile
+
+PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
 DOCKER_COMPOSE ?= docker-compose
 OFFICIAL ?= no
 NODE ?= node
@@ -35,10 +37,15 @@ SPLUNK ?= false
 PROMETHEUS ?= false
 # If set to true docker-compose will also start a grafana instance
 GRAFANA ?= false
+# If set to true docker-compose will also start a tacacs+ instance
+TACACS ?= false
 
 VENV_BASE ?= /var/lib/awx/venv
 
-DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
+DEV_DOCKER_OWNER ?= ansible
+# Docker will only accept lowercase, so github names like Paul need to be paul
+DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
+DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
 DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
 
 RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
@@ -84,7 +91,7 @@ clean-schema:
 
 clean-languages:
 	rm -f $(I18N_FLAG_FILE)
-	find ./awx/locale/ -type f -regex ".*\.mo$" -delete
+	find ./awx/locale/ -type f -regex '.*\.mo$$' -delete
 
 ## Remove temporary build files, compiled Python files.
 clean: clean-ui clean-api clean-awxkit clean-dist
@@ -215,12 +222,6 @@ daphne:
 	fi; \
 	daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer
 
-wsbroadcast:
-	@if [ "$(VENV_BASE)" ]; then \
-		. $(VENV_BASE)/awx/bin/activate; \
-	fi; \
-	$(PYTHON) manage.py run_wsbroadcast
-
 ## Run to start the background task dispatcher for development.
 dispatcher:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -228,7 +229,6 @@ dispatcher:
 	fi; \
 	$(PYTHON) manage.py run_dispatcher
 
-
 ## Run to start the zeromq callback receiver
 receiver:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -245,6 +245,34 @@ jupyter:
 	fi; \
 	$(MANAGEMENT_COMMAND) shell_plus --notebook
 
+## Start the rsyslog configurer process in background in development environment.
+run-rsyslog-configurer:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_rsyslog_configurer
+
+## Start cache_clear process in background in development environment.
+run-cache-clear:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_cache_clear
+
+## Start the wsrelay process in background in development environment.
+run-wsrelay:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_wsrelay
+
+## Start the heartbeat process in background in development environment.
+run-heartbeet:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/awx/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py run_heartbeet
+
 reports:
 	mkdir -p $@
 
@@ -271,13 +299,13 @@ swagger: reports
 check: black
 
 api-lint:
-	BLACK_ARGS="--check" make black
+	BLACK_ARGS="--check" $(MAKE) black
 	flake8 awx
 	yamllint -s .
 
 ## Run egg_info_dev to generate awx.egg-info for development.
 awx-link:
 	[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
 	cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link
 
 TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
 PYTEST_ARGS ?= -n auto
@@ -296,7 +324,7 @@ github_ci_setup:
 	# CI_GITHUB_TOKEN is defined in .github files
 	echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
 	docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
-	make docker-compose-build
+	$(MAKE) docker-compose-build
 
 ## Runs AWX_DOCKER_CMD inside a new docker container.
 docker-runner:
@@ -346,7 +374,7 @@ test_collection_sanity:
 	rm -rf $(COLLECTION_INSTALL)
 	if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
 	ansible --version
-	COLLECTION_VERSION=1.0.0 make install_collection
+	COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
 	cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
 
 test_collection_integration: install_collection
@@ -411,12 +439,14 @@ ui-release: $(UI_BUILD_FLAG_FILE)
 
 ui-devel: awx/ui/node_modules
 	@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
-	mkdir -p /var/lib/awx/public/static/css
-	mkdir -p /var/lib/awx/public/static/js
-	mkdir -p /var/lib/awx/public/static/media
-	cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
-	cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
-	cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
+	@if [ -d "/var/lib/awx" ] ; then \
+		mkdir -p /var/lib/awx/public/static/css; \
+		mkdir -p /var/lib/awx/public/static/js; \
+		mkdir -p /var/lib/awx/public/static/media; \
+		cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
+		cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
+		cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
+	fi
 
 ui-devel-instrumented: awx/ui/node_modules
 	$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -443,11 +473,12 @@ ui-test-general:
 	$(NPM_BIN) run --prefix awx/ui pretest
 	$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand
 
+# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
 HEADLESS ?= no
 ifeq ($(HEADLESS), yes)
 dist/$(SDIST_TAR_FILE):
 else
-dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
+dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
 endif
 	$(PYTHON) -m build -s
 	ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
@@ -493,9 +524,9 @@ docker-compose-sources: .git/hooks/pre-commit
 		-e enable_ldap=$(LDAP) \
 		-e enable_splunk=$(SPLUNK) \
 		-e enable_prometheus=$(PROMETHEUS) \
-		-e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)
+		-e enable_grafana=$(GRAFANA) \
+		-e enable_tacacs=$(TACACS) \
+		$(EXTRA_SOURCES_ANSIBLE_OPTS)
 
 docker-compose: awx/projects docker-compose-sources
 	$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
@@ -528,18 +559,25 @@ docker-compose-container-group-clean:
 	fi
 	rm -rf tools/docker-compose-minikube/_sources/
 
-## Base development image build
-docker-compose-build:
-	ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
-	DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
-		--build-arg BUILDKIT_INLINE_CACHE=1 \
-		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
+.PHONY: Dockerfile.dev
+## Generate Dockerfile.dev for awx_devel image
+Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
+	ansible-playbook tools/ansible/dockerfile.yml \
+		-e dockerfile_name=Dockerfile.dev \
+		-e build_dev=True \
+		-e receptor_image=$(RECEPTOR_IMAGE)
+
+## Build awx_devel image for docker compose development environment
+docker-compose-build: Dockerfile.dev
+	DOCKER_BUILDKIT=1 docker build \
+		-f Dockerfile.dev \
+		-t $(DEVEL_IMAGE_NAME) \
+		--build-arg BUILDKIT_INLINE_CACHE=1 \
+		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
 
 docker-clean:
-	$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-	if [ "$(shell docker images | grep awx_devel)" ]; then \
-		docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
-	fi
+	-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
+	-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
 
 docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
 	docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -554,7 +592,7 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
 	$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
 
 docker-compose-container-group:
-	MINIKUBE_CONTAINER_GROUP=true make docker-compose
+	MINIKUBE_CONTAINER_GROUP=true $(MAKE) docker-compose
 
 clean-elk:
	docker stop tools_kibana_1
```
```diff
@@ -571,11 +609,36 @@ VERSION:
 	@echo "awx: $(VERSION)"
 
 PYTHON_VERSION:
-	@echo "$(PYTHON)" | sed 's:python::'
+	@echo "$(subst python,,$(PYTHON))"
 
+.PHONY: version-for-buildyml
+version-for-buildyml:
+	@echo $(firstword $(subst +, ,$(VERSION)))
+# version-for-buildyml prints a special version string for build.yml,
+# chopping off the sha after the '+' sign.
+# tools/ansible/build.yml was doing this: make print-VERSION | cut -d + -f -1
+# This does the same thing in native make without
+# the pipe or the extra processes, and now the pb does `make version-for-buildyml`
+# Example:
+# 22.1.1.dev38+g523c0d9781 becomes 22.1.1.dev38
+
 .PHONY: Dockerfile
 ## Generate Dockerfile for awx image
 Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-	ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
+	ansible-playbook tools/ansible/dockerfile.yml \
+		-e receptor_image=$(RECEPTOR_IMAGE) \
+		-e headless=$(HEADLESS)
+
+## Build awx image for deployment on Kubernetes environment.
+awx-kube-build: Dockerfile
+	DOCKER_BUILDKIT=1 docker build -f Dockerfile \
+		--build-arg VERSION=$(VERSION) \
+		--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
+		--build-arg HEADLESS=$(HEADLESS) \
+		-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
 
 .PHONY: Dockerfile.kube-dev
 ## Generate Docker.kube-dev for awx_kube_devel image
 Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
 	ansible-playbook tools/ansible/dockerfile.yml \
 		-e dockerfile_name=Dockerfile.kube-dev \
```
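The `version-for-buildyml` transformation above is easy to check independently. A one-function Python equivalent of `$(firstword $(subst +, ,$(VERSION)))`, using the example from the Makefile comment:

```python
# Python equivalent of the Make expression $(firstword $(subst +, ,$(VERSION))):
# drop the local-version segment that setuptools-scm appends after '+'.
def version_for_buildyml(version: str) -> str:
    return version.split("+", 1)[0]

assert version_for_buildyml("22.1.1.dev38+g523c0d9781") == "22.1.1.dev38"
assert version_for_buildyml("22.1.1") == "22.1.1"  # unchanged when no '+' is present
```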
```diff
@@ -590,13 +653,6 @@ awx-kube-dev-build: Dockerfile.kube-dev
 		--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
 		-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
 
-## Build awx image for deployment on Kubernetes environment.
-awx-kube-build: Dockerfile
-	DOCKER_BUILDKIT=1 docker build -f Dockerfile \
-		--build-arg VERSION=$(VERSION) \
-		--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
-		--build-arg HEADLESS=$(HEADLESS) \
-		-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
 
 # Translation TASKS
 # --------------------------------------
@@ -616,6 +672,7 @@ messages:
 	fi; \
 	$(PYTHON) manage.py makemessages -l en_us --keep-pot
 
+.PHONY: print-%
 print-%:
 	@echo $($*)
 
@@ -627,12 +684,12 @@ HELP_FILTER=.PHONY
 ## Display help targets
 help:
 	@printf "Available targets:\n"
-	@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
+	@$(MAKE) -s help/generate | grep -vE "\w($(HELP_FILTER))"
 
 ## Display help for all targets
 help/all:
 	@printf "Available targets:\n"
-	@make -s help/generate
+	@$(MAKE) -s help/generate
 
 ## Generate help output from MAKEFILE_LIST
 help/generate:
@@ -653,3 +710,7 @@ help/generate:
 	} \
 	{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
 	@printf "\n"
+
+## Display help for ui-next targets
+help/ui-next:
+	@$(MAKE) -s help MAKEFILE_LIST="awx/ui_next/Makefile"
```
```diff
@@ -5,13 +5,11 @@
 import inspect
 import logging
 import time
-import uuid
 
 # Django
 from django.conf import settings
 from django.contrib.auth import views as auth_views
 from django.contrib.contenttypes.models import ContentType
-from django.core.cache import cache
 from django.core.exceptions import FieldDoesNotExist
 from django.db import connection, transaction
 from django.db.models.fields.related import OneToOneRel
@@ -35,7 +33,7 @@ from rest_framework.negotiation import DefaultContentNegotiation
 # AWX
 from awx.api.filters import FieldLookupBackend
 from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
-from awx.main.access import access_registry
+from awx.main.access import optimize_queryset
 from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
 from awx.main.utils.db import get_all_field_names
 from awx.main.utils.licensing import server_product_name
@@ -364,12 +362,7 @@ class GenericAPIView(generics.GenericAPIView, APIView):
             return self.queryset._clone()
         elif self.model is not None:
             qs = self.model._default_manager
-            if self.model in access_registry:
-                access_class = access_registry[self.model]
-                if access_class.select_related:
-                    qs = qs.select_related(*access_class.select_related)
-                if access_class.prefetch_related:
-                    qs = qs.prefetch_related(*access_class.prefetch_related)
+            qs = optimize_queryset(qs)
             return qs
         else:
             return super(GenericAPIView, self).get_queryset()
@@ -512,6 +505,9 @@ class SubListAPIView(ParentMixin, ListAPIView):
     # And optionally (user must have given access permission on parent object
     # to view sublist):
     # parent_access = 'read'
+    # filter_read_permission sets whether or not to override the default intersection behavior
+    # implemented here
+    filter_read_permission = True
 
     def get_description_context(self):
         d = super(SubListAPIView, self).get_description_context()
@@ -526,12 +522,16 @@ class SubListAPIView(ParentMixin, ListAPIView):
     def get_queryset(self):
         parent = self.get_parent_object()
         self.check_parent_access(parent)
-        qs = self.request.user.get_queryset(self.model).distinct()
-        sublist_qs = self.get_sublist_queryset(parent)
-        return qs & sublist_qs
+        if not self.filter_read_permission:
+            return optimize_queryset(self.get_sublist_queryset(parent))
+        qs = self.request.user.get_queryset(self.model)
+        if hasattr(self, 'parent_key'):
+            # This is vastly preferable for ReverseForeignKey relationships
+            return qs.filter(**{self.parent_key: parent})
+        return qs.distinct() & self.get_sublist_queryset(parent).distinct()
 
     def get_sublist_queryset(self, parent):
-        return getattrd(parent, self.relationship).distinct()
+        return getattrd(parent, self.relationship)
 
 
 class DestroyAPIView(generics.DestroyAPIView):
@@ -580,15 +580,6 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
         d.update({'parent_key': getattr(self, 'parent_key', None)})
         return d
 
-    def get_queryset(self):
-        if hasattr(self, 'parent_key'):
-            # Prefer this filtering because ForeignKey allows us more assumptions
-            parent = self.get_parent_object()
-            self.check_parent_access(parent)
-            qs = self.request.user.get_queryset(self.model)
-            return qs.filter(**{self.parent_key: parent})
-        return super(SubListCreateAPIView, self).get_queryset()
-
     def create(self, request, *args, **kwargs):
         # If the object ID was not specified, it probably doesn't exist in the
         # DB yet. We want to see if we can create it. The URL may choose to
@@ -967,16 +958,11 @@ class CopyAPIView(GenericAPIView):
         if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
             new_obj.admin_role.members.add(request.user)
         if sub_objs:
-            # store the copied object dict into cache, because it's
-            # often too large for postgres' notification bus
-            # (which has a default maximum message size of 8k)
-            key = 'deep-copy-{}'.format(str(uuid.uuid4()))
-            cache.set(key, sub_objs, timeout=3600)
             permission_check_func = None
             if hasattr(type(self), 'deep_copy_permission_check_func'):
                 permission_check_func = (type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func')
             trigger_delayed_deep_copy(
-                self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, key, permission_check_func=permission_check_func
+                self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, permission_check_func=permission_check_func
             )
         serializer = self._get_copy_return_serializer(new_obj)
         headers = {'Location': new_obj.get_absolute_url(request=request)}
```
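The `SubListAPIView.get_queryset` rewrite above prefers filtering by `parent_key` when the relationship is a reverse foreign key: each child row carries exactly one parent id, so a single `WHERE` condition replaces the `distinct()`-and-intersect of two querysets. A plain-Python sketch of the idea (the rows and the `inventory_id` column are illustrative stand-ins, not the ORM):

```python
# Stand-in for request.user.get_queryset(self.model): rows the user can read.
readable = [
    {"id": 1, "inventory_id": 7},
    {"id": 2, "inventory_id": 7},
    {"id": 3, "inventory_id": 9},
]

# parent_key path: one pass with one condition, and because each child has
# exactly one parent there are no duplicates to strip afterwards.
hosts_of_inventory_7 = [row for row in readable if row["inventory_id"] == 7]
assert [r["id"] for r in hosts_of_inventory_7] == [1, 2]
```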
```diff
@@ -25,6 +25,7 @@ __all__ = [
     'UserPermission',
     'IsSystemAdminOrAuditor',
     'WorkflowApprovalPermission',
+    'AnalyticsPermission',
 ]
 
 
@@ -250,3 +251,16 @@ class IsSystemAdminOrAuditor(permissions.BasePermission):
 class WebhookKeyPermission(permissions.BasePermission):
     def has_object_permission(self, request, view, obj):
         return request.user.can_access(view.model, 'admin', obj, request.data)
+
+
+class AnalyticsPermission(permissions.BasePermission):
+    """
+    Allows GET/POST/OPTIONS to system admins and system auditors.
+    """
+
+    def has_permission(self, request, view):
+        if not (request.user and request.user.is_authenticated):
+            return False
+        if request.method in ["GET", "POST", "OPTIONS"]:
+            return request.user.is_superuser or request.user.is_system_auditor
+        return request.user.is_superuser
```
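Note that `AnalyticsPermission` deliberately lets auditors POST: analytics queries are submitted as POST bodies, so POST is read-like here. Its decision table, restated as a standalone sketch:

```python
# Standalone restatement of AnalyticsPermission.has_permission: auditors
# get the read-ish methods (analytics queries arrive as POSTs), while
# anything else requires a superuser.
def analytics_allowed(method, is_superuser, is_auditor, authenticated=True):
    if not authenticated:
        return False
    if method in ("GET", "POST", "OPTIONS"):
        return is_superuser or is_auditor
    return is_superuser

assert analytics_allowed("POST", is_superuser=False, is_auditor=True) is True
assert analytics_allowed("DELETE", is_superuser=False, is_auditor=True) is False
```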
```diff
@@ -8,6 +8,7 @@ import logging
 import re
 from collections import OrderedDict
 from datetime import timedelta
+from uuid import uuid4
 
 # OAuth2
 from oauthlib import oauth2
@@ -55,6 +56,8 @@ from awx.main.models import (
     ExecutionEnvironment,
     Group,
     Host,
+    HostMetric,
+    HostMetricSummaryMonthly,
     Instance,
     InstanceGroup,
     InstanceLink,
@@ -109,11 +112,14 @@ from awx.main.utils import (
     encrypt_dict,
     prefetch_page_capabilities,
     truncate_stdout,
+    get_licenser,
 )
 from awx.main.utils.filters import SmartFilter
 from awx.main.utils.named_url_graph import reset_counters
 from awx.main.scheduler.task_manager_models import TaskManagerModels
 from awx.main.redact import UriCleaner, REPLACE_STR
+from awx.main.signals import update_inventory_computed_fields
+
 
 from awx.main.validators import vars_validate_or_raise
@@ -152,11 +158,12 @@ SUMMARIZABLE_FK_FIELDS = {
         'kind',
     ),
     'host': DEFAULT_SUMMARY_FIELDS,
+    'constructed_host': DEFAULT_SUMMARY_FIELDS,
     'group': DEFAULT_SUMMARY_FIELDS,
     'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
     'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
     'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
-    'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
+    'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
     'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
     'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
     'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
@@ -185,6 +192,11 @@ SUMMARIZABLE_FK_FIELDS = {
 }
 
 
+# These fields can be edited on a constructed inventory's generated source (possibly by using the constructed
+# inventory's special API endpoint, but also by using the inventory sources endpoint).
+CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')
+
+
 def reverse_gfk(content_object, request):
     """
     Computes a reverse for a GenericForeignKey field.
@@ -942,7 +954,7 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
 
 
 class UserSerializer(BaseSerializer):
-    password = serializers.CharField(required=False, default='', write_only=True, help_text=_('Write-only field used to change the password.'))
+    password = serializers.CharField(required=False, default='', help_text=_('Field used to change the password.'))
     ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
     external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
     is_system_auditor = serializers.BooleanField(default=False)
@@ -969,7 +981,12 @@ class UserSerializer(BaseSerializer):
 
     def to_representation(self, obj):
         ret = super(UserSerializer, self).to_representation(obj)
-        ret.pop('password', None)
+        if self.get_external_account(obj):
+            # If this is an external account it shouldn't have a password field
+            ret.pop('password', None)
+        else:
+            # If its an internal account lets assume there is a password and return $encrypted$ to the user
+            ret['password'] = '$encrypted$'
         if obj and type(self) is UserSerializer:
             ret['auth'] = obj.social_auth.values('provider', 'uid')
         return ret
```
```diff
@@ -983,13 +1000,31 @@ class UserSerializer(BaseSerializer):
         django_validate_password(value)
         if not self.instance and value in (None, ''):
             raise serializers.ValidationError(_('Password required for new User.'))
+
+        # Check if a password is too long
+        password_max_length = User._meta.get_field('password').max_length
+        if len(value) > password_max_length:
+            raise serializers.ValidationError(_('Password max length is {}'.format(password_max_length)))
+        if getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH', 0) and len(value) < getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'):
+            raise serializers.ValidationError(_('Password must be at least {} characters long.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'))))
+        if getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS', 0) and sum(c.isdigit() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'):
+            raise serializers.ValidationError(_('Password must contain at least {} digits.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'))))
+        if getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER', 0) and sum(c.isupper() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER'):
+            raise serializers.ValidationError(
+                _('Password must contain at least {} uppercase characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER')))
+            )
+        if getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL', 0) and sum(not c.isalnum() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL'):
+            raise serializers.ValidationError(
+                _('Password must contain at least {} special characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL')))
+            )
+
         return value
 
     def _update_password(self, obj, new_password):
         # For now we're not raising an error, just not saving password for
         # users managed by LDAP who already have an unusable password set.
-        if new_password and not self.get_external_account(obj):
+        # Get external password will return something like ldap or enterprise or None if the user isn't external. We only want to allow a password update for a None option
+        if new_password and new_password != '$encrypted$' and not self.get_external_account(obj):
             obj.set_password(new_password)
             obj.save(update_fields=['password'])
```
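Together, the `to_representation` and `_update_password` changes implement a placeholder round trip: reads of internal accounts show `$encrypted$` rather than omitting the field, and a client that echoes the placeholder back on update does not clobber the stored password. A minimal sketch of that contract, outside of DRF:

```python
# Minimal sketch of the $encrypted$ round trip, outside of DRF.
PLACEHOLDER = '$encrypted$'

def represent(user, external):
    out = {k: v for k, v in user.items() if k != 'password'}
    if not external:
        out['password'] = PLACEHOLDER  # internal accounts advertise "a password exists"
    return out

def update_password(user, new_password, external):
    # echoing the placeholder back must not overwrite the stored hash
    if new_password and new_password != PLACEHOLDER and not external:
        user['password'] = 'hash:' + new_password  # stand-in for set_password()

user = {'username': 'alice', 'password': 'hash:old'}
assert represent(user, external=False)['password'] == PLACEHOLDER
update_password(user, PLACEHOLDER, external=False)
assert user['password'] == 'hash:old'  # unchanged
```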
```diff
@@ -1666,13 +1701,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
         res.update(
             dict(
                 hosts=self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
-                groups=self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
-                root_groups=self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
                 variable_data=self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
                 script=self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
-                tree=self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
-                inventory_sources=self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
-                update_inventory_sources=self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
                 activity_stream=self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
                 job_templates=self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
                 ad_hoc_commands=self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
@@ -1683,8 +1713,18 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
                 labels=self.reverse('api:inventory_label_list', kwargs={'pk': obj.pk}),
             )
         )
+        if obj.kind in ('', 'constructed'):
+            # links not relevant for the "old" smart inventory
+            res['groups'] = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk})
+            res['root_groups'] = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk})
+            res['update_inventory_sources'] = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk})
+            res['inventory_sources'] = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk})
+            res['tree'] = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk})
         if obj.organization:
             res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
+        if obj.kind == 'constructed':
+            res['input_inventories'] = self.reverse('api:inventory_input_inventories', kwargs={'pk': obj.pk})
+            res['constructed_url'] = self.reverse('api:constructed_inventory_detail', kwargs={'pk': obj.pk})
         return res
 
     def to_representation(self, obj):
@@ -1726,6 +1766,91 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
         return super(InventorySerializer, self).validate(attrs)
 
 
+class ConstructedFieldMixin(serializers.Field):
+    def get_attribute(self, instance):
+        if not hasattr(instance, '_constructed_inv_src'):
+            instance._constructed_inv_src = instance.inventory_sources.first()
+        inv_src = instance._constructed_inv_src
+        return super().get_attribute(inv_src)  # yoink
+
+
+class ConstructedCharField(ConstructedFieldMixin, serializers.CharField):
+    pass
+
+
+class ConstructedIntegerField(ConstructedFieldMixin, serializers.IntegerField):
+    pass
+
+
+class ConstructedInventorySerializer(InventorySerializer):
+    source_vars = ConstructedCharField(
+        required=False,
+        default=None,
+        allow_blank=True,
+        help_text=_('The source_vars for the related auto-created inventory source, special to constructed inventory.'),
+    )
+    update_cache_timeout = ConstructedIntegerField(
+        required=False,
+        allow_null=True,
+        min_value=0,
+        default=None,
+        help_text=_('The cache timeout for the related auto-created inventory source, special to constructed inventory'),
+    )
+    limit = ConstructedCharField(
+        required=False,
+        default=None,
+        allow_blank=True,
+        help_text=_('The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory.'),
+    )
+    verbosity = ConstructedIntegerField(
+        required=False,
+        allow_null=True,
+        min_value=0,
+        max_value=2,
+        default=None,
+        help_text=_('The verbosity level for the related auto-created inventory source, special to constructed inventory'),
+    )
+
+    class Meta:
+        model = Inventory
+        fields = ('*', '-host_filter') + CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
+        read_only_fields = ('*', 'kind')
+
+    def pop_inv_src_data(self, data):
+        inv_src_data = {}
+        for field in CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS:
+            if field in data:
+                # values always need to be removed, as they are not valid for Inventory model
+                value = data.pop(field)
+                # null is not valid for any of those fields, taken as not-provided
+                if value is not None:
+                    inv_src_data[field] = value
+        return inv_src_data
+
+    def apply_inv_src_data(self, inventory, inv_src_data):
+        if inv_src_data:
+            update_fields = []
+            inv_src = inventory.inventory_sources.first()
+            for field, value in inv_src_data.items():
+                setattr(inv_src, field, value)
+                update_fields.append(field)
+            if update_fields:
+                inv_src.save(update_fields=update_fields)
+
+    def create(self, validated_data):
+        validated_data['kind'] = 'constructed'
+        inv_src_data = self.pop_inv_src_data(validated_data)
+        inventory = super().create(validated_data)
+        self.apply_inv_src_data(inventory, inv_src_data)
+        return inventory
+
+    def update(self, obj, validated_data):
+        inv_src_data = self.pop_inv_src_data(validated_data)
+        obj = super().update(obj, validated_data)
+        self.apply_inv_src_data(obj, inv_src_data)
+        return obj
+
+
 class InventoryScriptSerializer(InventorySerializer):
     class Meta:
         fields = ()
```
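ConstructedInventorySerializer's create/update follow a pop-then-apply pattern: the four `CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS` are never valid on the `Inventory` model itself, so they are popped from the validated payload before the inventory is saved, then written to the auto-created inventory source afterwards. The popping step in isolation:

```python
# The pop step of ConstructedInventorySerializer in isolation: strip the
# source-only fields from the payload, treating None as "not provided".
EDITABLE = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')

def pop_inv_src_data(data):
    picked = {}
    for field in EDITABLE:
        if field in data:
            value = data.pop(field)  # always remove; never valid on Inventory
            if value is not None:
                picked[field] = value
    return picked

payload = {'name': 'constructed-inv', 'limit': 'host1,host2', 'verbosity': None}
src_data = pop_inv_src_data(payload)
assert payload == {'name': 'constructed-inv'}   # inventory-only fields remain
assert src_data == {'limit': 'host1,host2'}     # verbosity=None was dropped
```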
```diff
@@ -1779,6 +1904,9 @@ class HostSerializer(BaseSerializerWithVariables):
                 ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
             )
         )
+        if obj.inventory.kind == 'constructed':
+            res['original_host'] = self.reverse('api:host_detail', kwargs={'pk': obj.instance_id})
+            res['ansible_facts'] = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id})
         if obj.inventory:
             res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
         if obj.last_job:
@@ -1800,6 +1928,10 @@ class HostSerializer(BaseSerializerWithVariables):
         group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
         group_cnt = obj.groups.count()
         d.setdefault('groups', {'count': group_cnt, 'results': group_list})
+        if obj.inventory.kind == 'constructed':
+            summaries_qs = obj.constructed_host_summaries
+        else:
+            summaries_qs = obj.job_host_summaries
         d.setdefault(
             'recent_jobs',
             [
@@ -1810,7 +1942,7 @@ class HostSerializer(BaseSerializerWithVariables):
                     'status': j.job.status,
                     'finished': j.job.finished,
                 }
-                for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
+                for j in summaries_qs.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
             ],
         )
         return d
@@ -1835,8 +1967,8 @@ class HostSerializer(BaseSerializerWithVariables):
         return value
 
     def validate_inventory(self, value):
-        if value.kind == 'smart':
-            raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
+        if value.kind in ('constructed', 'smart'):
+            raise serializers.ValidationError({"detail": _("Cannot create Host for Smart or Constructed Inventories")})
         return value
 
     def validate_variables(self, value):
@@ -1853,7 +1985,7 @@ class HostSerializer(BaseSerializerWithVariables):
             vars_dict = parse_yaml_or_json(variables)
             vars_dict['ansible_ssh_port'] = port
             attrs['variables'] = json.dumps(vars_dict)
-        if Group.objects.filter(name=name, inventory=inventory).exists():
+        if inventory and Group.objects.filter(name=name, inventory=inventory).exists():
             raise serializers.ValidationError(_('A Group with that name already exists.'))
 
         return super(HostSerializer, self).validate(attrs)
@@ -1934,8 +2066,8 @@ class GroupSerializer(BaseSerializerWithVariables):
         return value
 
     def validate_inventory(self, value):
-        if value.kind == 'smart':
-            raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
+        if value.kind in ('constructed', 'smart'):
+            raise serializers.ValidationError({"detail": _("Cannot create Group for Smart or Constructed Inventories")})
         return value
 
     def to_representation(self, obj):
```
```diff
@@ -1945,6 +2077,130 @@ class GroupSerializer(BaseSerializerWithVariables):
         return ret
 
 
+class BulkHostSerializer(HostSerializer):
+    class Meta:
+        model = Host
+        fields = (
+            'name',
+            'enabled',
+            'instance_id',
+            'description',
+            'variables',
+        )
+
+
+class BulkHostCreateSerializer(serializers.Serializer):
+    inventory = serializers.PrimaryKeyRelatedField(
+        queryset=Inventory.objects.all(), required=True, write_only=True, help_text=_('Primary Key ID of inventory to add hosts to.')
+    )
+    hosts = serializers.ListField(
+        child=BulkHostSerializer(),
+        allow_empty=False,
+        max_length=100000,
+        write_only=True,
+        help_text=_('List of hosts to be created, JSON. e.g. [{"name": "example.com"}, {"name": "127.0.0.1"}]'),
+    )
+
+    class Meta:
+        model = Inventory
+        fields = ('inventory', 'hosts')
+        read_only_fields = ()
+
+    def raise_if_host_counts_violated(self, attrs):
+        validation_info = get_licenser().validate()
+
+        org = attrs['inventory'].organization
+
+        if org:
+            org_active_count = Host.objects.org_active_count(org.id)
+            new_hosts = [h['name'] for h in attrs['hosts']]
+            org_net_new_host_count = len(new_hosts) - Host.objects.filter(inventory__organization=1, name__in=new_hosts).values('name').distinct().count()
+            if org.max_hosts > 0 and org_active_count + org_net_new_host_count > org.max_hosts:
+                raise PermissionDenied(
+                    _(
+                        "You have already reached the maximum number of %s hosts"
+                        " allowed for your organization. Contact your System Administrator"
+                        " for assistance." % org.max_hosts
+                    )
+                )
+
+        # Don't check license if it is open license
+        if validation_info.get('license_type', 'UNLICENSED') == 'open':
+            return
+
+        sys_free_instances = validation_info.get('free_instances', 0)
+        system_net_new_host_count = Host.objects.exclude(name__in=new_hosts).count()
+
+        if system_net_new_host_count > sys_free_instances:
+            hard_error = validation_info.get('trial', False) is True or validation_info['instance_count'] == 10
+            if hard_error:
+                # Only raise permission error for trial, otherwise just log a warning as we do in other inventory import situations
+                raise PermissionDenied(_("Host count exceeds available instances."))
+            logger.warning(_("Number of hosts allowed by license has been exceeded."))
+
+    def validate(self, attrs):
+        request = self.context.get('request', None)
+        inv = attrs['inventory']
+        if inv.kind != '':
+            raise serializers.ValidationError(_('Hosts can only be created in manual inventories (not smart or constructed types).'))
+        if len(attrs['hosts']) > settings.BULK_HOST_MAX_CREATE:
+            raise serializers.ValidationError(_('Number of hosts exceeds system setting BULK_HOST_MAX_CREATE'))
+        if request and not request.user.is_superuser:
+            if request.user not in inv.admin_role:
+                raise serializers.ValidationError(_(f'Inventory with id {inv.id} not found or lack permissions to add hosts.'))
+        current_hostnames = set(inv.hosts.values_list('name', flat=True))
+        new_names = [host['name'] for host in attrs['hosts']]
+        duplicate_new_names = [n for n in new_names if n in current_hostnames or new_names.count(n) > 1]
+        if duplicate_new_names:
+            raise serializers.ValidationError(_(f'Hostnames must be unique in an inventory. Duplicates found: {duplicate_new_names}'))
+
+        self.raise_if_host_counts_violated(attrs)
+
+        _now = now()
+        for host in attrs['hosts']:
+            host['created'] = _now
+            host['modified'] = _now
+            host['inventory'] = inv
+        return attrs
+
+    def create(self, validated_data):
+        # This assumes total_hosts is up to date, and it can get out of date if the inventory computed fields have not been updated lately.
+        # If we wanted to side step this we could query Hosts.objects.filter(inventory...)
+        old_total_hosts = validated_data['inventory'].total_hosts
+        result = [Host(**attrs) for attrs in validated_data['hosts']]
+        try:
+            Host.objects.bulk_create(result)
+        except Exception as e:
+            raise serializers.ValidationError({"detail": _(f"cannot create host, host creation error {e}")})
+        new_total_hosts = old_total_hosts + len(result)
+        request = self.context.get('request', None)
+        changes = {'total_hosts': [old_total_hosts, new_total_hosts]}
+        activity_entry = ActivityStream.objects.create(
+            operation='update',
+            object1='inventory',
+            changes=json.dumps(changes),
+            actor=request.user,
+        )
+        activity_entry.inventory.add(validated_data['inventory'])
+
+        # This actually updates the cached "total_hosts" field on the inventory
+        update_inventory_computed_fields.delay(validated_data['inventory'].id)
+        return_keys = [k for k in BulkHostSerializer().fields.keys()] + ['id']
+        return_data = {}
+        host_data = []
+        for r in result:
+            item = {k: getattr(r, k) for k in return_keys}
+            if settings.DATABASES and ('sqlite3' not in settings.DATABASES.get('default', {}).get('ENGINE')):
+                # sqlite acts different with bulk_create -- it doesn't return the id of the objects
+                # to get it, you have to do an additional query, which is not useful for our tests
+                item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
+            item['inventory'] = reverse('api:inventory_detail', kwargs={'pk': validated_data['inventory'].id})
+            host_data.append(item)
+        return_data['url'] = reverse('api:inventory_detail', kwargs={'pk': validated_data['inventory'].id})
+        return_data['hosts'] = host_data
+        return return_data
+
+
 class GroupTreeSerializer(GroupSerializer):
     children = serializers.SerializerMethodField()
```
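For context, a hedged usage sketch of the bulk host serializer: one request creates many hosts in a single `bulk_create`. The endpoint path, hostname, and token below are illustrative assumptions only; they are not taken from this diff.

```python
# Hypothetical client call for the bulk host create serializer above.
# The URL, hostname, and token are illustrative assumptions.
import json
import urllib.request

payload = {
    "inventory": 1,  # PK of a manual (non-smart, non-constructed) inventory
    "hosts": [{"name": "example.com"}, {"name": "127.0.0.1"}],
}
req = urllib.request.Request(
    "https://awx.example.org/api/v2/bulk/host_create/",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json", "Authorization": "Bearer <token>"},
    method="POST",
)
# urllib.request.urlopen(req)  # run against a real AWX instance
```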
```diff
@@ -2000,6 +2256,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
             'source',
             'source_path',
             'source_vars',
+            'scm_branch',
             'credential',
             'enabled_var',
             'enabled_value',
@@ -2009,6 +2266,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
             'custom_virtualenv',
             'timeout',
             'verbosity',
+            'limit',
         )
         read_only_fields = ('*', 'custom_virtualenv')
 
@@ -2115,8 +2373,8 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
         return value
 
     def validate_inventory(self, value):
-        if value and value.kind == 'smart':
-            raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
+        if value and value.kind in ('constructed', 'smart'):
+            raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart or Constructed Inventories")})
         return value
 
     # TODO: remove when old 'credential' fields are removed
@@ -2160,14 +2418,25 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
         def get_field_from_model_or_attrs(fd):
             return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
 
-        if get_field_from_model_or_attrs('source') == 'scm':
+        if self.instance and self.instance.source == 'constructed':
+            allowed_fields = CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
+            for field in attrs:
+                if attrs[field] != getattr(self.instance, field) and field not in allowed_fields:
+                    raise serializers.ValidationError({"error": _("Cannot change field '{}' on a constructed inventory source.").format(field)})
+        elif get_field_from_model_or_attrs('source') == 'scm':
             if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
                 raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
+        elif get_field_from_model_or_attrs('source') == 'constructed':
+            raise serializers.ValidationError({"error": _('constructed not a valid source for inventory')})
         else:
-            redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path']))
+            redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
             if redundant_scm_fields:
                 raise serializers.ValidationError({"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))})
 
+        project = get_field_from_model_or_attrs('source_project')
+        if get_field_from_model_or_attrs('scm_branch') and not project.allow_override:
+            raise serializers.ValidationError({'scm_branch': _('Project does not allow overriding branch.')})
+
         attrs = super(InventorySourceSerializer, self).validate(attrs)
 
         # Check type consistency of source and cloud credential, if provided
@@ -3900,6 +4169,7 @@ class JobHostSummarySerializer(BaseSerializer):
             '-description',
             'job',
             'host',
+            'constructed_host',
             'host_name',
             'changed',
             'dark',
```
@@ -4405,6 +4675,271 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
return accepted
|
||||
|
||||
|
||||
class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
    # We don't do a PrimaryKeyRelatedField for unified_job_template and others, because that increases the number
    # of database queries, rather we take them as integer and later convert them to objects in get_objectified_jobs
    unified_job_template = serializers.IntegerField(
        required=True, min_value=1, help_text=_('Primary key of the template for this job, can be a job template or inventory source.')
    )
    inventory = serializers.IntegerField(required=False, min_value=1)
    execution_environment = serializers.IntegerField(required=False, min_value=1)
    # many-to-many fields
    credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
    labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
    # TODO: Use instance group role added via PR 13584 (once merged), for now everything related to instance group is commented
    # instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)

    class Meta:
        model = WorkflowJobNode
        fields = ('*', 'credentials', 'labels')  # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported

    def validate(self, attrs):
        return super(LaunchConfigurationBaseSerializer, self).validate(attrs)

    def get_validation_exclusions(self, obj=None):
        ret = super().get_validation_exclusions(obj)
        ret.extend(['unified_job_template', 'inventory', 'execution_environment'])
        return ret


class BulkJobLaunchSerializer(serializers.Serializer):
    name = serializers.CharField(default='Bulk Job Launch', max_length=512, write_only=True, required=False, allow_blank=True)  # limited by max name of jobs
    jobs = BulkJobNodeSerializer(
        many=True,
        allow_empty=False,
        write_only=True,
        max_length=100000,
        help_text=_('List of jobs to be launched, JSON. e.g. [{"unified_job_template": 7}, {"unified_job_template": 10}]'),
    )
    description = serializers.CharField(write_only=True, required=False, allow_blank=False)
    extra_vars = serializers.JSONField(write_only=True, required=False)
    organization = serializers.PrimaryKeyRelatedField(
        queryset=Organization.objects.all(),
        required=False,
        default=None,
        allow_null=True,
        write_only=True,
        help_text=_('Inherit permissions from this organization. If not provided, a organization the user is a member of will be selected automatically.'),
    )
    inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True)
    limit = serializers.CharField(write_only=True, required=False, allow_blank=False)
    scm_branch = serializers.CharField(write_only=True, required=False, allow_blank=False)
    skip_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
    job_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)

    class Meta:
        model = WorkflowJob
        fields = ('name', 'jobs', 'description', 'extra_vars', 'organization', 'inventory', 'limit', 'scm_branch', 'skip_tags', 'job_tags')
        read_only_fields = ()

    def validate(self, attrs):
        request = self.context.get('request', None)
        identifiers = set()
        if len(attrs['jobs']) > settings.BULK_JOB_MAX_LAUNCH:
            raise serializers.ValidationError(_('Number of requested jobs exceeds system setting BULK_JOB_MAX_LAUNCH'))

        for node in attrs['jobs']:
            if 'identifier' in node:
                if node['identifier'] in identifiers:
                    raise serializers.ValidationError(_(f"Identifier {node['identifier']} not unique"))
                identifiers.add(node['identifier'])
            else:
                node['identifier'] = str(uuid4())

        requested_ujts = {j['unified_job_template'] for j in attrs['jobs']}
        requested_use_inventories = {job['inventory'] for job in attrs['jobs'] if 'inventory' in job}
        requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
        requested_use_credentials = set()
        requested_use_labels = set()
        # requested_use_instance_groups = set()
        for job in attrs['jobs']:
            for cred in job.get('credentials', []):
                requested_use_credentials.add(cred)
            for label in job.get('labels', []):
                requested_use_labels.add(label)
            # for instance_group in job.get('instance_groups', []):
            #     requested_use_instance_groups.add(instance_group)

        key_to_obj_map = {
            "unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
            "inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
            "credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
            "labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
            # "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
            "execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
        }

        ujts = {}
        for ujt in key_to_obj_map['unified_job_template'].values():
            ujts.setdefault(type(ujt), [])
            ujts[type(ujt)].append(ujt)

        unallowed_types = set(ujts.keys()) - set([JobTemplate, Project, InventorySource, WorkflowJobTemplate])
        if unallowed_types:
            type_names = ' '.join([cls._meta.verbose_name.title() for cls in unallowed_types])
            raise serializers.ValidationError(_("Template types {type_names} not allowed in bulk jobs").format(type_names=type_names))

        for model, obj_list in ujts.items():
            role_field = 'execute_role' if issubclass(model, (JobTemplate, WorkflowJobTemplate)) else 'update_role'
            self.check_list_permission(model, set([obj.id for obj in obj_list]), role_field)

        self.check_organization_permission(attrs, request)

        if 'inventory' in attrs:
            requested_use_inventories.add(attrs['inventory'].id)

        self.check_list_permission(Inventory, requested_use_inventories, 'use_role')

        self.check_list_permission(Credential, requested_use_credentials, 'use_role')
        self.check_list_permission(Label, requested_use_labels)
        # self.check_list_permission(InstanceGroup, requested_use_instance_groups)  # TODO: change to use_role for conflict
        self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments)  # TODO: change if roles introduced

        jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)

        attrs['jobs'] = jobs_object
        if 'extra_vars' in attrs:
            extra_vars_dict = parse_yaml_or_json(attrs['extra_vars'])
            attrs['extra_vars'] = json.dumps(extra_vars_dict)
        attrs = super().validate(attrs)
        return attrs

    def check_list_permission(self, model, id_list, role_field=None):
        if not id_list:
            return
        user = self.context['request'].user
        if role_field is None:  # implies "read" level permission is required
            access_qs = user.get_queryset(model)
        else:
            access_qs = model.accessible_objects(user, role_field)

        not_allowed = set(id_list) - set(access_qs.filter(id__in=id_list).values_list('id', flat=True))
        if not_allowed:
            raise serializers.ValidationError(
                _("{model_name} {not_allowed} not found or you don't have permissions to access it").format(
                    model_name=model._meta.verbose_name_plural.title(), not_allowed=not_allowed
                )
            )

    def create(self, validated_data):
        request = self.context.get('request', None)
        launch_user = request.user if request else None
        job_node_data = validated_data.pop('jobs')
        wfj_deferred_attr_names = ('skip_tags', 'limit', 'job_tags')
        wfj_deferred_vals = {}
        for item in wfj_deferred_attr_names:
            wfj_deferred_vals[item] = validated_data.pop(item, None)

        wfj = WorkflowJob.objects.create(**validated_data, is_bulk_job=True, launch_type='manual', created_by=launch_user)
        for key, val in wfj_deferred_vals.items():
            if val:
                setattr(wfj, key, val)
        nodes = []
        node_m2m_objects = {}
        node_m2m_object_types_to_through_model = {
            'credentials': WorkflowJobNode.credentials.through,
            'labels': WorkflowJobNode.labels.through,
            # 'instance_groups': WorkflowJobNode.instance_groups.through,
        }
        node_deferred_attr_names = (
            'limit',
            'scm_branch',
            'verbosity',
            'forks',
            'diff_mode',
            'job_tags',
            'job_type',
            'skip_tags',
            'job_slice_count',
            'timeout',
        )
        node_deferred_attrs = {}
        for node_attrs in job_node_data:
            # we need to add any m2m objects after creation via the through model
            node_m2m_objects[node_attrs['identifier']] = {}
            node_deferred_attrs[node_attrs['identifier']] = {}
            for item in node_m2m_object_types_to_through_model.keys():
                if item in node_attrs:
                    node_m2m_objects[node_attrs['identifier']][item] = node_attrs.pop(item)

            # Some attributes are not accepted by WorkflowJobNode __init__, we have to set them after
            for item in node_deferred_attr_names:
                if item in node_attrs:
                    node_deferred_attrs[node_attrs['identifier']][item] = node_attrs.pop(item)

            # Create the node objects
            node_obj = WorkflowJobNode(workflow_job=wfj, created=wfj.created, modified=wfj.modified, **node_attrs)

            # we can set the deferred attrs now
            for item, value in node_deferred_attrs[node_attrs['identifier']].items():
                setattr(node_obj, item, value)

            # the node is now ready to be bulk created
            nodes.append(node_obj)

            # we'll need this later when we do the m2m through model bulk create
            node_m2m_objects[node_attrs['identifier']]['node'] = node_obj

        WorkflowJobNode.objects.bulk_create(nodes)

        # Deal with the m2m objects we have to create once the node exists
        for field_name, through_model in node_m2m_object_types_to_through_model.items():
            through_model_objects = []
            for node_identifier in node_m2m_objects.keys():
                if field_name in node_m2m_objects[node_identifier] and field_name == 'credentials':
                    for cred in node_m2m_objects[node_identifier][field_name]:
                        through_model_objects.append(through_model(credential=cred, workflowjobnode=node_m2m_objects[node_identifier]['node']))
                if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
                    for label in node_m2m_objects[node_identifier][field_name]:
                        through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
                # if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
                #     for instance_group in node_m2m_objects[node_identifier][obj_type]:
                #         through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
            if through_model_objects:
                through_model.objects.bulk_create(through_model_objects)

        wfj.save()
        wfj.signal_start()

        return WorkflowJobSerializer().to_representation(wfj)

    def check_organization_permission(self, attrs, request):
        # validate Organization
        # - If the org is not set, set it to the org of the launching user
        # - If the user is part of multiple orgs, throw a validation error saying user is part of multiple orgs, please provide one
        if not request.user.is_superuser:
            read_org_qs = Organization.accessible_objects(request.user, 'member_role')
            if 'organization' not in attrs or attrs['organization'] == None or attrs['organization'] == '':
                read_org_ct = read_org_qs.count()
                if read_org_ct == 1:
                    attrs['organization'] = read_org_qs.first()
                elif read_org_ct > 1:
                    raise serializers.ValidationError("User has permission to multiple Organizations, please set one of them in the request")
                else:
                    raise serializers.ValidationError("User not part of any organization, please assign an organization to assign to the bulk job")
            else:
                allowed_orgs = set(read_org_qs.values_list('id', flat=True))
                requested_org = attrs['organization']
                if requested_org.id not in allowed_orgs:
                    raise ValidationError(_(f"Organization {requested_org.id} not found or you don't have permissions to access it"))

    def get_objectified_jobs(self, attrs, key_to_obj_map):
        objectified_jobs = []
        # This loop is generalized so we should only have to add related items to the key_to_obj_map
        for job in attrs['jobs']:
            objectified_job = {}
            for key, value in job.items():
                if key in key_to_obj_map:
                    if isinstance(value, int):
                        objectified_job[key] = key_to_obj_map[key][value]
                    elif isinstance(value, list):
                        objectified_job[key] = [key_to_obj_map[key][item] for item in value]
                else:
                    objectified_job[key] = value
            objectified_jobs.append(objectified_job)
        return objectified_jobs


class NotificationTemplateSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete', 'copy']
    capabilities_prefetch = [{'copy': 'organization.admin'}]
@@ -4988,6 +5523,32 @@ class InstanceHealthCheckSerializer(BaseSerializer):
        fields = read_only_fields


class HostMetricSerializer(BaseSerializer):
    show_capabilities = ['delete']

    class Meta:
        model = HostMetric
        fields = (
            "id",
            "hostname",
            "url",
            "first_automation",
            "last_automation",
            "last_deleted",
            "automated_counter",
            "deleted_counter",
            "deleted",
            "used_in_inventories",
        )


class HostMetricSummaryMonthlySerializer(BaseSerializer):
    class Meta:
        model = HostMetricSummaryMonthly
        read_only_fields = ("id", "date", "license_consumed", "license_capacity", "hosts_added", "hosts_deleted", "indirectly_managed_hosts")
        fields = read_only_fields


class InstanceGroupSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete']
    capacity = serializers.SerializerMethodField()
@@ -5073,6 +5634,8 @@ class InstanceGroupSerializer(BaseSerializer):
        res = super(InstanceGroupSerializer, self).get_related(obj)
        res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
        res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
        res['access_list'] = self.reverse('api:instance_group_access_list', kwargs={'pk': obj.pk})
        res['object_roles'] = self.reverse('api:instance_group_object_role_list', kwargs={'pk': obj.pk})
        if obj.credential:
            res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})


@@ -1,4 +0,0 @@
Version 1 of the Ansible Tower REST API.

Make a GET request to this resource to obtain a list of all child resources
available via the API.
@@ -7,10 +7,12 @@ the following fields (some fields may not be visible to all users):
* `project_base_dir`: Path on the server where projects and playbooks are \
  stored.
* `project_local_paths`: List of directories beneath `project_base_dir` to
  use when creating/editing a project.
  use when creating/editing a manual project.
* `time_zone`: The configured time zone for the server.
* `license_info`: Information about the current license.
* `version`: Version of Ansible Tower package installed.
* `custom_virtualenvs`: Deprecated venv locations from before migration to
  execution environments. Export tooling is in `awx-manage` commands.
* `eula`: The current End-User License Agreement
{% endifmeth %}

41 awx/api/templates/api/bulk_host_create_view.md Normal file
@@ -0,0 +1,41 @@
# Bulk Host Create

This endpoint allows the client to create multiple hosts and associate them with an inventory. They may do this by providing the inventory ID and a list of JSON host objects that would normally be provided to create hosts.

Example:

    {
        "inventory": 1,
        "hosts": [
            {"name": "example1.com", "variables": "ansible_connection: local"},
            {"name": "example2.com"}
        ]
    }

Return data:

    {
        "url": "/api/v2/inventories/3/hosts/",
        "hosts": [
            {
                "name": "example1.com",
                "enabled": true,
                "instance_id": "",
                "description": "",
                "variables": "ansible_connection: local",
                "id": 1255,
                "url": "/api/v2/hosts/1255/",
                "inventory": "/api/v2/inventories/3/"
            },
            {
                "name": "example2.com",
                "enabled": true,
                "instance_id": "",
                "description": "",
                "variables": "",
                "id": 1256,
                "url": "/api/v2/hosts/1256/",
                "inventory": "/api/v2/inventories/3/"
            }
        ]
    }
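
For illustration, a minimal client-side sketch of exercising this endpoint, assuming a hypothetical AWX instance at https://awx.example.com and a hypothetical OAuth2 token with permission to add hosts to the inventory:

    import requests

    AWX_URL = "https://awx.example.com"  # hypothetical host
    TOKEN = "your-oauth2-token"  # hypothetical token

    payload = {
        "inventory": 1,
        "hosts": [
            {"name": "example1.com", "variables": "ansible_connection: local"},
            {"name": "example2.com"},
        ],
    }

    # POST the whole batch at once; a 201 response carries the created host records.
    resp = requests.post(
        f"{AWX_URL}/api/v2/bulk/host_create/",
        json=payload,
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    for host in resp.json()["hosts"]:
        print(host["id"], host["name"])
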
13 awx/api/templates/api/bulk_job_launch_view.md Normal file
@@ -0,0 +1,13 @@
# Bulk Job Launch

This endpoint allows the client to launch multiple UnifiedJobTemplates at a time, alongside any parameters they would normally set at launch time.

Example:

    {
        "name": "my bulk job",
        "jobs": [
            {"unified_job_template": 7, "inventory": 2},
            {"unified_job_template": 7, "credentials": [3]}
        ]
    }
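
A sketch of driving this endpoint from Python, under the same hypothetical host and token assumptions as the bulk host create example above; the URL comes from the bulk/job_launch/ route registered later in this changeset:

    import requests

    AWX_URL = "https://awx.example.com"  # hypothetical host
    TOKEN = "your-oauth2-token"  # hypothetical token

    payload = {
        "name": "my bulk job",
        "jobs": [
            {"unified_job_template": 7, "inventory": 2},
            {"unified_job_template": 7, "credentials": [3]},
        ],
    }

    # A valid request returns 201 with the representation of the WorkflowJob
    # that wraps the individual jobs (see BulkJobLaunchView.post).
    resp = requests.post(
        f"{AWX_URL}/api/v2/bulk/job_launch/",
        json=payload,
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    workflow_job = resp.json()
    print(workflow_job["id"], workflow_job["status"])
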
3 awx/api/templates/api/bulk_view.md Normal file
@@ -0,0 +1,3 @@
# Bulk Actions

This endpoint lists available bulk action APIs.
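
As a quick illustration (same hypothetical host and token as the earlier sketches), a GET against the bulk root simply enumerates the two action endpoints exposed by BulkView:

    import requests

    # Expected keys per BulkView.get(): 'host_create' and 'job_launch'
    resp = requests.get(
        "https://awx.example.com/api/v2/bulk/",  # hypothetical host
        headers={"Authorization": "Bearer your-oauth2-token"},  # hypothetical token
    )
    print(resp.json())
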
@@ -1,11 +0,0 @@
# List Fact Scans for a Host Specific Host Scan

Make a GET request to this resource to retrieve system tracking data for a particular scan

You may filter by datetime:

`?datetime=2015-06-01`

and module

`?datetime=2015-06-01&module=ansible`
@@ -1,11 +0,0 @@
# List Fact Scans for a Host by Module and Date

Make a GET request to this resource to retrieve system tracking scans by module and date/time

You may filter scan runs using the `from` and `to` properties:

`?from=2015-06-01%2012:00:00&to=2015-06-03`

You may also filter by module

`?module=packages`
@@ -1 +0,0 @@
# List Red Hat Insights for a Host
18 awx/api/templates/api/host_metric_detail.md Normal file
@@ -0,0 +1,18 @@
{% ifmeth GET %}
# Retrieve {{ model_verbose_name|title|anora }}:

Make a GET request to this resource to retrieve a single {{ model_verbose_name }}
record containing the following fields:

{% include "api/_result_fields_common.md" %}
{% endifmeth %}

{% ifmeth DELETE %}
# Delete {{ model_verbose_name|title|anora }}:

Make a DELETE request to this resource to soft-delete this {{ model_verbose_name }}.

A soft deletion will mark the `deleted` field as true and exclude the host
metric from license calculations.
This may be undone if the same hostname is automated again later.
{% endifmeth %}
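
A sketch of the soft-delete flow just described, assuming a hypothetical metric ID of 42 and a token for a system administrator (HostMetricDetail is gated by IsSystemAdminOrAuditor):

    import requests

    AWX_URL = "https://awx.example.com"  # hypothetical host
    HEADERS = {"Authorization": "Bearer admin-oauth2-token"}  # hypothetical token

    # DELETE flips the metric's `deleted` flag instead of removing the row;
    # HostMetricDetail.delete() answers 204 No Content.
    resp = requests.delete(f"{AWX_URL}/api/v2/host_metrics/42/", headers=HEADERS)
    assert resp.status_code == 204

    # The record is still retrievable and should now report deleted=true.
    metric = requests.get(f"{AWX_URL}/api/v2/host_metrics/42/", headers=HEADERS).json()
    print(metric["deleted"])
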
@@ -1,21 +0,0 @@
{% ifmeth GET %}
# Determine if a Job can be started

Make a GET request to this resource to determine if the job can be started and
whether any passwords are required to start the job. The response will include
the following fields:

* `can_start`: Flag indicating if this job can be started (boolean, read-only)
* `passwords_needed_to_start`: Password names required to start the job (array,
  read-only)
{% endifmeth %}

{% ifmeth POST %}
# Start a Job
Make a POST request to this resource to start the job. If any passwords are
required, they must be passed via POST data.

If successful, the response status code will be 202. If any required passwords
are not provided, a 400 status code will be returned. If the job cannot be
started, a 405 status code will be returned.
{% endifmeth %}
@@ -2,6 +2,7 @@ receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
receptor_work_commands:
  ansible-runner:
    command: ansible-runner

31 awx/api/urls/analytics.py Normal file
@@ -0,0 +1,31 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

import awx.api.views.analytics as analytics


urls = [
    re_path(r'^$', analytics.AnalyticsRootView.as_view(), name='analytics_root_view'),
    re_path(r'^authorized/$', analytics.AnalyticsAuthorizedView.as_view(), name='analytics_authorized'),
    re_path(r'^reports/$', analytics.AnalyticsReportsList.as_view(), name='analytics_reports_list'),
    re_path(r'^report/(?P<slug>[\w-]+)/$', analytics.AnalyticsReportDetail.as_view(), name='analytics_report_detail'),
    re_path(r'^report_options/$', analytics.AnalyticsReportOptionsList.as_view(), name='analytics_report_options_list'),
    re_path(r'^adoption_rate/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate'),
    re_path(r'^adoption_rate_options/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate_options'),
    re_path(r'^event_explorer/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer'),
    re_path(r'^event_explorer_options/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer_options'),
    re_path(r'^host_explorer/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer'),
    re_path(r'^host_explorer_options/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer_options'),
    re_path(r'^job_explorer/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer'),
    re_path(r'^job_explorer_options/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer_options'),
    re_path(r'^probe_templates/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_explorer'),
    re_path(r'^probe_templates_options/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_options'),
    re_path(r'^probe_template_for_hosts/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_explorer'),
    re_path(r'^probe_template_for_hosts_options/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_options'),
    re_path(r'^roi_templates/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_explorer'),
    re_path(r'^roi_templates_options/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_options'),
]

__all__ = ['urls']
10 awx/api/urls/host_metric.py Normal file
@@ -0,0 +1,10 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import HostMetricList, HostMetricDetail

urls = [re_path(r'^$', HostMetricList.as_view(), name='host_metric_list'), re_path(r'^(?P<pk>[0-9]+)/$', HostMetricDetail.as_view(), name='host_metric_detail')]

__all__ = ['urls']
@@ -3,7 +3,14 @@

from django.urls import re_path

from awx.api.views import InstanceGroupList, InstanceGroupDetail, InstanceGroupUnifiedJobsList, InstanceGroupInstanceList
from awx.api.views import (
    InstanceGroupList,
    InstanceGroupDetail,
    InstanceGroupUnifiedJobsList,
    InstanceGroupInstanceList,
    InstanceGroupAccessList,
    InstanceGroupObjectRolesList,
)


urls = [
@@ -11,6 +18,8 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/$', InstanceGroupDetail.as_view(), name='instance_group_detail'),
    re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceGroupUnifiedJobsList.as_view(), name='instance_group_unified_jobs_list'),
    re_path(r'^(?P<pk>[0-9]+)/instances/$', InstanceGroupInstanceList.as_view(), name='instance_group_instance_list'),
    re_path(r'^(?P<pk>[0-9]+)/access_list/$', InstanceGroupAccessList.as_view(), name='instance_group_access_list'),
    re_path(r'^(?P<pk>[0-9]+)/object_roles/$', InstanceGroupObjectRolesList.as_view(), name='instance_group_object_role_list'),
]

__all__ = ['urls']

@@ -6,7 +6,10 @@ from django.urls import re_path
from awx.api.views.inventory import (
    InventoryList,
    InventoryDetail,
    ConstructedInventoryDetail,
    ConstructedInventoryList,
    InventoryActivityStreamList,
    InventoryInputInventoriesList,
    InventoryJobTemplateList,
    InventoryAccessList,
    InventoryObjectRolesList,
@@ -37,6 +40,7 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/script/$', InventoryScriptView.as_view(), name='inventory_script_view'),
    re_path(r'^(?P<pk>[0-9]+)/tree/$', InventoryTreeView.as_view(), name='inventory_tree_view'),
    re_path(r'^(?P<pk>[0-9]+)/inventory_sources/$', InventoryInventorySourcesList.as_view(), name='inventory_inventory_sources_list'),
    re_path(r'^(?P<pk>[0-9]+)/input_inventories/$', InventoryInputInventoriesList.as_view(), name='inventory_input_inventories'),
    re_path(r'^(?P<pk>[0-9]+)/update_inventory_sources/$', InventoryInventorySourcesUpdate.as_view(), name='inventory_inventory_sources_update'),
    re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', InventoryActivityStreamList.as_view(), name='inventory_activity_stream_list'),
    re_path(r'^(?P<pk>[0-9]+)/job_templates/$', InventoryJobTemplateList.as_view(), name='inventory_job_template_list'),
@@ -48,4 +52,10 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
]

__all__ = ['urls']
# Constructed inventory special views
constructed_inventory_urls = [
    re_path(r'^$', ConstructedInventoryList.as_view(), name='constructed_inventory_list'),
    re_path(r'^(?P<pk>[0-9]+)/$', ConstructedInventoryDetail.as_view(), name='constructed_inventory_detail'),
]

__all__ = ['urls', 'constructed_inventory_urls']

@@ -30,19 +30,29 @@ from awx.api.views import (
    OAuth2TokenList,
    ApplicationOAuth2TokenList,
    OAuth2ApplicationDetail,
    # HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
)

from awx.api.views.bulk import (
    BulkView,
    BulkHostCreateView,
    BulkJobLaunchView,
)

from awx.api.views.mesh_visualizer import MeshVisualizer

from awx.api.views.metrics import MetricsView
from awx.api.views.analytics import AWX_ANALYTICS_API_PREFIX

from .organization import urls as organization_urls
from .user import urls as user_urls
from .project import urls as project_urls
from .project_update import urls as project_update_urls
from .inventory import urls as inventory_urls
from .inventory import urls as inventory_urls, constructed_inventory_urls
from .execution_environments import urls as execution_environment_urls
from .team import urls as team_urls
from .host import urls as host_urls
from .host_metric import urls as host_metric_urls
from .group import urls as group_urls
from .inventory_source import urls as inventory_source_urls
from .inventory_update import urls as inventory_update_urls
@@ -73,7 +83,7 @@ from .oauth2 import urls as oauth2_urls
from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls

from .analytics import urls as analytics_urls

v2_urls = [
    re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -110,7 +120,11 @@ v2_urls = [
    re_path(r'^project_updates/', include(project_update_urls)),
    re_path(r'^teams/', include(team_urls)),
    re_path(r'^inventories/', include(inventory_urls)),
    re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
    re_path(r'^hosts/', include(host_urls)),
    re_path(r'^host_metrics/', include(host_metric_urls)),
    # It will be enabled in future version of the AWX
    # re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
    re_path(r'^groups/', include(group_urls)),
    re_path(r'^inventory_sources/', include(inventory_source_urls)),
    re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -134,8 +148,12 @@ v2_urls = [
    re_path(r'^unified_job_templates/$', UnifiedJobTemplateList.as_view(), name='unified_job_template_list'),
    re_path(r'^unified_jobs/$', UnifiedJobList.as_view(), name='unified_job_list'),
    re_path(r'^activity_stream/', include(activity_stream_urls)),
    re_path(rf'^{AWX_ANALYTICS_API_PREFIX}/', include(analytics_urls)),
    re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
    re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
    re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
    re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
    re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
]


@@ -17,7 +17,6 @@ from collections import OrderedDict

from urllib3.exceptions import ConnectTimeoutError


# Django
from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
@@ -30,7 +29,7 @@ from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext_lazy as _

@@ -63,7 +62,7 @@ from wsgiref.util import FileWrapper

# AWX
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
from awx.main.access import get_user_queryset, HostAccess
from awx.main.access import get_user_queryset
from awx.api.generics import (
    APIView,
    BaseUsersList,
@@ -466,6 +465,23 @@ class InstanceGroupUnifiedJobsList(SubListAPIView):
    relationship = "unifiedjob_set"


class InstanceGroupAccessList(ResourceAccessList):
    model = models.User  # needs to be User for AccessLists
    parent_model = models.InstanceGroup


class InstanceGroupObjectRolesList(SubListAPIView):
    model = models.Role
    serializer_class = serializers.RoleSerializer
    parent_model = models.InstanceGroup
    search_fields = ('role_field', 'content_type__model')

    def get_queryset(self):
        po = self.get_parent_object()
        content_type = ContentType.objects.get_for_model(self.parent_model)
        return models.Role.objects.filter(content_type=content_type, object_id=po.pk)


class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetachAPIView):
    name = _("Instance Group's Instances")
    model = models.Instance
@@ -778,13 +794,7 @@ class ExecutionEnvironmentActivityStreamList(SubListAPIView):
    parent_model = models.ExecutionEnvironment
    relationship = 'activitystream_set'
    search_fields = ('changes',)

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)

        qs = self.request.user.get_queryset(self.model)
        return qs.filter(execution_environment=parent)
    filter_read_permission = False


class ProjectList(ListCreateAPIView):
@@ -1531,6 +1541,41 @@ class HostRelatedSearchMixin(object):
        return ret


class HostMetricList(ListAPIView):
    name = _("Host Metrics List")
    model = models.HostMetric
    serializer_class = serializers.HostMetricSerializer
    permission_classes = (IsSystemAdminOrAuditor,)
    search_fields = ('hostname', 'deleted')

    def get_queryset(self):
        return self.model.objects.all()


class HostMetricDetail(RetrieveDestroyAPIView):
    name = _("Host Metric Detail")
    model = models.HostMetric
    serializer_class = serializers.HostMetricSerializer
    permission_classes = (IsSystemAdminOrAuditor,)

    def delete(self, request, *args, **kwargs):
        self.get_object().soft_delete()

        return Response(status=status.HTTP_204_NO_CONTENT)


# It will be enabled in future version of the AWX
# class HostMetricSummaryMonthlyList(ListAPIView):
#     name = _("Host Metrics Summary Monthly")
#     model = models.HostMetricSummaryMonthly
#     serializer_class = serializers.HostMetricSummaryMonthlySerializer
#     permission_classes = (IsSystemAdminOrAuditor,)
#     search_fields = ('date',)
#
#     def get_queryset(self):
#         return self.model.objects.all()


class HostList(HostRelatedSearchMixin, ListCreateAPIView):
    always_allow_superuser = False
    model = models.Host
@@ -1559,6 +1604,8 @@ class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
    def delete(self, request, *args, **kwargs):
        if self.get_object().inventory.pending_deletion:
            return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST)
        if self.get_object().inventory.kind == 'constructed':
            return Response({"error": _("Delete constructed inventory hosts from input inventory.")}, status=status.HTTP_400_BAD_REQUEST)
        return super(HostDetail, self).delete(request, *args, **kwargs)


@@ -1566,6 +1613,14 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
    model = models.Host
    serializer_class = serializers.AnsibleFactsSerializer

    def get(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.inventory.kind == 'constructed':
            # If this is a constructed inventory host, it is not the source of truth about facts
            # redirect to the original input inventory host instead
            return HttpResponseRedirect(reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id}, request=self.request))
        return super().get(request, *args, **kwargs)


class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
    model = models.Host
@@ -1573,13 +1628,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
    parent_model = models.Inventory
    relationship = 'hosts'
    parent_key = 'inventory'

    def get_queryset(self):
        inventory = self.get_parent_object()
        qs = getattrd(inventory, self.relationship).all()
        # Apply queryset optimizations
        qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
        return qs
    filter_read_permission = False


class HostGroupsList(SubListCreateAttachDetachAPIView):
@@ -2520,16 +2569,7 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
    serializer_class = serializers.CredentialSerializer
    parent_model = models.JobTemplate
    relationship = 'credentials'

    def get_queryset(self):
        # Return the full list of credentials
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        sublist_qs = getattrd(parent, self.relationship)
        sublist_qs = sublist_qs.prefetch_related(
            'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
        )
        return sublist_qs
    filter_read_permission = False

    def is_valid_relation(self, parent, sub, created=False):
        if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
@@ -2631,7 +2671,10 @@ class JobTemplateCallback(GenericAPIView):
        # Permission class should have already validated host_config_key.
        job_template = self.get_object()
        # Attempt to find matching hosts based on remote address.
        matching_hosts = self.find_matching_hosts()
        if job_template.inventory:
            matching_hosts = self.find_matching_hosts()
        else:
            return Response({"msg": _("Cannot start automatically, an inventory is required.")}, status=status.HTTP_400_BAD_REQUEST)
        # If the host is not found, update the inventory before trying to
        # match again.
        inventory_sources_already_updated = []
@@ -2716,6 +2759,7 @@ class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
    serializer_class = serializers.InstanceGroupSerializer
    parent_model = models.JobTemplate
    relationship = 'instance_groups'
    filter_read_permission = False


class JobTemplateAccessList(ResourceAccessList):
@@ -2806,16 +2850,7 @@ class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, Su
    relationship = ''
    enforce_parent_relationship = 'workflow_job_template'
    search_fields = ('unified_job_template__name', 'unified_job_template__description')

    '''
    Limit the set of WorkflowJobTemplateNodes to the related nodes of specified by
    'relationship'
    '''

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        return getattr(parent, self.relationship).all()
    filter_read_permission = False

    def is_valid_relation(self, parent, sub, created=False):
        if created:
@@ -2890,14 +2925,7 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
    parent_model = models.WorkflowJobNode
    relationship = ''
    search_fields = ('unified_job_template__name', 'unified_job_template__description')

    #
    # Limit the set of WorkflowJobNodes to the related nodes of specified by self.relationship
    #
    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        return getattr(parent, self.relationship).all()
    filter_read_permission = False


class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
@@ -3076,9 +3104,8 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
    relationship = 'workflow_job_template_nodes'
    parent_key = 'workflow_job_template'
    search_fields = ('unified_job_template__name', 'unified_job_template__description')

    def get_queryset(self):
        return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id')
    ordering = ('id',)  # assure ordering by id for consistency
    filter_read_permission = False


class WorkflowJobTemplateJobsList(SubListAPIView):
@@ -3170,9 +3197,8 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
    relationship = 'workflow_job_nodes'
    parent_key = 'workflow_job'
    search_fields = ('unified_job_template__name', 'unified_job_template__description')

    def get_queryset(self):
        return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
    ordering = ('id',)  # assure ordering by id for consistency
    filter_read_permission = False


class WorkflowJobCancel(GenericCancelView):
@@ -3486,11 +3512,7 @@ class BaseJobHostSummariesList(SubListAPIView):
    relationship = 'job_host_summaries'
    name = _('Job Host Summaries List')
    search_fields = ('host_name',)

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
    filter_read_permission = False


class HostJobHostSummariesList(BaseJobHostSummariesList):

296 awx/api/views/analytics.py Normal file
@@ -0,0 +1,296 @@
import requests
import logging
import urllib.parse as urlparse

from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils import translation

from awx.api.generics import APIView, Response
from awx.api.permissions import AnalyticsPermission
from awx.api.versioning import reverse
from awx.main.utils import get_awx_version
from rest_framework import status

from collections import OrderedDict

AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
AWX_ANALYTICS_API_PREFIX = 'analytics'

ERROR_UPLOAD_NOT_ENABLED = "analytics-upload-not-enabled"
ERROR_MISSING_URL = "missing-url"
ERROR_MISSING_USER = "missing-user"
ERROR_MISSING_PASSWORD = "missing-password"
ERROR_NO_DATA_OR_ENTITLEMENT = "no-data-or-entitlement"
ERROR_NOT_FOUND = "not-found"
ERROR_UNAUTHORIZED = "unauthorized"
ERROR_UNKNOWN = "unknown"
ERROR_UNSUPPORTED_METHOD = "unsupported-method"

logger = logging.getLogger('awx.api.views.analytics')


class MissingSettings(Exception):
    """Settings are not correct Exception"""

    pass


class GetNotAllowedMixin(object):
    def get(self, request, format=None):
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)


class AnalyticsRootView(APIView):
    permission_classes = (AnalyticsPermission,)
    name = _('Automation Analytics')
    swagger_topic = 'Automation Analytics'

    def get(self, request, format=None):
        data = OrderedDict()
        data['authorized'] = reverse('api:analytics_authorized')
        data['reports'] = reverse('api:analytics_reports_list')
        data['report_options'] = reverse('api:analytics_report_options_list')
        data['adoption_rate'] = reverse('api:analytics_adoption_rate')
        data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
        data['event_explorer'] = reverse('api:analytics_event_explorer')
        data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
        data['host_explorer'] = reverse('api:analytics_host_explorer')
        data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
        data['job_explorer'] = reverse('api:analytics_job_explorer')
        data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
        data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
        data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
        data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
        data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
        data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
        data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
        return Response(data)


class AnalyticsGenericView(APIView):
    """
    Example:
        headers = {
            'Content-Type': 'application/json',
        }

        params = {
            'limit': '20',
            'offset': '0',
            'sort_by': 'name:asc',
        }

        json_data = {
            'limit': '20',
            'offset': '0',
            'sort_options': 'name',
            'sort_order': 'asc',
            'tags': [],
            'slug': [],
            'name': [],
            'description': '',
        }

        response = requests.post(f'{AUTOMATION_ANALYTICS_API_URL}/reports/', params=params,
                                 headers=headers, json=json_data)

        return Response(response.json(), status=response.status_code)
    """

    permission_classes = (AnalyticsPermission,)

    @staticmethod
    def _request_headers(request):
        headers = {}
        for header in ['Content-Type', 'Content-Length', 'Accept-Encoding', 'User-Agent', 'Accept']:
            if request.headers.get(header, None):
                headers[header] = request.headers.get(header)
        headers['X-Rh-Analytics-Source'] = 'controller'
        headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
        headers['Accept-Language'] = translation.get_language()

        return headers

    @staticmethod
    def _get_analytics_path(request_path):
        parts = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')
        path_specific = parts[-1]
        return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"

    def _get_analytics_url(self, request_path):
        analytics_path = self._get_analytics_path(request_path)
        url = getattr(settings, 'AUTOMATION_ANALYTICS_URL', None)
        if not url:
            raise MissingSettings(ERROR_MISSING_URL)
        url_parts = urlparse.urlsplit(url)
        analytics_url = urlparse.urlunsplit([url_parts.scheme, url_parts.netloc, analytics_path, url_parts.query, url_parts.fragment])
        return analytics_url

    @staticmethod
    def _get_setting(setting_name, default, error_message):
        setting = getattr(settings, setting_name, default)
        if not setting:
            raise MissingSettings(error_message)
        return setting

    @staticmethod
    def _error_response(keyword, message=None, remote=True, remote_status_code=None, status_code=status.HTTP_403_FORBIDDEN):
        text = {"error": {"remote": remote, "remote_status": remote_status_code, "keyword": keyword}}
        if message:
            text["error"]["message"] = message
        return Response(text, status=status_code)

    def _error_response_404(self, response):
        try:
            json_response = response.json()
            # Subscription/entitlement problem or missing tenant data in AA db => HTTP 403
            message = json_response.get('error', None)
            if message:
                return self._error_response(ERROR_NO_DATA_OR_ENTITLEMENT, message, remote=True, remote_status_code=response.status_code)

            # Standard 404 problem => HTTP 404
            message = json_response.get('detail', None) or response.text
        except requests.exceptions.JSONDecodeError:
            # Unexpected text => still HTTP 404
            message = response.text

        return self._error_response(ERROR_NOT_FOUND, message, remote=True, remote_status_code=status.HTTP_404_NOT_FOUND, status_code=status.HTTP_404_NOT_FOUND)

    @staticmethod
    def _update_response_links(json_response):
        if not json_response.get('links', None):
            return

        for key, value in json_response['links'].items():
            if value:
                json_response['links'][key] = value.replace(AUTOMATION_ANALYTICS_API_URL_PATH, f"/api/v2/{AWX_ANALYTICS_API_PREFIX}")

    def _forward_response(self, response):
        try:
            content_type = response.headers.get('content-type', '')
            if content_type.find('application/json') != -1:
                json_response = response.json()
                self._update_response_links(json_response)

                return Response(json_response, status=response.status_code)
        except Exception as e:
            logger.error(f"Analytics API: Response error: {e}")

        return Response(response.content, status=response.status_code)

    def _send_to_analytics(self, request, method):
        try:
            headers = self._request_headers(request)

            self._get_setting('INSIGHTS_TRACKING_STATE', False, ERROR_UPLOAD_NOT_ENABLED)
            url = self._get_analytics_url(request.path)
            rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)

            if method not in ["GET", "POST", "OPTIONS"]:
                return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                response = requests.request(
                    method,
                    url,
                    auth=(rh_user, rh_password),
                    verify=settings.INSIGHTS_CERT_PATH,
                    params=request.query_params,
                    headers=headers,
                    json=request.data,
                    timeout=(31, 31),
                )
                #
                # Missing or wrong user/pass
                #
                if response.status_code == status.HTTP_401_UNAUTHORIZED:
                    text = (response.text or '').rstrip("\n")
                    return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
                #
                # Not found, No entitlement or No data in Analytics
                #
                elif response.status_code == status.HTTP_404_NOT_FOUND:
                    return self._error_response_404(response)
                #
                # Success or not a 401/404 errors are just forwarded
                #
                else:
                    return self._forward_response(response)

        except MissingSettings as e:
            logger.warning(f"Analytics API: Setting missing: {e.args[0]}")
            return self._error_response(e.args[0], remote=False)
        except requests.exceptions.RequestException as e:
            logger.error(f"Analytics API: Request error: {e}")
            return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
        except Exception as e:
            logger.error(f"Analytics API: Error: {e}")
            return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)


class AnalyticsGenericListView(AnalyticsGenericView):
    def get(self, request, format=None):
        return self._send_to_analytics(request, method="GET")

    def post(self, request, format=None):
        return self._send_to_analytics(request, method="POST")

    def options(self, request, format=None):
        return self._send_to_analytics(request, method="OPTIONS")


class AnalyticsGenericDetailView(AnalyticsGenericView):
    def get(self, request, slug, format=None):
        return self._send_to_analytics(request, method="GET")

    def post(self, request, slug, format=None):
        return self._send_to_analytics(request, method="POST")

    def options(self, request, slug, format=None):
        return self._send_to_analytics(request, method="OPTIONS")


class AnalyticsAuthorizedView(AnalyticsGenericListView):
    name = _("Authorized")


class AnalyticsReportsList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Reports")
    swagger_topic = "Automation Analytics"


class AnalyticsReportDetail(AnalyticsGenericDetailView):
    name = _("Report")


class AnalyticsReportOptionsList(AnalyticsGenericListView):
    name = _("Report Options")


class AnalyticsAdoptionRateList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Adoption Rate")


class AnalyticsEventExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Event Explorer")


class AnalyticsHostExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Host Explorer")


class AnalyticsJobExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Job Explorer")


class AnalyticsProbeTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Probe Templates")


class AnalyticsProbeTemplateForHostsList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("Probe Template For Hosts")


class AnalyticsRoiTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
    name = _("ROI Templates")
69 awx/api/views/bulk.py Normal file
@@ -0,0 +1,69 @@
from collections import OrderedDict

from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.response import Response

from awx.main.models import UnifiedJob, Host
from awx.api.generics import (
    GenericAPIView,
    APIView,
)
from awx.api import (
    serializers,
    renderers,
)


class BulkView(APIView):
    permission_classes = [IsAuthenticated]
    renderer_classes = [
        renderers.BrowsableAPIRenderer,
        JSONRenderer,
    ]
    allowed_methods = ['GET', 'OPTIONS']

    def get(self, request, format=None):
        '''List top level resources'''
        data = OrderedDict()
        data['host_create'] = reverse('api:bulk_host_create', request=request)
        data['job_launch'] = reverse('api:bulk_job_launch', request=request)
        return Response(data)


class BulkJobLaunchView(GenericAPIView):
    permission_classes = [IsAuthenticated]
    model = UnifiedJob
    serializer_class = serializers.BulkJobLaunchSerializer
    allowed_methods = ['GET', 'POST', 'OPTIONS']

    def get(self, request):
        data = OrderedDict()
        data['detail'] = "Specify a list of unified job templates to launch alongside their launchtime parameters"
        return Response(data, status=status.HTTP_200_OK)

    def post(self, request):
        bulkjob_serializer = serializers.BulkJobLaunchSerializer(data=request.data, context={'request': request})
        if bulkjob_serializer.is_valid():
            result = bulkjob_serializer.create(bulkjob_serializer.validated_data)
            return Response(result, status=status.HTTP_201_CREATED)
        return Response(bulkjob_serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class BulkHostCreateView(GenericAPIView):
    permission_classes = [IsAuthenticated]
    model = Host
    serializer_class = serializers.BulkHostCreateSerializer
    allowed_methods = ['GET', 'POST', 'OPTIONS']

    def get(self, request):
        return Response({"detail": "Bulk create hosts with this endpoint"}, status=status.HTTP_200_OK)

    def post(self, request):
        serializer = serializers.BulkHostCreateSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            result = serializer.create(serializer.validated_data)
            return Response(result, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@@ -14,6 +14,7 @@ from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from rest_framework import serializers

# AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
@@ -31,6 +32,7 @@ from awx.api.views.labels import LabelSubListCreateAttachDetachView

from awx.api.serializers import (
    InventorySerializer,
    ConstructedInventorySerializer,
    ActivityStreamSerializer,
    RoleSerializer,
    InstanceGroupSerializer,
@@ -79,7 +81,9 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie

        # Do not allow changes to an Inventory kind.
        if kind is not None and obj.kind != kind:
            return Response(dict(error=_('You cannot turn a regular inventory into a "smart" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED)
            return Response(
                dict(error=_('You cannot turn a regular inventory into a "smart" or "constructed" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED
            )
        return super(InventoryDetail, self).update(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
@@ -94,6 +98,29 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
            return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)


class ConstructedInventoryDetail(InventoryDetail):
    serializer_class = ConstructedInventorySerializer


class ConstructedInventoryList(InventoryList):
    serializer_class = ConstructedInventorySerializer

    def get_queryset(self):
        r = super().get_queryset()
        return r.filter(kind='constructed')


class InventoryInputInventoriesList(SubListAttachDetachAPIView):
    model = Inventory
    serializer_class = InventorySerializer
    parent_model = Inventory
    relationship = 'input_inventories'

    def is_valid_relation(self, parent, sub, created=False):
        if sub.kind == 'constructed':
            raise serializers.ValidationError({'error': 'You cannot add a constructed inventory to another constructed inventory.'})


class InventoryActivityStreamList(SubListAPIView):
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
@@ -61,12 +61,6 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
    model = Organization
    serializer_class = OrganizationSerializer

    def get_queryset(self):
        qs = Organization.accessible_objects(self.request.user, 'read_role')
        qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
        qs = qs.prefetch_related('created_by', 'modified_by')
        return qs


class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
    model = Organization
@@ -207,6 +201,7 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
    serializer_class = InstanceGroupSerializer
    parent_model = Organization
    relationship = 'instance_groups'
    filter_read_permission = False


class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
@@ -214,6 +209,7 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
    serializer_class = CredentialSerializer
    parent_model = Organization
    relationship = 'galaxy_credentials'
    filter_read_permission = False

    def is_valid_relation(self, parent, sub, created=False):
        if sub.kind != 'galaxy_api_token':
@@ -98,10 +98,14 @@ class ApiVersionRootView(APIView):
        data['tokens'] = reverse('api:o_auth2_token_list', request=request)
        data['metrics'] = reverse('api:metrics_view', request=request)
        data['inventory'] = reverse('api:inventory_list', request=request)
        data['constructed_inventory'] = reverse('api:constructed_inventory_list', request=request)
        data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
        data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
        data['groups'] = reverse('api:group_list', request=request)
        data['hosts'] = reverse('api:host_list', request=request)
        data['host_metrics'] = reverse('api:host_metric_list', request=request)
        # It will be enabled in a future version of AWX
        # data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
        data['job_templates'] = reverse('api:job_template_list', request=request)
        data['jobs'] = reverse('api:job_list', request=request)
        data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -121,6 +125,8 @@ class ApiVersionRootView(APIView):
        data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)
        data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
        data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
        data['bulk'] = reverse('api:bulk', request=request)
        data['analytics'] = reverse('api:analytics_root_view', request=request)
        return Response(data)


@@ -271,6 +277,9 @@ class ApiV2ConfigView(APIView):

        pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'

        # Guarding against settings.UI_NEXT being set to a non-boolean value
        ui_next_state = settings.UI_NEXT if settings.UI_NEXT in (True, False) else False

        data = dict(
            time_zone=settings.TIME_ZONE,
            license_info=license_data,
@@ -279,6 +288,7 @@ class ApiV2ConfigView(APIView):
            analytics_status=pendo_state,
            analytics_collectors=all_collectors(),
            become_methods=PRIVILEGE_ESCALATION_METHODS,
            ui_next=ui_next_state,
        )

        # If LDAP is enabled, user_ldap_fields will return a list of field
@@ -5,11 +5,13 @@ import threading
import time
import os

from concurrent.futures import ThreadPoolExecutor

# Django
from django.conf import LazySettings
from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.db import transaction, connection
from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property
@@ -157,7 +159,7 @@ class EncryptedCacheProxy(object):
            obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
            if obj_id is empty:
                logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
                obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
                obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None)
            elif obj_id == SETTING_CACHE_NONE:
                obj_id = None
            return method(TransientSetting(pk=obj_id, value=value), 'value')
@@ -166,11 +168,6 @@ class EncryptedCacheProxy(object):
        # a no-op; it just returns the provided value
        return value

    def _get_setting_from_db(self, key):
        field = self.registry.get_setting_field(key)
        if not field.read_only:
            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()

    def __getattr__(self, name):
        return getattr(self.cache, name)

@@ -186,6 +183,22 @@ def get_settings_to_cache(registry):
    return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])


# First attempts to get the setting from the database in synchronous mode.
# If called from an async context, it falls back to fetching the setting from the database in a thread.
def _get_setting_from_db(registry, key):
    def get_settings_from_db_sync(registry, key):
        field = registry.get_setting_field(key)
        if not field.read_only or key == 'INSTALL_UUID':
            return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()

    try:
        return get_settings_from_db_sync(registry, key)
    except SynchronousOnlyOperation:
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(get_settings_from_db_sync, registry, key)
            return future.result()


def get_cache_value(value):
    """Returns the proper special cache setting for a value
    based on instance type.
@@ -345,7 +358,7 @@ class SettingsWrapper(UserSettingsHolder):
        setting_id = None
        # this value is read-only, however we *do* want to fetch its value from the database
        if not field.read_only or name == 'INSTALL_UUID':
            setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
            setting = _get_setting_from_db(self.registry, name)
            if setting:
                if getattr(field, 'encrypted', False):
                    value = decrypt_field(setting, 'value')
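The fallback above is a general Django idiom: the ORM raises SynchronousOnlyOperation when touched from a running event loop, and pushing the query into a worker thread restores a legal synchronous context. A minimal standalone sketch of the same pattern (the helper name is hypothetical):

from concurrent.futures import ThreadPoolExecutor
from django.core.exceptions import SynchronousOnlyOperation

def read_first(model, **filters):
    # Hypothetical helper mirroring _get_setting_from_db above
    def _sync():
        return model.objects.filter(**filters).first()

    try:
        return _sync()  # fine in plain synchronous code
    except SynchronousOnlyOperation:
        # Called from an async context: run the blocking ORM call in a thread
        with ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(_sync).result()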
@@ -94,9 +94,7 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):

@pytest.mark.django_db
def test_setting_singleton_update(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
    ):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
        api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        assert response.data['FOO_BAR'] == 3
@@ -112,7 +110,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
    # sure that the _Forbidden validator doesn't get used for the
    # fields. See also https://github.com/ansible/awx/issues/4099.
    with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request(
            'patch',
@@ -126,7 +124,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -136,7 +134,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
        assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
@@ -155,16 +153,14 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_settin

    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
        'foobar', func_raising_exception
    ), mock.patch('awx.conf.views.handle_setting_changes'):
    ), mock.patch('awx.conf.views.clear_setting_cache'):
        response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
        assert response.status_code == 400


@pytest.mark.django_db
def test_setting_singleton_delete(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
    ):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
        api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        assert not response.data['FOO_BAR']
@@ -173,7 +169,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
@pytest.mark.django_db
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
    with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
        'awx.conf.views.handle_setting_changes'
        'awx.conf.views.clear_setting_cache'
    ):
        api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
        response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -26,10 +26,11 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.api.versioning import reverse
from awx.main.utils import camelcase_to_underscore
from awx.main.tasks.system import handle_setting_changes
from awx.main.tasks.system import clear_setting_cache
from awx.conf.models import Setting
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
from awx.conf import settings_registry
from awx.main.utils.external_logging import reconfigure_rsyslog


SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
@@ -118,7 +119,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
                setting.save(update_fields=['value'])
                settings_change_list.append(key)
        if settings_change_list:
            connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
            connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
            if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
                # notify rsyslog; no data is needed, so the payload is empty
                reconfigure_rsyslog.delay()

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
@@ -133,7 +137,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
                setting.delete()
                settings_change_list.append(setting.key)
        if settings_change_list:
            connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
            connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
            if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
                # notify rsyslog; no data is needed, so the payload is empty
                reconfigure_rsyslog.delay()

        # When TOWER_URL_BASE is deleted from the API, reset it to the hostname
        # used to make the request as a default.
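The on_commit/lambda/.delay combination used twice above is worth calling out: wrapping the dispatch in connection.on_commit means the task fires only after the surrounding transaction commits, so a worker can never observe the cache invalidation before the new setting row is visible. A minimal sketch of the same idiom using the task from this change:

from django.db import connection, transaction
from awx.main.tasks.system import clear_setting_cache

def save_and_invalidate(setting):
    with transaction.atomic():
        setting.save(update_fields=['value'])
        # The lambda defers evaluation; .delay() runs only once COMMIT succeeds
        connection.on_commit(lambda: clear_setting_cache.delay([setting.key]))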
@@ -588,17 +588,39 @@ class InstanceAccess(BaseAccess):


class InstanceGroupAccess(BaseAccess):
    """
    I can see Instance Groups when I am:
     - a superuser (system administrator)
     - at least read_role on the instance group
    I can edit Instance Groups when I am:
     - a superuser
     - admin role on the instance group
    I can add/delete Instance Groups when I am:
     - a superuser (system administrator)
    I can use Instance Groups when I have:
     - use_role on the instance group
    """

    model = InstanceGroup
    prefetch_related = ('instances',)

    def filtered_queryset(self):
        return InstanceGroup.objects.filter(organization__in=Organization.accessible_pk_qs(self.user, 'admin_role')).distinct()
        return self.model.accessible_objects(self.user, 'read_role')

    @check_superuser
    def can_use(self, obj):
        return self.user in obj.use_role

    def can_add(self, data):
        return self.user.is_superuser

    @check_superuser
    def can_change(self, obj, data):
        return self.user.is_superuser
        return self.can_admin(obj)

    @check_superuser
    def can_admin(self, obj):
        return self.user in obj.admin_role

    def can_delete(self, obj):
        if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
@@ -845,7 +867,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
            return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)

        if relationship == "instance_groups":
            if self.user.is_superuser:
            if self.user in obj.admin_role and self.user in sub_obj.use_role:
                return True
            return False
        return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@@ -934,7 +956,7 @@ class InventoryAccess(BaseAccess):

    def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
        if relationship == "instance_groups":
            if self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role:
            if self.user in sub_obj.use_role and self.user in obj.admin_role:
                return True
            return False
        return super(InventoryAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@@ -1671,11 +1693,12 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
        return self.user.is_superuser or self.user in obj.admin_role

    @check_superuser
    # object here is the job template. sub_object here is what is being attached
    def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
        if relationship == "instance_groups":
            if not obj.organization:
                return False
            return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
            return self.user in sub_obj.use_role and self.user in obj.admin_role
        return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)

    @check_superuser
@@ -1852,8 +1875,6 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
    def _related_filtered_queryset(self, cls):
        if cls is Label:
            return LabelAccess(self.user).filtered_queryset()
        elif cls is InstanceGroup:
            return InstanceGroupAccess(self.user).filtered_queryset()
        else:
            return cls._accessible_pk_qs(cls, self.user, 'use_role')

@@ -1865,6 +1886,7 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):

    @check_superuser
    def can_add(self, data, template=None):
        # WARNING: duplicated with BulkJobLaunchSerializer, check when changing permission levels
        # This is a special case, we don't check related many-to-many elsewhere
        # launch RBAC checks use this
        if 'reference_obj' in data:
@@ -1997,7 +2019,16 @@ class WorkflowJobNodeAccess(BaseAccess):
        )

    def filtered_queryset(self):
        return self.model.objects.filter(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
        return self.model.objects.filter(
            Q(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
            | Q(workflow_job__organization__in=Organization.objects.filter(Q(admin_role__members=self.user)))
        )

    def can_read(self, obj):
        """Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
        if obj.workflow_job.is_bulk_job and obj.workflow_job.created_by_id == self.user.id:
            return True
        return super().can_read(obj)

    @check_superuser
    def can_add(self, data):
@@ -2123,7 +2154,16 @@ class WorkflowJobAccess(BaseAccess):
        )

    def filtered_queryset(self):
        return WorkflowJob.objects.filter(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
        return WorkflowJob.objects.filter(
            Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
            | Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
        )

    def can_read(self, obj):
        """Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
        if obj.is_bulk_job and obj.created_by_id == self.user.id:
            return True
        return super().can_read(obj)

    def can_add(self, data):
        # Old add-start system for launching jobs is being deprecated, and
@@ -2912,3 +2952,19 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
for cls in BaseAccess.__subclasses__():
    access_registry[cls.model] = cls
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess


def optimize_queryset(queryset):
    """
    A utility method in case you already have a queryset and just want to
    apply the standard optimizations for that model.
    In other words, use it if you do not want to start from filtered_queryset for some reason.
    """
    if not queryset.model or queryset.model not in access_registry:
        return queryset
    access_class = access_registry[queryset.model]
    if access_class.select_related:
        queryset = queryset.select_related(*access_class.select_related)
    if access_class.prefetch_related:
        queryset = queryset.prefetch_related(*access_class.prefetch_related)
    return queryset
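A short usage sketch for the new helper above: it layers the per-model select_related/prefetch_related declared on the registered access class onto any queryset you already hold, without forcing you through filtered_queryset. The filter value is illustrative.

from awx.main.access import optimize_queryset
from awx.main.models import InstanceGroup

qs = InstanceGroup.objects.filter(name__startswith='prod-')
# Picks up InstanceGroupAccess.prefetch_related = ('instances',) from the registry
qs = optimize_queryset(qs)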
@@ -4,11 +4,11 @@ import logging
# AWX
from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename

logger = logging.getLogger('awx.main.scheduler')


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def send_subsystem_metrics():
    Metrics().send_metrics()
@@ -65,7 +65,7 @@ class FixedSlidingWindow:
        return sum(self.buckets.values()) or 0


class BroadcastWebsocketStatsManager:
class RelayWebsocketStatsManager:
    def __init__(self, event_loop, local_hostname):
        self._local_hostname = local_hostname

@@ -74,7 +74,7 @@ class BroadcastWebsocketStatsManager:
        self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME

    def new_remote_host_stats(self, remote_hostname):
        self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
        self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname)
        return self._stats[remote_hostname]

    def delete_remote_host_stats(self, remote_hostname):
@@ -107,7 +107,7 @@ class BroadcastWebsocketStatsManager:
        return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))


class BroadcastWebsocketStats:
class RelayWebsocketStats:
    def __init__(self, local_hostname, remote_hostname):
        self._local_hostname = local_hostname
        self._remote_hostname = remote_hostname
@@ -6,7 +6,7 @@ import platform
import distro

from django.db import connection
from django.db.models import Count
from django.db.models import Count, Min
from django.conf import settings
from django.contrib.sessions.models import Session
from django.utils.timezone import now, timedelta
@@ -35,7 +35,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
"""


def trivial_slicing(key, since, until, last_gather):
def trivial_slicing(key, since, until, last_gather, **kwargs):
    if since is not None:
        return [(since, until)]

@@ -48,7 +48,7 @@ def trivial_slicing(key, since, until, last_gather):
    return [(last_entry, until)]


def four_hour_slicing(key, since, until, last_gather):
def four_hour_slicing(key, since, until, last_gather, **kwargs):
    if since is not None:
        last_entry = since
    else:
@@ -69,6 +69,54 @@ def four_hour_slicing(key, since, until, last_gather):
        start = end


def host_metric_slicing(key, since, until, last_gather, **kwargs):
    """
    Slicing doesn't start 4 weeks ago; instead the whole table is sent monthly, or on the first run
    """
    from awx.main.models.inventory import HostMetric

    if since is not None:
        return [(since, until)]

    from awx.conf.models import Setting

    # Check if a full sync should be done
    full_sync_enabled = kwargs.get('full_sync_enabled', False)
    last_entry = None
    if not full_sync_enabled:
        #
        # If not, try incremental sync first
        #
        last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
        last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
        last_entry = last_entries.get(key)
        if not last_entry:
            #
            # If not done before, switch to full sync
            #
            full_sync_enabled = True

    if full_sync_enabled:
        #
        # Find the lowest date for full sync
        #
        min_dates = HostMetric.objects.aggregate(min_last_automation=Min('last_automation'), min_last_deleted=Min('last_deleted'))
        if min_dates['min_last_automation'] and min_dates['min_last_deleted']:
            last_entry = min(min_dates['min_last_automation'], min_dates['min_last_deleted'])
        elif min_dates['min_last_automation'] or min_dates['min_last_deleted']:
            last_entry = min_dates['min_last_automation'] or min_dates['min_last_deleted']

    if not last_entry:
        # empty table
        return []

    start, end = last_entry, None
    while start < until:
        end = min(start + timedelta(days=30), until)
        yield (start, end)
        start = end


def _identify_lower(key, since, until, last_gather):
    from awx.conf.models import Setting

@@ -83,7 +131,7 @@ def _identify_lower(key, since, until, last_gather):
    return lower, last_entries


@register('config', '1.4', description=_('General platform configuration.'))
@register('config', '1.6', description=_('General platform configuration.'))
def config(since, **kwargs):
    license_info = get_license()
    install_type = 'traditional'
@@ -107,10 +155,13 @@ def config(since, **kwargs):
        'subscription_name': license_info.get('subscription_name'),
        'sku': license_info.get('sku'),
        'support_level': license_info.get('support_level'),
        'usage': license_info.get('usage'),
        'product_name': license_info.get('product_name'),
        'valid_key': license_info.get('valid_key'),
        'satellite': license_info.get('satellite'),
        'pool_id': license_info.get('pool_id'),
        'subscription_id': license_info.get('subscription_id'),
        'account_number': license_info.get('account_number'),
        'current_instances': license_info.get('current_instances'),
        'automated_instances': license_info.get('automated_instances'),
        'automated_since': license_info.get('automated_since'),
@@ -119,6 +170,7 @@ def config(since, **kwargs):
        'compliant': license_info.get('compliant'),
        'date_warning': license_info.get('date_warning'),
        'date_expired': license_info.get('date_expired'),
        'subscription_usage_model': getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', ''),  # 1.5+
        'free_instances': license_info.get('free_instances', 0),
        'total_licensed_instances': license_info.get('instance_count', 0),
        'license_expiry': license_info.get('time_remaining', 0),
@@ -233,11 +285,13 @@ def projects_by_scm_type(since, **kwargs):
    return counts


@register('instance_info', '1.2', description=_('Cluster topology and capacity'))
@register('instance_info', '1.3', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs):
    info = {}
    # Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
    tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
    tm_models = TaskManagerModels.init_with_consumed_capacity(
        instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled', 'node_type']
    )
    for tm_instance in tm_models.instances.instances_by_hostname.values():
        instance = tm_instance.obj
        instance_info = {
@@ -534,3 +588,25 @@ def workflow_job_template_node_table(since, full_path, **kwargs):
    ) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
    ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
    return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)


@register(
    'host_metric_table', '1.0', format='csv', description=_('Host Metric data, incremental/full sync'), expensive=host_metric_slicing, full_sync_interval=30
)
def host_metric_table(since, full_path, until, **kwargs):
    host_metric_query = '''COPY (SELECT main_hostmetric.id,
                                        main_hostmetric.hostname,
                                        main_hostmetric.first_automation,
                                        main_hostmetric.last_automation,
                                        main_hostmetric.last_deleted,
                                        main_hostmetric.deleted,
                                        main_hostmetric.automated_counter,
                                        main_hostmetric.deleted_counter,
                                        main_hostmetric.used_in_inventories
                                 FROM main_hostmetric
                                 WHERE (main_hostmetric.last_automation > '{}' AND main_hostmetric.last_automation <= '{}') OR
                                       (main_hostmetric.last_deleted > '{}' AND main_hostmetric.last_deleted <= '{}')
                                 ORDER BY main_hostmetric.id ASC) TO STDOUT WITH CSV HEADER'''.format(
        since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
    )
    return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
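As a sanity check on the slicing above: since the function body contains a yield, calling it returns a generator, and the full-sync branch walks forward from the oldest HostMetric date in 30-day windows. The arithmetic in isolation (dates illustrative):

from datetime import datetime, timedelta

start, until = datetime(2023, 1, 1), datetime(2023, 3, 15)
while start < until:
    end = min(start + timedelta(days=30), until)
    print(start.date(), '->', end.date())
    start = end
# 2023-01-01 -> 2023-01-31
# 2023-01-31 -> 2023-03-02
# 2023-03-02 -> 2023-03-15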
@@ -52,7 +52,7 @@ def all_collectors():
    }


def register(key, version, description=None, format='json', expensive=None):
def register(key, version, description=None, format='json', expensive=None, full_sync_interval=None):
    """
    A decorator used to register a function as a metric collector.

@@ -71,6 +71,7 @@ def register(key, version, description=None, format='json', expensive=None):
        f.__awx_analytics_description__ = description
        f.__awx_analytics_type__ = format
        f.__awx_expensive__ = expensive
        f.__awx_full_sync_interval__ = full_sync_interval
        return f

    return decorate
@@ -259,10 +260,19 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
            # These slicer functions may return a generator. The `since` parameter is
            # allowed to be None, and will fall back to LAST_ENTRIES[key] or to
            # LAST_GATHER (truncated appropriately to match the 4-week limit).
            #
            # Or it can force a full table sync if an interval is given
            kwargs = dict()
            full_sync_enabled = False
            if func.__awx_full_sync_interval__:
                last_full_sync = last_entries.get(f"{key}_full")
                full_sync_enabled = not last_full_sync or last_full_sync < now() - timedelta(days=func.__awx_full_sync_interval__)

            kwargs['full_sync_enabled'] = full_sync_enabled
            if func.__awx_expensive__:
                slices = func.__awx_expensive__(key, since, until, last_gather)
                slices = func.__awx_expensive__(key, since, until, last_gather, **kwargs)
            else:
                slices = collectors.trivial_slicing(key, since, until, last_gather)
                slices = collectors.trivial_slicing(key, since, until, last_gather, **kwargs)

            for start, end in slices:
                files = func(start, full_path=gather_dir, until=end)
@@ -301,6 +311,12 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
                    succeeded = False
                    logger.exception("Could not generate metric {}".format(filename))

            # update full sync timestamp if successfully shipped
            if full_sync_enabled and collection_type != 'dry-run' and succeeded:
                with disable_activity_stream():
                    last_entries[f"{key}_full"] = now()
                    settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)

            if collection_type != 'dry-run':
                if succeeded:
                    for fpath in tarfiles:
@@ -359,9 +375,7 @@ def ship(path):
    s.headers = get_awx_http_client_headers()
    s.headers.pop('Content-Type')
    with set_environ(**settings.AWX_TASK_ENV):
        response = s.post(
            url, files=files, verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)
        )
        response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
    # Accept 2XX status_codes
    if response.status_code >= 300:
        logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
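Tying the decorator to the gather loop: a collector opts into the full-sync machinery purely through registration metadata; gather() then reads __awx_full_sync_interval__ and passes full_sync_enabled down to the slicer. A schematic registration (the collector name and body are hypothetical):

@register(
    'my_table',                     # key used in LAST_ENTRIES bookkeeping ('my_table' and 'my_table_full')
    '1.0',
    format='csv',
    expensive=host_metric_slicing,  # slicer consulted for (start, end) windows
    full_sync_interval=30,          # force a full table sync every 30 days
)
def my_table(since, full_path, until, **kwargs):
    return []  # a real collector writes CSVs under full_path and returns their paths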
@@ -9,7 +9,7 @@ from django.apps import apps
from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing

root_key = 'awx_metrics'
root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
logger = logging.getLogger('awx.main.analytics')


@@ -264,13 +264,6 @@ class Metrics:
            data[field] = self.METRICS[field].decode(self.conn)
        return data

    def store_metrics(self, data_json):
        # called when receiving metrics from other instances
        data = json.loads(data_json)
        if self.instance_name != data['instance']:
            logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
        self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])

    def should_pipe_execute(self):
        if self.metrics_have_changed is False:
            return False
@@ -305,13 +298,15 @@ class Metrics:
        try:
            current_time = time.time()
            if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval:
                serialized_metrics = self.serialize_local_metrics()
                payload = {
                    'instance': self.instance_name,
                    'metrics': self.serialize_local_metrics(),
                    'metrics': serialized_metrics,
                }
                # store a local copy as well
                self.store_metrics(json.dumps(payload))
                # store the serialized data locally as well, so that load_other_metrics will read it
                self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
                emit_channel_notification("metrics", payload)

                self.previous_send_metrics.set(current_time)
                self.previous_send_metrics.store_value(self.conn)
        finally:
@@ -10,7 +10,7 @@ from rest_framework import serializers
# AWX
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment

from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS

logger = logging.getLogger('awx.main.conf')

@@ -282,6 +282,16 @@ register(
    placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
)

register(
    'AWX_RUNNER_KEEPALIVE_SECONDS',
    field_class=fields.IntegerField,
    label=_('K8S Ansible Runner Keep-Alive Message Interval'),
    help_text=_('Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open.'),
    category=_('Jobs'),
    category_slug='jobs',
    placeholder=240,  # intended to be under common 5 minute idle timeout
)

register(
    'GALAXY_TASK_ENV',
    field_class=fields.KeyValueField,
@@ -765,6 +775,62 @@ register(
    help_text=_('Indicates whether the instance is part of a kubernetes-based deployment.'),
)

register(
    'BULK_JOB_MAX_LAUNCH',
    field_class=fields.IntegerField,
    default=100,
    label=_('Max jobs to allow bulk jobs to launch'),
    help_text=_('Max jobs to allow bulk jobs to launch'),
    category=_('Bulk Actions'),
    category_slug='bulk',
)

register(
    'BULK_HOST_MAX_CREATE',
    field_class=fields.IntegerField,
    default=100,
    label=_('Max number of hosts to allow to be created in a single bulk action'),
    help_text=_('Max number of hosts to allow to be created in a single bulk action'),
    category=_('Bulk Actions'),
    category_slug='bulk',
)

register(
    'UI_NEXT',
    field_class=fields.BooleanField,
    default=False,
    label=_('Enable Preview of New User Interface'),
    help_text=_('Enable preview of new user interface.'),
    category=_('System'),
    category_slug='system',
)

register(
    'SUBSCRIPTION_USAGE_MODEL',
    field_class=fields.ChoiceField,
    choices=[
        ('', _('Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting')),
        (
            SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS,
            _('Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes'),
        ),
    ],
    default='',
    allow_blank=True,
    label=_('Defines subscription usage model and shows Host Metrics'),
    category=_('System'),
    category_slug='system',
)

register(
    'CLEANUP_HOST_METRICS_LAST_TS',
    field_class=fields.DateTimeField,
    label=_('Last cleanup date for HostMetrics'),
    allow_null=True,
    category=_('System'),
    category_slug='system',
)


def logging_validate(serializer, attrs):
    if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
@@ -38,6 +38,8 @@ STANDARD_INVENTORY_UPDATE_ENV = {
    'ANSIBLE_INVENTORY_EXPORT': 'True',
    # Redirecting output to stderr allows JSON parsing to still work with -vvv
    'ANSIBLE_VERBOSE_TO_STDERR': 'True',
    # if ansible-inventory --limit is used for an inventory import, unmatched should be a failure
    'ANSIBLE_HOST_PATTERN_MISMATCH': 'error',
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
@@ -63,7 +65,7 @@ ENV_BLOCKLIST = frozenset(
        'INVENTORY_HOSTVARS',
        'AWX_HOST',
        'PROJECT_REVISION',
        'SUPERVISOR_WEB_CONFIG_PATH',
        'SUPERVISOR_CONFIG_PATH',
    )
)

@@ -106,3 +108,9 @@ JOB_VARIABLE_PREFIXES = [
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
    '\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
)

# Values for setting SUBSCRIPTION_USAGE_MODEL
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'

# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')
@@ -3,6 +3,7 @@ import logging
import time
import hmac
import asyncio
import redis

from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
@@ -80,7 +81,7 @@ class WebsocketSecretAuthHelper:
        WebsocketSecretAuthHelper.verify_secret(secret)


class BroadcastConsumer(AsyncJsonWebsocketConsumer):
class RelayConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        try:
            WebsocketSecretAuthHelper.is_authorized(self.scope)
@@ -100,6 +101,21 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
    async def internal_message(self, event):
        await self.send(event['text'])

    async def receive_json(self, data):
        (group, message) = unwrap_broadcast_msg(data)
        if group == "metrics":
            message = json.loads(message['text'])
            conn = redis.Redis.from_url(settings.BROKER_URL)
            conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
        else:
            await self.channel_layer.group_send(group, message)

    async def consumer_subscribe(self, event):
        await self.send_json(event)

    async def consumer_unsubscribe(self, event):
        await self.send_json(event)


class EventConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
@@ -128,6 +144,11 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
                self.channel_name,
            )

        await self.channel_layer.group_send(
            settings.BROADCAST_WEBSOCKET_GROUP_NAME,
            {"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
        )

    @database_sync_to_async
    def user_can_see_object_id(self, user_access, oid):
        # At this point user is a channels.auth.UserLazyObject object
@@ -176,9 +197,20 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
                self.channel_name,
            )

        if len(old_groups):
            await self.channel_layer.group_send(
                settings.BROADCAST_WEBSOCKET_GROUP_NAME,
                {"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
            )

        new_groups_exclusive = new_groups - current_groups
        for group_name in new_groups_exclusive:
            await self.channel_layer.group_add(group_name, self.channel_name)

        await self.channel_layer.group_send(
            settings.BROADCAST_WEBSOCKET_GROUP_NAME,
            {"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
        )
        self.scope['session']['groups'] = new_groups
        await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})

@@ -200,9 +232,11 @@ def _dump_payload(payload):
    return None


def emit_channel_notification(group, payload):
    from awx.main.wsbroadcast import wrap_broadcast_msg  # noqa
def unwrap_broadcast_msg(payload: dict):
    return (payload['group'], payload['message'])


def emit_channel_notification(group, payload):
    payload_dumped = _dump_payload(payload)
    if payload_dumped is None:
        return
@@ -212,16 +246,6 @@ def emit_channel_notification(group, payload):
    run_sync(
        channel_layer.group_send(
            group,
            {"type": "internal.message", "text": payload_dumped},
        )
    )

    run_sync(
        channel_layer.group_send(
            settings.BROADCAST_WEBSOCKET_GROUP_NAME,
            {
                "type": "internal.message",
                "text": wrap_broadcast_msg(group, payload_dumped),
            },
            {"type": "internal.message", "text": payload_dumped, "needs_relay": True},
        )
    )
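For readers tracing the relay path: judging from unwrap_broadcast_msg above, a relayed frame carries exactly a group name and the channel-layer message, with the 'metrics' group special-cased into a Redis write. A rough, illustrative shape of one frame as RelayConsumer.receive_json would see it (values invented):

frame = {
    'group': 'jobs',  # channel-layer group to forward to, or the special 'metrics' group
    'message': {'type': 'internal.message', 'text': '{"status": "running"}'},
}
group, message = frame['group'], frame['message']  # what unwrap_broadcast_msg returns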
@@ -54,6 +54,12 @@ aim_inputs = {
        'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
    },
    {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
    {
        'id': 'object_property',
        'label': _('Object Property'),
        'type': 'string',
        'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
    },
    {
        'id': 'reason',
        'label': _('Reason'),
@@ -74,6 +80,7 @@ def aim_backend(**kwargs):
    app_id = kwargs['app_id']
    object_query = kwargs['object_query']
    object_query_format = kwargs['object_query_format']
    object_property = kwargs.get('object_property', '')
    reason = kwargs.get('reason', None)
    if webservice_id == '':
        webservice_id = 'AIMWebService'
@@ -98,7 +105,18 @@ def aim_backend(**kwargs):
        allow_redirects=False,
    )
    raise_for_status(res)
    return res.json()['Content']
    # CCP returns the property name capitalized, username is camel case
    # so we need to handle that case
    if object_property == '':
        object_property = 'Content'
    elif object_property.lower() == 'username':
        object_property = 'UserName'
    elif object_property not in res:
        raise KeyError('Property {} not found in object'.format(object_property))
    else:
        object_property = object_property.capitalize()

    return res.json()[object_property]


aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)

@@ -35,8 +35,14 @@ dsv_inputs = {
        'type': 'string',
        'help_text': _('The secret path e.g. /test/secret1'),
    },
    {
        'id': 'secret_field',
        'label': _('Secret Field'),
        'help_text': _('The field to extract from the secret'),
        'type': 'string',
    },
    ],
    'required': ['tenant', 'client_id', 'client_secret', 'path'],
    'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
}

if settings.DEBUG:
@@ -52,5 +58,5 @@ if settings.DEBUG:
dsv_plugin = CredentialPlugin(
    'Thycotic DevOps Secrets Vault',
    dsv_inputs,
    lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
    lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']],  # fmt: skip
)
@@ -1,7 +1,7 @@
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _

from thycotic.secrets.server import PasswordGrantAuthorizer, SecretServer, ServerSecret
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret

tss_inputs = {
    'fields': [
@@ -17,6 +17,12 @@ tss_inputs = {
            'help_text': _('The (Application) user username'),
            'type': 'string',
        },
        {
            'id': 'domain',
            'label': _('Domain'),
            'help_text': _('The (Application) user domain'),
            'type': 'string',
        },
        {
            'id': 'password',
            'label': _('Password'),
@@ -44,12 +50,18 @@ tss_inputs = {


def tss_backend(**kwargs):
    authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
    if 'domain' in kwargs:
        authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
    else:
        authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
    secret_server = SecretServer(kwargs['server_url'], authorizer)
    secret_dict = secret_server.get_secret(kwargs['secret_id'])
    secret = ServerSecret(**secret_dict)

    return secret.fields[kwargs['secret_field']].value
    if isinstance(secret.fields[kwargs['secret_field']].value, str) == False:
        return secret.fields[kwargs['secret_field']].value.text
    else:
        return secret.fields[kwargs['secret_field']].value


tss_plugin = CredentialPlugin(
@@ -63,7 +63,7 @@ class RecordedQueryLog(object):
        if not os.path.isdir(self.dest):
            os.makedirs(self.dest)
        progname = ' '.join(sys.argv)
        for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
        for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'):
            if match in progname:
                progname = match
                break
@@ -1,12 +1,14 @@
import os
import psycopg2
import select

from contextlib import contextmanager

from awx.settings.application_name import get_application_name

from django.conf import settings
from django.db import connection as pg_connection


NOT_READY = ([], [], [])


@@ -14,6 +16,29 @@ def get_local_queuename():
    return settings.CLUSTER_HOST_ID


def get_task_queuename():
    if os.getenv('AWX_COMPONENT') != 'web':
        return settings.CLUSTER_HOST_ID

    from awx.main.models.ha import Instance

    random_task_instance = (
        Instance.objects.filter(
            node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
            node_state=Instance.States.READY,
            enabled=True,
        )
        .only('hostname')
        .order_by('?')
        .first()
    )

    if random_task_instance is None:
        raise ValueError('No task instances are READY and Enabled.')

    return random_task_instance.hostname


class PubSub(object):
    def __init__(self, conn):
        self.conn = conn
@@ -60,10 +85,11 @@ def pg_bus_conn(new_connection=False):
    '''

    if new_connection:
        conf = settings.DATABASES['default']
        conn = psycopg2.connect(
            dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
        )
        conf = settings.DATABASES['default'].copy()
        conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
        # Modify the application name to distinguish from other connections the process might use
        conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
        conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
        # Django connection.cursor().connection doesn't have autocommit=True on by default
        conn.set_session(autocommit=True)
    else:
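The helper above is designed to be handed to the dispatcher's task decorator as a callable, as the scheduler change earlier in this set does, so the queue is resolved at dispatch time. A hedged sketch (the task itself is hypothetical):

from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename

# Under AWX_COMPONENT=web this routes work to a random READY control/hybrid
# node instead of the local hostname, which runs no dispatcher.
@task(queue=get_task_queuename)
def some_background_work():
    pass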
@@ -6,7 +6,7 @@ from django.conf import settings
from django.db import connection
import redis

from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename

from . import pg_bus_conn

@@ -21,7 +21,7 @@ class Control(object):
        if service not in self.services:
            raise RuntimeError('{} must be in {}'.format(service, self.services))
        self.service = service
        self.queuename = host or get_local_queuename()
        self.queuename = host or get_task_queuename()

    def status(self, *args, **kwargs):
        r = redis.Redis.from_url(settings.BROKER_URL)
@@ -10,6 +10,7 @@ from django_guid import set_guid
from django_guid.utils import generate_guid

from awx.main.dispatch.worker import TaskWorker
from awx.main.utils.db import set_connection_name

logger = logging.getLogger('awx.main.dispatch.periodic')

@@ -21,6 +22,9 @@ class Scheduler(Scheduler):
def run():
    ppid = os.getppid()
    logger.warning('periodic beat started')

    set_connection_name('periodic')  # set application_name to distinguish from other dispatcher processes

    while True:
        if os.getppid() != ppid:
            # if the parent PID changes, this process has been orphaned
@@ -70,7 +70,7 @@ def reap_waiting(instance=None, status='failed', job_explanation=None, grace_per
        reap_job(j, status, job_explanation=job_explanation)


def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None, ref_time=None):
    """
    Reap all jobs in running for this instance.
    """
@@ -79,9 +79,11 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
    else:
        hostname = instance.hostname
    workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
    jobs = UnifiedJob.objects.filter(
        Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
    )
    base_Q = Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
    if ref_time:
        jobs = UnifiedJob.objects.filter(base_Q & Q(started__lte=ref_time))
    else:
        jobs = UnifiedJob.objects.filter(base_Q)
    if excluded_uuids:
        jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
    for j in jobs:
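The new ref_time parameter lets a caller restrict reaping to jobs that started at or before a reference timestamp, so jobs dispatched after the caller's last liveness observation are left alone. A hedged usage sketch (the module path and the five-minute window are assumptions for illustration):

from datetime import timedelta
from django.utils.timezone import now
from awx.main.dispatch.reaper import reap
from awx.main.models.ha import Instance

my_instance = Instance.objects.me()  # the local instance, for illustration
# Only reap running jobs that started at least 5 minutes ago
reap(instance=my_instance, ref_time=now() - timedelta(minutes=5))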
@@ -18,6 +18,7 @@ from django.conf import settings
from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime
from awx.main.utils.db import set_connection_name

if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -219,6 +220,7 @@ class BaseWorker(object):
    def work_loop(self, queue, finished, idx, *args):
        ppid = os.getppid()
        signal_handler = WorkerSignalHandler()
        set_connection_name('worker')  # set application_name to distinguish from other dispatcher processes
        while not signal_handler.kill_now:
            # if the parent PID changes, this process has been orphaned
            # via e.g., segfault or sigkill, we should exit too
@@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
    `awx.main.dispatch.publish`.
    """

    @classmethod
    def resolve_callable(cls, task):
    @staticmethod
    def resolve_callable(task):
        """
        Transform a dotted notation task into an imported, callable function, e.g.,

@@ -46,7 +46,8 @@ class TaskWorker(BaseWorker):

        return _call

    def run_callable(self, body):
    @staticmethod
    def run_callable(body):
        """
        Given some AMQP message, import the correct Python code and run it.
        """
@@ -954,6 +954,16 @@ class OrderedManyToManyDescriptor(ManyToManyDescriptor):
             def get_queryset(self):
                 return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name)

+            def add(self, *objects):
+                if len(objects) > 1:
+                    raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
+                return super().add(*objects)
+
+            def remove(self, *objects):
+                if len(objects) > 1:
+                    raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
+                return super().remove(*objects)
+
         return OrderedManyRelatedManager

     return add_custom_queryset_to_many_related_manager(
@@ -971,13 +981,12 @@ class OrderedManyToManyField(models.ManyToManyField):
     by a special `position` column on the M2M table
     """

-    def _update_m2m_position(self, sender, **kwargs):
-        if kwargs.get('action') in ('post_add', 'post_remove'):
-            order_with_respect_to = None
-            for field in sender._meta.local_fields:
-                if isinstance(field, models.ForeignKey) and isinstance(kwargs['instance'], field.related_model):
-                    order_with_respect_to = field.name
-            for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: kwargs['instance'].pk})):
+    def _update_m2m_position(self, sender, instance, action, **kwargs):
+        if action in ('post_add', 'post_remove'):
+            descriptor = getattr(instance, self.name)
+            order_with_respect_to = descriptor.source_field_name
+
+            for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: instance.pk})):
                 if ig.position != i:
                     ig.position = i
                     ig.save()
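Since add() and remove() now refuse more than one object, ordered relations are built one membership at a time, which keeps the position column deterministic. A sketch with hypothetical objects:

    constructed.input_inventories.add(inv_a)  # position 0
    constructed.input_inventories.add(inv_b)  # position 1; the m2m_changed handler renumbers positions on every change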
22  awx/main/management/commands/cleanup_host_metrics.py  Normal file
@@ -0,0 +1,22 @@
from awx.main.models import HostMetric
from django.core.management.base import BaseCommand
from django.conf import settings


class Command(BaseCommand):
    """
    Run soft-deleting of HostMetrics
    """

    help = 'Run soft-deleting of HostMetrics'

    def add_arguments(self, parser):
        parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')

    def handle(self, *args, **options):
        months_ago = options.get('months-ago') or None

        if not months_ago:
            months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)

        HostMetric.cleanup_task(months_ago)
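A sketch of invoking the new command; the 18-month threshold is illustrative:

    # equivalent shell form: awx-manage cleanup_host_metrics --months-ago 18
    from django.core.management import call_command

    call_command('cleanup_host_metrics', '--months-ago', '18')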
@@ -1,5 +1,6 @@
-from django.core.management.base import BaseCommand, CommandError
+from awx.main.tasks.system import clear_setting_cache
 from django.conf import settings
+from django.core.management.base import BaseCommand, CommandError


 class Command(BaseCommand):
@@ -31,5 +32,7 @@ class Command(BaseCommand):
         else:
             raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')

+        clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])
+
     def handle(self, **options):
         self._enable_disable_auth(options.get('enable'), options.get('disable'))
@@ -1,53 +1,230 @@
 from django.core.management.base import BaseCommand
 import datetime
 from django.core.serializers.json import DjangoJSONEncoder
-from awx.main.models.inventory import HostMetric
+from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
+from awx.main.analytics.collectors import config
 import json
+import sys
+import tempfile
+import tarfile
+import csv
+
+CSV_PREFERRED_ROW_COUNT = 500000
+BATCHED_FETCH_COUNT = 10000


 class Command(BaseCommand):
     help = 'This is for offline licensing usage'

+    def host_metric_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
+        list_of_queryset = list(
+            result.values(
+                'id',
+                'hostname',
+                'first_automation',
+                'last_automation',
+                'last_deleted',
+                'automated_counter',
+                'deleted_counter',
+                'deleted',
+                'used_in_inventories',
+            ).order_by('first_automation')[offset : offset + limit]
+        )
+
+        return list_of_queryset
+
+    def host_metric_summary_monthly_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
+        list_of_queryset = list(
+            result.values(
+                'id',
+                'date',
+                'license_consumed',
+                'license_capacity',
+                'hosts_added',
+                'hosts_deleted',
+                'indirectly_managed_hosts',
+            ).order_by(
+                'date'
+            )[offset : offset + limit]
+        )
+
+        return list_of_queryset
+
+    def paginated_db_retrieval(self, type, filter_kwargs, rows_per_file):
+        offset = 0
+        list_of_queryset = []
+        while True:
+            if type == 'host_metric':
+                result = HostMetric.objects.filter(**filter_kwargs)
+                list_of_queryset = self.host_metric_queryset(result, offset, rows_per_file)
+            elif type == 'host_metric_summary_monthly':
+                result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
+                list_of_queryset = self.host_metric_summary_monthly_queryset(result, offset, rows_per_file)
+
+            if not list_of_queryset:
+                break
+            else:
+                yield list_of_queryset
+
+            offset += len(list_of_queryset)
+
+    def controlled_db_retrieval(self, type, filter_kwargs, offset=0, fetch_count=BATCHED_FETCH_COUNT):
+        if type == 'host_metric':
+            result = HostMetric.objects.filter(**filter_kwargs)
+            return self.host_metric_queryset(result, offset, fetch_count)
+        elif type == 'host_metric_summary_monthly':
+            result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
+            return self.host_metric_summary_monthly_queryset(result, offset, fetch_count)
+
+    def write_to_csv(self, csv_file, list_of_queryset, always_header, first_write=False, mode='a'):
+        with open(csv_file, mode, newline='') as output_file:
+            try:
+                keys = list_of_queryset[0].keys() if list_of_queryset else []
+                dict_writer = csv.DictWriter(output_file, keys)
+                if always_header or first_write:
+                    dict_writer.writeheader()
+                dict_writer.writerows(list_of_queryset)
+
+            except Exception as e:
+                print(e)
+
+    def csv_for_tar(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
+        for index, list_of_queryset in enumerate(self.paginated_db_retrieval(type, filter_kwargs, rows_per_file)):
+            csv_file = f'{temp_dir}/{type}{index+1}.csv'
+            arcname_file = f'{type}{index+1}.csv'
+
+            first_write = True if index == 0 else False
+
+            self.write_to_csv(csv_file, list_of_queryset, always_header, first_write, 'w')
+            yield csv_file, arcname_file
+
+    def csv_for_tar_batched_fetch(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
+        csv_iteration = 1
+
+        offset = 0
+        rows_written_per_csv = 0
+        to_fetch = BATCHED_FETCH_COUNT
+
+        while True:
+            list_of_queryset = self.controlled_db_retrieval(type, filter_kwargs, offset, to_fetch)
+
+            if not list_of_queryset:
+                break
+
+            csv_file = f'{temp_dir}/{type}{csv_iteration}.csv'
+            arcname_file = f'{type}{csv_iteration}.csv'
+            self.write_to_csv(csv_file, list_of_queryset, always_header)
+
+            offset += to_fetch
+            rows_written_per_csv += to_fetch
+            always_header = False
+
+            remaining_rows_per_csv = rows_per_file - rows_written_per_csv
+
+            if not remaining_rows_per_csv:
+                yield csv_file, arcname_file
+
+                rows_written_per_csv = 0
+                always_header = True
+                to_fetch = BATCHED_FETCH_COUNT
+                csv_iteration += 1
+            elif remaining_rows_per_csv < BATCHED_FETCH_COUNT:
+                to_fetch = remaining_rows_per_csv
+
+        if rows_written_per_csv:
+            yield csv_file, arcname_file
+
+    def config_for_tar(self, options, temp_dir):
+        config_json = json.dumps(config(options.get('since')))
+        config_file = f'{temp_dir}/config.json'
+        arcname_file = 'config.json'
+        with open(config_file, 'w') as f:
+            f.write(config_json)
+        return config_file, arcname_file
+
+    def output_json(self, options, filter_kwargs):
+        with tempfile.TemporaryDirectory() as temp_dir:
+            for csv_detail in self.csv_for_tar(temp_dir, options.get('json', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, True):
+                csv_file = csv_detail[0]
+
+                with open(csv_file) as f:
+                    reader = csv.DictReader(f)
+                    rows = list(reader)
+                    json_result = json.dumps(rows, cls=DjangoJSONEncoder)
+                    print(json_result)
+
+    def output_csv(self, options, filter_kwargs):
+        with tempfile.TemporaryDirectory() as temp_dir:
+            for csv_detail in self.csv_for_tar(temp_dir, options.get('csv', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, False):
+                csv_file = csv_detail[0]
+                with open(csv_file) as f:
+                    sys.stdout.write(f.read())
+
+    def output_tarball(self, options, filter_kwargs):
+        always_header = True
+        rows_per_file = options['rows_per_file'] or CSV_PREFERRED_ROW_COUNT
+
+        tar = tarfile.open("./host_metrics.tar.gz", "w:gz")
+
+        if rows_per_file <= BATCHED_FETCH_COUNT:
+            csv_function = self.csv_for_tar
+        else:
+            csv_function = self.csv_for_tar_batched_fetch
+
+        with tempfile.TemporaryDirectory() as temp_dir:
+            for csv_detail in csv_function(temp_dir, 'host_metric', filter_kwargs, rows_per_file, always_header):
+                tar.add(csv_detail[0], arcname=csv_detail[1])
+
+            for csv_detail in csv_function(temp_dir, 'host_metric_summary_monthly', filter_kwargs, rows_per_file, always_header):
+                tar.add(csv_detail[0], arcname=csv_detail[1])
+
+            config_file, arcname_file = self.config_for_tar(options, temp_dir)
+            tar.add(config_file, arcname=arcname_file)
+
+        tar.close()
+
     def add_arguments(self, parser):
         parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
         parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
-        parser.add_argument('--json', action='store_true', help='Select output as JSON')
+        parser.add_argument('--json', type=str, const='host_metric', nargs='?', help='Select output as JSON for host_metric or host_metric_summary_monthly')
+        parser.add_argument('--csv', type=str, const='host_metric', nargs='?', help='Select output as CSV for host_metric or host_metric_summary_monthly')
+        parser.add_argument('--tarball', action='store_true', help=f'Package CSV files into a tar with up to {CSV_PREFERRED_ROW_COUNT} rows')
+        parser.add_argument('--rows_per_file', type=int, help=f'Split rows in chunks of {CSV_PREFERRED_ROW_COUNT}')

     def handle(self, *args, **options):
         since = options.get('since')
         until = options.get('until')

         if since is None and until is None:
             print("No Arguments received")
             return None

         if since is not None and since.tzinfo is None:
             since = since.replace(tzinfo=datetime.timezone.utc)

         if until is not None and until.tzinfo is None:
             until = until.replace(tzinfo=datetime.timezone.utc)

         filter_kwargs = {}
         if since is not None:
             filter_kwargs['last_automation__gte'] = since
         if until is not None:
             filter_kwargs['last_automation__lte'] = until

         result = HostMetric.objects.filter(**filter_kwargs)
+        filter_kwargs_host_metrics_summary = {}
+        if since is not None:
+            filter_kwargs_host_metrics_summary['date__gte'] = since
+
+        if options['rows_per_file'] and options.get('rows_per_file') > CSV_PREFERRED_ROW_COUNT:
+            print(f"rows_per_file exceeds the allowable limit of {CSV_PREFERRED_ROW_COUNT}.")
+            return

         # if --json flag is set, output the result in json format
         if options['json']:
-            list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
-            json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
-            print(json_result)
+            self.output_json(options, filter_kwargs)
+        elif options['csv']:
+            self.output_csv(options, filter_kwargs)
+        elif options['tarball']:
+            self.output_tarball(options, filter_kwargs)

         # --json flag is not set, output in plain text
         else:
-            print(f"Total Number of hosts automated: {len(result)}")
-            for item in result:
+            print(f"Printing up to {BATCHED_FETCH_COUNT} automated hosts:")
+            result = HostMetric.objects.filter(**filter_kwargs)
+            list_of_queryset = self.host_metric_queryset(result, 0, BATCHED_FETCH_COUNT)
+            for item in list_of_queryset:
                 print(
                     "Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
-                        hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
+                        hostname=item['hostname'], first_automation=item['first_automation'], last_automation=item['last_automation']
                     )
                 )
         return
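Hedged examples of driving the reworked flags (assuming this is the host_metric management command; dates and row counts are illustrative):

    # equivalent shell forms: awx-manage host_metric --since 2023-01-01 --json ...
    from django.core.management import call_command

    call_command('host_metric', '--since', '2023-01-01', '--json', 'host_metric_summary_monthly')  # JSON dump of the monthly summary
    call_command('host_metric', '--since', '2023-01-01', '--csv')  # host metrics as CSV on stdout
    call_command('host_metric', '--since', '2023-01-01', '--tarball', '--rows_per_file', '250000')  # tarball of CSVs split at 250k rows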
@@ -458,12 +458,19 @@ class Command(BaseCommand):
         # TODO: We disable variable overwrite here in case user-defined inventory variables get
         # mangled. But we still need to figure out a better way of processing multiple inventory
         # update variables mixing with each other.
-        all_obj = self.inventory
-        db_variables = all_obj.variables_dict
-        db_variables.update(self.all_group.variables)
-        if db_variables != all_obj.variables_dict:
-            all_obj.variables = json.dumps(db_variables)
-            all_obj.save(update_fields=['variables'])
         # issue for this: https://github.com/ansible/awx/issues/11623
+
+        if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
+            # NOTE: we had to add an exception case to not merge variables
+            # to make constructed inventory coherent
+            db_variables = self.all_group.variables
+        else:
+            db_variables = self.inventory.variables_dict
+            db_variables.update(self.all_group.variables)
+
+        if db_variables != self.inventory.variables_dict:
+            self.inventory.variables = json.dumps(db_variables)
+            self.inventory.save(update_fields=['variables'])
             logger.debug('Inventory variables updated from "all" group')
         else:
             logger.debug('Inventory variables unmodified')
@@ -522,16 +529,32 @@ class Command(BaseCommand):
     def _update_db_host_from_mem_host(self, db_host, mem_host):
         # Update host variables.
         db_variables = db_host.variables_dict
-        if self.overwrite_vars:
-            db_variables = mem_host.variables
-        else:
-            db_variables.update(mem_host.variables)
+        mem_variables = mem_host.variables
         update_fields = []

+        # Update host instance_id.
+        instance_id = self._get_instance_id(mem_variables)
+        if instance_id != db_host.instance_id:
+            old_instance_id = db_host.instance_id
+            db_host.instance_id = instance_id
+            update_fields.append('instance_id')
+
+        if self.inventory.kind == 'constructed':
+            # remove towervars so the constructed hosts do not have extra variables
+            for prefix in ('host', 'tower'):
+                for var in ('remote_{}_enabled', 'remote_{}_id'):
+                    mem_variables.pop(var.format(prefix), None)
+
+        if self.overwrite_vars:
+            db_variables = mem_variables
+        else:
+            db_variables.update(mem_variables)
+
         if db_variables != db_host.variables_dict:
             db_host.variables = json.dumps(db_variables)
             update_fields.append('variables')
         # Update host enabled flag.
-        enabled = self._get_enabled(mem_host.variables)
+        enabled = self._get_enabled(mem_variables)
         if enabled is not None and db_host.enabled != enabled:
             db_host.enabled = enabled
             update_fields.append('enabled')
@@ -540,12 +563,6 @@ class Command(BaseCommand):
             old_name = db_host.name
             db_host.name = mem_host.name
             update_fields.append('name')
-        # Update host instance_id.
-        instance_id = self._get_instance_id(mem_host.variables)
-        if instance_id != db_host.instance_id:
-            old_instance_id = db_host.instance_id
-            db_host.instance_id = instance_id
-            update_fields.append('instance_id')
         # Update host and display message(s) on what changed.
         if update_fields:
             db_host.save(update_fields=update_fields)
@@ -654,13 +671,19 @@ class Command(BaseCommand):
             mem_host = self.all_group.all_hosts[mem_host_name]
             import_vars = mem_host.variables
             host_desc = import_vars.pop('_awx_description', 'imported')
-            host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
+            host_attrs = dict(description=host_desc)
             enabled = self._get_enabled(mem_host.variables)
             if enabled is not None:
                 host_attrs['enabled'] = enabled
             if self.instance_id_var:
                 instance_id = self._get_instance_id(mem_host.variables)
                 host_attrs['instance_id'] = instance_id
+            if self.inventory.kind == 'constructed':
+                # remove towervars so the constructed hosts do not have extra variables
+                for prefix in ('host', 'tower'):
+                    for var in ('remote_{}_enabled', 'remote_{}_id'):
+                        import_vars.pop(var.format(prefix), None)
+            host_attrs['variables'] = json.dumps(import_vars)
             try:
                 sanitize_jinja(mem_host_name)
             except ValueError as e:
@@ -851,6 +874,7 @@ class Command(BaseCommand):
         logger.info('Updating inventory %d: %s' % (inventory.pk, inventory.name))

         # Create ad-hoc inventory source and inventory update objects
+        ee = get_default_execution_environment()
         with ignore_inventory_computed_fields():
             source = Command.get_source_absolute_path(raw_source)

@@ -860,14 +884,22 @@ class Command(BaseCommand):
                 source_path=os.path.abspath(source),
                 overwrite=bool(options.get('overwrite', False)),
                 overwrite_vars=bool(options.get('overwrite_vars', False)),
+                execution_environment=ee,
             )
             inventory_update = inventory_source.create_inventory_update(
-                _eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
+                _eager_fields=dict(
+                    status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd(), execution_environment=ee
+                )
             )

-        data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
-
-        logger.debug('Finished loading from source: %s', source)
+        try:
+            data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
+            logger.debug('Finished loading from source: %s', source)
+        except SystemExit:
+            logger.debug("Error occurred while running ansible-inventory")
+            inventory_update.cancel()
+            sys.exit(1)

         status, tb, exc = 'error', '', None
         try:
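For orientation: the towervars stripped in the constructed-inventory branches above are the per-host variables that Inventory.get_script_data injects (see the inventory model change further down). For a host with id 42 and enabled=True they would look roughly like:

    {'remote_host_enabled': 'true', 'remote_host_id': 42, 'remote_tower_enabled': 'true', 'remote_tower_id': 42}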
@@ -44,16 +44,18 @@ class Command(BaseCommand):

         for x in ig.instances.all():
             color = '\033[92m'
+            end_color = '\033[0m'
             if x.capacity == 0 and x.node_type != 'hop':
                 color = '\033[91m'
             if not x.enabled:
                 color = '\033[90m[DISABLED] '
             if no_color:
                 color = ''
+                end_color = ''

             capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
             version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
             heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
-            print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
+            print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')

         print()
32  awx/main/management/commands/run_cache_clear.py  Normal file
@@ -0,0 +1,32 @@
import logging
import json

from django.core.management.base import BaseCommand
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker

logger = logging.getLogger('awx.main.cache_clear')


class Command(BaseCommand):
    """
    Cache Clear
    Runs as a management command and starts a daemon that listens for a pg_notify message to clear the cache.
    """

    help = 'Launch the cache clear daemon'

    def handle(self, *arg, **options):
        try:
            with pg_bus_conn(new_connection=True) as conn:
                conn.listen("tower_settings_change")
                for e in conn.events(yield_timeouts=True):
                    if e is not None:
                        body = json.loads(e.payload)
                        logger.info(f"Cache clear request received. Clearing now, payload: {e.payload}")
                        TaskWorker.run_callable(body)

        except Exception:
            # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
            logger.exception('Encountered unhandled error in cache clear main loop')
            raise
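The producer side of this loop is a pg_notify on the same channel. A minimal sketch, assuming the payload is a serialized task body that run_callable can execute:

    import json
    from awx.main.dispatch import pg_bus_conn

    body = {'task': 'awx.main.tasks.system.clear_setting_cache', 'args': [['DISABLE_LOCAL_AUTH']], 'kwargs': {}}
    with pg_bus_conn() as conn:
        conn.notify('tower_settings_change', json.dumps(body))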
@@ -8,7 +8,7 @@ from django.core.cache import cache as django_cache
 from django.core.management.base import BaseCommand
 from django.db import connection as django_connection

-from awx.main.dispatch import get_local_queuename
+from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.pool import AutoscalePool
 from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -76,7 +76,7 @@ class Command(BaseCommand):
         consumer = None

         try:
-            queues = ['tower_broadcast_all', get_local_queuename()]
+            queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
             consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
             consumer.run()
         except KeyboardInterrupt:
74  awx/main/management/commands/run_heartbeet.py  Normal file
@@ -0,0 +1,74 @@
import json
import logging
import os
import time
import signal
import sys

from django.core.management.base import BaseCommand
from django.conf import settings

from awx.main.dispatch import pg_bus_conn

logger = logging.getLogger('awx.main.commands.run_heartbeet')


class Command(BaseCommand):
    help = 'Launch the web server beacon (heartbeet)'

    def print_banner(self):
        heartbeet = r"""
        ********** **********
    ************* *************
  *****************************
   ***********HEART***********
     *************************
       *******************
         ***************        _._
           ***********         /`._ `'.      __
             *******           \  .\|  \   _'`  `)
               ***            (``_)  \| ).'`   /`- /
                *              `\ `;\_  `\\//`-'` /
                                 \  `'.'.|  /  __/`
                                  `'--v_|/`'`
                                    __||-._
                                  /'`  `-`` `'\\
                                 /   .'`       )
                                 \  BEET      ' )
                                  \.          /
                                   '.       /'`
                                     `)    |
                                         //
                                        '(.
                                         `\`.
                                           ``"""
        print(heartbeet)

    def construct_payload(self, action='online'):
        payload = {
            'hostname': settings.CLUSTER_HOST_ID,
            'ip': os.environ.get('MY_POD_IP'),
            'action': action,
        }
        return json.dumps(payload)

    def notify_listener_and_exit(self, *args):
        with pg_bus_conn(new_connection=False) as conn:
            conn.notify('web_heartbeet', self.construct_payload(action='offline'))
        sys.exit(0)

    def do_hearbeat_loop(self):
        with pg_bus_conn(new_connection=True) as conn:
            while True:
                logger.debug('Sending heartbeat')
                conn.notify('web_heartbeet', self.construct_payload())
                time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)

    def handle(self, *arg, **options):
        self.print_banner()
        signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
        signal.signal(signal.SIGINT, self.notify_listener_and_exit)

        # Note: We don't really try any reconnect logic to pg_notify here,
        # just let supervisor restart if we fail.
        self.do_hearbeat_loop()
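On the receiving end, a consumer decodes the same payload; the websocket relay manager introduced below appears to be the intended consumer. A sketch using the listen/events pattern from the other daemons in this changeset:

    import json
    from awx.main.dispatch import pg_bus_conn

    with pg_bus_conn(new_connection=True) as conn:
        conn.listen('web_heartbeet')
        for e in conn.events(yield_timeouts=True):
            if e is not None:
                beat = json.loads(e.payload)  # {'hostname': ..., 'ip': ..., 'action': 'online' or 'offline'}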
41  awx/main/management/commands/run_rsyslog_configurer.py  Normal file
@@ -0,0 +1,41 @@
import logging
import json

from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.cache import cache
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker
from awx.main.utils.external_logging import reconfigure_rsyslog

logger = logging.getLogger('awx.main.rsyslog_configurer')


class Command(BaseCommand):
    """
    Rsyslog Configurer
    Runs as a management command and starts rsyslog configurer daemon. Daemon listens
    for pg_notify then calls reconfigure_rsyslog
    """

    help = 'Launch the rsyslog_configurer daemon'

    def handle(self, *arg, **options):
        try:
            with pg_bus_conn(new_connection=True) as conn:
                conn.listen("rsyslog_configurer")
                # reconfigure rsyslog on start up
                reconfigure_rsyslog()
                for e in conn.events(yield_timeouts=True):
                    if e is not None:
                        logger.info("Change in logging settings found. Restarting rsyslogd")
                        # clear the cache of relevant settings then restart
                        setting_keys = [k for k in dir(settings) if k.startswith('LOG_AGGREGATOR')]
                        cache.delete_many(setting_keys)
                        settings._awx_conf_memoizedcache.clear()
                        body = json.loads(e.payload)
                        TaskWorker.run_callable(body)
        except Exception:
            # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
            logger.exception('Encountered unhandled error in rsyslog_configurer main loop')
            raise
@@ -13,13 +13,13 @@ from django.db import connection
 from django.db.migrations.executor import MigrationExecutor

 from awx.main.analytics.broadcast_websocket import (
-    BroadcastWebsocketStatsManager,
+    RelayWebsocketStatsManager,
     safe_name,
 )
-from awx.main.wsbroadcast import BroadcastWebsocketManager
+from awx.main.wsrelay import WebSocketRelayManager


-logger = logging.getLogger('awx.main.wsbroadcast')
+logger = logging.getLogger('awx.main.wsrelay')


 class Command(BaseCommand):
@@ -98,8 +98,9 @@ class Command(BaseCommand):
         try:
             executor = MigrationExecutor(connection)
             migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
+            connection.close()  # Because of async nature, main loop will use new connection, so close this
         except Exception as exc:
-            logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
+            logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...')
             time.sleep(10)
             return
@@ -130,9 +131,9 @@ class Command(BaseCommand):

         if options.get('status'):
             try:
-                stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
+                stats_all = RelayWebsocketStatsManager.get_stats_sync()
             except redis.exceptions.ConnectionError as e:
-                print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
+                print(f"Unable to get Relay Websocket Status. Failed to connect to redis {e}")
                 return

             data = {}
@@ -151,22 +152,19 @@ class Command(BaseCommand):
             host_stats = Command.get_connection_status(hostnames, data)
             lines = Command._format_lines(host_stats)

-            print(f'Broadcast websocket connection status from "{my_hostname}" to:')
+            print(f'Relay websocket connection status from "{my_hostname}" to:')
             print('\n'.join(lines))

             host_stats = Command.get_connection_stats(hostnames, data)
             lines = Command._format_lines(host_stats)

-            print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:')
+            print(f'\nRelay websocket connection stats from "{my_hostname}" to:')
             print('\n'.join(lines))

             return

         try:
-            broadcast_websocket_mgr = BroadcastWebsocketManager()
-            task = broadcast_websocket_mgr.start()
-
-            loop = asyncio.get_event_loop()
-            loop.run_until_complete(task)
+            websocket_relay_manager = WebSocketRelayManager()
+            asyncio.run(websocket_relay_manager.run())
         except KeyboardInterrupt:
-            logger.debug('Terminating Websocket Broadcaster')
+            logger.info('Terminating Websocket Relayer')
@@ -79,6 +79,11 @@ class HostManager(models.Manager):
         return qs


+class HostMetricActiveManager(models.Manager):
+    def get_queryset(self):
+        return super().get_queryset().filter(deleted=False)
+
+
 def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
     # Create IG mapping by union of all groups their instances are members of
     ig_ig_mapping = {}
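The new manager gives a pre-filtered view next to the default one; the HostMetric model later in this changeset wires it up as active_objects. A sketch of the distinction:

    HostMetric.objects.count()         # every row, including soft-deleted metrics
    HostMetric.active_objects.count()  # only rows with deleted=False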
17  awx/main/migrations/0175_workflowjob_is_bulk_job.py  Normal file
@@ -0,0 +1,17 @@
# Generated by Django 3.2.16 on 2023-01-05 15:39

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0174_ensure_org_ee_admin_roles'),
    ]

    operations = [
        migrations.AddField(
            model_name='workflowjob',
            name='is_bulk_job',
            field=models.BooleanField(default=False),
        ),
    ]
32  awx/main/migrations/0176_inventorysource_scm_branch.py  Normal file
@@ -0,0 +1,32 @@
# Generated by Django 3.2.16 on 2023-03-03 20:44

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0175_workflowjob_is_bulk_job'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventorysource',
            name='scm_branch',
            field=models.CharField(
                blank=True,
                default='',
                help_text='Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.',
                max_length=1024,
            ),
        ),
        migrations.AddField(
            model_name='inventoryupdate',
            name='scm_branch',
            field=models.CharField(
                blank=True,
                default='',
                help_text='Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.',
                max_length=1024,
            ),
        ),
    ]
48  awx/main/migrations/0177_instance_group_role_addition.py  Normal file
@@ -0,0 +1,48 @@
# Generated by Django 3.2.16 on 2023-02-17 02:45

import awx.main.fields
from django.db import migrations
import django.db.models.deletion


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0176_inventorysource_scm_branch'),
    ]

    operations = [
        migrations.AddField(
            model_name='instancegroup',
            name='admin_role',
            field=awx.main.fields.ImplicitRoleField(
                editable=False,
                null='True',
                on_delete=django.db.models.deletion.CASCADE,
                parent_role=['singleton:system_administrator'],
                related_name='+',
                to='main.role',
            ),
            preserve_default='True',
        ),
        migrations.AddField(
            model_name='instancegroup',
            name='read_role',
            field=awx.main.fields.ImplicitRoleField(
                editable=False,
                null='True',
                on_delete=django.db.models.deletion.CASCADE,
                parent_role=['singleton:system_auditor', 'use_role', 'admin_role'],
                related_name='+',
                to='main.role',
            ),
            preserve_default='True',
        ),
        migrations.AddField(
            model_name='instancegroup',
            name='use_role',
            field=awx.main.fields.ImplicitRoleField(
                editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role'], related_name='+', to='main.role'
            ),
            preserve_default='True',
        ),
    ]
18  awx/main/migrations/0178_instance_group_admin_migration.py  Normal file
@@ -0,0 +1,18 @@
# Generated by Django 3.2.16 on 2023-02-17 02:45

from django.db import migrations
from awx.main.migrations import _rbac as rbac
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _OrgAdmin_to_use_ig as oamigrate
from awx.main.migrations import ActivityStreamDisabledMigration


class Migration(ActivityStreamDisabledMigration):
    dependencies = [
        ('main', '0177_instance_group_role_addition'),
    ]
    operations = [
        migrations.RunPython(migration_utils.set_current_apps_for_migrations),
        migrations.RunPython(rbac.create_roles),
        migrations.RunPython(oamigrate.migrate_org_admin_to_use),
    ]
18  awx/main/migrations/0179_change_cyberark_plugin_names.py  Normal file
@@ -0,0 +1,18 @@
# Generated by Django 3.2.16 on 2023-03-16 15:16
from django.db import migrations

from awx.main.migrations._credentialtypes import migrate_credential_type
from awx.main.models import CredentialType


class Migration(migrations.Migration):
    def update_cyberark_plugin_names(apps, schema_editor):
        CredentialType.setup_tower_managed_defaults(apps)
        migrate_credential_type(apps, 'aim')
        migrate_credential_type(apps, 'conjur')

    dependencies = [
        ('main', '0178_instance_group_admin_migration'),
    ]

    operations = [migrations.RunPython(update_cyberark_plugin_names)]
43  awx/main/migrations/0180_add_hostmetric_fields.py  Normal file
@@ -0,0 +1,43 @@
# Generated by Django 3.2.16 on 2023-02-03 09:40

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0179_change_cyberark_plugin_names'),
    ]

    operations = [
        migrations.AlterField(model_name='hostmetric', name='hostname', field=models.CharField(max_length=512, primary_key=False, serialize=True, unique=True)),
        migrations.AddField(
            model_name='hostmetric',
            name='last_deleted',
            field=models.DateTimeField(db_index=True, null=True, help_text='When the host was last deleted'),
        ),
        migrations.AddField(
            model_name='hostmetric',
            name='automated_counter',
            field=models.BigIntegerField(default=0, help_text='How many times was the host automated'),
        ),
        migrations.AddField(
            model_name='hostmetric',
            name='deleted_counter',
            field=models.IntegerField(default=0, help_text='How many times was the host deleted'),
        ),
        migrations.AddField(
            model_name='hostmetric',
            name='deleted',
            field=models.BooleanField(
                default=False, help_text='Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption'
            ),
        ),
        migrations.AddField(
            model_name='hostmetric',
            name='used_in_inventories',
            field=models.IntegerField(null=True, help_text='How many inventories contain this host'),
        ),
        migrations.AddField(
            model_name='hostmetric', name='id', field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
        ),
    ]
33  awx/main/migrations/0181_hostmetricsummarymonthly.py  Normal file
@@ -0,0 +1,33 @@
# Generated by Django 3.2.16 on 2023-02-10 12:26

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0180_add_hostmetric_fields'),
    ]

    operations = [
        migrations.CreateModel(
            name='HostMetricSummaryMonthly',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(unique=True)),
                ('license_consumed', models.BigIntegerField(default=0, help_text='How many unique hosts are consumed from the license')),
                ('license_capacity', models.BigIntegerField(default=0, help_text='License capacity as max. number of unique hosts')),
                (
                    'hosts_added',
                    models.IntegerField(default=0, help_text='How many hosts were added in the associated month, consuming more license capacity'),
                ),
                (
                    'hosts_deleted',
                    models.IntegerField(default=0, help_text='How many hosts were deleted in the associated month, freeing the license capacity'),
                ),
                (
                    'indirectly_managed_hosts',
                    models.IntegerField(default=0, help_text='Manually entered number indirectly managed hosts for a certain month'),
                ),
            ],
        ),
    ]
138  awx/main/migrations/0182_constructed_inventory.py  Normal file
@@ -0,0 +1,138 @@
# Generated by Django 3.2.16 on 2022-12-07 14:20

import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0181_hostmetricsummarymonthly'),
    ]

    operations = [
        migrations.CreateModel(
            name='InventoryConstructedInventoryMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
                (
                    'constructed_inventory',
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory', related_name='constructed_inventory_memberships'),
                ),
                ('input_inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory')),
            ],
        ),
        migrations.AddField(
            model_name='inventory',
            name='input_inventories',
            field=awx.main.fields.OrderedManyToManyField(
                blank=True,
                through_fields=('constructed_inventory', 'input_inventory'),
                help_text='Only valid for constructed inventories, this links to the inventories that will be used.',
                related_name='destination_inventories',
                through='main.InventoryConstructedInventoryMembership',
                to='main.Inventory',
            ),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='kind',
            field=models.CharField(
                blank=True,
                choices=[
                    ('', 'Hosts have a direct link to this inventory.'),
                    ('smart', 'Hosts for inventory generated using the host_filter property.'),
                    ('constructed', 'Parse list of source inventories with the constructed inventory plugin.'),
                ],
                default='',
                help_text='Kind of inventory being represented.',
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('constructed', 'Template additional groups and hostvars at runtime'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(
                choices=[
                    ('file', 'File, Directory or Script'),
                    ('constructed', 'Template additional groups and hostvars at runtime'),
                    ('scm', 'Sourced from a Project'),
                    ('ec2', 'Amazon EC2'),
                    ('gce', 'Google Compute Engine'),
                    ('azure_rm', 'Microsoft Azure Resource Manager'),
                    ('vmware', 'VMware vCenter'),
                    ('satellite6', 'Red Hat Satellite 6'),
                    ('openstack', 'OpenStack'),
                    ('rhv', 'Red Hat Virtualization'),
                    ('controller', 'Red Hat Ansible Automation Platform'),
                    ('insights', 'Red Hat Insights'),
                ],
                default=None,
                max_length=32,
            ),
        ),
        migrations.AddField(
            model_name='inventorysource',
            name='limit',
            field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
        ),
        migrations.AddField(
            model_name='inventoryupdate',
            name='limit',
            field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
        ),
        migrations.AlterField(
            model_name='inventorysource',
            name='host_filter',
            field=models.TextField(
                blank=True,
                default='',
                help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
            ),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='host_filter',
            field=models.TextField(
                blank=True,
                default='',
                help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
            ),
        ),
        migrations.AddField(
            model_name='jobhostsummary',
            name='constructed_host',
            field=models.ForeignKey(
                default=None,
                editable=False,
                help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='constructed_host_summaries',
                to='main.host',
            ),
        ),
    ]
20  awx/main/migrations/_OrgAdmin_to_use_ig.py  Normal file
@@ -0,0 +1,20 @@
import logging

from awx.main.models import Organization

logger = logging.getLogger('awx.main.migrations')


def migrate_org_admin_to_use(apps, schema_editor):
    logger.info('Initiated migration from Org admin to use role')
    roles_added = 0
    for org in Organization.objects.prefetch_related('admin_role__members').iterator():
        igs = list(org.instance_groups.all())
        if not igs:
            continue
        for admin in org.admin_role.members.filter(is_superuser=False):
            for ig in igs:
                ig.use_role.members.add(admin)
                roles_added += 1
    if roles_added:
        logger.info(f'Migration converted {roles_added} from organization admin to use role')
@@ -1,6 +1,9 @@
 import logging

+from awx.main.models import CredentialType
 from django.db.models import Q

 logger = logging.getLogger('awx.main.migrations')

 DEPRECATED_CRED_KIND = {
     'rax': {
@@ -76,3 +79,14 @@ def add_tower_verify_field(apps, schema_editor):
 def remove_become_methods(apps, schema_editor):
     # this is no longer necessary; schemas are defined in code
     pass
+
+
+def migrate_credential_type(apps, namespace):
+    ns_types = apps.get_model('main', 'CredentialType').objects.filter(namespace=namespace).order_by('created')
+    if ns_types.count() == 2:
+        original, renamed = ns_types.all()
+        logger.info(f'There are credential types to migrate in the "{namespace}" namespace: {original.name}')
+        apps.get_model('main', 'Credential').objects.filter(credential_type_id=original.id).update(credential_type_id=renamed.id)
+
+        logger.info(f'Removing old credential type: {original.name}')
+        original.delete()
@@ -29,6 +29,7 @@ def create_roles(apps, schema_editor):
             'Project',
             'Credential',
             'JobTemplate',
+            'InstanceGroup',
         ]
     ]

@@ -16,7 +16,9 @@ from awx.main.models.inventory import (  # noqa
     Group,
     Host,
     HostMetric,
+    HostMetricSummaryMonthly,
     Inventory,
+    InventoryConstructedInventoryMembership,
     InventorySource,
     InventoryUpdate,
     SmartInventoryMembership,
@@ -7,6 +7,7 @@ from collections import defaultdict
 from django.conf import settings
 from django.core.exceptions import ObjectDoesNotExist
 from django.db import models, DatabaseError
+from django.db.models.functions import Cast
 from django.utils.dateparse import parse_datetime
 from django.utils.text import Truncator
 from django.utils.timezone import utc, now
@@ -536,25 +537,38 @@ class JobEvent(BasePlaybookEvent):
             return
         job = self.job

-        from awx.main.models import Host, JobHostSummary, HostMetric  # circular import
+        from awx.main.models import Host, JobHostSummary  # circular import

-        all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
+        if self.job.inventory.kind == 'constructed':
+            all_hosts = Host.objects.filter(id__in=self.job.inventory.hosts.values_list(Cast('instance_id', output_field=models.IntegerField()))).only(
+                'id', 'name'
+            )
+            constructed_host_map = self.host_map
+            host_map = {host.name: host.id for host in all_hosts}
+        else:
+            all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
+            constructed_host_map = {}
+            host_map = self.host_map
+
         existing_host_ids = set(h.id for h in all_hosts)

         summaries = dict()
         updated_hosts_list = list()
         for host in hostnames:
             updated_hosts_list.append(host.lower())
-            host_id = self.host_map.get(host, None)
+            host_id = host_map.get(host)
             if host_id not in existing_host_ids:
                 host_id = None
+            constructed_host_id = constructed_host_map.get(host)
             host_stats = {}
             for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
                 try:
                     host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
                 except AttributeError:  # in case event_data[stat] isn't a dict.
                     pass
-            summary = JobHostSummary(created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats)
+            summary = JobHostSummary(
+                created=now(), modified=now(), job_id=job.id, host_id=host_id, constructed_host_id=constructed_host_id, host_name=host, **host_stats
+            )
             summary.failed = bool(summary.dark or summary.failures)
             summaries[(host_id, host)] = summary

@@ -575,12 +589,26 @@ class JobEvent(BasePlaybookEvent):

         Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)

-        # bulk-create
-        current_time = now()
-        HostMetric.objects.bulk_create(
-            [HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
-        )
-        HostMetric.objects.filter(hostname__in=updated_hosts_list).update(last_automation=current_time)
+        # Create/update Host Metrics
+        self._update_host_metrics(updated_hosts_list)
+
+    @staticmethod
+    def _update_host_metrics(updated_hosts_list):
+        from awx.main.models import HostMetric  # circular import
+
+        # bulk-create
+        current_time = now()
+        HostMetric.objects.bulk_create(
+            [HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
+        )
+        # bulk-update
+        batch_start, batch_size = 0, 1000
+        while batch_start <= len(updated_hosts_list):
+            batched_host_list = updated_hosts_list[batch_start : (batch_start + batch_size)]
+            HostMetric.objects.filter(hostname__in=batched_host_list).update(
+                last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
+            )
+            batch_start += batch_size

     @property
     def job_verbosity(self):
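A note on the batched update: the F() expression makes the increment happen inside SQL, so concurrent callback workers cannot lose counts to read-modify-write races. Sketch (hostname illustrative):

    from django.db import models

    HostMetric.objects.filter(hostname='db01.example.org').update(automated_counter=models.F('automated_counter') + 1)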
@@ -17,15 +17,20 @@ from django.db.models import Sum
 import redis
 from solo.models import SingletonModel

 # AWX
 from awx import __version__ as awx_application_version
 from awx.api.versioning import reverse
-from awx.main.fields import JSONBlob
+from awx.main.fields import JSONBlob, ImplicitRoleField
 from awx.main.managers import InstanceManager, UUID_DEFAULT
 from awx.main.constants import JOB_FOLDER_PREFIX
 from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
+from awx.main.models.rbac import (
+    ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
+    ROLE_SINGLETON_SYSTEM_AUDITOR,
+)
 from awx.main.models.unified_jobs import UnifiedJob
 from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity
-from awx.main.models.mixins import RelatedJobsMixin
+from awx.main.models.mixins import RelatedJobsMixin, ResourceMixin

 # ansible-runner
 from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes
@@ -352,7 +357,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(), update_last_seen=True, errors=errors)


-class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
+class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin, ResourceMixin):
     """A model representing a Queue/Group of AWX Instances."""

     name = models.CharField(max_length=250, unique=True)
@@ -379,6 +384,24 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
             default='',
         )
     )
+    admin_role = ImplicitRoleField(
+        parent_role=[
+            'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
+        ]
+    )
+    use_role = ImplicitRoleField(
+        parent_role=[
+            'admin_role',
+        ]
+    )
+    read_role = ImplicitRoleField(
+        parent_role=[
+            'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
+            'use_role',
+            'admin_role',
+        ]
+    )

     max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
     max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
     policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
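With these fields in place, access is granted through the usual role membership pattern; the _OrgAdmin_to_use_ig migration above does exactly this for org admins. A sketch with hypothetical objects:

    ig = InstanceGroup.objects.get(name='default')
    ig.use_role.members.add(some_user)     # can now select this group on resources
    ig.admin_role.members.add(some_admin)  # implies use_role and read_role via the parent_role chains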
@@ -9,6 +9,8 @@ import re
|
||||
import copy
|
||||
import os.path
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import dateutil.relativedelta
|
||||
import yaml
|
||||
|
||||
# Django
|
||||
@@ -17,6 +19,7 @@ from django.db import models, connection
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.db import transaction
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.urls import resolve
|
||||
from django.utils.timezone import now
|
||||
from django.db.models import Q
|
||||
|
||||
@@ -32,7 +35,7 @@ from awx.main.fields import (
|
||||
SmartFilterField,
|
||||
OrderedManyToManyField,
|
||||
)
|
||||
from awx.main.managers import HostManager
|
||||
from awx.main.managers import HostManager, HostMetricActiveManager
|
||||
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
|
||||
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
|
||||
@@ -49,15 +52,25 @@ from awx.main.models.notifications import (
|
||||
from awx.main.models.credential.injectors import _openstack_data
|
||||
from awx.main.utils import _inventory_updates
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
from awx.main.utils.execution_environments import to_container_path
|
||||
from awx.main.utils.execution_environments import to_container_path, get_control_plane_execution_environment
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership', 'HostMetric', 'HostMetricSummaryMonthly']
|
||||
|
||||
logger = logging.getLogger('awx.main.models.inventory')
|
||||
|
||||
|
||||
class InventoryConstructedInventoryMembership(models.Model):
|
||||
constructed_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE, related_name='constructed_inventory_memberships')
|
||||
input_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE)
|
||||
position = models.PositiveIntegerField(
|
||||
null=True,
|
||||
default=None,
|
||||
db_index=True,
|
||||
)
|
||||
|
||||
|
||||
class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
"""
|
||||
an inventory source contains lists and hosts.
|
||||
@@ -67,6 +80,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
KIND_CHOICES = [
|
||||
('', _('Hosts have a direct link to this inventory.')),
|
||||
('smart', _('Hosts for inventory generated using the host_filter property.')),
|
||||
('constructed', _('Parse list of source inventories with the constructed inventory plugin.')),
|
||||
]
|
||||
|
||||
class Meta:
|
||||
@@ -139,6 +153,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
default=None,
|
||||
help_text=_('Filter that will be applied to the hosts of this inventory.'),
|
||||
)
|
||||
input_inventories = OrderedManyToManyField(
|
||||
'Inventory',
|
||||
blank=True,
|
||||
through_fields=('constructed_inventory', 'input_inventory'),
|
||||
related_name='destination_inventories',
|
||||
help_text=_('Only valid for constructed inventories, this links to the inventories that will be used.'),
|
||||
through='InventoryConstructedInventoryMembership',
|
||||
)
|
||||
instance_groups = OrderedManyToManyField(
|
||||
'InstanceGroup',
|
||||
blank=True,
|
||||
@@ -187,6 +209,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
if request is not None:
|
||||
# circular import
|
||||
from awx.api.urls.inventory import constructed_inventory_urls
|
||||
|
||||
route = resolve(request.path_info)
|
||||
if any(route.url_name == url.name for url in constructed_inventory_urls):
|
||||
return reverse('api:constructed_inventory_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
@@ -338,13 +368,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
        for host in hosts:
            data['_meta']['hostvars'][host.name] = host.variables_dict
            if towervars:
                tower_dict = dict(
                    remote_tower_enabled=str(host.enabled).lower(),
                    remote_tower_id=host.id,
                    remote_host_enabled=str(host.enabled).lower(),
                    remote_host_id=host.id,
                )
                data['_meta']['hostvars'][host.name].update(tower_dict)
                for prefix in ('host', 'tower'):
                    tower_dict = {
                        f'remote_{prefix}_enabled': str(host.enabled).lower(),
                        f'remote_{prefix}_id': host.id,
                    }
                    data['_meta']['hostvars'][host.name].update(tower_dict)

        return data
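The hunk above replaces the hand-written dict of `remote_tower_*`/`remote_host_*` keys with a loop over both prefixes; the resulting hostvars are identical. A minimal standalone sketch of the new form (`FakeHost` and `towervars_for` are illustrative names, not AWX code):

```python
# Minimal sketch of the refactored towervars block; `host` is any object
# with `enabled` and `id` attributes -- names here are illustrative only.
def towervars_for(host):
    hostvars = {}
    for prefix in ('host', 'tower'):
        hostvars.update({
            f'remote_{prefix}_enabled': str(host.enabled).lower(),  # 'true' / 'false'
            f'remote_{prefix}_id': host.id,
        })
    return hostvars

class FakeHost:
    enabled, id = True, 42

# {'remote_host_enabled': 'true', 'remote_host_id': 42,
#  'remote_tower_enabled': 'true', 'remote_tower_id': 42}
print(towervars_for(FakeHost()))
```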
@@ -431,12 +460,24 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):

        connection.on_commit(on_commit)

    def _enforce_constructed_source(self):
        """
        Constructed inventory should always have exactly 1 inventory source, constructed type
        this enforces that requirement
        """
        if self.kind == 'constructed':
            if not self.inventory_sources.exists():
                self.inventory_sources.create(
                    source='constructed', name=f'Auto-created source for: {self.name}'[:512], overwrite=True, overwrite_vars=True, update_on_launch=True
                )

    def save(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
        super(Inventory, self).save(*args, **kwargs)
        if self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite':
            # Minimal update of host_count for smart inventory host filter changes
            self.update_computed_fields()
        self._enforce_constructed_source()

    def delete(self, *args, **kwargs):
        self._update_host_smart_inventory_memeberships()
@@ -820,9 +861,64 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):


class HostMetric(models.Model):
    hostname = models.CharField(primary_key=True, max_length=512)
    hostname = models.CharField(unique=True, max_length=512)
    first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against'))
    last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against'))
    last_deleted = models.DateTimeField(null=True, db_index=True, help_text=_('When the host was last deleted'))
    automated_counter = models.BigIntegerField(default=0, help_text=_('How many times was the host automated'))
    deleted_counter = models.IntegerField(default=0, help_text=_('How many times was the host deleted'))
    deleted = models.BooleanField(
        default=False, help_text=_('Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption')
    )
    used_in_inventories = models.IntegerField(null=True, help_text=_('How many inventories contain this host'))

    objects = models.Manager()
    active_objects = HostMetricActiveManager()

    def get_absolute_url(self, request=None):
        return reverse('api:host_metric_detail', kwargs={'pk': self.pk}, request=request)

    def soft_delete(self):
        if not self.deleted:
            self.deleted_counter = (self.deleted_counter or 0) + 1
            self.last_deleted = now()
            self.deleted = True
            self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])

    def soft_restore(self):
        if self.deleted:
            self.deleted = False
            self.save(update_fields=['deleted'])

    @classmethod
    def cleanup_task(cls, months_ago):
        try:
            months_ago = int(months_ago)
            if months_ago <= 0:
                raise ValueError()

            last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)

            logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
            HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
                deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
            )
            settings.CLEANUP_HOST_METRICS_LAST_TS = now()
        except (TypeError, ValueError):
            logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")


class HostMetricSummaryMonthly(models.Model):
    """
    HostMetric summaries computed by scheduled task <TODO> monthly
    """

    date = models.DateField(unique=True)
    license_consumed = models.BigIntegerField(default=0, help_text=_("How many unique hosts are consumed from the license"))
    license_capacity = models.BigIntegerField(default=0, help_text=_("'License capacity as max. number of unique hosts"))
    hosts_added = models.IntegerField(default=0, help_text=_("How many hosts were added in the associated month, consuming more license capacity"))
    hosts_deleted = models.IntegerField(default=0, help_text=_("How many hosts were deleted in the associated month, freeing the license capacity"))
    indirectly_managed_hosts = models.IntegerField(default=0, help_text=("Manually entered number indirectly managed hosts for a certain month"))

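Note that the HostMetric lifecycle above never hard-deletes rows: `soft_delete` flips the flag and bumps `deleted_counter`, `soft_restore` flips it back without touching the counter, and `cleanup_task` bulk soft-deletes anything not automated within the threshold. A plain-Python sketch of that state machine (no Django required; names mirror the model for illustration only):

```python
# Plain-Python sketch of HostMetric's soft-delete lifecycle (illustrative,
# not the ORM model): delete bumps the counter, restore does not.
from datetime import datetime, timezone

class HostMetricSketch:
    def __init__(self, hostname):
        self.hostname = hostname
        self.deleted = False
        self.deleted_counter = 0
        self.last_deleted = None

    def soft_delete(self):
        if not self.deleted:
            self.deleted_counter += 1
            self.last_deleted = datetime.now(timezone.utc)
            self.deleted = True

    def soft_restore(self):
        if self.deleted:
            self.deleted = False

m = HostMetricSketch('node01')
m.soft_delete(); m.soft_restore(); m.soft_delete()
assert m.deleted and m.deleted_counter == 2  # counter tracks every deletion
```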
class InventorySourceOptions(BaseModel):
@@ -834,6 +930,7 @@ class InventorySourceOptions(BaseModel):

    SOURCE_CHOICES = [
        ('file', _('File, Directory or Script')),
        ('constructed', _('Template additional groups and hostvars at runtime')),
        ('scm', _('Sourced from a Project')),
        ('ec2', _('Amazon EC2')),
        ('gce', _('Google Compute Engine')),
@@ -872,6 +969,12 @@ class InventorySourceOptions(BaseModel):
        default='',
        help_text=_('Inventory source variables in YAML or JSON format.'),
    )
    scm_branch = models.CharField(
        max_length=1024,
        default='',
        blank=True,
        help_text=_('Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.'),
    )
    enabled_var = models.TextField(
        blank=True,
        default='',
@@ -907,7 +1010,7 @@ class InventorySourceOptions(BaseModel):
    host_filter = models.TextField(
        blank=True,
        default='',
        help_text=_('Regex where only matching hosts will be imported.'),
        help_text=_('This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.'),
    )
    overwrite = models.BooleanField(
        default=False,
@@ -927,6 +1030,21 @@ class InventorySourceOptions(BaseModel):
        blank=True,
        default=1,
    )
    limit = models.TextField(
        blank=True,
        default='',
        help_text=_("Enter host, group or pattern match"),
    )

    def resolve_execution_environment(self):
        """
        Project updates, themselves, will use the control plane execution environment.
        Jobs using the project can use the default_environment, but the project updates
        are not flexible enough to allow customizing the image they use.
        """
        if self.inventory.kind == 'constructed':
            return get_control_plane_execution_environment()
        return super().resolve_execution_environment()

    @staticmethod
    def cloud_credential_validation(source, cred):
@@ -1361,8 +1479,8 @@ class PluginFileInjector(object):
    def build_env(self, inventory_update, env, private_data_dir, private_data_files):
        injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
        env.update(injector_env)
        # Preserves current behavior for Ansible change in default planned for 2.10
        env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
        # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
        env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
        return env

    def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
@@ -1546,5 +1664,18 @@ class insights(PluginFileInjector):
    use_fqcn = True


class constructed(PluginFileInjector):
    plugin_name = 'constructed'
    namespace = 'ansible'
    collection = 'builtin'

    def build_env(self, *args, **kwargs):
        env = super().build_env(*args, **kwargs)
        # Enable script inventory plugin so we pick up the script files from source inventories
        env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
        env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
        return env


for cls in PluginFileInjector.__subclasses__():
    InventorySourceOptions.injectors[cls.__name__] = cls

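Taken together, the base injector now sets `ANSIBLE_INVENTORY_ENABLED` to `'auto'` and the constructed injector appends `',script'`, so an update of a constructed inventory runs ansible-inventory with both plugins enabled and fails on any unparsed source. A minimal sketch of the environment these two `build_env` passes produce (a plain dict stands in for the process env; function names are illustrative):

```python
# Sketch of the env after PluginFileInjector.build_env + constructed.build_env;
# a plain dict stands in for the subprocess environment.
def base_build_env(env):
    env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
    return env

def constructed_build_env(env):
    env = base_build_env(env)
    env['ANSIBLE_INVENTORY_ENABLED'] += ',script'              # pick up source-inventory scripts
    env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'   # fail loudly, do not skip
    return env

assert constructed_build_env({})['ANSIBLE_INVENTORY_ENABLED'] == 'auto,script'
```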
@@ -2,12 +2,8 @@
# All Rights Reserved.

# Python
import codecs
import datetime
import logging
import os
import time
import json
from urllib.parse import urljoin


@@ -15,11 +11,9 @@ from urllib.parse import urljoin
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.functions import Cast

# from django.core.cache import cache
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import FieldDoesNotExist

@@ -28,6 +22,7 @@ from rest_framework.exceptions import ParseError

# AWX
from awx.api.versioning import reverse
from awx.main.constants import HOST_FACTS_FIELDS
from awx.main.models.base import (
    BaseModel,
    CreatedModifiedModel,
@@ -44,7 +39,7 @@ from awx.main.models.notifications import (
    NotificationTemplate,
    JobNotificationMixin,
)
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import (
    ResourceMixin,
@@ -60,8 +55,6 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES


logger = logging.getLogger('awx.main.models.jobs')
analytics_logger = logging.getLogger('awx.analytics.job_events')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')

__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']

@@ -578,12 +571,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
        default=None,
        on_delete=models.SET_NULL,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='jobs',
        editable=False,
        through='JobHostSummary',
    )
    hosts = models.ManyToManyField('Host', related_name='jobs', editable=False, through='JobHostSummary', through_fields=('job', 'host'))
    artifacts = JSONBlob(
        default=dict,
        blank=True,
@@ -831,6 +819,9 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
            for name in JOB_VARIABLE_PREFIXES:
                r['{}_job_template_id'.format(name)] = self.job_template.pk
                r['{}_job_template_name'.format(name)] = self.job_template.name
        if self.execution_node:
            for name in JOB_VARIABLE_PREFIXES:
                r['{}_execution_node'.format(name)] = self.execution_node
        return r

    '''
@@ -845,109 +836,26 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
    def get_notification_friendly_name(self):
        return "Job"

    def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
        """Return value is an iterable for the relevant hosts for this job"""
        if not self.inventory:
            return []
        host_queryset = self.inventory.hosts.only(*only)
        if filters:
            host_queryset = host_queryset.filter(**filters)
        host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
        if isinstance(host_queryset, QuerySet):
            return host_queryset.iterator()
        return host_queryset
    def get_hosts_for_fact_cache(self):
        """
        Builds the queryset to use for writing or finalizing the fact cache
        these need to be the 'real' hosts associated with the job.
        For constructed inventories, that means the original (input inventory) hosts
        when slicing, that means only returning hosts in that slice
        """
        Host = JobHostSummary._meta.get_field('host').related_model
        if not self.inventory_id:
            return Host.objects.none()

    @log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
    def start_job_fact_cache(self, destination, log_data, timeout=None):
        self.log_lifecycle("start_job_fact_cache")
        log_data['job_id'] = self.id
        log_data['written_ct'] = 0
        os.makedirs(destination, mode=0o700)

        if timeout is None:
            timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
        if timeout > 0:
            # exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT seconds`
            timeout = now() - datetime.timedelta(seconds=timeout)
            hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
        if self.inventory.kind == 'constructed':
            id_field = Host._meta.get_field('id')
            host_qs = Host.objects.filter(id__in=self.inventory.hosts.exclude(instance_id='').values_list(Cast('instance_id', output_field=id_field)))
        else:
            hosts = self._get_inventory_hosts()
            host_qs = self.inventory.hosts

        last_filepath_written = None
        for host in hosts:
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
            try:
                with codecs.open(filepath, 'w', encoding='utf-8') as f:
                    os.chmod(f.name, 0o600)
                    json.dump(host.ansible_facts, f)
                log_data['written_ct'] += 1
                last_filepath_written = filepath
            except IOError:
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
        # make note of the time we wrote the last file so we can check if any file changed later
        if last_filepath_written:
            return os.path.getmtime(last_filepath_written)
        return None

    @log_excess_runtime(
        logger,
        debug_cutoff=0.01,
        msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
        add_log_data=True,
    )
    def finish_job_fact_cache(self, destination, facts_write_time, log_data):
        self.log_lifecycle("finish_job_fact_cache")
        log_data['job_id'] = self.id
        log_data['updated_ct'] = 0
        log_data['unmodified_ct'] = 0
        log_data['cleared_ct'] = 0
        hosts_to_update = []
        for host in self._get_inventory_hosts():
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
                continue
            if os.path.exists(filepath):
                # If the file changed since we wrote the last facts file, pre-playbook run...
                modified = os.path.getmtime(filepath)
                if (not facts_write_time) or modified > facts_write_time:
                    with codecs.open(filepath, 'r', encoding='utf-8') as f:
                        try:
                            ansible_facts = json.load(f)
                        except ValueError:
                            continue
                        host.ansible_facts = ansible_facts
                        host.ansible_facts_modified = now()
                        hosts_to_update.append(host)
                        system_tracking_logger.info(
                            'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                            extra=dict(
                                inventory_id=host.inventory.id,
                                host_name=host.name,
                                ansible_facts=host.ansible_facts,
                                ansible_facts_modified=host.ansible_facts_modified.isoformat(),
                                job_id=self.id,
                            ),
                        )
                        log_data['updated_ct'] += 1
                else:
                    log_data['unmodified_ct'] += 1
            else:
                # if the file goes missing, ansible removed it (likely via clear_facts)
                host.ansible_facts = {}
                host.ansible_facts_modified = now()
                hosts_to_update.append(host)
                system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
                log_data['cleared_ct'] += 1
            if len(hosts_to_update) > 100:
                self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
                hosts_to_update = []
        if hosts_to_update:
            self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
        host_qs = host_qs.only(*HOST_FACTS_FIELDS)
        host_qs = self.inventory.get_sliced_hosts(host_qs, self.job_slice_number, self.job_slice_count)
        return host_qs
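The constructed branch of `get_hosts_for_fact_cache` relies on a convention: each host inside a constructed inventory stores the primary key of the original input-inventory host in its `instance_id` field, so casting `instance_id` back to an integer recovers the "real" hosts. A hedged sketch of that mapping outside the ORM (plain data stands in for the queryset and `Cast()`):

```python
# Sketch of the constructed-inventory host mapping: constructed hosts carry
# the original host's pk in `instance_id` (a string); empty means no link.
constructed_hosts = [
    {'name': 'web1', 'instance_id': '11'},
    {'name': 'web2', 'instance_id': '12'},
    {'name': 'ungrouped-extra', 'instance_id': ''},  # excluded, no original host
]
real_host_ids = {int(h['instance_id']) for h in constructed_hosts if h['instance_id']}
assert real_host_ids == {11, 12}  # these ids select the input-inventory hosts
```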
class LaunchTimeConfigBase(BaseModel):
@@ -1169,6 +1077,15 @@ class JobHostSummary(CreatedModifiedModel):
        editable=False,
    )
    host = models.ForeignKey('Host', related_name='job_host_summaries', null=True, default=None, on_delete=models.SET_NULL, editable=False)
    constructed_host = models.ForeignKey(
        'Host',
        related_name='constructed_host_summaries',
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        editable=False,
        help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
    )

    host_name = models.CharField(
        max_length=1024,

@@ -284,7 +284,7 @@ class JobNotificationMixin(object):
            'workflow_url',
            'scm_branch',
            'artifacts',
            {'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark' 'processed', 'rescued', 'ignored']},
            {'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark', 'processed', 'rescued', 'ignored']},
            {
                'summary_fields': [
                    {

@@ -32,7 +32,7 @@ from polymorphic.models import PolymorphicModel

# AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.registrar import activity_stream_registrar
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
@@ -1567,7 +1567,7 @@ class UnifiedJob(
        return r

    def get_queue_name(self):
        return self.controller_node or self.execution_node or get_local_queuename()
        return self.controller_node or self.execution_node or get_task_queuename()

    @property
    def is_container_group_task(self):

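This `get_local_queuename` to `get_task_queuename` rename repeats across the rest of the diff: every periodic or dispatched task now resolves its queue through the task-queue helper instead of the node-local one. Note that `@task(queue=...)` receives the function itself, not its result, so the queue can be resolved when the task is published; a generic sketch of that pattern (the decorator and publish helper here are illustrative, not AWX's actual dispatch internals):

```python
# Generic sketch of a dispatch decorator whose `queue` may be a callable
# resolved at publish time (illustrative; not AWX's dispatch API).
def get_task_queuename():
    return 'tower_tasks'  # hypothetical queue name

def task(queue=None):
    def decorate(fn):
        def apply_async():
            # a callable queue is evaluated when the task is published,
            # so renaming the helper changes routing for every task at once
            resolved = queue() if callable(queue) else queue
            print(f'publishing {fn.__name__} to {resolved}')
        fn.apply_async = apply_async
        return fn
    return decorate

@task(queue=get_task_queuename)
def task_manager():
    pass

task_manager.apply_async()  # -> publishing task_manager to tower_tasks
```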
@@ -650,6 +650,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
        help_text=_("If automatically created for a sliced job run, the job template " "the workflow job was created from."),
    )
    is_sliced_job = models.BooleanField(default=False)
    is_bulk_job = models.BooleanField(default=False)

    def _set_default_dependencies_processed(self):
        self.dependencies_processed = True

@@ -28,7 +28,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):

websocket_urlpatterns = [
    re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
    re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
    re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
]

application = AWXProtocolTypeRouter(

@@ -8,7 +8,7 @@ from django.conf import settings
from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename

logger = logging.getLogger('awx.main.scheduler')

@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
    manager().schedule()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def task_manager():
    run_manager(TaskManager, "task")


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def dependency_manager():
    run_manager(DependencyManager, "dependency")


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def workflow_manager():
    run_manager(WorkflowManager, "workflow")

@@ -85,6 +85,8 @@ class RunnerCallback:
        # which generate job events from two 'streams':
        # ansible-inventory and the awx.main.commands.inventory_import
        # logger
        if event_data.get('event') == 'keepalive':
            return

        if event_data.get(self.event_data_key, None):
            if self.event_data_key != 'job_id':

awx/main/tasks/facts.py (new file, 117 lines)
@@ -0,0 +1,117 @@
import codecs
import datetime
import os
import json
import logging

# Django
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now

# AWX
from awx.main.utils.common import log_excess_runtime
from awx.main.models.inventory import Host


logger = logging.getLogger('awx.main.tasks.facts')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')


@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
    log_data['inventory_id'] = inventory_id
    log_data['written_ct'] = 0
    try:
        os.makedirs(destination, mode=0o700)
    except FileExistsError:
        pass

    if timeout is None:
        timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT

    if isinstance(hosts, QuerySet):
        hosts = hosts.iterator()

    last_filepath_written = None
    for host in hosts:
        if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
            continue  # facts are expired - do not write them
        filepath = os.sep.join(map(str, [destination, host.name]))
        if not os.path.realpath(filepath).startswith(destination):
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
        try:
            with codecs.open(filepath, 'w', encoding='utf-8') as f:
                os.chmod(f.name, 0o600)
                json.dump(host.ansible_facts, f)
            log_data['written_ct'] += 1
            last_filepath_written = filepath
        except IOError:
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
    # make note of the time we wrote the last file so we can check if any file changed later
    if last_filepath_written:
        return os.path.getmtime(last_filepath_written)
    return None


@log_excess_runtime(
    logger,
    debug_cutoff=0.01,
    msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
    add_log_data=True,
)
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
    log_data['inventory_id'] = inventory_id
    log_data['updated_ct'] = 0
    log_data['unmodified_ct'] = 0
    log_data['cleared_ct'] = 0

    if isinstance(hosts, QuerySet):
        hosts = hosts.iterator()

    hosts_to_update = []
    for host in hosts:
        filepath = os.sep.join(map(str, [destination, host.name]))
        if not os.path.realpath(filepath).startswith(destination):
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
        if os.path.exists(filepath):
            # If the file changed since we wrote the last facts file, pre-playbook run...
            modified = os.path.getmtime(filepath)
            if (not facts_write_time) or modified > facts_write_time:
                with codecs.open(filepath, 'r', encoding='utf-8') as f:
                    try:
                        ansible_facts = json.load(f)
                    except ValueError:
                        continue
                    host.ansible_facts = ansible_facts
                    host.ansible_facts_modified = now()
                    hosts_to_update.append(host)
                    system_tracking_logger.info(
                        'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
                        extra=dict(
                            inventory_id=host.inventory.id,
                            host_name=host.name,
                            ansible_facts=host.ansible_facts,
                            ansible_facts_modified=host.ansible_facts_modified.isoformat(),
                            job_id=job_id,
                        ),
                    )
                    log_data['updated_ct'] += 1
            else:
                log_data['unmodified_ct'] += 1
        else:
            # if the file goes missing, ansible removed it (likely via clear_facts)
            host.ansible_facts = {}
            host.ansible_facts_modified = now()
            hosts_to_update.append(host)
            system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
            log_data['cleared_ct'] += 1
        if len(hosts_to_update) > 100:
            Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
            hosts_to_update = []
    if hosts_to_update:
        Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
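The two helpers above form a write/compare round trip: `start_fact_cache` dumps one JSON file per host and returns the mtime of the last file written, and `finish_fact_cache` treats any file newer than that timestamp as facts updated by the playbook (a missing file means `clear_facts` ran). A self-contained sketch of the same mtime gate, without the ORM or logging (directory and function names are illustrative):

```python
# Self-contained sketch of the facts.py mtime round trip (illustrative names).
import json, os, tempfile, time

cache_dir = tempfile.mkdtemp()

def start(facts_by_host):
    last = None
    for name, facts in facts_by_host.items():
        path = os.path.join(cache_dir, name)
        with open(path, 'w') as f:
            json.dump(facts, f)
        last = path
    return os.path.getmtime(last) if last else None  # the "facts_write_time"

def finish(hostnames, facts_write_time):
    updated, cleared = [], []
    for name in hostnames:
        path = os.path.join(cache_dir, name)
        if not os.path.exists(path):
            cleared.append(name)          # ansible removed it (clear_facts)
        elif os.path.getmtime(path) > facts_write_time:
            updated.append(name)          # playbook wrote newer facts
    return updated, cleared

wt = start({'web1': {'os': 'linux'}, 'web2': {'os': 'linux'}})
time.sleep(0.05)
os.utime(os.path.join(cache_dir, 'web1'))   # simulate a playbook update
os.remove(os.path.join(cache_dir, 'web2'))  # simulate clear_facts
assert finish(['web1', 'web2'], wt) == (['web1'], ['web2'])
```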
@@ -29,7 +29,7 @@ from gitdb.exc import BadName as BadGitName

# AWX
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.constants import (
    PRIVILEGE_ESCALATION_METHODS,
    STANDARD_INVENTORY_UPDATE_ENV,
@@ -37,6 +37,7 @@ from awx.main.constants import (
    MAX_ISOLATED_PATH_COLON_DELIMITER,
    CONTAINER_VOLUMES_MOUNT_TYPES,
    ACTIVE_STATES,
    HOST_FACTS_FIELDS,
)
from awx.main.models import (
    Instance,
@@ -63,6 +64,7 @@ from awx.main.tasks.callback import (
)
from awx.main.tasks.signals import with_signal_handling, signal_callback
from awx.main.tasks.receptor import AWXReceptorJob
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path
@@ -315,17 +317,22 @@ class BaseTask(object):

        return env

    def write_inventory_file(self, inventory, private_data_dir, file_name, script_params):
        script_data = inventory.get_script_data(**script_params)
        for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items():
            # maintain a list of host_name --> host_id
            # so we can associate emitted events to Host objects
            self.runner_callback.host_map[hostname] = hv.get('remote_tower_id', '')
        file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
        return self.write_private_data_file(private_data_dir, file_name, file_content, sub_dir='inventory', file_permissions=0o700)

    def build_inventory(self, instance, private_data_dir):
        script_params = dict(hostvars=True, towervars=True)
        if hasattr(instance, 'job_slice_number'):
            script_params['slice_number'] = instance.job_slice_number
            script_params['slice_count'] = instance.job_slice_count
        script_data = instance.inventory.get_script_data(**script_params)
        # maintain a list of host_name --> host_id
        # so we can associate emitted events to Host objects
        self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
        file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
        return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)

        return self.write_inventory_file(instance.inventory, private_data_dir, 'hosts', script_params)

    def build_args(self, instance, private_data_dir, passwords):
        raise NotImplementedError
@@ -450,6 +457,9 @@ class BaseTask(object):
        instance.ansible_version = ansible_version_info
        instance.save(update_fields=['ansible_version'])

    def should_use_fact_cache(self):
        return False

    @with_path_cleanup
    @with_signal_handling
    def run(self, pk, **kwargs):
@@ -548,7 +558,8 @@ class BaseTask(object):
            params['module'] = self.build_module_name(self.instance)
            params['module_args'] = self.build_module_args(self.instance)

        if getattr(self.instance, 'use_fact_cache', False):
        # TODO: refactor into a better BasTask method
        if self.should_use_fact_cache():
            # Enable Ansible fact cache.
            params['fact_cache_type'] = 'jsonfile'
        else:
@@ -759,7 +770,7 @@ class SourceControlMixin(BaseTask):

    def sync_and_copy(self, project, private_data_dir, scm_branch=None):
        self.acquire_lock(project, self.instance.id)

        is_commit = False
        try:
            original_branch = None
            failed_reason = project.get_reason_if_failed()
@@ -771,6 +782,7 @@ class SourceControlMixin(BaseTask):
                if os.path.exists(project_path):
                    git_repo = git.Repo(project_path)
                    if git_repo.head.is_detached:
                        is_commit = True
                        original_branch = git_repo.head.commit
                    else:
                        original_branch = git_repo.active_branch
@@ -782,7 +794,11 @@ class SourceControlMixin(BaseTask):
                    # for git project syncs, non-default branches can be problems
                    # restore to branch the repo was on before this run
                    try:
                        original_branch.checkout()
                        if is_commit:
                            git_repo.head.set_commit(original_branch)
                            git_repo.head.reset(index=True, working_tree=True)
                        else:
                            original_branch.checkout()
                    except Exception:
                        # this could have failed due to dirty tree, but difficult to predict all cases
                        logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')
@@ -790,7 +806,7 @@ class SourceControlMixin(BaseTask):
            self.release_lock(project)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunJob(SourceControlMixin, BaseTask):
    """
    Run a job using ansible-playbook.
@@ -998,6 +1014,9 @@ class RunJob(SourceControlMixin, BaseTask):

        return args

    def should_use_fact_cache(self):
        return self.instance.use_fact_cache

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        return job.playbook

@@ -1063,8 +1082,11 @@ class RunJob(SourceControlMixin, BaseTask):

        # Fetch "cached" fact data from prior runs and put on the disk
        # where ansible expects to find it
        if job.use_fact_cache:
            self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
        if self.should_use_fact_cache():
            job.log_lifecycle("start_job_fact_cache")
            self.facts_write_time = start_fact_cache(
                job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
            )

    def build_project_dir(self, job, private_data_dir):
        self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
@@ -1078,10 +1100,14 @@ class RunJob(SourceControlMixin, BaseTask):
            # actual `run()` call; this _usually_ means something failed in
            # the pre_run_hook method
            return
        if job.use_fact_cache:
            job.finish_job_fact_cache(
        if self.should_use_fact_cache():
            job.log_lifecycle("finish_job_fact_cache")
            finish_fact_cache(
                job.get_hosts_for_fact_cache(),
                os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
                self.facts_write_time,
                facts_write_time=self.facts_write_time,
                job_id=job.id,
                inventory_id=job.inventory_id,
            )

    def final_run_hook(self, job, status, private_data_dir):
@@ -1095,7 +1121,7 @@ class RunJob(SourceControlMixin, BaseTask):
                update_inventory_computed_fields.delay(inventory.id)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunProjectUpdate(BaseTask):
    model = ProjectUpdate
    event_model = ProjectUpdateEvent
@@ -1417,7 +1443,7 @@ class RunProjectUpdate(BaseTask):
        return params


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask):
    model = InventoryUpdate
    event_model = InventoryUpdateEvent
@@ -1464,8 +1490,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
            env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'

        if inventory_update.source == 'scm':
            for env_k in inventory_update.source_vars_dict:
@@ -1518,6 +1542,22 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        args = ['ansible-inventory', '--list', '--export']

        # special case for constructed inventories, we pass source inventories from database
        # these must come in order, and in order _before_ the constructed inventory itself
        if inventory_update.inventory.kind == 'constructed':
            inventory_update.log_lifecycle("start_job_fact_cache")
            for input_inventory in inventory_update.inventory.input_inventories.all():
                args.append('-i')
                script_params = dict(hostvars=True, towervars=True)
                source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)
                args.append(to_container_path(source_inv_path, private_data_dir))
                # Include any facts from input inventories so they can be used in filters
                start_fact_cache(
                    input_inventory.hosts.only(*HOST_FACTS_FIELDS),
                    os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
                    inventory_id=input_inventory.id,
                )

        # Add arguments for the source inventory file/script/thing
        rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
        container_location = os.path.join(CONTAINER_ROOT, rel_path)
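With this hunk and the one below, an update of a constructed inventory with two input inventories assembles an ansible-inventory invocation whose `-i` arguments list every input inventory before the constructed source itself, since the constructed plugin templates groups and hostvars from hosts the earlier sources define. A hedged sketch of the resulting argument vector (the inventory ids, update id, and container paths here are illustrative, not fixed values):

```python
# Illustrative argument vector for a constructed inventory with two input
# inventories (ids 7 and 9) and update id 42; actual paths will differ.
args = [
    'ansible-inventory', '--list', '--export',
    '-i', '/runner/inventory/hosts_7',       # input inventories first, in order
    '-i', '/runner/inventory/hosts_9',
    '-i', '/runner/inventory/constructed',   # the constructed source itself, last
    '--limit', 'web*',                       # only if inventory_update.limit is set
    '--output', '/runner/artifacts/42/output.json',
]
```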
@@ -1525,6 +1565,11 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        args.append('-i')
        args.append(container_location)
        # Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596
        # limit should be usable in ansible-inventory 2.15+
        if inventory_update.limit:
            args.append('--limit')
            args.append(inventory_update.limit)

        args.append('--output')
        args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
@@ -1540,6 +1585,9 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

        return args

    def should_use_fact_cache(self):
        return bool(self.instance.source == 'constructed')

    def build_inventory(self, inventory_update, private_data_dir):
        return None  # what runner expects in order to not deal with inventory

@@ -1581,7 +1629,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
        if inventory_update.source == 'scm':
            if not source_project:
                raise RuntimeError('Could not find project to run SCM inventory update from.')
            self.sync_and_copy(source_project, private_data_dir)
            self.sync_and_copy(source_project, private_data_dir, scm_branch=inventory_update.inventory_source.scm_branch)
        else:
            # If source is not SCM make an empty project directory, content is built inside inventory folder
            super(RunInventoryUpdate, self).build_project_dir(inventory_update, private_data_dir)
@@ -1658,7 +1706,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
            raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunAdHocCommand(BaseTask):
    """
    Run an ad hoc command using ansible.
@@ -1811,7 +1859,7 @@ class RunAdHocCommand(BaseTask):
        return d


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
class RunSystemJob(BaseTask):
    model = SystemJob
    event_model = SystemJobEvent

@@ -28,7 +28,7 @@ from awx.main.utils.common import (
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task

# Receptorctl
@@ -526,6 +526,10 @@ class AWXReceptorJob:
            pod_spec['spec']['containers'][0]['image'] = ee.image
            pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']

            if settings.AWX_RUNNER_KEEPALIVE_SECONDS:
                pod_spec['spec']['containers'][0].setdefault('env', [])
                pod_spec['spec']['containers'][0]['env'].append({'name': 'ANSIBLE_RUNNER_KEEPALIVE_SECONDS', 'value': str(settings.AWX_RUNNER_KEEPALIVE_SECONDS)})

            # Enforce EE Pull Policy
            pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
            if self.task and self.task.instance.execution_environment:
@@ -635,7 +639,7 @@ class AWXReceptorJob:
#
RECEPTOR_CONFIG_STARTER = (
    {'local-only': None},
    {'log-level': 'debug'},
    {'log-level': 'info'},
    {'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
    {'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
    {'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
@@ -664,6 +668,7 @@ RECEPTOR_CONFIG_STARTER = (
            'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
            'cert': '/etc/receptor/tls/receptor.crt',
            'key': '/etc/receptor/tls/receptor.key',
            'mintls13': False,
        }
    },
)
@@ -708,7 +713,7 @@ def write_receptor_config():
    links.update(link_state=InstanceLink.States.ESTABLISHED)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def remove_deprovisioned_node(hostname):
    InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
    InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)

@@ -47,10 +47,11 @@ from awx.main.models import (
    Inventory,
    SmartInventoryMembership,
    Job,
    HostMetric,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.utils.common import (
    get_type_for_model,
    ignore_inventory_computed_fields,
@@ -59,7 +60,6 @@ from awx.main.utils.common import (
    ScheduleTaskManager,
)

from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
@@ -115,9 +115,6 @@ def dispatch_startup():
    m = Metrics()
    m.reset_values()

    # Update Tower's rsyslog.conf file based on loggins settings in the db
    reconfigure_rsyslog()


def inform_cluster_of_shutdown():
    try:
@@ -132,7 +129,7 @@ def inform_cluster_of_shutdown():
        logger.exception('Encountered problem with normal shutdown signal.')


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def apply_cluster_membership_policies():
    from awx.main.signals import disable_activity_stream

@@ -244,8 +241,10 @@ def apply_cluster_membership_policies():
        logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))


@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
@task(queue='tower_settings_change')
def clear_setting_cache(setting_keys):
    # log that cache is being cleared
    logger.info(f"clear_setting_cache of keys {setting_keys}")
    orig_len = len(setting_keys)
    for i in range(orig_len):
        for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
@@ -254,9 +253,6 @@ def handle_setting_changes(setting_keys):
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)

    if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
        reconfigure_rsyslog()


@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
@@ -286,7 +282,7 @@ def profile_sql(threshold=1, minutes=1):
    logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def send_notifications(notification_list, job_id=None):
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
@@ -317,7 +313,7 @@ def send_notifications(notification_list, job_id=None):
            logger.exception('Error saving notification {} result.'.format(notification.id))


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def gather_analytics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField
@@ -330,7 +326,7 @@ def gather_analytics():
        analytics.gather()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def purge_old_stdout_files():
    nowtime = time.time()
    for f in os.listdir(settings.JOBOUTPUT_ROOT):
@@ -378,12 +374,26 @@ def handle_removed_image(remove_images=None):
    _cleanup_images_and_files(remove_images=remove_images, file_pattern='')


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def cleanup_images_and_files():
    _cleanup_images_and_files()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def cleanup_host_metrics():
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
    last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None

    cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
    if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
        months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
        HostMetric.cleanup_task(months_ago)

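`cleanup_host_metrics` gates itself with simple arithmetic: `CLEANUP_HOST_METRICS_INTERVAL` is a day count (default 30 in the hunk above), converted to seconds via 86400, and the task only calls `HostMetric.cleanup_task` when at least that long has passed since the stored `CLEANUP_HOST_METRICS_LAST_TS`. A minimal sketch of the gate with plain datetimes (the helper function is illustrative; the constants mirror the defaults in the hunk):

```python
# Sketch of the cleanup_host_metrics gate; 30 days * 86400 s/day = 2,592,000 s.
from datetime import datetime, timedelta, timezone

CLEANUP_HOST_METRICS_INTERVAL = 30    # days (default used in the hunk above)
CLEANUP_HOST_METRICS_THRESHOLD = 12   # months of inactivity before soft-delete

def should_run_cleanup(last_time, now=None):
    now = now or datetime.now(timezone.utc)
    interval_secs = CLEANUP_HOST_METRICS_INTERVAL * 86400
    return not last_time or (now - last_time).total_seconds() > interval_secs

t0 = datetime.now(timezone.utc)
assert should_run_cleanup(None)                                 # never ran: run now
assert not should_run_cleanup(t0 - timedelta(days=29), now=t0)  # too soon
assert should_run_cleanup(t0 - timedelta(days=31), now=t0)      # interval elapsed
```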
@task(queue=get_task_queuename)
def cluster_node_health_check(node):
    """
    Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
@@ -402,7 +412,7 @@ def cluster_node_health_check(node):
    this_inst.local_health_check()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def execution_node_health_check(node):
    if node == '':
        logger.warning('Remote health check incorrectly called with blank string')
@@ -496,7 +506,7 @@ def inspect_execution_nodes(instance_list):
            execution_node_health_check.apply_async([hostname])


@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
@@ -581,12 +591,12 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
        active_task_ids = []
        for task_list in worker_tasks.values():
            active_task_ids.extend(task_list)
        reaper.reap(instance=this_inst, excluded_uuids=active_task_ids)
        reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
        if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
            reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def awx_receptor_workunit_reaper():
    """
    When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
@@ -622,7 +632,7 @@ def awx_receptor_workunit_reaper():
    administrative_workunit_reaper(receptor_work_list)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def awx_k8s_reaper():
    if not settings.RECEPTOR_RELEASE_WORK:
        return
@@ -642,7 +652,7 @@ def awx_k8s_reaper():
            logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def awx_periodic_scheduler():
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
@@ -708,7 +718,7 @@ def schedule_manager_success_or_error(instance):
        ScheduleWorkflowManager().schedule()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def handle_work_success(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -720,7 +730,7 @@ def handle_work_success(task_actual):
    schedule_manager_success_or_error(instance)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def handle_work_error(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -760,7 +770,7 @@ def handle_work_error(task_actual):
    schedule_manager_success_or_error(instance)


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def update_inventory_computed_fields(inventory_id):
    """
    Signal handler and wrapper around inventory.update_computed_fields to
@@ -801,7 +811,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
    return False


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def update_host_smart_inventory_memberships():
    smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    changed_inventories = set([])
@@ -817,7 +827,7 @@ def update_host_smart_inventory_memberships():
        smart_inventory.update_computed_fields()


@task(queue=get_local_queuename)
@task(queue=get_task_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    # Delete inventory as user
    if user_id is None:
@@ -882,16 +892,9 @@ def _reconstruct_relationships(copy_mapping):
        new_obj.save()


@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
    sub_obj_list = cache.get(uuid)
    if sub_obj_list is None:
        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
        return

@task(queue=get_task_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
    logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    model = getattr(importlib.import_module(model_module), model_name, None)
    if model is None:
@@ -903,6 +906,28 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, u
    except ObjectDoesNotExist:
        logger.warning("Object or user no longer exists.")
        return

    o2m_to_preserve = {}
    fields_to_preserve = set(getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []))

    for field in model._meta.get_fields():
        if field.name in fields_to_preserve:
            if field.one_to_many:
                try:
                    field_val = getattr(obj, field.name)
                except AttributeError:
                    continue
                o2m_to_preserve[field.name] = field_val

    sub_obj_list = []
    for o2m in o2m_to_preserve:
        for sub_obj in o2m_to_preserve[o2m].all():
            sub_model = type(sub_obj)
            sub_obj_list.append((sub_model.__module__, sub_model.__name__, sub_obj.pk))

    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
        copy_mapping = {}
        for sub_obj_setup in sub_obj_list:

@@ -1,9 +1,8 @@
{
    "ANSIBLE_JINJA2_NATIVE": "True",
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "AZURE_CLIENT_ID": "fooo",
    "AZURE_CLOUD_ENVIRONMENT": "fooo",
    "AZURE_SECRET": "fooo",
    "AZURE_SUBSCRIPTION_ID": "fooo",
    "AZURE_TENANT": "fooo"
}

@@ -1,5 +1,4 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_PASSWORD": "fooo",
    "TOWER_USERNAME": "fooo",
@@ -10,4 +9,4 @@
    "CONTROLLER_USERNAME": "fooo",
    "CONTROLLER_OAUTH_TOKEN": "",
    "CONTROLLER_VERIFY_SSL": "False"
}

@@ -1,8 +1,7 @@
{
    "ANSIBLE_JINJA2_NATIVE": "True",
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo",
    "AWS_SESSION_TOKEN": "fooo"
}

@@ -1,6 +1,5 @@
{
    "ANSIBLE_JINJA2_NATIVE": "True",
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
    "GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}",
    "GCP_AUTH_KIND": "serviceaccount",

@@ -1,5 +1,4 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "INSIGHTS_USER": "fooo",
    "INSIGHTS_PASSWORD": "fooo"
}

@@ -1,4 +1,3 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
}

@@ -1,7 +1,6 @@
{
    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
    "OVIRT_INI_PATH": "{{ file_reference }}",
    "OVIRT_PASSWORD": "fooo",
    "OVIRT_URL": "https://foo.invalid",
    "OVIRT_USERNAME": "fooo"
}

Some files were not shown because too many files have changed in this diff.