Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 19:44:43 -03:30)

Compare commits: 21.14.0 ... feature_us (281 commits)
[Commit list abridged: 281 commits, 05e9b29460 through ede9d961da; author, date, and message columns omitted.]
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 2 lines changed
@@ -19,6 +19,8 @@ body:
required: true
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
required: true
- label: I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `security@ansible.com` instead.)
required: true

- type: textarea
id: summary
.github/workflows/ci.yml (vendored): 2 lines changed
@@ -3,7 +3,7 @@ name: CI
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
DEV_DOCKER_OWNER: ${{ github.repository_owner }}
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
on:
pull_request:
.github/workflows/label_issue.yml (vendored): 4 lines changed
@@ -6,6 +6,10 @@ on:
- opened
- reopened

permissions:
contents: read # to fetch code
issues: write # to label issues

jobs:
triage:
runs-on: ubuntu-latest
.github/workflows/label_pr.yml (vendored): 4 lines changed
@@ -7,6 +7,10 @@ on:
- reopened
- synchronize

permissions:
contents: read # to determine modified files (actions/labeler)
pull-requests: write # to add labels to PRs (actions/labeler)

jobs:
triage:
runs-on: ubuntu-latest
.github/workflows/promote.yml (vendored): 3 lines changed
@@ -8,6 +8,9 @@ on:
release:
types: [published]

permissions:
contents: read # to fetch code (actions/checkout)

jobs:
promote:
if: endsWith(github.repository, '/awx')
.gitignore (vendored): 3 lines changed
@@ -157,9 +157,10 @@ use_dev_supervisor.txt
*.unison.tmp
*.#
/awx/ui/.ui-built
/Dockerfile
/_build/
/_build_kube_dev/
/Dockerfile
/Dockerfile.dev
/Dockerfile.kube-dev

awx/ui_next/src
Makefile: 126 lines changed
@@ -1,6 +1,6 @@
|
||||
-include awx/ui_next/Makefile
|
||||
|
||||
PYTHON ?= python3.9
|
||||
PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
|
||||
DOCKER_COMPOSE ?= docker-compose
|
||||
OFFICIAL ?= no
|
||||
NODE ?= node
|
||||
@@ -37,10 +37,15 @@ SPLUNK ?= false
|
||||
PROMETHEUS ?= false
|
||||
# If set to true docker-compose will also start a grafana instance
|
||||
GRAFANA ?= false
|
||||
# If set to true docker-compose will also start a tacacs+ instance
|
||||
TACACS ?= false
|
||||
|
||||
VENV_BASE ?= /var/lib/awx/venv
|
||||
|
||||
DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
|
||||
DEV_DOCKER_OWNER ?= ansible
|
||||
# Docker will only accept lowercase, so github names like Paul need to be paul
|
||||
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
|
||||
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
|
||||
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
|
||||
|
||||
RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
|
||||
@@ -86,7 +91,7 @@ clean-schema:
|
||||
|
||||
clean-languages:
|
||||
rm -f $(I18N_FLAG_FILE)
|
||||
find ./awx/locale/ -type f -regex ".*\.mo$" -delete
|
||||
find ./awx/locale/ -type f -regex '.*\.mo$$' -delete
|
||||
|
||||
## Remove temporary build files, compiled Python files.
|
||||
clean: clean-ui clean-api clean-awxkit clean-dist
|
||||
@@ -217,12 +222,6 @@ daphne:
|
||||
fi; \
|
||||
daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer
|
||||
|
||||
wsbroadcast:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_wsbroadcast
|
||||
|
||||
## Run to start the background task dispatcher for development.
|
||||
dispatcher:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -230,7 +229,6 @@ dispatcher:
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_dispatcher
|
||||
|
||||
|
||||
## Run to start the zeromq callback receiver
|
||||
receiver:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -247,6 +245,34 @@ jupyter:
|
||||
fi; \
|
||||
$(MANAGEMENT_COMMAND) shell_plus --notebook
|
||||
|
||||
## Start the rsyslog configurer process in background in development environment.
|
||||
run-rsyslog-configurer:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_rsyslog_configurer
|
||||
|
||||
## Start cache_clear process in background in development environment.
|
||||
run-cache-clear:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_cache_clear
|
||||
|
||||
## Start the wsrelay process in background in development environment.
|
||||
run-wsrelay:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_wsrelay
|
||||
|
||||
## Start the heartbeat process in background in development environment.
|
||||
run-heartbeet:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
$(PYTHON) manage.py run_heartbeet
|
||||
|
||||
reports:
|
||||
mkdir -p $@
|
||||
|
||||
@@ -273,13 +299,13 @@ swagger: reports
|
||||
check: black
|
||||
|
||||
api-lint:
|
||||
BLACK_ARGS="--check" make black
|
||||
BLACK_ARGS="--check" $(MAKE) black
|
||||
flake8 awx
|
||||
yamllint -s .
|
||||
|
||||
## Run egg_info_dev to generate awx.egg-info for development.
|
||||
awx-link:
|
||||
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
|
||||
cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link
|
||||
|
||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
|
||||
PYTEST_ARGS ?= -n auto
|
||||
@@ -298,7 +324,7 @@ github_ci_setup:
|
||||
# CI_GITHUB_TOKEN is defined in .github files
|
||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
||||
make docker-compose-build
|
||||
$(MAKE) docker-compose-build
|
||||
|
||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||
docker-runner:
|
||||
@@ -348,7 +374,7 @@ test_collection_sanity:
|
||||
rm -rf $(COLLECTION_INSTALL)
|
||||
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
|
||||
ansible --version
|
||||
COLLECTION_VERSION=1.0.0 make install_collection
|
||||
COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||
|
||||
test_collection_integration: install_collection
|
||||
@@ -498,7 +524,9 @@ docker-compose-sources: .git/hooks/pre-commit
|
||||
-e enable_ldap=$(LDAP) \
|
||||
-e enable_splunk=$(SPLUNK) \
|
||||
-e enable_prometheus=$(PROMETHEUS) \
|
||||
-e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
-e enable_grafana=$(GRAFANA) \
|
||||
-e enable_tacacs=$(TACACS) \
|
||||
$(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
|
||||
docker-compose: awx/projects docker-compose-sources
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
|
||||
@@ -531,16 +559,25 @@ docker-compose-container-group-clean:
|
||||
fi
|
||||
rm -rf tools/docker-compose-minikube/_sources/
|
||||
|
||||
## Base development image build
|
||||
docker-compose-build:
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
|
||||
DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
.PHONY: Dockerfile.dev
|
||||
## Generate Dockerfile.dev for awx_devel image
|
||||
Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.dev \
|
||||
-e build_dev=True \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE)
|
||||
|
||||
## Build awx_devel image for docker compose development environment
|
||||
docker-compose-build: Dockerfile.dev
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
-f Dockerfile.dev \
|
||||
-t $(DEVEL_IMAGE_NAME) \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
|
||||
|
||||
docker-clean:
|
||||
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
|
||||
-$(foreach image_id,$(shell docker images --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
|
||||
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
|
||||
|
||||
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
|
||||
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
|
||||
@@ -555,7 +592,7 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
|
||||
|
||||
docker-compose-container-group:
|
||||
MINIKUBE_CONTAINER_GROUP=true make docker-compose
|
||||
MINIKUBE_CONTAINER_GROUP=true $(MAKE) docker-compose
|
||||
|
||||
clean-elk:
|
||||
docker stop tools_kibana_1
|
||||
@@ -572,12 +609,36 @@ VERSION:
|
||||
@echo "awx: $(VERSION)"
|
||||
|
||||
PYTHON_VERSION:
|
||||
@echo "$(PYTHON)" | sed 's:python::'
|
||||
@echo "$(subst python,,$(PYTHON))"
|
||||
|
||||
.PHONY: version-for-buildyml
|
||||
version-for-buildyml:
|
||||
@echo $(firstword $(subst +, ,$(VERSION)))
|
||||
# version-for-buildyml prints a special version string for build.yml,
|
||||
# chopping off the sha after the '+' sign.
|
||||
# tools/ansible/build.yml was doing this: make print-VERSION | cut -d + -f -1
|
||||
# This does the same thing in native make without
|
||||
# the pipe or the extra processes, and now the pb does `make version-for-buildyml`
|
||||
# Example:
|
||||
# 22.1.1.dev38+g523c0d9781 becomes 22.1.1.dev38
|
||||
|
||||
.PHONY: Dockerfile
|
||||
## Generate Dockerfile for awx image
|
||||
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e receptor_image=$(RECEPTOR_IMAGE) \
|
||||
-e headless=$(HEADLESS)
|
||||
|
||||
## Build awx image for deployment on Kubernetes environment.
|
||||
awx-kube-build: Dockerfile
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
|
||||
|
||||
.PHONY: Dockerfile.kube-dev
|
||||
## Generate Docker.kube-dev for awx_kube_devel image
|
||||
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
|
||||
ansible-playbook tools/ansible/dockerfile.yml \
|
||||
-e dockerfile_name=Dockerfile.kube-dev \
|
||||
@@ -592,13 +653,6 @@ awx-kube-dev-build: Dockerfile.kube-dev
|
||||
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
|
||||
## Build awx image for deployment on Kubernetes environment.
|
||||
awx-kube-build: Dockerfile
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
|
||||
--build-arg VERSION=$(VERSION) \
|
||||
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
|
||||
--build-arg HEADLESS=$(HEADLESS) \
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
@@ -618,6 +672,7 @@ messages:
|
||||
fi; \
|
||||
$(PYTHON) manage.py makemessages -l en_us --keep-pot
|
||||
|
||||
.PHONY: print-%
|
||||
print-%:
|
||||
@echo $($*)
|
||||
|
||||
@@ -629,12 +684,12 @@ HELP_FILTER=.PHONY
|
||||
## Display help targets
|
||||
help:
|
||||
@printf "Available targets:\n"
|
||||
@make -s help/generate | grep -vE "\w($(HELP_FILTER))"
|
||||
@$(MAKE) -s help/generate | grep -vE "\w($(HELP_FILTER))"
|
||||
|
||||
## Display help for all targets
|
||||
help/all:
|
||||
@printf "Available targets:\n"
|
||||
@make -s help/generate
|
||||
@$(MAKE) -s help/generate
|
||||
|
||||
## Generate help output from MAKEFILE_LIST
|
||||
help/generate:
|
||||
@@ -658,5 +713,4 @@ help/generate:
|
||||
|
||||
## Display help for ui-next targets
|
||||
help/ui-next:
|
||||
@make -s help MAKEFILE_LIST="awx/ui_next/Makefile"
|
||||
|
||||
@$(MAKE) -s help MAKEFILE_LIST="awx/ui_next/Makefile"
|
||||
|
||||
@@ -5,13 +5,11 @@
|
||||
import inspect
|
||||
import logging
|
||||
import time
|
||||
import uuid
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.contrib.auth import views as auth_views
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import FieldDoesNotExist
|
||||
from django.db import connection, transaction
|
||||
from django.db.models.fields.related import OneToOneRel
|
||||
@@ -35,7 +33,7 @@ from rest_framework.negotiation import DefaultContentNegotiation
|
||||
# AWX
|
||||
from awx.api.filters import FieldLookupBackend
|
||||
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.access import optimize_queryset
|
||||
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
@@ -364,12 +362,7 @@ class GenericAPIView(generics.GenericAPIView, APIView):
|
||||
return self.queryset._clone()
|
||||
elif self.model is not None:
|
||||
qs = self.model._default_manager
|
||||
if self.model in access_registry:
|
||||
access_class = access_registry[self.model]
|
||||
if access_class.select_related:
|
||||
qs = qs.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
qs = qs.prefetch_related(*access_class.prefetch_related)
|
||||
qs = optimize_queryset(qs)
|
||||
return qs
|
||||
else:
|
||||
return super(GenericAPIView, self).get_queryset()
|
||||
@@ -512,6 +505,9 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
# And optionally (user must have given access permission on parent object
|
||||
# to view sublist):
|
||||
# parent_access = 'read'
|
||||
# filter_read_permission sets whether or not to override the default intersection behavior
|
||||
# implemented here
|
||||
filter_read_permission = True
|
||||
|
||||
def get_description_context(self):
|
||||
d = super(SubListAPIView, self).get_description_context()
|
||||
@@ -526,12 +522,16 @@ class SubListAPIView(ParentMixin, ListAPIView):
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model).distinct()
|
||||
sublist_qs = self.get_sublist_queryset(parent)
|
||||
return qs & sublist_qs
|
||||
if not self.filter_read_permission:
|
||||
return optimize_queryset(self.get_sublist_queryset(parent))
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
if hasattr(self, 'parent_key'):
|
||||
# This is vastly preferable for ReverseForeignKey relationships
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return qs.distinct() & self.get_sublist_queryset(parent).distinct()
|
||||
|
||||
def get_sublist_queryset(self, parent):
|
||||
return getattrd(parent, self.relationship).distinct()
|
||||
return getattrd(parent, self.relationship)
|
||||
|
||||
|
||||
class DestroyAPIView(generics.DestroyAPIView):
|
||||
@@ -580,15 +580,6 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
|
||||
d.update({'parent_key': getattr(self, 'parent_key', None)})
|
||||
return d
|
||||
|
||||
def get_queryset(self):
|
||||
if hasattr(self, 'parent_key'):
|
||||
# Prefer this filtering because ForeignKey allows us more assumptions
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(**{self.parent_key: parent})
|
||||
return super(SubListCreateAPIView, self).get_queryset()
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
# If the object ID was not specified, it probably doesn't exist in the
|
||||
# DB yet. We want to see if we can create it. The URL may choose to
|
||||
@@ -967,16 +958,11 @@ class CopyAPIView(GenericAPIView):
|
||||
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
|
||||
new_obj.admin_role.members.add(request.user)
|
||||
if sub_objs:
|
||||
# store the copied object dict into cache, because it's
|
||||
# often too large for postgres' notification bus
|
||||
# (which has a default maximum message size of 8k)
|
||||
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
|
||||
cache.set(key, sub_objs, timeout=3600)
|
||||
permission_check_func = None
|
||||
if hasattr(type(self), 'deep_copy_permission_check_func'):
|
||||
permission_check_func = (type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func')
|
||||
trigger_delayed_deep_copy(
|
||||
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, key, permission_check_func=permission_check_func
|
||||
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, permission_check_func=permission_check_func
|
||||
)
|
||||
serializer = self._get_copy_return_serializer(new_obj)
|
||||
headers = {'Location': new_obj.get_absolute_url(request=request)}
|
||||
|
||||
@@ -25,6 +25,7 @@ __all__ = [
    'UserPermission',
    'IsSystemAdminOrAuditor',
    'WorkflowApprovalPermission',
    'AnalyticsPermission',
]


@@ -250,3 +251,16 @@ class IsSystemAdminOrAuditor(permissions.BasePermission):
class WebhookKeyPermission(permissions.BasePermission):
    def has_object_permission(self, request, view, obj):
        return request.user.can_access(view.model, 'admin', obj, request.data)


class AnalyticsPermission(permissions.BasePermission):
    """
    Allows GET/POST/OPTIONS to system admins and system auditors.
    """

    def has_permission(self, request, view):
        if not (request.user and request.user.is_authenticated):
            return False
        if request.method in ["GET", "POST", "OPTIONS"]:
            return request.user.is_superuser or request.user.is_system_auditor
        return request.user.is_superuser
@@ -56,6 +56,8 @@ from awx.main.models import (
|
||||
ExecutionEnvironment,
|
||||
Group,
|
||||
Host,
|
||||
HostMetric,
|
||||
HostMetricSummaryMonthly,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
InstanceLink,
|
||||
@@ -156,6 +158,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'kind',
|
||||
),
|
||||
'host': DEFAULT_SUMMARY_FIELDS,
|
||||
'constructed_host': DEFAULT_SUMMARY_FIELDS,
|
||||
'group': DEFAULT_SUMMARY_FIELDS,
|
||||
'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
|
||||
'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
|
||||
@@ -189,6 +192,11 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
}
|
||||
|
||||
|
||||
# These fields can be edited on a constructed inventory's generated source (possibly by using the constructed
|
||||
# inventory's special API endpoint, but also by using the inventory sources endpoint).
|
||||
CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')
|
||||
|
||||
|
||||
def reverse_gfk(content_object, request):
|
||||
"""
|
||||
Computes a reverse for a GenericForeignKey field.
|
||||
@@ -946,7 +954,7 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
|
||||
|
||||
|
||||
class UserSerializer(BaseSerializer):
|
||||
password = serializers.CharField(required=False, default='', write_only=True, help_text=_('Write-only field used to change the password.'))
|
||||
password = serializers.CharField(required=False, default='', help_text=_('Field used to change the password.'))
|
||||
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
|
||||
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
|
||||
is_system_auditor = serializers.BooleanField(default=False)
|
||||
@@ -973,7 +981,12 @@ class UserSerializer(BaseSerializer):
|
||||
|
||||
def to_representation(self, obj):
|
||||
ret = super(UserSerializer, self).to_representation(obj)
|
||||
ret.pop('password', None)
|
||||
if self.get_external_account(obj):
|
||||
# If this is an external account it shouldn't have a password field
|
||||
ret.pop('password', None)
|
||||
else:
|
||||
# If its an internal account lets assume there is a password and return $encrypted$ to the user
|
||||
ret['password'] = '$encrypted$'
|
||||
if obj and type(self) is UserSerializer:
|
||||
ret['auth'] = obj.social_auth.values('provider', 'uid')
|
||||
return ret
|
||||
@@ -987,13 +1000,31 @@ class UserSerializer(BaseSerializer):
|
||||
django_validate_password(value)
|
||||
if not self.instance and value in (None, ''):
|
||||
raise serializers.ValidationError(_('Password required for new User.'))
|
||||
|
||||
# Check if a password is too long
|
||||
password_max_length = User._meta.get_field('password').max_length
|
||||
if len(value) > password_max_length:
|
||||
raise serializers.ValidationError(_('Password max length is {}'.format(password_max_length)))
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH', 0) and len(value) < getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'):
|
||||
raise serializers.ValidationError(_('Password must be at least {} characters long.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'))))
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS', 0) and sum(c.isdigit() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'):
|
||||
raise serializers.ValidationError(_('Password must contain at least {} digits.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'))))
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER', 0) and sum(c.isupper() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER'):
|
||||
raise serializers.ValidationError(
|
||||
_('Password must contain at least {} uppercase characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER')))
|
||||
)
|
||||
if getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL', 0) and sum(not c.isalnum() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL'):
|
||||
raise serializers.ValidationError(
|
||||
_('Password must contain at least {} special characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL')))
|
||||
)
|
||||
|
||||
return value
|
||||
|
||||
def _update_password(self, obj, new_password):
|
||||
# For now we're not raising an error, just not saving password for
|
||||
# users managed by LDAP who already have an unusable password set.
|
||||
# Get external password will return something like ldap or enterprise or None if the user isn't external. We only want to allow a password update for a None option
|
||||
if new_password and not self.get_external_account(obj):
|
||||
if new_password and new_password != '$encrypted$' and not self.get_external_account(obj):
|
||||
obj.set_password(new_password)
|
||||
obj.save(update_fields=['password'])
|
||||
|
||||
@@ -1670,13 +1701,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
||||
res.update(
|
||||
dict(
|
||||
hosts=self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
|
||||
groups=self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
|
||||
root_groups=self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
|
||||
variable_data=self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
|
||||
script=self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
|
||||
tree=self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
|
||||
inventory_sources=self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
|
||||
update_inventory_sources=self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
|
||||
activity_stream=self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
|
||||
job_templates=self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
|
||||
ad_hoc_commands=self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
|
||||
@@ -1687,8 +1713,18 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
||||
labels=self.reverse('api:inventory_label_list', kwargs={'pk': obj.pk}),
|
||||
)
|
||||
)
|
||||
if obj.kind in ('', 'constructed'):
|
||||
# links not relevant for the "old" smart inventory
|
||||
res['groups'] = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk})
|
||||
res['root_groups'] = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk})
|
||||
res['update_inventory_sources'] = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk})
|
||||
res['inventory_sources'] = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk})
|
||||
res['tree'] = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk})
|
||||
if obj.organization:
|
||||
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
|
||||
if obj.kind == 'constructed':
|
||||
res['input_inventories'] = self.reverse('api:inventory_input_inventories', kwargs={'pk': obj.pk})
|
||||
res['constructed_url'] = self.reverse('api:constructed_inventory_detail', kwargs={'pk': obj.pk})
|
||||
return res
|
||||
|
||||
def to_representation(self, obj):
|
||||
@@ -1730,6 +1766,91 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
||||
return super(InventorySerializer, self).validate(attrs)
|
||||
|
||||
|
||||
class ConstructedFieldMixin(serializers.Field):
|
||||
def get_attribute(self, instance):
|
||||
if not hasattr(instance, '_constructed_inv_src'):
|
||||
instance._constructed_inv_src = instance.inventory_sources.first()
|
||||
inv_src = instance._constructed_inv_src
|
||||
return super().get_attribute(inv_src) # yoink
|
||||
|
||||
|
||||
class ConstructedCharField(ConstructedFieldMixin, serializers.CharField):
|
||||
pass
|
||||
|
||||
|
||||
class ConstructedIntegerField(ConstructedFieldMixin, serializers.IntegerField):
|
||||
pass
|
||||
|
||||
|
||||
class ConstructedInventorySerializer(InventorySerializer):
|
||||
source_vars = ConstructedCharField(
|
||||
required=False,
|
||||
default=None,
|
||||
allow_blank=True,
|
||||
help_text=_('The source_vars for the related auto-created inventory source, special to constructed inventory.'),
|
||||
)
|
||||
update_cache_timeout = ConstructedIntegerField(
|
||||
required=False,
|
||||
allow_null=True,
|
||||
min_value=0,
|
||||
default=None,
|
||||
help_text=_('The cache timeout for the related auto-created inventory source, special to constructed inventory'),
|
||||
)
|
||||
limit = ConstructedCharField(
|
||||
required=False,
|
||||
default=None,
|
||||
allow_blank=True,
|
||||
help_text=_('The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory.'),
|
||||
)
|
||||
verbosity = ConstructedIntegerField(
|
||||
required=False,
|
||||
allow_null=True,
|
||||
min_value=0,
|
||||
max_value=2,
|
||||
default=None,
|
||||
help_text=_('The verbosity level for the related auto-created inventory source, special to constructed inventory'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Inventory
|
||||
fields = ('*', '-host_filter') + CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
|
||||
read_only_fields = ('*', 'kind')
|
||||
|
||||
def pop_inv_src_data(self, data):
|
||||
inv_src_data = {}
|
||||
for field in CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS:
|
||||
if field in data:
|
||||
# values always need to be removed, as they are not valid for Inventory model
|
||||
value = data.pop(field)
|
||||
# null is not valid for any of those fields, taken as not-provided
|
||||
if value is not None:
|
||||
inv_src_data[field] = value
|
||||
return inv_src_data
|
||||
|
||||
def apply_inv_src_data(self, inventory, inv_src_data):
|
||||
if inv_src_data:
|
||||
update_fields = []
|
||||
inv_src = inventory.inventory_sources.first()
|
||||
for field, value in inv_src_data.items():
|
||||
setattr(inv_src, field, value)
|
||||
update_fields.append(field)
|
||||
if update_fields:
|
||||
inv_src.save(update_fields=update_fields)
|
||||
|
||||
def create(self, validated_data):
|
||||
validated_data['kind'] = 'constructed'
|
||||
inv_src_data = self.pop_inv_src_data(validated_data)
|
||||
inventory = super().create(validated_data)
|
||||
self.apply_inv_src_data(inventory, inv_src_data)
|
||||
return inventory
|
||||
|
||||
def update(self, obj, validated_data):
|
||||
inv_src_data = self.pop_inv_src_data(validated_data)
|
||||
obj = super().update(obj, validated_data)
|
||||
self.apply_inv_src_data(obj, inv_src_data)
|
||||
return obj
|
||||
|
||||
|
||||
class InventoryScriptSerializer(InventorySerializer):
|
||||
class Meta:
|
||||
fields = ()
|
||||
@@ -1783,6 +1904,9 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
|
||||
)
|
||||
)
|
||||
if obj.inventory.kind == 'constructed':
|
||||
res['original_host'] = self.reverse('api:host_detail', kwargs={'pk': obj.instance_id})
|
||||
res['ansible_facts'] = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id})
|
||||
if obj.inventory:
|
||||
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
|
||||
if obj.last_job:
|
||||
@@ -1804,6 +1928,10 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
|
||||
group_cnt = obj.groups.count()
|
||||
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
|
||||
if obj.inventory.kind == 'constructed':
|
||||
summaries_qs = obj.constructed_host_summaries
|
||||
else:
|
||||
summaries_qs = obj.job_host_summaries
|
||||
d.setdefault(
|
||||
'recent_jobs',
|
||||
[
|
||||
@@ -1814,7 +1942,7 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
'status': j.job.status,
|
||||
'finished': j.job.finished,
|
||||
}
|
||||
for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
|
||||
for j in summaries_qs.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
|
||||
],
|
||||
)
|
||||
return d
|
||||
@@ -1839,8 +1967,8 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
return value
|
||||
|
||||
def validate_inventory(self, value):
|
||||
if value.kind == 'smart':
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
|
||||
if value.kind in ('constructed', 'smart'):
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart or Constructed Inventories")})
|
||||
return value
|
||||
|
||||
def validate_variables(self, value):
|
||||
@@ -1938,8 +2066,8 @@ class GroupSerializer(BaseSerializerWithVariables):
|
||||
return value
|
||||
|
||||
def validate_inventory(self, value):
|
||||
if value.kind == 'smart':
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
|
||||
if value.kind in ('constructed', 'smart'):
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart or Constructed Inventories")})
|
||||
return value
|
||||
|
||||
def to_representation(self, obj):
|
||||
@@ -2062,7 +2190,7 @@ class BulkHostCreateSerializer(serializers.Serializer):
|
||||
host_data = []
|
||||
for r in result:
|
||||
item = {k: getattr(r, k) for k in return_keys}
|
||||
if not settings.IS_TESTING_MODE:
|
||||
if settings.DATABASES and ('sqlite3' not in settings.DATABASES.get('default', {}).get('ENGINE')):
|
||||
# sqlite acts different with bulk_create -- it doesn't return the id of the objects
|
||||
# to get it, you have to do an additional query, which is not useful for our tests
|
||||
item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
|
||||
@@ -2138,6 +2266,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
|
||||
'custom_virtualenv',
|
||||
'timeout',
|
||||
'verbosity',
|
||||
'limit',
|
||||
)
|
||||
read_only_fields = ('*', 'custom_virtualenv')
|
||||
|
||||
@@ -2244,8 +2373,8 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
return value
|
||||
|
||||
def validate_inventory(self, value):
|
||||
if value and value.kind == 'smart':
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
|
||||
if value and value.kind in ('constructed', 'smart'):
|
||||
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart or Constructed Inventories")})
|
||||
return value
|
||||
|
||||
# TODO: remove when old 'credential' fields are removed
|
||||
@@ -2289,9 +2418,16 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
def get_field_from_model_or_attrs(fd):
|
||||
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
|
||||
|
||||
if get_field_from_model_or_attrs('source') == 'scm':
|
||||
if self.instance and self.instance.source == 'constructed':
|
||||
allowed_fields = CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
|
||||
for field in attrs:
|
||||
if attrs[field] != getattr(self.instance, field) and field not in allowed_fields:
|
||||
raise serializers.ValidationError({"error": _("Cannot change field '{}' on a constructed inventory source.").format(field)})
|
||||
elif get_field_from_model_or_attrs('source') == 'scm':
|
||||
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
|
||||
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
|
||||
elif get_field_from_model_or_attrs('source') == 'constructed':
|
||||
raise serializers.ValidationError({"error": _('constructed not a valid source for inventory')})
|
||||
else:
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
|
||||
if redundant_scm_fields:
|
||||
@@ -4033,6 +4169,7 @@ class JobHostSummarySerializer(BaseSerializer):
|
||||
'-description',
|
||||
'job',
|
||||
'host',
|
||||
'constructed_host',
|
||||
'host_name',
|
||||
'changed',
|
||||
'dark',
|
||||
@@ -5386,6 +5523,32 @@ class InstanceHealthCheckSerializer(BaseSerializer):
|
||||
fields = read_only_fields
|
||||
|
||||
|
||||
class HostMetricSerializer(BaseSerializer):
|
||||
show_capabilities = ['delete']
|
||||
|
||||
class Meta:
|
||||
model = HostMetric
|
||||
fields = (
|
||||
"id",
|
||||
"hostname",
|
||||
"url",
|
||||
"first_automation",
|
||||
"last_automation",
|
||||
"last_deleted",
|
||||
"automated_counter",
|
||||
"deleted_counter",
|
||||
"deleted",
|
||||
"used_in_inventories",
|
||||
)
|
||||
|
||||
|
||||
class HostMetricSummaryMonthlySerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = HostMetricSummaryMonthly
|
||||
read_only_fields = ("id", "date", "license_consumed", "license_capacity", "hosts_added", "hosts_deleted", "indirectly_managed_hosts")
|
||||
fields = read_only_fields
|
||||
|
||||
|
||||
class InstanceGroupSerializer(BaseSerializer):
|
||||
show_capabilities = ['edit', 'delete']
|
||||
capacity = serializers.SerializerMethodField()
|
||||
|
||||
awx/api/templates/api/host_metric_detail.md (new file): 18 lines
@@ -0,0 +1,18 @@
{% ifmeth GET %}
# Retrieve {{ model_verbose_name|title|anora }}:

Make GET request to this resource to retrieve a single {{ model_verbose_name }}
record containing the following fields:

{% include "api/_result_fields_common.md" %}
{% endifmeth %}

{% ifmeth DELETE %}
# Delete {{ model_verbose_name|title|anora }}:

Make a DELETE request to this resource to soft-delete this {{ model_verbose_name }}.

A soft deletion will mark the `deleted` field as true and exclude the host
metric from license calculations.
This may be undone later if the same hostname is automated again afterwards.
{% endifmeth %}
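Usage note: a minimal sketch (not part of the changeset) of the host metrics endpoints documented above, assuming a hypothetical AWX server URL and admin credentials; the /api/v2/host_metrics/ path follows the routes added in awx/api/urls/host_metric.py further below.

# Minimal sketch: read and soft-delete a host metric record.
import requests

AWX = "https://awx.example.org"   # hypothetical server URL
AUTH = ("admin", "password")      # hypothetical credentials; endpoints are gated by IsSystemAdminOrAuditor

# List host metrics and pick one record (assumes standard paginated results).
page = requests.get(f"{AWX}/api/v2/host_metrics/", auth=AUTH).json()
metric_id = page["results"][0]["id"]

# DELETE is a soft delete: the record is kept, `deleted` becomes true, and the
# hostname drops out of license calculations (HTTP 204 on success).
resp = requests.delete(f"{AWX}/api/v2/host_metrics/{metric_id}/", auth=AUTH)
assert resp.status_code == 204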
@@ -2,6 +2,7 @@ receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
receptor_work_commands:
ansible-runner:
command: ansible-runner
awx/api/urls/analytics.py (new file): 31 lines
@@ -0,0 +1,31 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

import awx.api.views.analytics as analytics


urls = [
    re_path(r'^$', analytics.AnalyticsRootView.as_view(), name='analytics_root_view'),
    re_path(r'^authorized/$', analytics.AnalyticsAuthorizedView.as_view(), name='analytics_authorized'),
    re_path(r'^reports/$', analytics.AnalyticsReportsList.as_view(), name='analytics_reports_list'),
    re_path(r'^report/(?P<slug>[\w-]+)/$', analytics.AnalyticsReportDetail.as_view(), name='analytics_report_detail'),
    re_path(r'^report_options/$', analytics.AnalyticsReportOptionsList.as_view(), name='analytics_report_options_list'),
    re_path(r'^adoption_rate/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate'),
    re_path(r'^adoption_rate_options/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate_options'),
    re_path(r'^event_explorer/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer'),
    re_path(r'^event_explorer_options/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer_options'),
    re_path(r'^host_explorer/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer'),
    re_path(r'^host_explorer_options/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer_options'),
    re_path(r'^job_explorer/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer'),
    re_path(r'^job_explorer_options/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer_options'),
    re_path(r'^probe_templates/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_explorer'),
    re_path(r'^probe_templates_options/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_options'),
    re_path(r'^probe_template_for_hosts/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_explorer'),
    re_path(r'^probe_template_for_hosts_options/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_options'),
    re_path(r'^roi_templates/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_explorer'),
    re_path(r'^roi_templates_options/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_options'),
]

__all__ = ['urls']
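Usage note: a minimal sketch of calling one of the new analytics routes. The value of AWX_ANALYTICS_API_PREFIX does not appear in this diff, so the 'analytics' path segment below is an assumption, as are the server URL and credentials; per AnalyticsPermission earlier in this diff, GET requires a system admin or system auditor.

# Minimal sketch (assumptions noted above): fetch the available analytics reports.
import requests

AWX = "https://awx.example.org"   # hypothetical
AUTH = ("admin", "password")      # hypothetical

resp = requests.get(f"{AWX}/api/v2/analytics/reports/", auth=AUTH)  # 'analytics' prefix assumed
print(resp.status_code)
print(resp.json() if resp.ok else resp.text)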
awx/api/urls/host_metric.py (new file): 10 lines
@@ -0,0 +1,10 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import HostMetricList, HostMetricDetail

urls = [re_path(r'^$', HostMetricList.as_view(), name='host_metric_list'), re_path(r'^(?P<pk>[0-9]+)/$', HostMetricDetail.as_view(), name='host_metric_detail')]

__all__ = ['urls']
@@ -6,7 +6,10 @@ from django.urls import re_path
from awx.api.views.inventory import (
    InventoryList,
    InventoryDetail,
    ConstructedInventoryDetail,
    ConstructedInventoryList,
    InventoryActivityStreamList,
    InventoryInputInventoriesList,
    InventoryJobTemplateList,
    InventoryAccessList,
    InventoryObjectRolesList,
@@ -37,6 +40,7 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/script/$', InventoryScriptView.as_view(), name='inventory_script_view'),
    re_path(r'^(?P<pk>[0-9]+)/tree/$', InventoryTreeView.as_view(), name='inventory_tree_view'),
    re_path(r'^(?P<pk>[0-9]+)/inventory_sources/$', InventoryInventorySourcesList.as_view(), name='inventory_inventory_sources_list'),
    re_path(r'^(?P<pk>[0-9]+)/input_inventories/$', InventoryInputInventoriesList.as_view(), name='inventory_input_inventories'),
    re_path(r'^(?P<pk>[0-9]+)/update_inventory_sources/$', InventoryInventorySourcesUpdate.as_view(), name='inventory_inventory_sources_update'),
    re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', InventoryActivityStreamList.as_view(), name='inventory_activity_stream_list'),
    re_path(r'^(?P<pk>[0-9]+)/job_templates/$', InventoryJobTemplateList.as_view(), name='inventory_job_template_list'),
@@ -48,4 +52,10 @@ urls = [
    re_path(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
]

__all__ = ['urls']
# Constructed inventory special views
constructed_inventory_urls = [
    re_path(r'^$', ConstructedInventoryList.as_view(), name='constructed_inventory_list'),
    re_path(r'^(?P<pk>[0-9]+)/$', ConstructedInventoryDetail.as_view(), name='constructed_inventory_detail'),
]

__all__ = ['urls', 'constructed_inventory_urls']
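Usage note: a minimal sketch of creating a constructed inventory through the new constructed_inventories routes, assuming a hypothetical server URL, credentials, and organization id; the extra fields are the ones ConstructedInventorySerializer (earlier in this diff) writes through to the auto-created inventory source.

# Minimal sketch (not part of the changeset): create a constructed inventory.
import requests

AWX = "https://awx.example.org"   # hypothetical
AUTH = ("admin", "password")      # hypothetical

payload = {
    "name": "constructed-demo",
    "organization": 1,  # hypothetical organization pk
    # Constructed-inventory fields; they are written through to the related
    # auto-created inventory source (source_vars, limit, verbosity, update_cache_timeout).
    "source_vars": "plugin: constructed",
    "limit": "",
    "verbosity": 0,
    "update_cache_timeout": 0,
}
resp = requests.post(f"{AWX}/api/v2/constructed_inventories/", json=payload, auth=AUTH)
resp.raise_for_status()
# Input inventories are associated separately through the inventory's
# input_inventories endpoint (see the routes above).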
@@ -30,6 +30,7 @@ from awx.api.views import (
    OAuth2TokenList,
    ApplicationOAuth2TokenList,
    OAuth2ApplicationDetail,
    # HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
)

from awx.api.views.bulk import (
@@ -41,15 +42,17 @@ from awx.api.views.bulk import (
from awx.api.views.mesh_visualizer import MeshVisualizer

from awx.api.views.metrics import MetricsView
from awx.api.views.analytics import AWX_ANALYTICS_API_PREFIX

from .organization import urls as organization_urls
from .user import urls as user_urls
from .project import urls as project_urls
from .project_update import urls as project_update_urls
from .inventory import urls as inventory_urls
from .inventory import urls as inventory_urls, constructed_inventory_urls
from .execution_environments import urls as execution_environment_urls
from .team import urls as team_urls
from .host import urls as host_urls
from .host_metric import urls as host_metric_urls
from .group import urls as group_urls
from .inventory_source import urls as inventory_source_urls
from .inventory_update import urls as inventory_update_urls
@@ -80,7 +83,7 @@ from .oauth2 import urls as oauth2_urls
from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls

from .analytics import urls as analytics_urls

v2_urls = [
    re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -117,7 +120,11 @@ v2_urls = [
    re_path(r'^project_updates/', include(project_update_urls)),
    re_path(r'^teams/', include(team_urls)),
    re_path(r'^inventories/', include(inventory_urls)),
    re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
    re_path(r'^hosts/', include(host_urls)),
    re_path(r'^host_metrics/', include(host_metric_urls)),
    # It will be enabled in future version of the AWX
    # re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
    re_path(r'^groups/', include(group_urls)),
    re_path(r'^inventory_sources/', include(inventory_source_urls)),
    re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -141,6 +148,7 @@ v2_urls = [
    re_path(r'^unified_job_templates/$', UnifiedJobTemplateList.as_view(), name='unified_job_template_list'),
    re_path(r'^unified_jobs/$', UnifiedJobList.as_view(), name='unified_job_list'),
    re_path(r'^activity_stream/', include(activity_stream_urls)),
    re_path(rf'^{AWX_ANALYTICS_API_PREFIX}/', include(analytics_urls)),
    re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
    re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
    re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
@@ -17,7 +17,6 @@ from collections import OrderedDict
|
||||
|
||||
from urllib3.exceptions import ConnectTimeoutError
|
||||
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import FieldError, ObjectDoesNotExist
|
||||
@@ -30,7 +29,7 @@ from django.utils.safestring import mark_safe
|
||||
from django.utils.timezone import now
|
||||
from django.views.decorators.csrf import csrf_exempt
|
||||
from django.template.loader import render_to_string
|
||||
from django.http import HttpResponse
|
||||
from django.http import HttpResponse, HttpResponseRedirect
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
@@ -63,7 +62,7 @@ from wsgiref.util import FileWrapper
|
||||
|
||||
# AWX
|
||||
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
|
||||
from awx.main.access import get_user_queryset, HostAccess
|
||||
from awx.main.access import get_user_queryset
|
||||
from awx.api.generics import (
|
||||
APIView,
|
||||
BaseUsersList,
|
||||
@@ -795,13 +794,7 @@ class ExecutionEnvironmentActivityStreamList(SubListAPIView):
|
||||
parent_model = models.ExecutionEnvironment
|
||||
relationship = 'activitystream_set'
|
||||
search_fields = ('changes',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(execution_environment=parent)
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class ProjectList(ListCreateAPIView):
|
||||
@@ -1548,6 +1541,41 @@ class HostRelatedSearchMixin(object):
|
||||
return ret
|
||||
|
||||
|
||||
class HostMetricList(ListAPIView):
|
||||
name = _("Host Metrics List")
|
||||
model = models.HostMetric
|
||||
serializer_class = serializers.HostMetricSerializer
|
||||
permission_classes = (IsSystemAdminOrAuditor,)
|
||||
search_fields = ('hostname', 'deleted')
|
||||
|
||||
def get_queryset(self):
|
||||
return self.model.objects.all()
|
||||
|
||||
|
||||
class HostMetricDetail(RetrieveDestroyAPIView):
|
||||
name = _("Host Metric Detail")
|
||||
model = models.HostMetric
|
||||
serializer_class = serializers.HostMetricSerializer
|
||||
permission_classes = (IsSystemAdminOrAuditor,)
|
||||
|
||||
def delete(self, request, *args, **kwargs):
|
||||
self.get_object().soft_delete()
|
||||
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
# It will be enabled in future version of the AWX
|
||||
# class HostMetricSummaryMonthlyList(ListAPIView):
|
||||
# name = _("Host Metrics Summary Monthly")
|
||||
# model = models.HostMetricSummaryMonthly
|
||||
# serializer_class = serializers.HostMetricSummaryMonthlySerializer
|
||||
# permission_classes = (IsSystemAdminOrAuditor,)
|
||||
# search_fields = ('date',)
|
||||
#
|
||||
# def get_queryset(self):
|
||||
# return self.model.objects.all()
|
||||
|
||||
|
||||
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
always_allow_superuser = False
|
||||
model = models.Host
|
||||
@@ -1576,6 +1604,8 @@ class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
def delete(self, request, *args, **kwargs):
|
||||
if self.get_object().inventory.pending_deletion:
|
||||
return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
if self.get_object().inventory.kind == 'constructed':
|
||||
return Response({"error": _("Delete constructed inventory hosts from input inventory.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
return super(HostDetail, self).delete(request, *args, **kwargs)
|
||||
|
||||
|
||||
@@ -1583,6 +1613,14 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
|
||||
model = models.Host
|
||||
serializer_class = serializers.AnsibleFactsSerializer
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if obj.inventory.kind == 'constructed':
|
||||
# If this is a constructed inventory host, it is not the source of truth about facts
|
||||
# redirect to the original input inventory host instead
|
||||
return HttpResponseRedirect(reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id}, request=self.request))
|
||||
return super().get(request, *args, **kwargs)
|
||||
|
||||
|
||||
class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
|
||||
model = models.Host
|
||||
@@ -1590,13 +1628,7 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
|
||||
parent_model = models.Inventory
|
||||
relationship = 'hosts'
|
||||
parent_key = 'inventory'
|
||||
|
||||
def get_queryset(self):
|
||||
inventory = self.get_parent_object()
|
||||
qs = getattrd(inventory, self.relationship).all()
|
||||
# Apply queryset optimizations
|
||||
qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
|
||||
return qs
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class HostGroupsList(SubListCreateAttachDetachAPIView):
|
||||
@@ -2537,16 +2569,7 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
|
||||
serializer_class = serializers.CredentialSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'credentials'
|
||||
|
||||
def get_queryset(self):
|
||||
# Return the full list of credentials
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
sublist_qs = getattrd(parent, self.relationship)
|
||||
sublist_qs = sublist_qs.prefetch_related(
|
||||
'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
|
||||
)
|
||||
return sublist_qs
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
|
||||
@@ -2648,7 +2671,10 @@ class JobTemplateCallback(GenericAPIView):
|
||||
# Permission class should have already validated host_config_key.
|
||||
job_template = self.get_object()
|
||||
# Attempt to find matching hosts based on remote address.
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
if job_template.inventory:
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
else:
|
||||
return Response({"msg": _("Cannot start automatically, an inventory is required.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
# If the host is not found, update the inventory before trying to
|
||||
# match again.
|
||||
inventory_sources_already_updated = []
|
||||
@@ -2733,6 +2759,7 @@ class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = serializers.InstanceGroupSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class JobTemplateAccessList(ResourceAccessList):
|
||||
@@ -2823,16 +2850,7 @@ class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, Su
|
||||
relationship = ''
|
||||
enforce_parent_relationship = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
'''
|
||||
Limit the set of WorkflowJobTemplateNodes to the related nodes specified by
'relationship'
|
||||
'''
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if created:
|
||||
@@ -2907,14 +2925,7 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
|
||||
parent_model = models.WorkflowJobNode
|
||||
relationship = ''
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
#
|
||||
# Limit the set of WorkflowJobNodes to the related nodes specified by self.relationship
|
||||
#
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
|
||||
@@ -3093,11 +3104,8 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
|
||||
relationship = 'workflow_job_template_nodes'
|
||||
parent_key = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
ordering = ('id',)  # ensure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobTemplateJobsList(SubListAPIView):
|
||||
@@ -3189,11 +3197,8 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
|
||||
relationship = 'workflow_job_nodes'
|
||||
parent_key = 'workflow_job'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
ordering = ('id',)  # ensure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class WorkflowJobCancel(GenericCancelView):
|
||||
@@ -3507,11 +3512,7 @@ class BaseJobHostSummariesList(SubListAPIView):
|
||||
relationship = 'job_host_summaries'
|
||||
name = _('Job Host Summaries List')
|
||||
search_fields = ('host_name',)
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class HostJobHostSummariesList(BaseJobHostSummariesList):
|
||||
|
||||
296
awx/api/views/analytics.py
Normal file
@@ -0,0 +1,296 @@
|
||||
import requests
|
||||
import logging
|
||||
import urllib.parse as urlparse
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils import translation
|
||||
|
||||
from awx.api.generics import APIView, Response
|
||||
from awx.api.permissions import AnalyticsPermission
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import get_awx_version
|
||||
from rest_framework import status
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
|
||||
AWX_ANALYTICS_API_PREFIX = 'analytics'
|
||||
|
||||
ERROR_UPLOAD_NOT_ENABLED = "analytics-upload-not-enabled"
|
||||
ERROR_MISSING_URL = "missing-url"
|
||||
ERROR_MISSING_USER = "missing-user"
|
||||
ERROR_MISSING_PASSWORD = "missing-password"
|
||||
ERROR_NO_DATA_OR_ENTITLEMENT = "no-data-or-entitlement"
|
||||
ERROR_NOT_FOUND = "not-found"
|
||||
ERROR_UNAUTHORIZED = "unauthorized"
|
||||
ERROR_UNKNOWN = "unknown"
|
||||
ERROR_UNSUPPORTED_METHOD = "unsupported-method"
|
||||
|
||||
logger = logging.getLogger('awx.api.views.analytics')
|
||||
|
||||
|
||||
class MissingSettings(Exception):
|
||||
"""Settings are not correct Exception"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class GetNotAllowedMixin(object):
|
||||
def get(self, request, format=None):
|
||||
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
||||
|
||||
|
||||
class AnalyticsRootView(APIView):
|
||||
permission_classes = (AnalyticsPermission,)
|
||||
name = _('Automation Analytics')
|
||||
swagger_topic = 'Automation Analytics'
|
||||
|
||||
def get(self, request, format=None):
|
||||
data = OrderedDict()
|
||||
data['authorized'] = reverse('api:analytics_authorized')
|
||||
data['reports'] = reverse('api:analytics_reports_list')
|
||||
data['report_options'] = reverse('api:analytics_report_options_list')
|
||||
data['adoption_rate'] = reverse('api:analytics_adoption_rate')
|
||||
data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
|
||||
data['event_explorer'] = reverse('api:analytics_event_explorer')
|
||||
data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
|
||||
data['host_explorer'] = reverse('api:analytics_host_explorer')
|
||||
data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
|
||||
data['job_explorer'] = reverse('api:analytics_job_explorer')
|
||||
data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
|
||||
data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
|
||||
data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
|
||||
data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
|
||||
data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
|
||||
data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
|
||||
data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
|
||||
return Response(data)
|
||||
|
||||
|
||||
class AnalyticsGenericView(APIView):
|
||||
"""
|
||||
Example:
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
params = {
|
||||
'limit': '20',
|
||||
'offset': '0',
|
||||
'sort_by': 'name:asc',
|
||||
}
|
||||
|
||||
json_data = {
|
||||
'limit': '20',
|
||||
'offset': '0',
|
||||
'sort_options': 'name',
|
||||
'sort_order': 'asc',
|
||||
'tags': [],
|
||||
'slug': [],
|
||||
'name': [],
|
||||
'description': '',
|
||||
}
|
||||
|
||||
response = requests.post(f'{AUTOMATION_ANALYTICS_API_URL}/reports/', params=params,
|
||||
headers=headers, json=json_data)
|
||||
|
||||
return Response(response.json(), status=response.status_code)
|
||||
"""
|
||||
|
||||
permission_classes = (AnalyticsPermission,)
|
||||
|
||||
@staticmethod
|
||||
def _request_headers(request):
|
||||
headers = {}
|
||||
for header in ['Content-Type', 'Content-Length', 'Accept-Encoding', 'User-Agent', 'Accept']:
|
||||
if request.headers.get(header, None):
|
||||
headers[header] = request.headers.get(header)
|
||||
headers['X-Rh-Analytics-Source'] = 'controller'
|
||||
headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
|
||||
headers['Accept-Language'] = translation.get_language()
|
||||
|
||||
return headers
|
||||
|
||||
@staticmethod
|
||||
def _get_analytics_path(request_path):
|
||||
parts = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')
|
||||
path_specific = parts[-1]
|
||||
return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"
def _get_analytics_url(self, request_path):
|
||||
analytics_path = self._get_analytics_path(request_path)
|
||||
url = getattr(settings, 'AUTOMATION_ANALYTICS_URL', None)
|
||||
if not url:
|
||||
raise MissingSettings(ERROR_MISSING_URL)
|
||||
url_parts = urlparse.urlsplit(url)
|
||||
analytics_url = urlparse.urlunsplit([url_parts.scheme, url_parts.netloc, analytics_path, url_parts.query, url_parts.fragment])
|
||||
return analytics_url
|
||||
|
||||
@staticmethod
|
||||
def _get_setting(setting_name, default, error_message):
|
||||
setting = getattr(settings, setting_name, default)
|
||||
if not setting:
|
||||
raise MissingSettings(error_message)
|
||||
return setting
|
||||
|
||||
@staticmethod
|
||||
def _error_response(keyword, message=None, remote=True, remote_status_code=None, status_code=status.HTTP_403_FORBIDDEN):
|
||||
text = {"error": {"remote": remote, "remote_status": remote_status_code, "keyword": keyword}}
|
||||
if message:
|
||||
text["error"]["message"] = message
|
||||
return Response(text, status=status_code)
|
||||
|
||||
def _error_response_404(self, response):
|
||||
try:
|
||||
json_response = response.json()
|
||||
# Subscription/entitlement problem or missing tenant data in AA db => HTTP 403
|
||||
message = json_response.get('error', None)
|
||||
if message:
|
||||
return self._error_response(ERROR_NO_DATA_OR_ENTITLEMENT, message, remote=True, remote_status_code=response.status_code)
|
||||
|
||||
# Standard 404 problem => HTTP 404
|
||||
message = json_response.get('detail', None) or response.text
|
||||
except requests.exceptions.JSONDecodeError:
|
||||
# Unexpected text => still HTTP 404
|
||||
message = response.text
|
||||
|
||||
return self._error_response(ERROR_NOT_FOUND, message, remote=True, remote_status_code=status.HTTP_404_NOT_FOUND, status_code=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
@staticmethod
|
||||
def _update_response_links(json_response):
|
||||
if not json_response.get('links', None):
|
||||
return
|
||||
|
||||
for key, value in json_response['links'].items():
|
||||
if value:
|
||||
json_response['links'][key] = value.replace(AUTOMATION_ANALYTICS_API_URL_PATH, f"/api/v2/{AWX_ANALYTICS_API_PREFIX}")
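As a concrete illustration of the rewrite above (values are made up):

json_response = {"links": {"next": "/api/tower-analytics/v1/reports/?limit=20&offset=20"}}
AnalyticsGenericView._update_response_links(json_response)
# json_response["links"]["next"] is now "/api/v2/analytics/reports/?limit=20&offset=20"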
def _forward_response(self, response):
|
||||
try:
|
||||
content_type = response.headers.get('content-type', '')
|
||||
if content_type.find('application/json') != -1:
|
||||
json_response = response.json()
|
||||
self._update_response_links(json_response)
|
||||
|
||||
return Response(json_response, status=response.status_code)
|
||||
except Exception as e:
|
||||
logger.error(f"Analytics API: Response error: {e}")
|
||||
|
||||
return Response(response.content, status=response.status_code)
|
||||
|
||||
def _send_to_analytics(self, request, method):
|
||||
try:
|
||||
headers = self._request_headers(request)
|
||||
|
||||
self._get_setting('INSIGHTS_TRACKING_STATE', False, ERROR_UPLOAD_NOT_ENABLED)
|
||||
url = self._get_analytics_url(request.path)
|
||||
rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
|
||||
rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
|
||||
|
||||
if method not in ["GET", "POST", "OPTIONS"]:
|
||||
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
else:
|
||||
response = requests.request(
|
||||
method,
|
||||
url,
|
||||
auth=(rh_user, rh_password),
|
||||
verify=settings.INSIGHTS_CERT_PATH,
|
||||
params=request.query_params,
|
||||
headers=headers,
|
||||
json=request.data,
|
||||
timeout=(31, 31),
|
||||
)
|
||||
#
|
||||
# Missing or wrong user/pass
|
||||
#
|
||||
if response.status_code == status.HTTP_401_UNAUTHORIZED:
|
||||
text = (response.text or '').rstrip("\n")
|
||||
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
|
||||
#
|
||||
# Not found, no entitlement, or no data in Analytics
|
||||
#
|
||||
elif response.status_code == status.HTTP_404_NOT_FOUND:
|
||||
return self._error_response_404(response)
|
||||
#
|
||||
# Success, or any error other than 401/404, is forwarded as-is
|
||||
#
|
||||
else:
|
||||
return self._forward_response(response)
|
||||
|
||||
except MissingSettings as e:
|
||||
logger.warning(f"Analytics API: Setting missing: {e.args[0]}")
|
||||
return self._error_response(e.args[0], remote=False)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Analytics API: Request error: {e}")
|
||||
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
except Exception as e:
|
||||
logger.error(f"Analytics API: Error: {e}")
|
||||
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
class AnalyticsGenericListView(AnalyticsGenericView):
|
||||
def get(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="GET")
|
||||
|
||||
def post(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="POST")
|
||||
|
||||
def options(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="OPTIONS")
|
||||
|
||||
|
||||
class AnalyticsGenericDetailView(AnalyticsGenericView):
|
||||
def get(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="GET")
|
||||
|
||||
def post(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="POST")
|
||||
|
||||
def options(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="OPTIONS")
class AnalyticsAuthorizedView(AnalyticsGenericListView):
|
||||
name = _("Authorized")
|
||||
|
||||
|
||||
class AnalyticsReportsList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Reports")
|
||||
swagger_topic = "Automation Analytics"
|
||||
|
||||
|
||||
class AnalyticsReportDetail(AnalyticsGenericDetailView):
|
||||
name = _("Report")
|
||||
|
||||
|
||||
class AnalyticsReportOptionsList(AnalyticsGenericListView):
|
||||
name = _("Report Options")
|
||||
|
||||
|
||||
class AnalyticsAdoptionRateList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Adoption Rate")
|
||||
|
||||
|
||||
class AnalyticsEventExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Event Explorer")
|
||||
|
||||
|
||||
class AnalyticsHostExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Host Explorer")
|
||||
|
||||
|
||||
class AnalyticsJobExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Job Explorer")
|
||||
|
||||
|
||||
class AnalyticsProbeTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Probe Templates")
|
||||
|
||||
|
||||
class AnalyticsProbeTemplateForHostsList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Probe Template For Hosts")
|
||||
|
||||
|
||||
class AnalyticsRoiTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("ROI Templates")
|
||||
@@ -14,6 +14,7 @@ from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
from rest_framework import serializers
|
||||
|
||||
# AWX
|
||||
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
|
||||
@@ -31,6 +32,7 @@ from awx.api.views.labels import LabelSubListCreateAttachDetachView
|
||||
|
||||
from awx.api.serializers import (
|
||||
InventorySerializer,
|
||||
ConstructedInventorySerializer,
|
||||
ActivityStreamSerializer,
|
||||
RoleSerializer,
|
||||
InstanceGroupSerializer,
|
||||
@@ -79,7 +81,9 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
|
||||
|
||||
# Do not allow changes to an Inventory kind.
|
||||
if kind is not None and obj.kind != kind:
|
||||
return Response(dict(error=_('You cannot turn a regular inventory into a "smart" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
||||
return Response(
|
||||
dict(error=_('You cannot turn a regular inventory into a "smart" or "constructed" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED
|
||||
)
|
||||
return super(InventoryDetail, self).update(request, *args, **kwargs)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
@@ -94,6 +98,29 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
|
||||
return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class ConstructedInventoryDetail(InventoryDetail):
|
||||
serializer_class = ConstructedInventorySerializer
|
||||
|
||||
|
||||
class ConstructedInventoryList(InventoryList):
|
||||
serializer_class = ConstructedInventorySerializer
|
||||
|
||||
def get_queryset(self):
|
||||
r = super().get_queryset()
|
||||
return r.filter(kind='constructed')
|
||||
|
||||
|
||||
class InventoryInputInventoriesList(SubListAttachDetachAPIView):
|
||||
model = Inventory
|
||||
serializer_class = InventorySerializer
|
||||
parent_model = Inventory
|
||||
relationship = 'input_inventories'
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.kind == 'constructed':
|
||||
raise serializers.ValidationError({'error': 'You cannot add a constructed inventory to another constructed inventory.'})
|
||||
|
||||
|
||||
class InventoryActivityStreamList(SubListAPIView):
|
||||
model = ActivityStream
|
||||
serializer_class = ActivityStreamSerializer
|
||||
|
||||
@@ -61,12 +61,6 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||
model = Organization
|
||||
serializer_class = OrganizationSerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = Organization.accessible_objects(self.request.user, 'read_role')
|
||||
qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
|
||||
qs = qs.prefetch_related('created_by', 'modified_by')
|
||||
return qs
|
||||
|
||||
|
||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
model = Organization
|
||||
@@ -207,6 +201,7 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = InstanceGroupSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
@@ -214,6 +209,7 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
serializer_class = CredentialSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'galaxy_credentials'
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.kind != 'galaxy_api_token':
|
||||
|
||||
@@ -98,10 +98,14 @@ class ApiVersionRootView(APIView):
|
||||
data['tokens'] = reverse('api:o_auth2_token_list', request=request)
|
||||
data['metrics'] = reverse('api:metrics_view', request=request)
|
||||
data['inventory'] = reverse('api:inventory_list', request=request)
|
||||
data['constructed_inventory'] = reverse('api:constructed_inventory_list', request=request)
|
||||
data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
|
||||
data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
|
||||
data['groups'] = reverse('api:group_list', request=request)
|
||||
data['hosts'] = reverse('api:host_list', request=request)
|
||||
data['host_metrics'] = reverse('api:host_metric_list', request=request)
|
||||
# It will be enabled in a future version of AWX
|
||||
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
|
||||
data['job_templates'] = reverse('api:job_template_list', request=request)
|
||||
data['jobs'] = reverse('api:job_list', request=request)
|
||||
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
|
||||
@@ -122,6 +126,7 @@ class ApiVersionRootView(APIView):
|
||||
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
|
||||
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
|
||||
data['bulk'] = reverse('api:bulk', request=request)
|
||||
data['analytics'] = reverse('api:analytics_root_view', request=request)
|
||||
return Response(data)
|
||||
|
||||
|
||||
|
||||
@@ -5,11 +5,13 @@ import threading
|
||||
import time
|
||||
import os
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
# Django
|
||||
from django.conf import LazySettings
|
||||
from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
|
||||
from django.db import transaction, connection
|
||||
from django.db.utils import Error as DBError, ProgrammingError
|
||||
from django.utils.functional import cached_property
|
||||
@@ -157,7 +159,7 @@ class EncryptedCacheProxy(object):
|
||||
obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
|
||||
if obj_id is empty:
|
||||
logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
|
||||
obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
|
||||
obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None)
|
||||
elif obj_id == SETTING_CACHE_NONE:
|
||||
obj_id = None
|
||||
return method(TransientSetting(pk=obj_id, value=value), 'value')
|
||||
@@ -166,11 +168,6 @@ class EncryptedCacheProxy(object):
|
||||
# a no-op; it just returns the provided value
|
||||
return value
|
||||
|
||||
def _get_setting_from_db(self, key):
|
||||
field = self.registry.get_setting_field(key)
|
||||
if not field.read_only:
|
||||
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.cache, name)
|
||||
|
||||
@@ -186,6 +183,22 @@ def get_settings_to_cache(registry):
|
||||
return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])
|
||||
|
||||
|
||||
# First attempts to get the setting from the database synchronously.
# If called from an async context, it falls back to fetching the setting in a worker thread.
|
||||
def _get_setting_from_db(registry, key):
|
||||
def get_settings_from_db_sync(registry, key):
|
||||
field = registry.get_setting_field(key)
|
||||
if not field.read_only or key == 'INSTALL_UUID':
|
||||
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
|
||||
|
||||
try:
|
||||
return get_settings_from_db_sync(registry, key)
|
||||
except SynchronousOnlyOperation:
|
||||
with ThreadPoolExecutor(max_workers=1) as executor:
|
||||
future = executor.submit(get_settings_from_db_sync, registry, key)
|
||||
return future.result()
|
||||
|
||||
|
||||
def get_cache_value(value):
|
||||
"""Returns the proper special cache setting for a value
|
||||
based on instance type.
|
||||
@@ -345,7 +358,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
setting_id = None
|
||||
# this value is read-only, however we *do* want to fetch its value from the database
|
||||
if not field.read_only or name == 'INSTALL_UUID':
|
||||
setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
|
||||
setting = _get_setting_from_db(self.registry, name)
|
||||
if setting:
|
||||
if getattr(field, 'encrypted', False):
|
||||
value = decrypt_field(setting, 'value')
|
||||
|
||||
@@ -94,9 +94,7 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
|
||||
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
assert response.data['FOO_BAR'] == 3
|
||||
@@ -112,7 +110,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
|
||||
# sure that the _Forbidden validator doesn't get used for the
|
||||
# fields. See also https://github.com/ansible/awx/issues/4099.
|
||||
with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
):
|
||||
api_request(
|
||||
'patch',
|
||||
@@ -126,7 +124,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
):
|
||||
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
@@ -136,7 +134,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
):
|
||||
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
|
||||
assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
|
||||
@@ -155,16 +153,14 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_settin
|
||||
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
|
||||
'foobar', func_raising_exception
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
), mock.patch('awx.conf.views.clear_setting_cache'):
|
||||
response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
|
||||
assert response.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_delete(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
|
||||
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
assert not response.data['FOO_BAR']
|
||||
@@ -173,7 +169,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
):
|
||||
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
|
||||
@@ -26,10 +26,11 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
|
||||
from awx.api.permissions import IsSystemAdminOrAuditor
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import camelcase_to_underscore
|
||||
from awx.main.tasks.system import handle_setting_changes
|
||||
from awx.main.tasks.system import clear_setting_cache
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
|
||||
from awx.conf import settings_registry
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
|
||||
|
||||
SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
|
||||
@@ -118,7 +119,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.save(update_fields=['value'])
|
||||
settings_change_list.append(key)
|
||||
if settings_change_list:
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
|
||||
# notify rsyslog; no data is needed, so the payload is empty
|
||||
reconfigure_rsyslog.delay()
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
@@ -133,7 +137,10 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.delete()
|
||||
settings_change_list.append(setting.key)
|
||||
if settings_change_list:
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
|
||||
# notify rsyslog; no data is needed, so the payload is empty
|
||||
reconfigure_rsyslog.delay()
|
||||
|
||||
# When TOWER_URL_BASE is deleted from the API, reset it to the hostname
|
||||
# used to make the request as a default.
|
||||
|
||||
@@ -2952,3 +2952,19 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
|
||||
for cls in BaseAccess.__subclasses__():
|
||||
access_registry[cls.model] = cls
|
||||
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
|
||||
|
||||
|
||||
def optimize_queryset(queryset):
|
||||
"""
|
||||
A utility method in case you already have a queryset and just want to
|
||||
apply the standard optimizations for that model.
|
||||
In other words, use this when you do not want to start from filtered_queryset.
|
||||
"""
|
||||
if not queryset.model or queryset.model not in access_registry:
|
||||
return queryset
|
||||
access_class = access_registry[queryset.model]
|
||||
if access_class.select_related:
|
||||
queryset = queryset.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
queryset = queryset.prefetch_related(*access_class.prefetch_related)
|
||||
return queryset
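A minimal usage sketch; Host is used only as an example of a model whose access class is registered above:

from awx.main.models import Host

qs = optimize_queryset(Host.objects.filter(inventory_id=1))
# qs now carries HostAccess.select_related / prefetch_related, the same
# optimizations the host sub-list views apply by hand.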
@@ -4,11 +4,11 @@ import logging
|
||||
# AWX
|
||||
from awx.main.analytics.subsystem_metrics import Metrics
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def send_subsystem_metrics():
|
||||
Metrics().send_metrics()
|
||||
|
||||
@@ -65,7 +65,7 @@ class FixedSlidingWindow:
|
||||
return sum(self.buckets.values()) or 0
|
||||
|
||||
|
||||
class BroadcastWebsocketStatsManager:
|
||||
class RelayWebsocketStatsManager:
|
||||
def __init__(self, event_loop, local_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
|
||||
@@ -74,7 +74,7 @@ class BroadcastWebsocketStatsManager:
|
||||
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
|
||||
|
||||
def new_remote_host_stats(self, remote_hostname):
|
||||
self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
|
||||
self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname)
|
||||
return self._stats[remote_hostname]
|
||||
|
||||
def delete_remote_host_stats(self, remote_hostname):
|
||||
@@ -107,7 +107,7 @@ class BroadcastWebsocketStatsManager:
|
||||
return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
|
||||
|
||||
|
||||
class BroadcastWebsocketStats:
|
||||
class RelayWebsocketStats:
|
||||
def __init__(self, local_hostname, remote_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
self._remote_hostname = remote_hostname
|
||||
|
||||
@@ -6,7 +6,7 @@ import platform
|
||||
import distro
|
||||
|
||||
from django.db import connection
|
||||
from django.db.models import Count
|
||||
from django.db.models import Count, Min
|
||||
from django.conf import settings
|
||||
from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now, timedelta
|
||||
@@ -35,7 +35,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
|
||||
"""
|
||||
|
||||
|
||||
def trivial_slicing(key, since, until, last_gather):
|
||||
def trivial_slicing(key, since, until, last_gather, **kwargs):
|
||||
if since is not None:
|
||||
return [(since, until)]
|
||||
|
||||
@@ -48,7 +48,7 @@ def trivial_slicing(key, since, until, last_gather):
|
||||
return [(last_entry, until)]
|
||||
|
||||
|
||||
def four_hour_slicing(key, since, until, last_gather):
|
||||
def four_hour_slicing(key, since, until, last_gather, **kwargs):
|
||||
if since is not None:
|
||||
last_entry = since
|
||||
else:
|
||||
@@ -69,6 +69,54 @@ def four_hour_slicing(key, since, until, last_gather):
|
||||
start = end
|
||||
|
||||
|
||||
def host_metric_slicing(key, since, until, last_gather, **kwargs):
|
||||
"""
|
||||
Slicing doesn't start 4 weeks back; the whole table is sent on the first gather and then roughly monthly, with incremental slices in between.
|
||||
"""
|
||||
from awx.main.models.inventory import HostMetric
|
||||
|
||||
if since is not None:
|
||||
return [(since, until)]
|
||||
|
||||
from awx.conf.models import Setting
|
||||
|
||||
# Check if full sync should be done
|
||||
full_sync_enabled = kwargs.get('full_sync_enabled', False)
|
||||
last_entry = None
|
||||
if not full_sync_enabled:
|
||||
#
|
||||
# If not, try incremental sync first
|
||||
#
|
||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
|
||||
last_entry = last_entries.get(key)
|
||||
if not last_entry:
|
||||
#
|
||||
# If not done before, switch to full sync
|
||||
#
|
||||
full_sync_enabled = True
|
||||
|
||||
if full_sync_enabled:
|
||||
#
|
||||
# Find the lowest date for full sync
|
||||
#
|
||||
min_dates = HostMetric.objects.aggregate(min_last_automation=Min('last_automation'), min_last_deleted=Min('last_deleted'))
|
||||
if min_dates['min_last_automation'] and min_dates['min_last_deleted']:
|
||||
last_entry = min(min_dates['min_last_automation'], min_dates['min_last_deleted'])
|
||||
elif min_dates['min_last_automation'] or min_dates['min_last_deleted']:
|
||||
last_entry = min_dates['min_last_automation'] or min_dates['min_last_deleted']
|
||||
|
||||
if not last_entry:
|
||||
# empty table
|
||||
return []
|
||||
|
||||
start, end = last_entry, None
|
||||
while start < until:
|
||||
end = min(start + timedelta(days=30), until)
|
||||
yield (start, end)
|
||||
start = end
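Illustratively, with last_entry = 2023-01-01 and until = 2023-03-15, the loop above yields roughly 30-day windows (dates are made up):

# (2023-01-01, 2023-01-31), (2023-01-31, 2023-03-02), (2023-03-02, 2023-03-15)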
def _identify_lower(key, since, until, last_gather):
|
||||
from awx.conf.models import Setting
|
||||
|
||||
@@ -83,7 +131,7 @@ def _identify_lower(key, since, until, last_gather):
|
||||
return lower, last_entries
|
||||
|
||||
|
||||
@register('config', '1.4', description=_('General platform configuration.'))
|
||||
@register('config', '1.6', description=_('General platform configuration.'))
|
||||
def config(since, **kwargs):
|
||||
license_info = get_license()
|
||||
install_type = 'traditional'
|
||||
@@ -107,10 +155,13 @@ def config(since, **kwargs):
|
||||
'subscription_name': license_info.get('subscription_name'),
|
||||
'sku': license_info.get('sku'),
|
||||
'support_level': license_info.get('support_level'),
|
||||
'usage': license_info.get('usage'),
|
||||
'product_name': license_info.get('product_name'),
|
||||
'valid_key': license_info.get('valid_key'),
|
||||
'satellite': license_info.get('satellite'),
|
||||
'pool_id': license_info.get('pool_id'),
|
||||
'subscription_id': license_info.get('subscription_id'),
|
||||
'account_number': license_info.get('account_number'),
|
||||
'current_instances': license_info.get('current_instances'),
|
||||
'automated_instances': license_info.get('automated_instances'),
|
||||
'automated_since': license_info.get('automated_since'),
|
||||
@@ -119,6 +170,7 @@ def config(since, **kwargs):
|
||||
'compliant': license_info.get('compliant'),
|
||||
'date_warning': license_info.get('date_warning'),
|
||||
'date_expired': license_info.get('date_expired'),
|
||||
'subscription_usage_model': getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', ''), # 1.5+
|
||||
'free_instances': license_info.get('free_instances', 0),
|
||||
'total_licensed_instances': license_info.get('instance_count', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
@@ -536,3 +588,25 @@ def workflow_job_template_node_table(since, full_path, **kwargs):
|
||||
) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
|
||||
ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
|
||||
return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
|
||||
|
||||
|
||||
@register(
|
||||
'host_metric_table', '1.0', format='csv', description=_('Host Metric data, incremental/full sync'), expensive=host_metric_slicing, full_sync_interval=30
|
||||
)
|
||||
def host_metric_table(since, full_path, until, **kwargs):
|
||||
host_metric_query = '''COPY (SELECT main_hostmetric.id,
|
||||
main_hostmetric.hostname,
|
||||
main_hostmetric.first_automation,
|
||||
main_hostmetric.last_automation,
|
||||
main_hostmetric.last_deleted,
|
||||
main_hostmetric.deleted,
|
||||
main_hostmetric.automated_counter,
|
||||
main_hostmetric.deleted_counter,
|
||||
main_hostmetric.used_in_inventories
|
||||
FROM main_hostmetric
|
||||
WHERE (main_hostmetric.last_automation > '{}' AND main_hostmetric.last_automation <= '{}') OR
|
||||
(main_hostmetric.last_deleted > '{}' AND main_hostmetric.last_deleted <= '{}')
|
||||
ORDER BY main_hostmetric.id ASC) TO STDOUT WITH CSV HEADER'''.format(
|
||||
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
|
||||
)
|
||||
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
|
||||
|
||||
@@ -52,7 +52,7 @@ def all_collectors():
|
||||
}
|
||||
|
||||
|
||||
def register(key, version, description=None, format='json', expensive=None):
|
||||
def register(key, version, description=None, format='json', expensive=None, full_sync_interval=None):
|
||||
"""
|
||||
A decorator used to register a function as a metric collector.
|
||||
|
||||
@@ -71,6 +71,7 @@ def register(key, version, description=None, format='json', expensive=None):
|
||||
f.__awx_analytics_description__ = description
|
||||
f.__awx_analytics_type__ = format
|
||||
f.__awx_expensive__ = expensive
|
||||
f.__awx_full_sync_interval__ = full_sync_interval
|
||||
return f
|
||||
|
||||
return decorate
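A minimal sketch of registering a collector that opts into the new full-sync behaviour; the collector name is illustrative, and host_metric_table above is the real in-tree example:

@register('my_table', '1.0', format='csv', expensive=host_metric_slicing, full_sync_interval=30)
def my_table(since, full_path, until, **kwargs):
    ...  # copy rows changed between `since` and `until` into a CSV under full_path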
@@ -259,10 +260,19 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
# These slicer functions may return a generator. The `since` parameter is
|
||||
# allowed to be None, and will fall back to LAST_ENTRIES[key] or to
|
||||
# LAST_GATHER (truncated appropriately to match the 4-week limit).
|
||||
#
|
||||
# They can also force a full-table sync when a full_sync_interval is given.
|
||||
kwargs = dict()
|
||||
full_sync_enabled = False
|
||||
if func.__awx_full_sync_interval__:
|
||||
last_full_sync = last_entries.get(f"{key}_full")
|
||||
full_sync_enabled = not last_full_sync or last_full_sync < now() - timedelta(days=func.__awx_full_sync_interval__)
|
||||
|
||||
kwargs['full_sync_enabled'] = full_sync_enabled
|
||||
if func.__awx_expensive__:
|
||||
slices = func.__awx_expensive__(key, since, until, last_gather)
|
||||
slices = func.__awx_expensive__(key, since, until, last_gather, **kwargs)
|
||||
else:
|
||||
slices = collectors.trivial_slicing(key, since, until, last_gather)
|
||||
slices = collectors.trivial_slicing(key, since, until, last_gather, **kwargs)
|
||||
|
||||
for start, end in slices:
|
||||
files = func(start, full_path=gather_dir, until=end)
|
||||
@@ -301,6 +311,12 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
succeeded = False
|
||||
logger.exception("Could not generate metric {}".format(filename))
|
||||
|
||||
# update full sync timestamp if successfully shipped
|
||||
if full_sync_enabled and collection_type != 'dry-run' and succeeded:
|
||||
with disable_activity_stream():
|
||||
last_entries[f"{key}_full"] = now()
|
||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||
|
||||
if collection_type != 'dry-run':
|
||||
if succeeded:
|
||||
for fpath in tarfiles:
|
||||
@@ -359,9 +375,7 @@ def ship(path):
|
||||
s.headers = get_awx_http_client_headers()
|
||||
s.headers.pop('Content-Type')
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
response = s.post(
|
||||
url, files=files, verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)
|
||||
)
|
||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||
# Accept 2XX status_codes
|
||||
if response.status_code >= 300:
|
||||
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
|
||||
|
||||
@@ -9,7 +9,7 @@ from django.apps import apps
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.utils import is_testing
|
||||
|
||||
root_key = 'awx_metrics'
|
||||
root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
@@ -264,13 +264,6 @@ class Metrics:
|
||||
data[field] = self.METRICS[field].decode(self.conn)
|
||||
return data
|
||||
|
||||
def store_metrics(self, data_json):
|
||||
# called when receiving metrics from other instances
|
||||
data = json.loads(data_json)
|
||||
if self.instance_name != data['instance']:
|
||||
logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
|
||||
self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])
|
||||
|
||||
def should_pipe_execute(self):
|
||||
if self.metrics_have_changed is False:
|
||||
return False
|
||||
@@ -305,13 +298,15 @@ class Metrics:
|
||||
try:
|
||||
current_time = time.time()
|
||||
if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval:
|
||||
serialized_metrics = self.serialize_local_metrics()
|
||||
payload = {
|
||||
'instance': self.instance_name,
|
||||
'metrics': self.serialize_local_metrics(),
|
||||
'metrics': serialized_metrics,
|
||||
}
|
||||
# store a local copy as well
|
||||
self.store_metrics(json.dumps(payload))
|
||||
# store the serialized data locally as well, so that load_other_metrics will read it
|
||||
self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
|
||||
emit_channel_notification("metrics", payload)
|
||||
|
||||
self.previous_send_metrics.set(current_time)
|
||||
self.previous_send_metrics.store_value(self.conn)
|
||||
finally:
|
||||
|
||||
@@ -10,7 +10,7 @@ from rest_framework import serializers
|
||||
# AWX
|
||||
from awx.conf import fields, register, register_validate
|
||||
from awx.main.models import ExecutionEnvironment
|
||||
|
||||
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
|
||||
|
||||
logger = logging.getLogger('awx.main.conf')
|
||||
|
||||
@@ -805,6 +805,32 @@ register(
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'SUBSCRIPTION_USAGE_MODEL',
|
||||
field_class=fields.ChoiceField,
|
||||
choices=[
|
||||
('', _('Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting')),
|
||||
(
|
||||
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS,
|
||||
_('Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes'),
|
||||
),
|
||||
],
|
||||
default='',
|
||||
allow_blank=True,
|
||||
label=_('Defines subscription usage model and shows Host Metrics'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'CLEANUP_HOST_METRICS_LAST_TS',
|
||||
field_class=fields.DateTimeField,
|
||||
label=_('Last cleanup date for HostMetrics'),
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
|
||||
|
||||
@@ -38,6 +38,8 @@ STANDARD_INVENTORY_UPDATE_ENV = {
|
||||
'ANSIBLE_INVENTORY_EXPORT': 'True',
|
||||
# Redirecting output to stderr allows JSON parsing to still work with -vvv
|
||||
'ANSIBLE_VERBOSE_TO_STDERR': 'True',
|
||||
# if ansible-inventory --limit is used for an inventory import, unmatched should be a failure
|
||||
'ANSIBLE_HOST_PATTERN_MISMATCH': 'error',
|
||||
}
|
||||
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
|
||||
ACTIVE_STATES = CAN_CANCEL
|
||||
@@ -63,7 +65,7 @@ ENV_BLOCKLIST = frozenset(
|
||||
'INVENTORY_HOSTVARS',
|
||||
'AWX_HOST',
|
||||
'PROJECT_REVISION',
|
||||
'SUPERVISOR_WEB_CONFIG_PATH',
|
||||
'SUPERVISOR_CONFIG_PATH',
|
||||
)
|
||||
)
|
||||
|
||||
@@ -106,3 +108,9 @@ JOB_VARIABLE_PREFIXES = [
|
||||
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
|
||||
'\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
|
||||
)
|
||||
|
||||
# Values for setting SUBSCRIPTION_USAGE_MODEL
|
||||
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'
|
||||
|
||||
# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
|
||||
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')
|
||||
|
||||
@@ -3,6 +3,7 @@ import logging
|
||||
import time
|
||||
import hmac
|
||||
import asyncio
|
||||
import redis
|
||||
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
from django.conf import settings
|
||||
@@ -80,7 +81,7 @@ class WebsocketSecretAuthHelper:
|
||||
WebsocketSecretAuthHelper.verify_secret(secret)
|
||||
|
||||
|
||||
class BroadcastConsumer(AsyncJsonWebsocketConsumer):
|
||||
class RelayConsumer(AsyncJsonWebsocketConsumer):
|
||||
async def connect(self):
|
||||
try:
|
||||
WebsocketSecretAuthHelper.is_authorized(self.scope)
|
||||
@@ -100,6 +101,21 @@ class BroadcastConsumer(AsyncJsonWebsocketConsumer):
|
||||
async def internal_message(self, event):
|
||||
await self.send(event['text'])
|
||||
|
||||
async def receive_json(self, data):
|
||||
(group, message) = unwrap_broadcast_msg(data)
|
||||
if group == "metrics":
|
||||
message = json.loads(message['text'])
|
||||
conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
|
||||
else:
|
||||
await self.channel_layer.group_send(group, message)
|
||||
|
||||
async def consumer_subscribe(self, event):
|
||||
await self.send_json(event)
|
||||
|
||||
async def consumer_unsubscribe(self, event):
|
||||
await self.send_json(event)
|
||||
|
||||
|
||||
class EventConsumer(AsyncJsonWebsocketConsumer):
|
||||
async def connect(self):
|
||||
@@ -128,6 +144,11 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
|
||||
self.channel_name,
|
||||
)
|
||||
|
||||
await self.channel_layer.group_send(
|
||||
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
|
||||
{"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
|
||||
)
|
||||
|
||||
@database_sync_to_async
|
||||
def user_can_see_object_id(self, user_access, oid):
|
||||
# At this point user is a channels.auth.UserLazyObject object
|
||||
@@ -176,9 +197,20 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
|
||||
self.channel_name,
|
||||
)
|
||||
|
||||
if len(old_groups):
|
||||
await self.channel_layer.group_send(
|
||||
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
|
||||
{"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
|
||||
)
|
||||
|
||||
new_groups_exclusive = new_groups - current_groups
|
||||
for group_name in new_groups_exclusive:
|
||||
await self.channel_layer.group_add(group_name, self.channel_name)
|
||||
|
||||
await self.channel_layer.group_send(
|
||||
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
|
||||
{"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
|
||||
)
|
||||
self.scope['session']['groups'] = new_groups
|
||||
await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})
|
||||
|
||||
@@ -200,9 +232,11 @@ def _dump_payload(payload):
|
||||
return None
|
||||
|
||||
|
||||
def emit_channel_notification(group, payload):
|
||||
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa
|
||||
def unwrap_broadcast_msg(payload: dict):
|
||||
return (payload['group'], payload['message'])
|
||||
|
||||
|
||||
def emit_channel_notification(group, payload):
|
||||
payload_dumped = _dump_payload(payload)
|
||||
if payload_dumped is None:
|
||||
return
|
||||
@@ -212,16 +246,6 @@ def emit_channel_notification(group, payload):
|
||||
run_sync(
|
||||
channel_layer.group_send(
|
||||
group,
|
||||
{"type": "internal.message", "text": payload_dumped},
|
||||
)
|
||||
)
|
||||
|
||||
run_sync(
|
||||
channel_layer.group_send(
|
||||
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
|
||||
{
|
||||
"type": "internal.message",
|
||||
"text": wrap_broadcast_msg(group, payload_dumped),
|
||||
},
|
||||
{"type": "internal.message", "text": payload_dumped, "needs_relay": True},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -54,6 +54,12 @@ aim_inputs = {
|
||||
'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
|
||||
},
|
||||
{'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
|
||||
{
|
||||
'id': 'object_property',
|
||||
'label': _('Object Property'),
|
||||
'type': 'string',
|
||||
'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
|
||||
},
|
||||
{
|
||||
'id': 'reason',
|
||||
'label': _('Reason'),
|
||||
@@ -74,6 +80,7 @@ def aim_backend(**kwargs):
|
||||
app_id = kwargs['app_id']
|
||||
object_query = kwargs['object_query']
|
||||
object_query_format = kwargs['object_query_format']
|
||||
object_property = kwargs.get('object_property', '')
|
||||
reason = kwargs.get('reason', None)
|
||||
if webservice_id == '':
|
||||
webservice_id = 'AIMWebService'
|
||||
@@ -98,7 +105,18 @@ def aim_backend(**kwargs):
|
||||
allow_redirects=False,
|
||||
)
|
||||
raise_for_status(res)
|
||||
return res.json()['Content']
|
||||
# CCP returns property names capitalized, but 'username' comes back camel-cased
# as 'UserName', so handle that case specially.
|
||||
if object_property == '':
|
||||
object_property = 'Content'
|
||||
elif object_property.lower() == 'username':
|
||||
object_property = 'UserName'
|
||||
elif object_property not in res:
|
||||
raise KeyError('Property {} not found in object'.format(object_property))
|
||||
else:
|
||||
object_property = object_property.capitalize()
|
||||
|
||||
return res.json()[object_property]
|
||||
|
||||
|
||||
aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)
|
||||
|
||||
@@ -35,8 +35,14 @@ dsv_inputs = {
|
||||
'type': 'string',
|
||||
'help_text': _('The secret path e.g. /test/secret1'),
|
||||
},
|
||||
{
|
||||
'id': 'secret_field',
|
||||
'label': _('Secret Field'),
|
||||
'help_text': _('The field to extract from the secret'),
|
||||
'type': 'string',
|
||||
},
|
||||
],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path'],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
|
||||
}
|
||||
|
||||
if settings.DEBUG:
|
||||
@@ -52,5 +58,5 @@ if settings.DEBUG:
|
||||
dsv_plugin = CredentialPlugin(
|
||||
'Thycotic DevOps Secrets Vault',
|
||||
dsv_inputs,
|
||||
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
|
||||
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip
|
||||
)
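The new lambda assumes the Secrets Vault client returns the secret fields nested under 'data'; the shape below is illustrative:

# SecretsVault(...).get_secret('/test/secret1') is expected to look roughly like
#   {'data': {'username': 'svc-user', 'password': 's3cret'}}
# so with secret_field='password' the plugin now returns just 's3cret'
# instead of the whole secret document.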
@@ -1,7 +1,7 @@
|
||||
from .plugin import CredentialPlugin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from thycotic.secrets.server import PasswordGrantAuthorizer, SecretServer, ServerSecret
|
||||
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
|
||||
|
||||
tss_inputs = {
|
||||
'fields': [
|
||||
@@ -17,6 +17,12 @@ tss_inputs = {
|
||||
'help_text': _('The (Application) user username'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'domain',
|
||||
'label': _('Domain'),
|
||||
'help_text': _('The (Application) user domain'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'password',
|
||||
'label': _('Password'),
|
||||
@@ -44,7 +50,10 @@ tss_inputs = {
|
||||
|
||||
|
||||
def tss_backend(**kwargs):
|
||||
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
|
||||
if 'domain' in kwargs:
|
||||
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
|
||||
else:
|
||||
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
|
||||
secret_server = SecretServer(kwargs['server_url'], authorizer)
|
||||
secret_dict = secret_server.get_secret(kwargs['secret_id'])
|
||||
secret = ServerSecret(**secret_dict)
|
||||
|
||||
@@ -63,7 +63,7 @@ class RecordedQueryLog(object):
|
||||
if not os.path.isdir(self.dest):
|
||||
os.makedirs(self.dest)
|
||||
progname = ' '.join(sys.argv)
|
||||
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
|
||||
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'):
|
||||
if match in progname:
|
||||
progname = match
|
||||
break
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
import os
|
||||
import psycopg2
|
||||
import select
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
from awx.settings.application_name import get_application_name
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import connection as pg_connection
|
||||
|
||||
|
||||
NOT_READY = ([], [], [])
|
||||
|
||||
|
||||
@@ -14,6 +16,29 @@ def get_local_queuename():
|
||||
return settings.CLUSTER_HOST_ID
|
||||
|
||||
|
||||
def get_task_queuename():
|
||||
if os.getenv('AWX_COMPONENT') != 'web':
|
||||
return settings.CLUSTER_HOST_ID
|
||||
|
||||
from awx.main.models.ha import Instance
|
||||
|
||||
random_task_instance = (
|
||||
Instance.objects.filter(
|
||||
node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
|
||||
node_state=Instance.States.READY,
|
||||
enabled=True,
|
||||
)
|
||||
.only('hostname')
|
||||
.order_by('?')
|
||||
.first()
|
||||
)
|
||||
|
||||
if random_task_instance is None:
|
||||
raise ValueError('No task instances are READY and Enabled.')
|
||||
|
||||
return random_task_instance.hostname
|
||||
|
||||
|
||||
class PubSub(object):
|
||||
def __init__(self, conn):
|
||||
self.conn = conn
|
||||
@@ -60,10 +85,11 @@ def pg_bus_conn(new_connection=False):
|
||||
'''
|
||||
|
||||
if new_connection:
|
||||
conf = settings.DATABASES['default']
|
||||
conn = psycopg2.connect(
|
||||
dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
|
||||
)
|
||||
conf = settings.DATABASES['default'].copy()
|
||||
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
|
||||
# Modify the application name to distinguish from other connections the process might use
|
||||
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
|
||||
conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
|
||||
# Django connection.cursor().connection doesn't have autocommit=True on by default
|
||||
conn.set_session(autocommit=True)
|
||||
else:
|
||||
|
||||
@@ -6,7 +6,7 @@ from django.conf import settings
|
||||
from django.db import connection
|
||||
import redis
|
||||
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
from . import pg_bus_conn
|
||||
|
||||
@@ -21,7 +21,7 @@ class Control(object):
|
||||
if service not in self.services:
|
||||
raise RuntimeError('{} must be in {}'.format(service, self.services))
|
||||
self.service = service
|
||||
self.queuename = host or get_local_queuename()
|
||||
self.queuename = host or get_task_queuename()
|
||||
|
||||
def status(self, *args, **kwargs):
|
||||
r = redis.Redis.from_url(settings.BROKER_URL)
|
||||
|
||||
@@ -10,6 +10,7 @@ from django_guid import set_guid
|
||||
from django_guid.utils import generate_guid
|
||||
|
||||
from awx.main.dispatch.worker import TaskWorker
|
||||
from awx.main.utils.db import set_connection_name
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch.periodic')
|
||||
|
||||
@@ -21,6 +22,9 @@ class Scheduler(Scheduler):
|
||||
def run():
|
||||
ppid = os.getppid()
|
||||
logger.warning('periodic beat started')
|
||||
|
||||
set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes
|
||||
|
||||
while True:
|
||||
if os.getppid() != ppid:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
|
||||
@@ -18,6 +18,7 @@ from django.conf import settings
|
||||
from awx.main.dispatch.pool import WorkerPool
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.utils.common import log_excess_runtime
|
||||
from awx.main.utils.db import set_connection_name
|
||||
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
@@ -219,6 +220,7 @@ class BaseWorker(object):
|
||||
def work_loop(self, queue, finished, idx, *args):
|
||||
ppid = os.getppid()
|
||||
signal_handler = WorkerSignalHandler()
|
||||
set_connection_name('worker') # set application_name to distinguish from other dispatcher processes
|
||||
while not signal_handler.kill_now:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
|
||||
@@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
|
||||
`awx.main.dispatch.publish`.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resolve_callable(cls, task):
|
||||
@staticmethod
|
||||
def resolve_callable(task):
|
||||
"""
|
||||
Transform a dotted notation task into an imported, callable function, e.g.,
|
||||
|
||||
@@ -46,7 +46,8 @@ class TaskWorker(BaseWorker):
|
||||
|
||||
return _call
|
||||
|
||||
def run_callable(self, body):
|
||||
@staticmethod
|
||||
def run_callable(body):
|
||||
"""
|
||||
Given some AMQP message, import the correct Python code and run it.
|
||||
"""
|
||||
|
||||
@@ -954,6 +954,16 @@ class OrderedManyToManyDescriptor(ManyToManyDescriptor):
|
||||
def get_queryset(self):
|
||||
return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name)
|
||||
|
||||
def add(self, *objects):
|
||||
if len(objects) > 1:
|
||||
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
|
||||
return super().add(*objects)
|
||||
|
||||
def remove(self, *objects):
|
||||
if len(objects) > 1:
|
||||
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
|
||||
return super().remove(*objects)
|
||||
|
||||
return OrderedManyRelatedManager
|
||||
|
||||
return add_custom_queryset_to_many_related_manager(
|
||||
@@ -971,13 +981,12 @@ class OrderedManyToManyField(models.ManyToManyField):
|
||||
by a special `position` column on the M2M table
|
||||
"""
|
||||
|
||||
def _update_m2m_position(self, sender, **kwargs):
|
||||
if kwargs.get('action') in ('post_add', 'post_remove'):
|
||||
order_with_respect_to = None
|
||||
for field in sender._meta.local_fields:
|
||||
if isinstance(field, models.ForeignKey) and isinstance(kwargs['instance'], field.related_model):
|
||||
order_with_respect_to = field.name
|
||||
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: kwargs['instance'].pk})):
|
||||
def _update_m2m_position(self, sender, instance, action, **kwargs):
|
||||
if action in ('post_add', 'post_remove'):
|
||||
descriptor = getattr(instance, self.name)
|
||||
order_with_respect_to = descriptor.source_field_name
|
||||
|
||||
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: instance.pk})):
|
||||
if ig.position != i:
|
||||
ig.position = i
|
||||
ig.save()
|
||||
|
||||
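The add()/remove() overrides above limit ordered many-to-many relations to one object per call, and the reworked _update_m2m_position handler renumbers the through rows so position stays contiguous after every change. A small usage sketch, assuming existing Inventory objects and the input_inventories field added later in this changeset:

# constructed_inv, inv_a, inv_b are assumed, already-saved Inventory instances.
constructed_inv.input_inventories.add(inv_a)     # membership row gets position 0
constructed_inv.input_inventories.add(inv_b)     # position 1
constructed_inv.input_inventories.remove(inv_a)  # inv_b is renumbered back to position 0
# Passing several objects in one call is rejected by the override above:
# constructed_inv.input_inventories.add(inv_a, inv_b)  -> RuntimeError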
22
awx/main/management/commands/cleanup_host_metrics.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from awx.main.models import HostMetric
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Run soft-deleting of HostMetrics
|
||||
"""
|
||||
|
||||
help = 'Run soft-deleting of HostMetrics'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
months_ago = options.get('months-ago') or None
|
||||
|
||||
if not months_ago:
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
|
||||
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
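The command above soft-deletes HostMetric rows whose last automation is older than the given threshold, falling back to settings.CLEANUP_HOST_METRICS_THRESHOLD (12 months) when --months-ago is omitted. An assumed way to drive it from Python via Django's call_command; the command name matches the file added here:

from django.core.management import call_command

call_command('cleanup_host_metrics', '--months-ago=18')  # soft-delete hosts not automated in 18 months
call_command('cleanup_host_metrics')                      # use CLEANUP_HOST_METRICS_THRESHOLD (default 12)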
@@ -1,5 +1,6 @@
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from awx.main.tasks.system import clear_setting_cache
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -31,5 +32,7 @@ class Command(BaseCommand):
|
||||
else:
|
||||
raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')
|
||||
|
||||
clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])
|
||||
|
||||
def handle(self, **options):
|
||||
self._enable_disable_auth(options.get('enable'), options.get('disable'))
|
||||
|
||||
@@ -1,53 +1,230 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
import datetime
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
from awx.main.models.inventory import HostMetric
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.main.analytics.collectors import config
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
import tarfile
|
||||
import csv
|
||||
|
||||
CSV_PREFERRED_ROW_COUNT = 500000
|
||||
BATCHED_FETCH_COUNT = 10000
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'This is for offline licensing usage'
|
||||
|
||||
def host_metric_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
|
||||
list_of_queryset = list(
|
||||
result.values(
|
||||
'id',
|
||||
'hostname',
|
||||
'first_automation',
|
||||
'last_automation',
|
||||
'last_deleted',
|
||||
'automated_counter',
|
||||
'deleted_counter',
|
||||
'deleted',
|
||||
'used_in_inventories',
|
||||
).order_by('first_automation')[offset : offset + limit]
|
||||
)
|
||||
|
||||
return list_of_queryset
|
||||
|
||||
def host_metric_summary_monthly_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
|
||||
list_of_queryset = list(
|
||||
result.values(
|
||||
'id',
|
||||
'date',
|
||||
'license_consumed',
|
||||
'license_capacity',
|
||||
'hosts_added',
|
||||
'hosts_deleted',
|
||||
'indirectly_managed_hosts',
|
||||
).order_by(
|
||||
'date'
|
||||
)[offset : offset + limit]
|
||||
)
|
||||
|
||||
return list_of_queryset
|
||||
|
||||
def paginated_db_retrieval(self, type, filter_kwargs, rows_per_file):
|
||||
offset = 0
|
||||
list_of_queryset = []
|
||||
while True:
|
||||
if type == 'host_metric':
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
list_of_queryset = self.host_metric_queryset(result, offset, rows_per_file)
|
||||
elif type == 'host_metric_summary_monthly':
|
||||
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
|
||||
list_of_queryset = self.host_metric_summary_monthly_queryset(result, offset, rows_per_file)
|
||||
|
||||
if not list_of_queryset:
|
||||
break
|
||||
else:
|
||||
yield list_of_queryset
|
||||
|
||||
offset += len(list_of_queryset)
|
||||
|
||||
def controlled_db_retrieval(self, type, filter_kwargs, offset=0, fetch_count=BATCHED_FETCH_COUNT):
|
||||
if type == 'host_metric':
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
return self.host_metric_queryset(result, offset, fetch_count)
|
||||
elif type == 'host_metric_summary_monthly':
|
||||
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
|
||||
return self.host_metric_summary_monthly_queryset(result, offset, fetch_count)
|
||||
|
||||
def write_to_csv(self, csv_file, list_of_queryset, always_header, first_write=False, mode='a'):
|
||||
with open(csv_file, mode, newline='') as output_file:
|
||||
try:
|
||||
keys = list_of_queryset[0].keys() if list_of_queryset else []
|
||||
dict_writer = csv.DictWriter(output_file, keys)
|
||||
if always_header or first_write:
|
||||
dict_writer.writeheader()
|
||||
dict_writer.writerows(list_of_queryset)
|
||||
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
def csv_for_tar(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
|
||||
for index, list_of_queryset in enumerate(self.paginated_db_retrieval(type, filter_kwargs, rows_per_file)):
|
||||
csv_file = f'{temp_dir}/{type}{index+1}.csv'
|
||||
arcname_file = f'{type}{index+1}.csv'
|
||||
|
||||
first_write = True if index == 0 else False
|
||||
|
||||
self.write_to_csv(csv_file, list_of_queryset, always_header, first_write, 'w')
|
||||
yield csv_file, arcname_file
|
||||
|
||||
def csv_for_tar_batched_fetch(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
|
||||
csv_iteration = 1
|
||||
|
||||
offset = 0
|
||||
rows_written_per_csv = 0
|
||||
to_fetch = BATCHED_FETCH_COUNT
|
||||
|
||||
while True:
|
||||
list_of_queryset = self.controlled_db_retrieval(type, filter_kwargs, offset, to_fetch)
|
||||
|
||||
if not list_of_queryset:
|
||||
break
|
||||
|
||||
csv_file = f'{temp_dir}/{type}{csv_iteration}.csv'
|
||||
arcname_file = f'{type}{csv_iteration}.csv'
|
||||
self.write_to_csv(csv_file, list_of_queryset, always_header)
|
||||
|
||||
offset += to_fetch
|
||||
rows_written_per_csv += to_fetch
|
||||
always_header = False
|
||||
|
||||
remaining_rows_per_csv = rows_per_file - rows_written_per_csv
|
||||
|
||||
if not remaining_rows_per_csv:
|
||||
yield csv_file, arcname_file
|
||||
|
||||
rows_written_per_csv = 0
|
||||
always_header = True
|
||||
to_fetch = BATCHED_FETCH_COUNT
|
||||
csv_iteration += 1
|
||||
elif remaining_rows_per_csv < BATCHED_FETCH_COUNT:
|
||||
to_fetch = remaining_rows_per_csv
|
||||
|
||||
if rows_written_per_csv:
|
||||
yield csv_file, arcname_file
|
||||
|
||||
def config_for_tar(self, options, temp_dir):
|
||||
config_json = json.dumps(config(options.get('since')))
|
||||
config_file = f'{temp_dir}/config.json'
|
||||
arcname_file = 'config.json'
|
||||
with open(config_file, 'w') as f:
|
||||
f.write(config_json)
|
||||
return config_file, arcname_file
|
||||
|
||||
def output_json(self, options, filter_kwargs):
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
for csv_detail in self.csv_for_tar(temp_dir, options.get('json', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, True):
|
||||
csv_file = csv_detail[0]
|
||||
|
||||
with open(csv_file) as f:
|
||||
reader = csv.DictReader(f)
|
||||
rows = list(reader)
|
||||
json_result = json.dumps(rows, cls=DjangoJSONEncoder)
|
||||
print(json_result)
|
||||
|
||||
def output_csv(self, options, filter_kwargs):
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
for csv_detail in self.csv_for_tar(temp_dir, options.get('csv', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, False):
|
||||
csv_file = csv_detail[0]
|
||||
with open(csv_file) as f:
|
||||
sys.stdout.write(f.read())
|
||||
|
||||
def output_tarball(self, options, filter_kwargs):
|
||||
always_header = True
|
||||
rows_per_file = options['rows_per_file'] or CSV_PREFERRED_ROW_COUNT
|
||||
|
||||
tar = tarfile.open("./host_metrics.tar.gz", "w:gz")
|
||||
|
||||
if rows_per_file <= BATCHED_FETCH_COUNT:
|
||||
csv_function = self.csv_for_tar
|
||||
else:
|
||||
csv_function = self.csv_for_tar_batched_fetch
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
for csv_detail in csv_function(temp_dir, 'host_metric', filter_kwargs, rows_per_file, always_header):
|
||||
tar.add(csv_detail[0], arcname=csv_detail[1])
|
||||
|
||||
for csv_detail in csv_function(temp_dir, 'host_metric_summary_monthly', filter_kwargs, rows_per_file, always_header):
|
||||
tar.add(csv_detail[0], arcname=csv_detail[1])
|
||||
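get_task_queuename() lets web processes route work to a random READY control or hybrid node instead of their own CLUSTER_HOST_ID. A hedged sketch of a caller combining it with the pg_notify bus defined in this module; the payload below is a placeholder, not the dispatcher's real message format:

from awx.main.dispatch import pg_bus_conn, get_task_queuename

def publish_to_task_node(payload_json):
    # Hostname of a READY, enabled control/hybrid instance (raises ValueError if none exist).
    queue = get_task_queuename()
    with pg_bus_conn(new_connection=True) as conn:
        conn.notify(queue, payload_json)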
|
||||
config_file, arcname_file = self.config_for_tar(options, temp_dir)
|
||||
tar.add(config_file, arcname=arcname_file)
|
||||
|
||||
tar.close()
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
|
||||
parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
|
||||
parser.add_argument('--json', action='store_true', help='Select output as JSON')
|
||||
parser.add_argument('--json', type=str, const='host_metric', nargs='?', help='Select output as JSON for host_metric or host_metric_summary_monthly')
|
||||
parser.add_argument('--csv', type=str, const='host_metric', nargs='?', help='Select output as CSV for host_metric or host_metric_summary_monthly')
|
||||
parser.add_argument('--tarball', action='store_true', help=f'Package CSV files into a tar with up to {CSV_PREFERRED_ROW_COUNT} rows')
|
||||
parser.add_argument('--rows_per_file', type=int, help=f'Split rows in chunks of {CSV_PREFERRED_ROW_COUNT}')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
since = options.get('since')
|
||||
until = options.get('until')
|
||||
|
||||
if since is None and until is None:
|
||||
print("No Arguments received")
|
||||
return None
|
||||
|
||||
if since is not None and since.tzinfo is None:
|
||||
since = since.replace(tzinfo=datetime.timezone.utc)
|
||||
|
||||
if until is not None and until.tzinfo is None:
|
||||
until = until.replace(tzinfo=datetime.timezone.utc)
|
||||
|
||||
filter_kwargs = {}
|
||||
if since is not None:
|
||||
filter_kwargs['last_automation__gte'] = since
|
||||
if until is not None:
|
||||
filter_kwargs['last_automation__lte'] = until
|
||||
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
filter_kwargs_host_metrics_summary = {}
|
||||
if since is not None:
|
||||
filter_kwargs_host_metrics_summary['date__gte'] = since
|
||||
|
||||
if options['rows_per_file'] and options.get('rows_per_file') > CSV_PREFERRED_ROW_COUNT:
|
||||
print(f"rows_per_file exceeds the allowable limit of {CSV_PREFERRED_ROW_COUNT}.")
|
||||
return
|
||||
|
||||
# if --json flag is set, output the result in json format
|
||||
if options['json']:
|
||||
list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
|
||||
json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
|
||||
print(json_result)
|
||||
self.output_json(options, filter_kwargs)
|
||||
elif options['csv']:
|
||||
self.output_csv(options, filter_kwargs)
|
||||
elif options['tarball']:
|
||||
self.output_tarball(options, filter_kwargs)
|
||||
|
||||
# --json flag is not set, output in plain text
|
||||
else:
|
||||
print(f"Total Number of hosts automated: {len(result)}")
|
||||
for item in result:
|
||||
print(f"Printing up to {BATCHED_FETCH_COUNT} automated hosts:")
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
list_of_queryset = self.host_metric_queryset(result, 0, BATCHED_FETCH_COUNT)
|
||||
for item in list_of_queryset:
|
||||
print(
|
||||
"Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
|
||||
hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
|
||||
hostname=item['hostname'], first_automation=item['first_automation'], last_automation=item['last_automation']
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
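Taken together, the handlers above stream HostMetric and HostMetricSummaryMonthly rows into CSV files of at most rows_per_file rows while fetching no more than BATCHED_FETCH_COUNT rows per query. A hedged example of invoking the command via call_command; the command name is not visible in this hunk, so 'host_metric' is an assumption:

from django.core.management import call_command

call_command('host_metric', '--since=2023-01-01', '--json')  # JSON dump of host_metric rows to stdout
# Package host_metric and host_metric_summary_monthly CSVs plus config.json into ./host_metrics.tar.gz
call_command('host_metric', '--since=2023-01-01', '--tarball', '--rows_per_file=100000')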
@@ -458,12 +458,19 @@ class Command(BaseCommand):
|
||||
# TODO: We disable variable overwrite here in case user-defined inventory variables get
|
||||
# mangled. But we still need to figure out a better way of processing multiple inventory
|
||||
# update variables mixing with each other.
|
||||
all_obj = self.inventory
|
||||
db_variables = all_obj.variables_dict
|
||||
db_variables.update(self.all_group.variables)
|
||||
if db_variables != all_obj.variables_dict:
|
||||
all_obj.variables = json.dumps(db_variables)
|
||||
all_obj.save(update_fields=['variables'])
|
||||
# issue for this: https://github.com/ansible/awx/issues/11623
|
||||
|
||||
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
|
||||
# NOTE: we had to add an exception case to not merge variables
|
||||
# to make constructed inventory coherent
|
||||
db_variables = self.all_group.variables
|
||||
else:
|
||||
db_variables = self.inventory.variables_dict
|
||||
db_variables.update(self.all_group.variables)
|
||||
|
||||
if db_variables != self.inventory.variables_dict:
|
||||
self.inventory.variables = json.dumps(db_variables)
|
||||
self.inventory.save(update_fields=['variables'])
|
||||
logger.debug('Inventory variables updated from "all" group')
|
||||
else:
|
||||
logger.debug('Inventory variables unmodified')
|
||||
@@ -522,16 +529,32 @@ class Command(BaseCommand):
|
||||
def _update_db_host_from_mem_host(self, db_host, mem_host):
|
||||
# Update host variables.
|
||||
db_variables = db_host.variables_dict
|
||||
if self.overwrite_vars:
|
||||
db_variables = mem_host.variables
|
||||
else:
|
||||
db_variables.update(mem_host.variables)
|
||||
mem_variables = mem_host.variables
|
||||
update_fields = []
|
||||
|
||||
# Update host instance_id.
|
||||
instance_id = self._get_instance_id(mem_variables)
|
||||
if instance_id != db_host.instance_id:
|
||||
old_instance_id = db_host.instance_id
|
||||
db_host.instance_id = instance_id
|
||||
update_fields.append('instance_id')
|
||||
|
||||
if self.inventory.kind == 'constructed':
|
||||
# remove towervars so the constructed hosts do not have extra variables
|
||||
for prefix in ('host', 'tower'):
|
||||
for var in ('remote_{}_enabled', 'remote_{}_id'):
|
||||
mem_variables.pop(var.format(prefix), None)
|
||||
|
||||
if self.overwrite_vars:
|
||||
db_variables = mem_variables
|
||||
else:
|
||||
db_variables.update(mem_variables)
|
||||
|
||||
if db_variables != db_host.variables_dict:
|
||||
db_host.variables = json.dumps(db_variables)
|
||||
update_fields.append('variables')
|
||||
# Update host enabled flag.
|
||||
enabled = self._get_enabled(mem_host.variables)
|
||||
enabled = self._get_enabled(mem_variables)
|
||||
if enabled is not None and db_host.enabled != enabled:
|
||||
db_host.enabled = enabled
|
||||
update_fields.append('enabled')
|
||||
@@ -540,12 +563,6 @@ class Command(BaseCommand):
|
||||
old_name = db_host.name
|
||||
db_host.name = mem_host.name
|
||||
update_fields.append('name')
|
||||
# Update host instance_id.
|
||||
instance_id = self._get_instance_id(mem_host.variables)
|
||||
if instance_id != db_host.instance_id:
|
||||
old_instance_id = db_host.instance_id
|
||||
db_host.instance_id = instance_id
|
||||
update_fields.append('instance_id')
|
||||
# Update host and display message(s) on what changed.
|
||||
if update_fields:
|
||||
db_host.save(update_fields=update_fields)
|
||||
@@ -654,13 +671,19 @@ class Command(BaseCommand):
|
||||
mem_host = self.all_group.all_hosts[mem_host_name]
|
||||
import_vars = mem_host.variables
|
||||
host_desc = import_vars.pop('_awx_description', 'imported')
|
||||
host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
|
||||
host_attrs = dict(description=host_desc)
|
||||
enabled = self._get_enabled(mem_host.variables)
|
||||
if enabled is not None:
|
||||
host_attrs['enabled'] = enabled
|
||||
if self.instance_id_var:
|
||||
instance_id = self._get_instance_id(mem_host.variables)
|
||||
host_attrs['instance_id'] = instance_id
|
||||
if self.inventory.kind == 'constructed':
|
||||
# remove towervars so the constructed hosts do not have extra variables
|
||||
for prefix in ('host', 'tower'):
|
||||
for var in ('remote_{}_enabled', 'remote_{}_id'):
|
||||
import_vars.pop(var.format(prefix), None)
|
||||
host_attrs['variables'] = json.dumps(import_vars)
|
||||
try:
|
||||
sanitize_jinja(mem_host_name)
|
||||
except ValueError as e:
|
||||
|
||||
@@ -44,16 +44,18 @@ class Command(BaseCommand):
|
||||
|
||||
for x in ig.instances.all():
|
||||
color = '\033[92m'
|
||||
end_color = '\033[0m'
|
||||
if x.capacity == 0 and x.node_type != 'hop':
|
||||
color = '\033[91m'
|
||||
if not x.enabled:
|
||||
color = '\033[90m[DISABLED] '
|
||||
if no_color:
|
||||
color = ''
|
||||
end_color = ''
|
||||
|
||||
capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
|
||||
version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
|
||||
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
|
||||
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
|
||||
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')
|
||||
|
||||
print()
|
||||
|
||||
32
awx/main/management/commands/run_cache_clear.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import logging
|
||||
import json
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.dispatch.worker.task import TaskWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.cache_clear')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Cache Clear
|
||||
Runs as a management command and starts a daemon that listens for a pg_notify message to clear the cache.
|
||||
"""
|
||||
|
||||
help = 'Launch the cache clear daemon'
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
try:
|
||||
with pg_bus_conn(new_connection=True) as conn:
|
||||
conn.listen("tower_settings_change")
|
||||
for e in conn.events(yield_timeouts=True):
|
||||
if e is not None:
|
||||
body = json.loads(e.payload)
|
||||
logger.info(f"Cache clear request received. Clearing now, payload: {e.payload}")
|
||||
TaskWorker.run_callable(body)
|
||||
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in cache clear main loop')
|
||||
raise
|
||||
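The daemon above blocks on the tower_settings_change channel and hands every payload to TaskWorker.run_callable. A rough sketch of a message that would exercise it, assuming the dispatcher's usual task-body fields (the payload shape is an assumption, not copied from this changeset):

import json
from awx.main.dispatch import pg_bus_conn

# Assumed body shape: a dotted task path plus arguments, as clear_setting_cache.delay([...]) would publish.
payload = json.dumps({'task': 'awx.main.tasks.system.clear_setting_cache', 'args': [['DISABLE_LOCAL_AUTH']], 'kwargs': {}})
with pg_bus_conn(new_connection=True) as conn:
    conn.notify('tower_settings_change', payload)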
@@ -8,7 +8,7 @@ from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection
|
||||
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
||||
@@ -76,7 +76,7 @@ class Command(BaseCommand):
|
||||
consumer = None
|
||||
|
||||
try:
|
||||
queues = ['tower_broadcast_all', get_local_queuename()]
|
||||
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
||||
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
|
||||
74
awx/main/management/commands/run_heartbeet.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
import signal
|
||||
import sys
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_heartbeet')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Launch the web server beacon (heartbeet)'
|
||||
|
||||
def print_banner(self):
|
||||
heartbeet = r"""
|
||||
********** **********
|
||||
************* *************
|
||||
*****************************
|
||||
***********HEART***********
|
||||
*************************
|
||||
*******************
|
||||
*************** _._
|
||||
*********** /`._ `'. __
|
||||
******* \ .\| \ _'` `)
|
||||
*** (``_) \| ).'` /`- /
|
||||
* `\ `;\_ `\\//`-'` /
|
||||
\ `'.'.| / __/`
|
||||
`'--v_|/`'`
|
||||
__||-._
|
||||
/'` `-`` `'\\
|
||||
/ .'` )
|
||||
\ BEET ' )
|
||||
\. /
|
||||
'. /'`
|
||||
`) |
|
||||
//
|
||||
'(.
|
||||
`\`.
|
||||
``"""
|
||||
print(heartbeet)
|
||||
|
||||
def construct_payload(self, action='online'):
|
||||
payload = {
|
||||
'hostname': settings.CLUSTER_HOST_ID,
|
||||
'ip': os.environ.get('MY_POD_IP'),
|
||||
'action': action,
|
||||
}
|
||||
return json.dumps(payload)
|
||||
|
||||
def notify_listener_and_exit(self, *args):
|
||||
with pg_bus_conn(new_connection=False) as conn:
|
||||
conn.notify('web_heartbeet', self.construct_payload(action='offline'))
|
||||
sys.exit(0)
|
||||
|
||||
def do_hearbeat_loop(self):
|
||||
with pg_bus_conn(new_connection=True) as conn:
|
||||
while True:
|
||||
logger.debug('Sending heartbeat')
|
||||
conn.notify('web_heartbeet', self.construct_payload())
|
||||
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
self.print_banner()
|
||||
signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
|
||||
signal.signal(signal.SIGINT, self.notify_listener_and_exit)
|
||||
|
||||
# Note: We don't really try any reconnect logic to pg_notify here,
|
||||
# just let supervisor restart if we fail.
|
||||
self.do_hearbeat_loop()
|
||||
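Each web pod publishes a small JSON document on the web_heartbeet channel at BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS intervals and sends a final action='offline' message on SIGTERM/SIGINT. A minimal consumer sketch built only from the bus helpers shown in this changeset; tracking pods in a dict is an illustrative choice:

import json
from awx.main.dispatch import pg_bus_conn

web_pods = {}  # hostname -> last heartbeat payload
with pg_bus_conn(new_connection=True) as conn:
    conn.listen('web_heartbeet')
    for event in conn.events(yield_timeouts=True):
        if event is None:
            continue
        beat = json.loads(event.payload)
        if beat['action'] == 'offline':
            web_pods.pop(beat['hostname'], None)
        else:
            web_pods[beat['hostname']] = beat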
41
awx/main/management/commands/run_rsyslog_configurer.py
Normal file
@@ -0,0 +1,41 @@
|
||||
import logging
|
||||
import json
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.dispatch.worker.task import TaskWorker
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
|
||||
logger = logging.getLogger('awx.main.rsyslog_configurer')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Rsyslog Configurer
|
||||
Runs as a management command and starts the rsyslog configurer daemon. The daemon listens
|
||||
for pg_notify messages and then calls reconfigure_rsyslog
|
||||
"""
|
||||
|
||||
help = 'Launch the rsyslog_configurer daemon'
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
try:
|
||||
with pg_bus_conn(new_connection=True) as conn:
|
||||
conn.listen("rsyslog_configurer")
|
||||
# reconfigure rsyslog on start up
|
||||
reconfigure_rsyslog()
|
||||
for e in conn.events(yield_timeouts=True):
|
||||
if e is not None:
|
||||
logger.info("Change in logging settings found. Restarting rsyslogd")
|
||||
# clear the cache of relevant settings then restart
|
||||
setting_keys = [k for k in dir(settings) if k.startswith('LOG_AGGREGATOR')]
|
||||
cache.delete_many(setting_keys)
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
body = json.loads(e.payload)
|
||||
TaskWorker.run_callable(body)
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in rsyslog_configurer main loop')
|
||||
raise
|
||||
@@ -13,13 +13,13 @@ from django.db import connection
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
BroadcastWebsocketStatsManager,
|
||||
RelayWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.wsbroadcast import BroadcastWebsocketManager
|
||||
from awx.main.wsrelay import WebSocketRelayManager
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
logger = logging.getLogger('awx.main.wsrelay')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -98,8 +98,9 @@ class Command(BaseCommand):
|
||||
try:
|
||||
executor = MigrationExecutor(connection)
|
||||
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
|
||||
connection.close() # Because of async nature, main loop will use new connection, so close this
|
||||
except Exception as exc:
|
||||
logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
|
||||
logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...')
|
||||
time.sleep(10)
|
||||
return
|
||||
|
||||
@@ -130,9 +131,9 @@ class Command(BaseCommand):
|
||||
|
||||
if options.get('status'):
|
||||
try:
|
||||
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
|
||||
stats_all = RelayWebsocketStatsManager.get_stats_sync()
|
||||
except redis.exceptions.ConnectionError as e:
|
||||
print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
|
||||
print(f"Unable to get Relay Websocket Status. Failed to connect to redis {e}")
|
||||
return
|
||||
|
||||
data = {}
|
||||
@@ -151,22 +152,19 @@ class Command(BaseCommand):
|
||||
host_stats = Command.get_connection_status(hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'Broadcast websocket connection status from "{my_hostname}" to:')
|
||||
print(f'Relay websocket connection status from "{my_hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
host_stats = Command.get_connection_stats(hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:')
|
||||
print(f'\nRelay websocket connection stats from "{my_hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
broadcast_websocket_mgr = BroadcastWebsocketManager()
|
||||
task = broadcast_websocket_mgr.start()
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(task)
|
||||
websocket_relay_manager = WebSocketRelayManager()
|
||||
asyncio.run(websocket_relay_manager.run())
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Websocket Broadcaster')
|
||||
logger.info('Terminating Websocket Relayer')
|
||||
@@ -79,6 +79,11 @@ class HostManager(models.Manager):
|
||||
return qs
|
||||
|
||||
|
||||
class HostMetricActiveManager(models.Manager):
|
||||
def get_queryset(self):
|
||||
return super().get_queryset().filter(deleted=False)
|
||||
|
||||
|
||||
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
|
||||
# Create IG mapping by union of all groups their instances are members of
|
||||
ig_ig_mapping = {}
|
||||
|
||||
43
awx/main/migrations/0180_add_hostmetric_fields.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-03 09:40
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0179_change_cyberark_plugin_names'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(model_name='hostmetric', name='hostname', field=models.CharField(max_length=512, primary_key=False, serialize=True, unique=True)),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='last_deleted',
|
||||
field=models.DateTimeField(db_index=True, null=True, help_text='When the host was last deleted'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='automated_counter',
|
||||
field=models.BigIntegerField(default=0, help_text='How many times was the host automated'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='deleted_counter',
|
||||
field=models.IntegerField(default=0, help_text='How many times was the host deleted'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='deleted',
|
||||
field=models.BooleanField(
|
||||
default=False, help_text='Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='used_in_inventories',
|
||||
field=models.IntegerField(null=True, help_text='How many inventories contain this host'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric', name='id', field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
),
|
||||
]
|
||||
33
awx/main/migrations/0181_hostmetricsummarymonthly.py
Normal file
@@ -0,0 +1,33 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-10 12:26
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0180_add_hostmetric_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='HostMetricSummaryMonthly',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('date', models.DateField(unique=True)),
|
||||
('license_consumed', models.BigIntegerField(default=0, help_text='How many unique hosts are consumed from the license')),
|
||||
('license_capacity', models.BigIntegerField(default=0, help_text="License capacity as max. number of unique hosts")),
|
||||
(
|
||||
'hosts_added',
|
||||
models.IntegerField(default=0, help_text='How many hosts were added in the associated month, consuming more license capacity'),
|
||||
),
|
||||
(
|
||||
'hosts_deleted',
|
||||
models.IntegerField(default=0, help_text='How many hosts were deleted in the associated month, freeing the license capacity'),
|
||||
),
|
||||
(
|
||||
'indirectly_managed_hosts',
|
||||
models.IntegerField(default=0, help_text='Manually entered number of indirectly managed hosts for a certain month'),
|
||||
),
|
||||
],
|
||||
),
|
||||
]
|
||||
138
awx/main/migrations/0182_constructed_inventory.py
Normal file
@@ -0,0 +1,138 @@
|
||||
# Generated by Django 3.2.16 on 2022-12-07 14:20
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0181_hostmetricsummarymonthly'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='InventoryConstructedInventoryMembership',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
|
||||
(
|
||||
'constructed_inventory',
|
||||
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory', related_name='constructed_inventory_memberships'),
|
||||
),
|
||||
('input_inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory')),
|
||||
],
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventory',
|
||||
name='input_inventories',
|
||||
field=awx.main.fields.OrderedManyToManyField(
|
||||
blank=True,
|
||||
through_fields=('constructed_inventory', 'input_inventory'),
|
||||
help_text='Only valid for constructed inventories, this links to the inventories that will be used.',
|
||||
related_name='destination_inventories',
|
||||
through='main.InventoryConstructedInventoryMembership',
|
||||
to='main.Inventory',
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventory',
|
||||
name='kind',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
choices=[
|
||||
('', 'Hosts have a direct link to this inventory.'),
|
||||
('smart', 'Hosts for inventory generated using the host_filter property.'),
|
||||
('constructed', 'Parse list of source inventories with the constructed inventory plugin.'),
|
||||
],
|
||||
default='',
|
||||
help_text='Kind of inventory being represented.',
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventorysource',
|
||||
name='limit',
|
||||
field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventoryupdate',
|
||||
name='limit',
|
||||
field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='host_filter',
|
||||
field=models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='host_filter',
|
||||
field=models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='jobhostsummary',
|
||||
name='constructed_host',
|
||||
field=models.ForeignKey(
|
||||
default=None,
|
||||
editable=False,
|
||||
help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
|
||||
null=True,
|
||||
on_delete=django.db.models.deletion.SET_NULL,
|
||||
related_name='constructed_host_summaries',
|
||||
to='main.host',
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -16,7 +16,9 @@ from awx.main.models.inventory import ( # noqa
|
||||
Group,
|
||||
Host,
|
||||
HostMetric,
|
||||
HostMetricSummaryMonthly,
|
||||
Inventory,
|
||||
InventoryConstructedInventoryMembership,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
SmartInventoryMembership,
|
||||
|
||||
@@ -7,6 +7,7 @@ from collections import defaultdict
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import models, DatabaseError
|
||||
from django.db.models.functions import Cast
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import utc, now
|
||||
@@ -536,25 +537,38 @@ class JobEvent(BasePlaybookEvent):
|
||||
return
|
||||
job = self.job
|
||||
|
||||
from awx.main.models import Host, JobHostSummary, HostMetric # circular import
|
||||
from awx.main.models import Host, JobHostSummary # circular import
|
||||
|
||||
if self.job.inventory.kind == 'constructed':
|
||||
all_hosts = Host.objects.filter(id__in=self.job.inventory.hosts.values_list(Cast('instance_id', output_field=models.IntegerField()))).only(
|
||||
'id', 'name'
|
||||
)
|
||||
constructed_host_map = self.host_map
|
||||
host_map = {host.name: host.id for host in all_hosts}
|
||||
else:
|
||||
all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
|
||||
constructed_host_map = {}
|
||||
host_map = self.host_map
|
||||
|
||||
all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
|
||||
existing_host_ids = set(h.id for h in all_hosts)
|
||||
|
||||
summaries = dict()
|
||||
updated_hosts_list = list()
|
||||
for host in hostnames:
|
||||
updated_hosts_list.append(host.lower())
|
||||
host_id = self.host_map.get(host, None)
|
||||
host_id = host_map.get(host)
|
||||
if host_id not in existing_host_ids:
|
||||
host_id = None
|
||||
constructed_host_id = constructed_host_map.get(host)
|
||||
host_stats = {}
|
||||
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
|
||||
try:
|
||||
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
|
||||
except AttributeError: # in case event_data[stat] isn't a dict.
|
||||
pass
|
||||
summary = JobHostSummary(created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats)
|
||||
summary = JobHostSummary(
|
||||
created=now(), modified=now(), job_id=job.id, host_id=host_id, constructed_host_id=constructed_host_id, host_name=host, **host_stats
|
||||
)
|
||||
summary.failed = bool(summary.dark or summary.failures)
|
||||
summaries[(host_id, host)] = summary
|
||||
|
||||
@@ -575,12 +589,26 @@ class JobEvent(BasePlaybookEvent):
|
||||
|
||||
Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
|
||||
|
||||
# bulk-create
|
||||
current_time = now()
|
||||
HostMetric.objects.bulk_create(
|
||||
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
|
||||
# Create/update Host Metrics
|
||||
self._update_host_metrics(updated_hosts_list)
|
||||
|
||||
@staticmethod
|
||||
def _update_host_metrics(updated_hosts_list):
|
||||
from awx.main.models import HostMetric # circular import
|
||||
|
||||
# bulk-create
|
||||
current_time = now()
|
||||
HostMetric.objects.bulk_create(
|
||||
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
|
||||
)
|
||||
# bulk-update
|
||||
batch_start, batch_size = 0, 1000
|
||||
while batch_start <= len(updated_hosts_list):
|
||||
batched_host_list = updated_hosts_list[batch_start : (batch_start + batch_size)]
|
||||
HostMetric.objects.filter(hostname__in=batched_host_list).update(
|
||||
last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
|
||||
)
|
||||
HostMetric.objects.filter(hostname__in=updated_hosts_list).update(last_automation=current_time)
|
||||
batch_start += batch_size
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
|
||||
@@ -9,6 +9,8 @@ import re
|
||||
import copy
|
||||
import os.path
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import dateutil.relativedelta
|
||||
import yaml
|
||||
|
||||
# Django
|
||||
@@ -17,6 +19,7 @@ from django.db import models, connection
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.db import transaction
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.urls import resolve
|
||||
from django.utils.timezone import now
|
||||
from django.db.models import Q
|
||||
|
||||
@@ -32,7 +35,7 @@ from awx.main.fields import (
|
||||
SmartFilterField,
|
||||
OrderedManyToManyField,
|
||||
)
|
||||
from awx.main.managers import HostManager
|
||||
from awx.main.managers import HostManager, HostMetricActiveManager
|
||||
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
|
||||
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
|
||||
@@ -49,15 +52,25 @@ from awx.main.models.notifications import (
|
||||
from awx.main.models.credential.injectors import _openstack_data
|
||||
from awx.main.utils import _inventory_updates
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
from awx.main.utils.execution_environments import to_container_path
|
||||
from awx.main.utils.execution_environments import to_container_path, get_control_plane_execution_environment
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership', 'HostMetric', 'HostMetricSummaryMonthly']
|
||||
|
||||
logger = logging.getLogger('awx.main.models.inventory')
|
||||
|
||||
|
||||
class InventoryConstructedInventoryMembership(models.Model):
|
||||
constructed_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE, related_name='constructed_inventory_memberships')
|
||||
input_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE)
|
||||
position = models.PositiveIntegerField(
|
||||
null=True,
|
||||
default=None,
|
||||
db_index=True,
|
||||
)
|
||||
|
||||
|
||||
class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
"""
|
||||
an inventory source contains lists and hosts.
|
||||
@@ -67,6 +80,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
KIND_CHOICES = [
|
||||
('', _('Hosts have a direct link to this inventory.')),
|
||||
('smart', _('Hosts for inventory generated using the host_filter property.')),
|
||||
('constructed', _('Parse list of source inventories with the constructed inventory plugin.')),
|
||||
]
|
||||
|
||||
class Meta:
|
||||
@@ -139,6 +153,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
default=None,
|
||||
help_text=_('Filter that will be applied to the hosts of this inventory.'),
|
||||
)
|
||||
input_inventories = OrderedManyToManyField(
|
||||
'Inventory',
|
||||
blank=True,
|
||||
through_fields=('constructed_inventory', 'input_inventory'),
|
||||
related_name='destination_inventories',
|
||||
help_text=_('Only valid for constructed inventories, this links to the inventories that will be used.'),
|
||||
through='InventoryConstructedInventoryMembership',
|
||||
)
|
||||
instance_groups = OrderedManyToManyField(
|
||||
'InstanceGroup',
|
||||
blank=True,
|
||||
@@ -187,6 +209,14 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
if request is not None:
|
||||
# circular import
|
||||
from awx.api.urls.inventory import constructed_inventory_urls
|
||||
|
||||
route = resolve(request.path_info)
|
||||
if any(route.url_name == url.name for url in constructed_inventory_urls):
|
||||
return reverse('api:constructed_inventory_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
@@ -338,13 +368,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
for host in hosts:
|
||||
data['_meta']['hostvars'][host.name] = host.variables_dict
|
||||
if towervars:
|
||||
tower_dict = dict(
|
||||
remote_tower_enabled=str(host.enabled).lower(),
|
||||
remote_tower_id=host.id,
|
||||
remote_host_enabled=str(host.enabled).lower(),
|
||||
remote_host_id=host.id,
|
||||
)
|
||||
data['_meta']['hostvars'][host.name].update(tower_dict)
|
||||
for prefix in ('host', 'tower'):
|
||||
tower_dict = {
|
||||
f'remote_{prefix}_enabled': str(host.enabled).lower(),
|
||||
f'remote_{prefix}_id': host.id,
|
||||
}
|
||||
data['_meta']['hostvars'][host.name].update(tower_dict)
|
||||
|
||||
return data
|
||||
|
||||
@@ -431,12 +460,24 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
|
||||
connection.on_commit(on_commit)
|
||||
|
||||
def _enforce_constructed_source(self):
|
||||
"""
|
||||
Constructed inventory should always have exactly one inventory source of the constructed type;
|
||||
this enforces that requirement
|
||||
"""
|
||||
if self.kind == 'constructed':
|
||||
if not self.inventory_sources.exists():
|
||||
self.inventory_sources.create(
|
||||
source='constructed', name=f'Auto-created source for: {self.name}'[:512], overwrite=True, overwrite_vars=True, update_on_launch=True
|
||||
)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
super(Inventory, self).save(*args, **kwargs)
|
||||
if self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite':
|
||||
# Minimal update of host_count for smart inventory host filter changes
|
||||
self.update_computed_fields()
|
||||
self._enforce_constructed_source()
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
@@ -820,9 +861,64 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
|
||||
|
||||
class HostMetric(models.Model):
|
||||
hostname = models.CharField(primary_key=True, max_length=512)
|
||||
hostname = models.CharField(unique=True, max_length=512)
|
||||
first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against'))
|
||||
last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against'))
|
||||
last_deleted = models.DateTimeField(null=True, db_index=True, help_text=_('When the host was last deleted'))
|
||||
automated_counter = models.BigIntegerField(default=0, help_text=_('How many times was the host automated'))
|
||||
deleted_counter = models.IntegerField(default=0, help_text=_('How many times was the host deleted'))
|
||||
deleted = models.BooleanField(
|
||||
default=False, help_text=_('Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption')
|
||||
)
|
||||
used_in_inventories = models.IntegerField(null=True, help_text=_('How many inventories contain this host'))
|
||||
|
||||
objects = models.Manager()
|
||||
active_objects = HostMetricActiveManager()
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:host_metric_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def soft_delete(self):
|
||||
if not self.deleted:
|
||||
self.deleted_counter = (self.deleted_counter or 0) + 1
|
||||
self.last_deleted = now()
|
||||
self.deleted = True
|
||||
self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])
|
||||
|
||||
def soft_restore(self):
|
||||
if self.deleted:
|
||||
self.deleted = False
|
||||
self.save(update_fields=['deleted'])
|
||||
|
||||
@classmethod
|
||||
def cleanup_task(cls, months_ago):
|
||||
try:
|
||||
months_ago = int(months_ago)
|
||||
if months_ago <= 0:
|
||||
raise ValueError()
|
||||
|
||||
last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
|
||||
|
||||
logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
|
||||
HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
|
||||
deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
|
||||
)
|
||||
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
|
||||
except (TypeError, ValueError):
|
||||
logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")
|
||||
|
||||
|
||||
class HostMetricSummaryMonthly(models.Model):
|
||||
"""
|
||||
HostMetric summaries computed by scheduled task <TODO> monthly
|
||||
"""
|
||||
|
||||
date = models.DateField(unique=True)
|
||||
license_consumed = models.BigIntegerField(default=0, help_text=_("How many unique hosts are consumed from the license"))
|
||||
license_capacity = models.BigIntegerField(default=0, help_text=_("License capacity as max. number of unique hosts"))
|
||||
hosts_added = models.IntegerField(default=0, help_text=_("How many hosts were added in the associated month, consuming more license capacity"))
|
||||
hosts_deleted = models.IntegerField(default=0, help_text=_("How many hosts were deleted in the associated month, freeing the license capacity"))
|
||||
indirectly_managed_hosts = models.IntegerField(default=0, help_text=_("Manually entered number of indirectly managed hosts for a certain month"))
|
||||
|
||||
|
||||
class InventorySourceOptions(BaseModel):
|
||||
@@ -834,6 +930,7 @@ class InventorySourceOptions(BaseModel):
|
||||
|
||||
SOURCE_CHOICES = [
|
||||
('file', _('File, Directory or Script')),
|
||||
('constructed', _('Template additional groups and hostvars at runtime')),
|
||||
('scm', _('Sourced from a Project')),
|
||||
('ec2', _('Amazon EC2')),
|
||||
('gce', _('Google Compute Engine')),
|
||||
@@ -913,7 +1010,7 @@ class InventorySourceOptions(BaseModel):
|
||||
host_filter = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text=_('Regex where only matching hosts will be imported.'),
|
||||
help_text=_('This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.'),
|
||||
)
|
||||
overwrite = models.BooleanField(
|
||||
default=False,
|
||||
@@ -933,6 +1030,21 @@ class InventorySourceOptions(BaseModel):
|
||||
blank=True,
|
||||
default=1,
|
||||
)
|
||||
limit = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text=_("Enter host, group or pattern match"),
|
||||
)
|
||||
|
||||
def resolve_execution_environment(self):
|
||||
"""
|
||||
Project updates, themselves, will use the control plane execution environment.
|
||||
Jobs using the project can use the default_environment, but the project updates
|
||||
are not flexible enough to allow customizing the image they use.
|
||||
"""
|
||||
if self.inventory.kind == 'constructed':
|
||||
return get_control_plane_execution_environment()
|
||||
return super().resolve_execution_environment()
|
||||
|
||||
@staticmethod
|
||||
def cloud_credential_validation(source, cred):
|
||||
@@ -1367,8 +1479,8 @@ class PluginFileInjector(object):
|
||||
def build_env(self, inventory_update, env, private_data_dir, private_data_files):
|
||||
injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
env.update(injector_env)
|
||||
# Preserves current behavior for Ansible change in default planned for 2.10
|
||||
env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
|
||||
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
|
||||
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
|
||||
return env
|
||||
|
||||
def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
@@ -1552,5 +1664,18 @@ class insights(PluginFileInjector):
|
||||
use_fqcn = True
|
||||
|
||||
|
||||
class constructed(PluginFileInjector):
|
||||
plugin_name = 'constructed'
|
||||
namespace = 'ansible'
|
||||
collection = 'builtin'
|
||||
|
||||
def build_env(self, *args, **kwargs):
|
||||
env = super().build_env(*args, **kwargs)
|
||||
# Enable script inventory plugin so we pick up the script files from source inventories
|
||||
env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
|
||||
env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
|
||||
return env
|
||||
|
||||
|
||||
for cls in PluginFileInjector.__subclasses__():
|
||||
InventorySourceOptions.injectors[cls.__name__] = cls
|
||||
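Putting the two build_env layers together, the constructed injector ends up with roughly the following environment on top of the shared plugin settings; this is a minimal sketch with illustrative values, not the exact variable set AWX exports.

# Hedged sketch of the environment the constructed-inventory injector produces
# on top of the common plugin env.
def build_constructed_env(base_env=None):
    env = dict(base_env or {})
    # Behaviour shared by all plugin-based sources
    env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
    env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
    # Constructed-specific additions: accept script-format input inventories
    # and fail loudly if any source cannot be parsed.
    env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
    env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
    return env


print(build_constructed_env())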
|
||||
@@ -2,12 +2,8 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import codecs
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
import json
|
||||
from urllib.parse import urljoin
|
||||
|
||||
|
||||
@@ -15,11 +11,9 @@ from urllib.parse import urljoin
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.db import models
|
||||
from django.db.models.query import QuerySet
|
||||
from django.db.models.functions import Cast
|
||||
|
||||
# from django.core.cache import cache
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.core.exceptions import FieldDoesNotExist
|
||||
|
||||
@@ -28,6 +22,7 @@ from rest_framework.exceptions import ParseError
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import HOST_FACTS_FIELDS
|
||||
from awx.main.models.base import (
|
||||
BaseModel,
|
||||
CreatedModifiedModel,
|
||||
@@ -44,7 +39,7 @@ from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
|
||||
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
|
||||
from awx.main.models.mixins import (
|
||||
ResourceMixin,
|
||||
@@ -60,8 +55,6 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.models.jobs')
|
||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
||||
|
||||
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']
|
||||
|
||||
@@ -578,12 +571,7 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
hosts = models.ManyToManyField(
|
||||
'Host',
|
||||
related_name='jobs',
|
||||
editable=False,
|
||||
through='JobHostSummary',
|
||||
)
|
||||
hosts = models.ManyToManyField('Host', related_name='jobs', editable=False, through='JobHostSummary', through_fields=('job', 'host'))
|
||||
artifacts = JSONBlob(
|
||||
default=dict,
|
||||
blank=True,
|
||||
@@ -848,109 +836,26 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
def get_notification_friendly_name(self):
|
||||
return "Job"
|
||||
|
||||
def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
|
||||
"""Return value is an iterable for the relevant hosts for this job"""
|
||||
if not self.inventory:
|
||||
return []
|
||||
host_queryset = self.inventory.hosts.only(*only)
|
||||
if filters:
|
||||
host_queryset = host_queryset.filter(**filters)
|
||||
host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
|
||||
if isinstance(host_queryset, QuerySet):
|
||||
return host_queryset.iterator()
|
||||
return host_queryset
|
||||
def get_hosts_for_fact_cache(self):
|
||||
"""
|
||||
Builds the queryset to use for writing or finalizing the fact cache
|
||||
these need to be the 'real' hosts associated with the job.
|
||||
For constructed inventories, that means the original (input inventory) hosts
|
||||
when slicing, that means only returning hosts in that slice
|
||||
"""
|
||||
Host = JobHostSummary._meta.get_field('host').related_model
|
||||
if not self.inventory_id:
|
||||
return Host.objects.none()
|
||||
|
||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||
def start_job_fact_cache(self, destination, log_data, timeout=None):
|
||||
self.log_lifecycle("start_job_fact_cache")
|
||||
log_data['job_id'] = self.id
|
||||
log_data['written_ct'] = 0
|
||||
os.makedirs(destination, mode=0o700)
|
||||
|
||||
if timeout is None:
|
||||
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
||||
if timeout > 0:
|
||||
# exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT` seconds
|
||||
timeout = now() - datetime.timedelta(seconds=timeout)
|
||||
hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
|
||||
if self.inventory.kind == 'constructed':
|
||||
id_field = Host._meta.get_field('id')
|
||||
host_qs = Host.objects.filter(id__in=self.inventory.hosts.exclude(instance_id='').values_list(Cast('instance_id', output_field=id_field)))
|
||||
else:
|
||||
hosts = self._get_inventory_hosts()
|
||||
host_qs = self.inventory.hosts
|
||||
|
||||
last_filepath_written = None
|
||||
for host in hosts:
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
try:
|
||||
with codecs.open(filepath, 'w', encoding='utf-8') as f:
|
||||
os.chmod(f.name, 0o600)
|
||||
json.dump(host.ansible_facts, f)
|
||||
log_data['written_ct'] += 1
|
||||
last_filepath_written = filepath
|
||||
except IOError:
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
# make note of the time we wrote the last file so we can check if any file changed later
|
||||
if last_filepath_written:
|
||||
return os.path.getmtime(last_filepath_written)
|
||||
return None
|
||||
|
||||
@log_excess_runtime(
|
||||
logger,
|
||||
debug_cutoff=0.01,
|
||||
msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
||||
add_log_data=True,
|
||||
)
|
||||
def finish_job_fact_cache(self, destination, facts_write_time, log_data):
|
||||
self.log_lifecycle("finish_job_fact_cache")
|
||||
log_data['job_id'] = self.id
|
||||
log_data['updated_ct'] = 0
|
||||
log_data['unmodified_ct'] = 0
|
||||
log_data['cleared_ct'] = 0
|
||||
hosts_to_update = []
|
||||
for host in self._get_inventory_hosts():
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
if os.path.exists(filepath):
|
||||
# If the file changed since we wrote the last facts file, pre-playbook run...
|
||||
modified = os.path.getmtime(filepath)
|
||||
if (not facts_write_time) or modified > facts_write_time:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
try:
|
||||
ansible_facts = json.load(f)
|
||||
except ValueError:
|
||||
continue
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info(
|
||||
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||
extra=dict(
|
||||
inventory_id=host.inventory.id,
|
||||
host_name=host.name,
|
||||
ansible_facts=host.ansible_facts,
|
||||
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
|
||||
job_id=self.id,
|
||||
),
|
||||
)
|
||||
log_data['updated_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
# if the file goes missing, ansible removed it (likely via clear_facts)
|
||||
host.ansible_facts = {}
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
|
||||
log_data['cleared_ct'] += 1
|
||||
if len(hosts_to_update) > 100:
|
||||
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
|
||||
hosts_to_update = []
|
||||
if hosts_to_update:
|
||||
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
|
||||
host_qs = host_qs.only(*HOST_FACTS_FIELDS)
|
||||
host_qs = self.inventory.get_sliced_hosts(host_qs, self.job_slice_number, self.job_slice_count)
|
||||
return host_qs
|
||||
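For constructed inventories the fact cache has to operate on the original hosts, which the generated hosts point back to via instance_id; a tiny illustration of that mapping (plain dicts instead of ORM rows):

# Illustrative only, not the ORM query above: each constructed host stores the
# original host's primary key in instance_id, so facts are read from and
# written to the "real" hosts of the input inventories.
constructed_hosts = [
    {"name": "web1", "instance_id": "11"},
    {"name": "db1", "instance_id": "12"},
    {"name": "localhost", "instance_id": ""},  # no backing host, skipped
]
real_host_ids = [int(h["instance_id"]) for h in constructed_hosts if h["instance_id"]]
print(real_host_ids)  # [11, 12]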
|
||||
|
||||
class LaunchTimeConfigBase(BaseModel):
|
||||
@@ -1172,6 +1077,15 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
editable=False,
|
||||
)
|
||||
host = models.ForeignKey('Host', related_name='job_host_summaries', null=True, default=None, on_delete=models.SET_NULL, editable=False)
|
||||
constructed_host = models.ForeignKey(
|
||||
'Host',
|
||||
related_name='constructed_host_summaries',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
|
||||
)
|
||||
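The through_fields=('job', 'host') argument on Job.hosts becomes necessary because JobHostSummary now carries two foreign keys to Host (host and constructed_host), so Django can no longer infer which one the many-to-many relation should traverse. A compact models.py-style sketch of that situation (abbreviated stand-in models inside a Django app, not the real AWX schema):

from django.db import models

class Host(models.Model):
    name = models.CharField(max_length=255)

class Job(models.Model):
    # Without through_fields, Django cannot tell whether this M2M should
    # traverse JobHostSummary.host or JobHostSummary.constructed_host.
    hosts = models.ManyToManyField(Host, through='JobHostSummary', through_fields=('job', 'host'))

class JobHostSummary(models.Model):
    job = models.ForeignKey(Job, on_delete=models.CASCADE)
    host = models.ForeignKey(Host, null=True, on_delete=models.SET_NULL, related_name='summaries')
    constructed_host = models.ForeignKey(Host, null=True, on_delete=models.SET_NULL, related_name='constructed_summaries')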
|
||||
host_name = models.CharField(
|
||||
max_length=1024,
|
||||
|
||||
@@ -284,7 +284,7 @@ class JobNotificationMixin(object):
|
||||
'workflow_url',
|
||||
'scm_branch',
|
||||
'artifacts',
|
||||
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark' 'processed', 'rescued', 'ignored']},
|
||||
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark', 'processed', 'rescued', 'ignored']},
|
||||
{
|
||||
'summary_fields': [
|
||||
{
|
||||
|
||||
@@ -32,7 +32,7 @@ from polymorphic.models import PolymorphicModel
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.control import Control as ControlDispatcher
|
||||
from awx.main.registrar import activity_stream_registrar
|
||||
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
|
||||
@@ -1567,7 +1567,7 @@ class UnifiedJob(
|
||||
return r
|
||||
|
||||
def get_queue_name(self):
|
||||
return self.controller_node or self.execution_node or get_local_queuename()
|
||||
return self.controller_node or self.execution_node or get_task_queuename()
|
||||
|
||||
@property
|
||||
def is_container_group_task(self):
|
||||
|
||||
@@ -28,7 +28,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
|
||||
|
||||
websocket_urlpatterns = [
|
||||
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
|
||||
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
|
||||
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
|
||||
]
|
||||
|
||||
application = AWXProtocolTypeRouter(
|
||||
|
||||
@@ -8,7 +8,7 @@ from django.conf import settings
|
||||
from awx import MODE
|
||||
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
|
||||
manager().schedule()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def task_manager():
|
||||
run_manager(TaskManager, "task")
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def dependency_manager():
|
||||
run_manager(DependencyManager, "dependency")
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def workflow_manager():
|
||||
run_manager(WorkflowManager, "workflow")
|
||||
|
||||
awx/main/tasks/facts.py (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
import codecs
|
||||
import datetime
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db.models.query import QuerySet
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now
|
||||
|
||||
# AWX
|
||||
from awx.main.utils.common import log_excess_runtime
|
||||
from awx.main.models.inventory import Host
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.facts')
|
||||
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
||||
|
||||
|
||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
|
||||
log_data['inventory_id'] = inventory_id
|
||||
log_data['written_ct'] = 0
|
||||
try:
|
||||
os.makedirs(destination, mode=0o700)
|
||||
except FileExistsError:
|
||||
pass
|
||||
|
||||
if timeout is None:
|
||||
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
||||
|
||||
if isinstance(hosts, QuerySet):
|
||||
hosts = hosts.iterator()
|
||||
|
||||
last_filepath_written = None
|
||||
for host in hosts:
|
||||
if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
|
||||
continue # facts are expired - do not write them
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
try:
|
||||
with codecs.open(filepath, 'w', encoding='utf-8') as f:
|
||||
os.chmod(f.name, 0o600)
|
||||
json.dump(host.ansible_facts, f)
|
||||
log_data['written_ct'] += 1
|
||||
last_filepath_written = filepath
|
||||
except IOError:
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
# make note of the time we wrote the last file so we can check if any file changed later
|
||||
if last_filepath_written:
|
||||
return os.path.getmtime(last_filepath_written)
|
||||
return None
|
||||
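A minimal standalone sketch of what start_fact_cache does per host: one JSON file named after the host, mode 0600, skipping hosts whose facts are older than the timeout and any path that would escape the cache directory. The helper name and host objects below are stand-ins, not the AWX implementation.

import json, os, tempfile
from datetime import datetime, timedelta, timezone
from types import SimpleNamespace

def write_fact_cache(hosts, destination, timeout_seconds=0):
    destination = os.path.realpath(destination)
    os.makedirs(destination, mode=0o700, exist_ok=True)
    cutoff = datetime.now(timezone.utc) - timedelta(seconds=timeout_seconds)
    last_mtime = None
    for host in hosts:
        if timeout_seconds and host.ansible_facts_modified < cutoff:
            continue  # facts expired - do not seed the cache with them
        path = os.path.join(destination, host.name)
        if not os.path.realpath(path).startswith(destination):
            continue  # refuse to write outside the cache directory
        with open(path, 'w', encoding='utf-8') as f:
            os.chmod(f.name, 0o600)
            json.dump(host.ansible_facts, f)
        last_mtime = os.path.getmtime(path)
    return last_mtime  # used later to detect files changed during the run

hosts = [SimpleNamespace(name='web1', ansible_facts={'os': 'linux'}, ansible_facts_modified=datetime.now(timezone.utc))]
print(write_fact_cache(hosts, tempfile.mkdtemp()))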
|
||||
|
||||
@log_excess_runtime(
|
||||
logger,
|
||||
debug_cutoff=0.01,
|
||||
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
||||
add_log_data=True,
|
||||
)
|
||||
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
|
||||
log_data['inventory_id'] = inventory_id
|
||||
log_data['updated_ct'] = 0
|
||||
log_data['unmodified_ct'] = 0
|
||||
log_data['cleared_ct'] = 0
|
||||
|
||||
if isinstance(hosts, QuerySet):
|
||||
hosts = hosts.iterator()
|
||||
|
||||
hosts_to_update = []
|
||||
for host in hosts:
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
if os.path.exists(filepath):
|
||||
# If the file changed since we wrote the last facts file, pre-playbook run...
|
||||
modified = os.path.getmtime(filepath)
|
||||
if (not facts_write_time) or modified > facts_write_time:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
try:
|
||||
ansible_facts = json.load(f)
|
||||
except ValueError:
|
||||
continue
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info(
|
||||
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||
extra=dict(
|
||||
inventory_id=host.inventory.id,
|
||||
host_name=host.name,
|
||||
ansible_facts=host.ansible_facts,
|
||||
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
|
||||
job_id=job_id,
|
||||
),
|
||||
)
|
||||
log_data['updated_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
# if the file goes missing, ansible removed it (likely via clear_facts)
|
||||
host.ansible_facts = {}
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
|
||||
log_data['cleared_ct'] += 1
|
||||
if len(hosts_to_update) > 100:
|
||||
Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
|
||||
hosts_to_update = []
|
||||
if hosts_to_update:
|
||||
Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
|
||||
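And the companion read-back step: after the run, each host's cache file is classified as updated (mtime newer than the pre-run write time), unchanged, or cleared (file removed, e.g. by clear_facts). Again a hedged stand-in that mirrors the classification only, not the function above.

import json, os

def read_fact_cache(hosts, destination, facts_write_time):
    updated, unmodified, cleared = [], [], []
    for host in hosts:
        path = os.path.join(destination, host.name)
        if not os.path.exists(path):
            cleared.append(host.name)        # ansible removed it (likely clear_facts)
            continue
        if facts_write_time and os.path.getmtime(path) <= facts_write_time:
            unmodified.append(host.name)     # nothing touched the file during the run
            continue
        with open(path, encoding='utf-8') as f:
            try:
                json.load(f)                 # new facts would be saved back to the host here
            except ValueError:
                continue                     # unreadable file, leave the host untouched
        updated.append(host.name)
    return updated, unmodified, cleared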
@@ -29,7 +29,7 @@ from gitdb.exc import BadName as BadGitName
|
||||
|
||||
# AWX
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.constants import (
|
||||
PRIVILEGE_ESCALATION_METHODS,
|
||||
STANDARD_INVENTORY_UPDATE_ENV,
|
||||
@@ -37,6 +37,7 @@ from awx.main.constants import (
|
||||
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
||||
CONTAINER_VOLUMES_MOUNT_TYPES,
|
||||
ACTIVE_STATES,
|
||||
HOST_FACTS_FIELDS,
|
||||
)
|
||||
from awx.main.models import (
|
||||
Instance,
|
||||
@@ -63,6 +64,7 @@ from awx.main.tasks.callback import (
|
||||
)
|
||||
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
||||
from awx.main.tasks.receptor import AWXReceptorJob
|
||||
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
|
||||
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path
|
||||
@@ -315,17 +317,22 @@ class BaseTask(object):
|
||||
|
||||
return env
|
||||
|
||||
def write_inventory_file(self, inventory, private_data_dir, file_name, script_params):
|
||||
script_data = inventory.get_script_data(**script_params)
|
||||
for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items():
|
||||
# maintain a list of host_name --> host_id
|
||||
# so we can associate emitted events to Host objects
|
||||
self.runner_callback.host_map[hostname] = hv.get('remote_tower_id', '')
|
||||
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
|
||||
return self.write_private_data_file(private_data_dir, file_name, file_content, sub_dir='inventory', file_permissions=0o700)
|
||||
|
||||
def build_inventory(self, instance, private_data_dir):
|
||||
script_params = dict(hostvars=True, towervars=True)
|
||||
if hasattr(instance, 'job_slice_number'):
|
||||
script_params['slice_number'] = instance.job_slice_number
|
||||
script_params['slice_count'] = instance.job_slice_count
|
||||
script_data = instance.inventory.get_script_data(**script_params)
|
||||
# maintain a list of host_name --> host_id
|
||||
# so we can associate emitted events to Host objects
|
||||
self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
|
||||
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
|
||||
return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)
|
||||
|
||||
return self.write_inventory_file(instance.inventory, private_data_dir, 'hosts', script_params)
|
||||
|
||||
def build_args(self, instance, private_data_dir, passwords):
|
||||
raise NotImplementedError
|
||||
@@ -450,6 +457,9 @@ class BaseTask(object):
|
||||
instance.ansible_version = ansible_version_info
|
||||
instance.save(update_fields=['ansible_version'])
|
||||
|
||||
def should_use_fact_cache(self):
|
||||
return False
|
||||
|
||||
@with_path_cleanup
|
||||
@with_signal_handling
|
||||
def run(self, pk, **kwargs):
|
||||
@@ -548,7 +558,8 @@ class BaseTask(object):
|
||||
params['module'] = self.build_module_name(self.instance)
|
||||
params['module_args'] = self.build_module_args(self.instance)
|
||||
|
||||
if getattr(self.instance, 'use_fact_cache', False):
|
||||
# TODO: refactor into a better BaseTask method
|
||||
if self.should_use_fact_cache():
|
||||
# Enable Ansible fact cache.
|
||||
params['fact_cache_type'] = 'jsonfile'
|
||||
else:
|
||||
@@ -795,7 +806,7 @@ class SourceControlMixin(BaseTask):
|
||||
self.release_lock(project)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
class RunJob(SourceControlMixin, BaseTask):
|
||||
"""
|
||||
Run a job using ansible-playbook.
|
||||
@@ -1003,6 +1014,9 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
|
||||
return args
|
||||
|
||||
def should_use_fact_cache(self):
|
||||
return self.instance.use_fact_cache
|
||||
|
||||
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
|
||||
return job.playbook
|
||||
|
||||
@@ -1068,8 +1082,11 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
|
||||
# Fetch "cached" fact data from prior runs and put on the disk
|
||||
# where ansible expects to find it
|
||||
if job.use_fact_cache:
|
||||
self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
|
||||
if self.should_use_fact_cache():
|
||||
job.log_lifecycle("start_job_fact_cache")
|
||||
self.facts_write_time = start_fact_cache(
|
||||
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
|
||||
)
|
||||
|
||||
def build_project_dir(self, job, private_data_dir):
|
||||
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
|
||||
@@ -1083,10 +1100,14 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
# actual `run()` call; this _usually_ means something failed in
|
||||
# the pre_run_hook method
|
||||
return
|
||||
if job.use_fact_cache:
|
||||
job.finish_job_fact_cache(
|
||||
if self.should_use_fact_cache():
|
||||
job.log_lifecycle("finish_job_fact_cache")
|
||||
finish_fact_cache(
|
||||
job.get_hosts_for_fact_cache(),
|
||||
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
|
||||
self.facts_write_time,
|
||||
facts_write_time=self.facts_write_time,
|
||||
job_id=job.id,
|
||||
inventory_id=job.inventory_id,
|
||||
)
|
||||
|
||||
def final_run_hook(self, job, status, private_data_dir):
|
||||
@@ -1100,7 +1121,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
class RunProjectUpdate(BaseTask):
|
||||
model = ProjectUpdate
|
||||
event_model = ProjectUpdateEvent
|
||||
@@ -1422,7 +1443,7 @@ class RunProjectUpdate(BaseTask):
|
||||
return params
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
model = InventoryUpdate
|
||||
event_model = InventoryUpdateEvent
|
||||
@@ -1469,8 +1490,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
|
||||
if injector is not None:
|
||||
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
|
||||
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
|
||||
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
|
||||
|
||||
if inventory_update.source == 'scm':
|
||||
for env_k in inventory_update.source_vars_dict:
|
||||
@@ -1523,6 +1542,22 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
|
||||
args = ['ansible-inventory', '--list', '--export']
|
||||
|
||||
# special case for constructed inventories, we pass source inventories from database
|
||||
# these must come in order, and in order _before_ the constructed inventory itself
|
||||
if inventory_update.inventory.kind == 'constructed':
|
||||
inventory_update.log_lifecycle("start_job_fact_cache")
|
||||
for input_inventory in inventory_update.inventory.input_inventories.all():
|
||||
args.append('-i')
|
||||
script_params = dict(hostvars=True, towervars=True)
|
||||
source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)
|
||||
args.append(to_container_path(source_inv_path, private_data_dir))
|
||||
# Include any facts from input inventories so they can be used in filters
|
||||
start_fact_cache(
|
||||
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
|
||||
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
|
||||
inventory_id=input_inventory.id,
|
||||
)
|
||||
|
||||
# Add arguments for the source inventory file/script/thing
|
||||
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
|
||||
container_location = os.path.join(CONTAINER_ROOT, rel_path)
|
||||
@@ -1530,6 +1565,11 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
|
||||
args.append('-i')
|
||||
args.append(container_location)
|
||||
# Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596
|
||||
# limit should be usable in ansible-inventory 2.15+
|
||||
if inventory_update.limit:
|
||||
args.append('--limit')
|
||||
args.append(inventory_update.limit)
|
||||
|
||||
args.append('--output')
|
||||
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
|
||||
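For orientation, this is roughly the ansible-inventory command the code above assembles for a constructed inventory with two input inventories and a limit; note the input inventories are passed with -i before the constructed source itself, since ordering matters. All ids and container paths here are made up.

args = ['ansible-inventory', '--list', '--export']
for input_inventory_id in (7, 9):
    # one generated hosts_<id> file per input inventory, listed first
    args += ['-i', f'/runner/inventory/hosts_{input_inventory_id}']
args += ['-i', '/runner/inventory/constructed_source']  # the constructed source itself, last
args += ['--limit', 'webservers']
args += ['--output', '/runner/artifacts/123/output.json']
print(' '.join(args))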
@@ -1545,6 +1585,9 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
|
||||
return args
|
||||
|
||||
def should_use_fact_cache(self):
|
||||
return bool(self.instance.source == 'constructed')
|
||||
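The should_use_fact_cache hook follows a simple template-method pattern: the base task opts out, RunJob defers to the job's use_fact_cache flag, and RunInventoryUpdate enables it only for constructed sources. Condensed stand-in classes for illustration, not the real task classes:

from types import SimpleNamespace

class FakeBaseTask:
    def should_use_fact_cache(self):
        return False  # default: no fact cache

class FakeRunJob(FakeBaseTask):
    def __init__(self, job):
        self.instance = job
    def should_use_fact_cache(self):
        return self.instance.use_fact_cache

class FakeRunInventoryUpdate(FakeBaseTask):
    def __init__(self, inventory_update):
        self.instance = inventory_update
    def should_use_fact_cache(self):
        return self.instance.source == 'constructed'

print(FakeRunInventoryUpdate(SimpleNamespace(source='constructed')).should_use_fact_cache())  # True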
|
||||
def build_inventory(self, inventory_update, private_data_dir):
|
||||
return None # what runner expects in order to not deal with inventory
|
||||
|
||||
@@ -1663,7 +1706,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
class RunAdHocCommand(BaseTask):
|
||||
"""
|
||||
Run an ad hoc command using ansible.
|
||||
@@ -1816,7 +1859,7 @@ class RunAdHocCommand(BaseTask):
|
||||
return d
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
class RunSystemJob(BaseTask):
|
||||
model = SystemJob
|
||||
event_model = SystemJobEvent
|
||||
|
||||
@@ -28,7 +28,7 @@ from awx.main.utils.common import (
|
||||
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
|
||||
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
||||
from awx.main.models import Instance, InstanceLink, UnifiedJob
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
|
||||
# Receptorctl
|
||||
@@ -639,7 +639,7 @@ class AWXReceptorJob:
|
||||
#
|
||||
RECEPTOR_CONFIG_STARTER = (
|
||||
{'local-only': None},
|
||||
{'log-level': 'debug'},
|
||||
{'log-level': 'info'},
|
||||
{'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
|
||||
{'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
|
||||
{'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
|
||||
@@ -668,6 +668,7 @@ RECEPTOR_CONFIG_STARTER = (
|
||||
'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
|
||||
'cert': '/etc/receptor/tls/receptor.crt',
|
||||
'key': '/etc/receptor/tls/receptor.key',
|
||||
'mintls13': False,
|
||||
}
|
||||
},
|
||||
)
|
||||
@@ -712,7 +713,7 @@ def write_receptor_config():
|
||||
links.update(link_state=InstanceLink.States.ESTABLISHED)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def remove_deprovisioned_node(hostname):
|
||||
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
|
||||
@@ -47,10 +47,11 @@ from awx.main.models import (
|
||||
Inventory,
|
||||
SmartInventoryMembership,
|
||||
Job,
|
||||
HostMetric,
|
||||
)
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_local_queuename, reaper
|
||||
from awx.main.dispatch import get_task_queuename, reaper
|
||||
from awx.main.utils.common import (
|
||||
get_type_for_model,
|
||||
ignore_inventory_computed_fields,
|
||||
@@ -59,7 +60,6 @@ from awx.main.utils.common import (
|
||||
ScheduleTaskManager,
|
||||
)
|
||||
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
|
||||
@@ -115,9 +115,6 @@ def dispatch_startup():
|
||||
m = Metrics()
|
||||
m.reset_values()
|
||||
|
||||
# Update Tower's rsyslog.conf file based on logging settings in the db
|
||||
reconfigure_rsyslog()
|
||||
|
||||
|
||||
def inform_cluster_of_shutdown():
|
||||
try:
|
||||
@@ -132,7 +129,7 @@ def inform_cluster_of_shutdown():
|
||||
logger.exception('Encountered problem with normal shutdown signal.')
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def apply_cluster_membership_policies():
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
@@ -244,8 +241,10 @@ def apply_cluster_membership_policies():
|
||||
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
def handle_setting_changes(setting_keys):
|
||||
@task(queue='tower_settings_change')
|
||||
def clear_setting_cache(setting_keys):
|
||||
# log that cache is being cleared
|
||||
logger.info(f"clear_setting_cache of keys {setting_keys}")
|
||||
orig_len = len(setting_keys)
|
||||
for i in range(orig_len):
|
||||
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
|
||||
@@ -254,9 +253,6 @@ def handle_setting_changes(setting_keys):
|
||||
logger.debug('cache delete_many(%r)', cache_keys)
|
||||
cache.delete_many(cache_keys)
|
||||
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
|
||||
reconfigure_rsyslog()
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
def delete_project_files(project_path):
|
||||
@@ -286,7 +282,7 @@ def profile_sql(threshold=1, minutes=1):
|
||||
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def send_notifications(notification_list, job_id=None):
|
||||
if not isinstance(notification_list, list):
|
||||
raise TypeError("notification_list should be of type list")
|
||||
@@ -317,7 +313,7 @@ def send_notifications(notification_list, job_id=None):
|
||||
logger.exception('Error saving notification {} result.'.format(notification.id))
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def gather_analytics():
|
||||
from awx.conf.models import Setting
|
||||
from rest_framework.fields import DateTimeField
|
||||
@@ -330,7 +326,7 @@ def gather_analytics():
|
||||
analytics.gather()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def purge_old_stdout_files():
|
||||
nowtime = time.time()
|
||||
for f in os.listdir(settings.JOBOUTPUT_ROOT):
|
||||
@@ -378,12 +374,26 @@ def handle_removed_image(remove_images=None):
|
||||
_cleanup_images_and_files(remove_images=remove_images, file_pattern='')
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_images_and_files():
|
||||
_cleanup_images_and_files()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_host_metrics():
|
||||
from awx.conf.models import Setting
|
||||
from rest_framework.fields import DateTimeField
|
||||
|
||||
last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
|
||||
last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None
|
||||
|
||||
cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
|
||||
if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cluster_node_health_check(node):
|
||||
"""
|
||||
Used for the health check endpoint, refreshes the status of the instance, but must be run on the target node
|
||||
@@ -402,7 +412,7 @@ def cluster_node_health_check(node):
|
||||
this_inst.local_health_check()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def execution_node_health_check(node):
|
||||
if node == '':
|
||||
logger.warning('Remote health check incorrectly called with blank string')
|
||||
@@ -496,7 +506,7 @@ def inspect_execution_nodes(instance_list):
|
||||
execution_node_health_check.apply_async([hostname])
|
||||
|
||||
|
||||
@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
logger.debug("Cluster node heartbeat task.")
|
||||
nowtime = now()
|
||||
@@ -586,7 +596,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_receptor_workunit_reaper():
|
||||
"""
|
||||
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
|
||||
@@ -622,7 +632,7 @@ def awx_receptor_workunit_reaper():
|
||||
administrative_workunit_reaper(receptor_work_list)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_k8s_reaper():
|
||||
if not settings.RECEPTOR_RELEASE_WORK:
|
||||
return
|
||||
@@ -642,7 +652,7 @@ def awx_k8s_reaper():
|
||||
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def awx_periodic_scheduler():
|
||||
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
|
||||
if acquired is False:
|
||||
@@ -708,7 +718,7 @@ def schedule_manager_success_or_error(instance):
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_success(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
@@ -720,7 +730,7 @@ def handle_work_success(task_actual):
|
||||
schedule_manager_success_or_error(instance)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_error(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
@@ -760,7 +770,7 @@ def handle_work_error(task_actual):
|
||||
schedule_manager_success_or_error(instance)
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
"""
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
@@ -801,7 +811,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
|
||||
return False
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def update_host_smart_inventory_memberships():
|
||||
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
||||
changed_inventories = set([])
|
||||
@@ -817,7 +827,7 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
@task(queue=get_task_queuename)
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
if user_id is None:
|
||||
@@ -882,16 +892,9 @@ def _reconstruct_relationships(copy_mapping):
|
||||
new_obj.save()
|
||||
|
||||
|
||||
@task(queue=get_local_queuename)
|
||||
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
|
||||
sub_obj_list = cache.get(uuid)
|
||||
if sub_obj_list is None:
|
||||
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
|
||||
return
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
|
||||
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
|
||||
from awx.api.generics import CopyAPIView
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
model = getattr(importlib.import_module(model_module), model_name, None)
|
||||
if model is None:
|
||||
@@ -903,6 +906,28 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, u
|
||||
except ObjectDoesNotExist:
|
||||
logger.warning("Object or user no longer exists.")
|
||||
return
|
||||
|
||||
o2m_to_preserve = {}
|
||||
fields_to_preserve = set(getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []))
|
||||
|
||||
for field in model._meta.get_fields():
|
||||
if field.name in fields_to_preserve:
|
||||
if field.one_to_many:
|
||||
try:
|
||||
field_val = getattr(obj, field.name)
|
||||
except AttributeError:
|
||||
continue
|
||||
o2m_to_preserve[field.name] = field_val
|
||||
|
||||
sub_obj_list = []
|
||||
for o2m in o2m_to_preserve:
|
||||
for sub_obj in o2m_to_preserve[o2m].all():
|
||||
sub_model = type(sub_obj)
|
||||
sub_obj_list.append((sub_model.__module__, sub_model.__name__, sub_obj.pk))
|
||||
|
||||
from awx.api.generics import CopyAPIView
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
|
||||
copy_mapping = {}
|
||||
for sub_obj_setup in sub_obj_list:
|
||||
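A hedged stand-in for the preservation step added here: the task walks FIELDS_TO_PRESERVE_AT_COPY, keeps any one-to-many relations it finds, and records (module, class name, pk) triples so the related objects can be re-created on the copy. The classes below are throwaway stand-ins, with id() playing the role of a database primary key.

class FakeRelation:
    def __init__(self, items):
        self._items = items
    def all(self):
        return list(self._items)

class FakeNode:
    pass

class FakeWorkflowTemplate:
    FIELDS_TO_PRESERVE_AT_COPY = ['workflow_nodes']
    def __init__(self, nodes):
        self.workflow_nodes = FakeRelation(nodes)

obj = FakeWorkflowTemplate([FakeNode(), FakeNode()])
sub_obj_list = []
for field_name in getattr(type(obj), 'FIELDS_TO_PRESERVE_AT_COPY', []):
    relation = getattr(obj, field_name, None)
    if relation is None:
        continue
    for sub_obj in relation.all():
        sub_model = type(sub_obj)
        sub_obj_list.append((sub_model.__module__, sub_model.__name__, id(sub_obj)))
print(sub_obj_list)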
|
||||
@@ -1,9 +1,8 @@
|
||||
{
|
||||
"ANSIBLE_JINJA2_NATIVE": "True",
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"AZURE_CLIENT_ID": "fooo",
|
||||
"AZURE_CLOUD_ENVIRONMENT": "fooo",
|
||||
"AZURE_SECRET": "fooo",
|
||||
"AZURE_SUBSCRIPTION_ID": "fooo",
|
||||
"AZURE_TENANT": "fooo"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"TOWER_HOST": "https://foo.invalid",
|
||||
"TOWER_PASSWORD": "fooo",
|
||||
"TOWER_USERNAME": "fooo",
|
||||
@@ -10,4 +9,4 @@
|
||||
"CONTROLLER_USERNAME": "fooo",
|
||||
"CONTROLLER_OAUTH_TOKEN": "",
|
||||
"CONTROLLER_VERIFY_SSL": "False"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
{
|
||||
"ANSIBLE_JINJA2_NATIVE": "True",
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"AWS_ACCESS_KEY_ID": "fooo",
|
||||
"AWS_SECRET_ACCESS_KEY": "fooo",
|
||||
"AWS_SECURITY_TOKEN": "fooo",
|
||||
"AWS_SESSION_TOKEN": "fooo"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
{
|
||||
"ANSIBLE_JINJA2_NATIVE": "True",
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
|
||||
"GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}",
|
||||
"GCP_AUTH_KIND": "serviceaccount",
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"INSIGHTS_USER": "fooo",
|
||||
"INSIGHTS_PASSWORD": "fooo"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"OVIRT_INI_PATH": "{{ file_reference }}",
|
||||
"OVIRT_PASSWORD": "fooo",
|
||||
"OVIRT_URL": "https://foo.invalid",
|
||||
"OVIRT_USERNAME": "fooo"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"FOREMAN_PASSWORD": "fooo",
|
||||
"FOREMAN_SERVER": "https://foo.invalid",
|
||||
"FOREMAN_USER": "fooo"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"VMWARE_HOST": "https://foo.invalid",
|
||||
"VMWARE_PASSWORD": "fooo",
|
||||
"VMWARE_USER": "fooo",
|
||||
"VMWARE_VALIDATE_CERTS": "False"
|
||||
}
|
||||
}
|
||||
|
||||
awx/main/tests/functional/api/test_analytics.py (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
import pytest
|
||||
import requests
|
||||
from awx.api.views.analytics import AnalyticsGenericView, MissingSettings, AUTOMATION_ANALYTICS_API_URL_PATH
|
||||
from django.test.utils import override_settings
|
||||
|
||||
from awx.main.utils import get_awx_version
|
||||
from django.utils import translation
|
||||
|
||||
|
||||
class TestAnalyticsGenericView:
|
||||
@pytest.mark.parametrize(
|
||||
"existing_headers,expected_headers",
|
||||
[
|
||||
({}, {}),
|
||||
({'Hey': 'There'}, {}), # We don't forward just any headers
|
||||
({'Content-Type': 'text/html', 'Content-Length': '12'}, {'Content-Type': 'text/html', 'Content-Length': '12'}),
|
||||
# Requests will auto-add the following headers (so we don't need to test them): 'Accept-Encoding', 'User-Agent', 'Accept'
|
||||
],
|
||||
)
|
||||
def test__request_headers(self, existing_headers, expected_headers):
|
||||
expected_headers['X-Rh-Analytics-Source'] = 'controller'
|
||||
expected_headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
|
||||
expected_headers['Accept-Language'] = translation.get_language()
|
||||
|
||||
request = requests.session()
|
||||
request.headers.update(existing_headers)
|
||||
assert set(expected_headers.items()).issubset(set(AnalyticsGenericView._request_headers(request).items()))
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"path,expected_path",
|
||||
[
|
||||
('A/B', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/A/B'),
|
||||
('B', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/B'),
|
||||
('/a/b/c/analytics/reports/my_slug', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/reports/my_slug'),
|
||||
('/a/b/c/analytics/', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/'),
|
||||
('/a/b/c/analytics', f'{AUTOMATION_ANALYTICS_API_URL_PATH}//a/b/c/analytics'), # Because there is no ending / on analytics we get a weird condition
|
||||
('/a/b/c/analytics/', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/'),
|
||||
],
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test__get_analytics_path(self, path, expected_path):
|
||||
assert AnalyticsGenericView._get_analytics_path(path) == expected_path
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test__get_analytics_url_no_url(self):
|
||||
with override_settings(AUTOMATION_ANALYTICS_URL=None):
|
||||
with pytest.raises(MissingSettings):
|
||||
agw = AnalyticsGenericView()
|
||||
agw._get_analytics_url('A')
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"request_path,ending_url",
|
||||
[
|
||||
('A', 'A'),
|
||||
('A/B', 'A/B'),
|
||||
('A/B/analytics/', ''),  # we split on analytics/ and there is nothing after it, so the path is empty
|
||||
('A/B/analytics/report', 'report'),
|
||||
('A/B/analytics/report/slug', 'report/slug'),
|
||||
],
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test__get_analytics_url(self, request_path, ending_url):
|
||||
base_url = 'http://testing'
|
||||
with override_settings(AUTOMATION_ANALYTICS_URL=base_url):
|
||||
agw = AnalyticsGenericView()
|
||||
assert agw._get_analytics_url(request_path) == f'{base_url}{AUTOMATION_ANALYTICS_API_URL_PATH}/{ending_url}'
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"setting_name,setting_value,raises",
|
||||
[
|
||||
('INSIGHTS_TRACKING_STATE', None, True),
|
||||
('INSIGHTS_TRACKING_STATE', False, True),
|
||||
('INSIGHTS_TRACKING_STATE', True, False),
|
||||
('INSIGHTS_TRACKING_STATE', 'Steve', False),
|
||||
('INSIGHTS_TRACKING_STATE', 1, False),
|
||||
('INSIGHTS_TRACKING_STATE', '', True),
|
||||
],
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test__get_setting(self, setting_name, setting_value, raises):
|
||||
with override_settings(**{setting_name: setting_value}):
|
||||
if raises:
|
||||
with pytest.raises(MissingSettings):
|
||||
AnalyticsGenericView._get_setting(setting_name, False, None)
|
||||
else:
|
||||
assert AnalyticsGenericView._get_setting(setting_name, False, None) == setting_value
|
||||
awx/main/tests/functional/api/test_application_name.py (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
import pytest
|
||||
from awx.settings.application_name import get_service_name, set_application_name
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'argv,result',
|
||||
(
|
||||
([], None),
|
||||
(['-m'], None),
|
||||
(['-m', 'python'], None),
|
||||
(['-m', 'python', 'manage'], None),
|
||||
(['-m', 'python', 'manage', 'a'], 'a'),
|
||||
(['-m', 'python', 'manage', 'b', 'a'], 'b'),
|
||||
(['-m', 'python', 'manage', 'run_something', 'b', 'a'], 'something'),
|
||||
),
|
||||
)
|
||||
def test_get_service_name(argv, result):
|
||||
assert get_service_name(argv) == result
|
||||
|
||||
|
||||
@pytest.mark.parametrize('DATABASES,CLUSTER_ID,function', (({}, 12, ''), ({'default': {'ENGINE': 'sqllite3'}}, 12, '')))
|
||||
def test_set_application_name(DATABASES, CLUSTER_ID, function):
|
||||
set_application_name(DATABASES, CLUSTER_ID, function)
|
||||
@@ -594,3 +594,108 @@ class TestControlledBySCM:
|
||||
rando,
|
||||
expect=403,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestConstructedInventory:
|
||||
@pytest.fixture
|
||||
def constructed_inventory(self, organization):
|
||||
return Inventory.objects.create(name='constructed-test-inventory', kind='constructed', organization=organization)
|
||||
|
||||
def test_get_constructed_inventory(self, constructed_inventory, admin_user, get):
|
||||
inv_src = constructed_inventory.inventory_sources.first()
|
||||
inv_src.update_cache_timeout = 53
|
||||
inv_src.save(update_fields=['update_cache_timeout'])
|
||||
r = get(url=reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk}), user=admin_user, expect=200)
|
||||
assert r.data['update_cache_timeout'] == 53
|
||||
|
||||
def test_patch_constructed_inventory(self, constructed_inventory, admin_user, patch):
|
||||
inv_src = constructed_inventory.inventory_sources.first()
|
||||
assert inv_src.update_cache_timeout == 0
|
||||
assert inv_src.limit == ''
|
||||
r = patch(
|
||||
url=reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk}),
|
||||
data=dict(update_cache_timeout=54, limit='foobar'),
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
assert r.data['update_cache_timeout'] == 54
|
||||
inv_src = constructed_inventory.inventory_sources.first()
|
||||
assert inv_src.update_cache_timeout == 54
|
||||
assert inv_src.limit == 'foobar'
|
||||
|
||||
def test_patch_constructed_inventory_generated_source_limits_editable_fields(self, constructed_inventory, admin_user, project, patch):
|
||||
inv_src = constructed_inventory.inventory_sources.first()
|
||||
r = patch(
|
||||
url=inv_src.get_absolute_url(),
|
||||
data={
|
||||
'source': 'scm',
|
||||
'source_project': project.pk,
|
||||
'source_path': '',
|
||||
'source_vars': 'plugin: a.b.c',
|
||||
},
|
||||
expect=400,
|
||||
user=admin_user,
|
||||
)
|
||||
assert str(r.data['error'][0]) == "Cannot change field 'source' on a constructed inventory source."
|
||||
|
||||
# Make sure it didn't get updated before we got the error
|
||||
inv_src_after_err = constructed_inventory.inventory_sources.first()
|
||||
assert inv_src.id == inv_src_after_err.id
|
||||
assert inv_src.source == inv_src_after_err.source
|
||||
assert inv_src.source_project == inv_src_after_err.source_project
|
||||
assert inv_src.source_path == inv_src_after_err.source_path
|
||||
assert inv_src.source_vars == inv_src_after_err.source_vars
|
||||
|
||||
def test_patch_constructed_inventory_generated_source_allows_source_vars_edit(self, constructed_inventory, admin_user, patch):
|
||||
inv_src = constructed_inventory.inventory_sources.first()
|
||||
patch(
|
||||
url=inv_src.get_absolute_url(),
|
||||
data={
|
||||
'source_vars': 'plugin: a.b.c',
|
||||
},
|
||||
expect=200,
|
||||
user=admin_user,
|
||||
)
|
||||
|
||||
inv_src_after_patch = constructed_inventory.inventory_sources.first()
|
||||
|
||||
# sanity checks
|
||||
assert inv_src.id == inv_src_after_patch.id
|
||||
assert inv_src.source == 'constructed'
|
||||
assert inv_src_after_patch.source == 'constructed'
|
||||
assert inv_src.source_vars == ''
|
||||
|
||||
assert inv_src_after_patch.source_vars == 'plugin: a.b.c'
|
||||
|
||||
def test_create_constructed_inventory(self, constructed_inventory, admin_user, post, organization):
|
||||
r = post(
|
||||
url=reverse('api:constructed_inventory_list'),
|
||||
data=dict(name='constructed-inventory-just-created', kind='constructed', organization=organization.id, update_cache_timeout=55, limit='foobar'),
|
||||
user=admin_user,
|
||||
expect=201,
|
||||
)
|
||||
pk = r.data['id']
|
||||
constructed_inventory = Inventory.objects.get(pk=pk)
|
||||
inv_src = constructed_inventory.inventory_sources.first()
|
||||
assert inv_src.update_cache_timeout == 55
|
||||
assert inv_src.limit == 'foobar'
|
||||
|
||||
def test_get_absolute_url_for_constructed_inventory(self, constructed_inventory, admin_user, get):
|
||||
"""
|
||||
If we are using the normal inventory API endpoint to look at a
|
||||
constructed inventory, then we should get a normal inventory API route
|
||||
back. If we are accessing it via the special constructed inventory
|
||||
endpoint, then we should get that back.
|
||||
"""
|
||||
|
||||
url_const = reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk})
|
||||
url_inv = reverse('api:inventory_detail', kwargs={'pk': constructed_inventory.pk})
|
||||
|
||||
const_r = get(url=url_const, user=admin_user, expect=200)
|
||||
inv_r = get(url=url_inv, user=admin_user, expect=200)
|
||||
assert const_r.data['url'] == url_const
|
||||
assert inv_r.data['url'] == url_inv
|
||||
assert inv_r.data['url'] != const_r.data['url']
|
||||
assert inv_r.data['related']['constructed_url'] == url_const
|
||||
assert const_r.data['related']['constructed_url'] == url_const
|
||||
|
||||
@@ -3,7 +3,7 @@ import pytest
|
||||
# AWX
|
||||
from awx.api.serializers import JobTemplateSerializer
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate, Organization, Project
|
||||
from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate, Organization, Project, Inventory
|
||||
from awx.main.migrations import _save_password_keys as save_password_keys
|
||||
|
||||
# Django
|
||||
@@ -353,3 +353,19 @@ def test_job_template_branch_prompt_error(project, inventory, post, admin_user):
|
||||
expect=400,
|
||||
)
|
||||
assert 'Project does not allow overriding branch' in str(r.data['ask_scm_branch_on_launch'])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_template_missing_inventory(project, inventory, admin_user, post):
|
||||
jt = JobTemplate.objects.create(
|
||||
name='test-jt', inventory=inventory, ask_inventory_on_launch=True, project=project, playbook='helloworld.yml', host_config_key='abcd'
|
||||
)
|
||||
Inventory.objects.get(pk=inventory.pk).delete()
|
||||
r = post(
|
||||
url=reverse('api:job_template_callback', kwargs={'pk': jt.pk}),
|
||||
data={'host_config_key': 'abcd'},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert r.status_code == 400
|
||||
assert "Cannot start automatically, an inventory is required." in str(r.data)
|
||||
|
||||
@@ -153,3 +153,13 @@ def test_post_org_approval_notification(get, post, admin, notification_template,
|
||||
response = get(url, admin)
|
||||
assert response.status_code == 200
|
||||
assert len(response.data['results']) == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_post_wfj_notification(get, post, admin, workflow_job, notification):
|
||||
workflow_job.notifications.add(notification)
|
||||
workflow_job.save()
|
||||
url = reverse("api:workflow_job_notifications_list", kwargs={'pk': workflow_job.pk})
|
||||
response = get(url, admin)
|
||||
assert response.status_code == 200
|
||||
assert len(response.data['results']) == 1
|
||||
|
||||
@@ -329,3 +329,21 @@ def test_galaxy_credential_association(alice, admin, organization, post, get):
|
||||
'Public Galaxy 4',
|
||||
'Public Galaxy 5',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_org_admin_credential_count(org_admin, admin, organization, post, get):
|
||||
galaxy = CredentialType.defaults['galaxy_api_token']()
|
||||
galaxy.save()
|
||||
|
||||
for i in range(3):
|
||||
cred = Credential.objects.create(credential_type=galaxy, name=f'test_{i}', inputs={'url': 'https://galaxy.ansible.com/'})
|
||||
url = reverse('api:organization_galaxy_credentials_list', kwargs={'pk': organization.pk})
|
||||
post(url, {'associate': True, 'id': cred.pk}, user=admin, expect=204)
|
||||
# org admin should see all associated galaxy credentials
|
||||
resp = get(url, user=org_admin)
|
||||
assert resp.data['count'] == 3
|
||||
# removing one to validate new count
|
||||
post(url, {'disassociate': True, 'id': Credential.objects.get(name='test_1').pk}, user=admin, expect=204)
|
||||
resp_new = get(url, user=org_admin)
|
||||
assert resp_new.data['count'] == 2
|
||||
|
||||
75
awx/main/tests/functional/api/test_serializers.py
Normal file
@@ -0,0 +1,75 @@
import pytest

from django.test.utils import override_settings

from rest_framework.serializers import ValidationError

from awx.api.serializers import UserSerializer
from django.contrib.auth.models import User


@pytest.mark.parametrize(
    "password,min_length,min_digits,min_upper,min_special,expect_error",
    [
        # Test length
        ("a", 1, 0, 0, 0, False),
        ("a", 2, 0, 0, 0, True),
        ("aa", 2, 0, 0, 0, False),
        ("aaabcDEF123$%^", 2, 0, 0, 0, False),
        # Test digits
        ("a", 0, 1, 0, 0, True),
        ("1", 0, 1, 0, 0, False),
        ("1", 0, 2, 0, 0, True),
        ("12", 0, 2, 0, 0, False),
        ("12abcDEF123$%^", 0, 2, 0, 0, False),
        # Test upper
        ("a", 0, 0, 1, 0, True),
        ("A", 0, 0, 1, 0, False),
        ("A", 0, 0, 2, 0, True),
        ("AB", 0, 0, 2, 0, False),
        ("ABabcDEF123$%^", 0, 0, 2, 0, False),
        # Test special
        ("a", 0, 0, 0, 1, True),
        ("!", 0, 0, 0, 1, False),
        ("!", 0, 0, 0, 2, True),
        ("!@", 0, 0, 0, 2, False),
        ("!@abcDEF123$%^", 0, 0, 0, 2, False),
    ],
)
@pytest.mark.django_db
def test_validate_password_rules(password, min_length, min_digits, min_upper, min_special, expect_error):
    user_serializer = UserSerializer()

    # First test password with no params, this should always pass
    try:
        user_serializer.validate_password(password)
    except ValidationError:
        assert False, f"Password {password} should not have a validation issue if no params are used"

    with override_settings(
        LOCAL_PASSWORD_MIN_LENGTH=min_length, LOCAL_PASSWORD_MIN_DIGITS=min_digits, LOCAL_PASSWORD_MIN_UPPER=min_upper, LOCAL_PASSWORD_MIN_SPECIAL=min_special
    ):
        if expect_error:
            with pytest.raises(ValidationError):
                user_serializer.validate_password(password)
        else:
            try:
                user_serializer.validate_password(password)
            except ValidationError:
                assert False, "validate_password raised an unexpected exception"


@pytest.mark.django_db
def test_validate_password_too_long():
    password_max_length = User._meta.get_field('password').max_length
    password = "x" * password_max_length

    user_serializer = UserSerializer()
    try:
        user_serializer.validate_password(password)
    except ValidationError:
        assert False, f"Password {password} should not raise a validation error at the maximum length"

    password = f"{password}x"
    with pytest.raises(ValidationError):
        user_serializer.validate_password(password)
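For reference, the rules encoded by the parametrized cases above can be written as a small standalone checker. This is a sketch of the expected behaviour under the LOCAL_PASSWORD_MIN_* settings, not the actual UserSerializer.validate_password implementation; in particular, treating any non-alphanumeric character as "special" is an assumption.

def check_password_rules(password, min_length=0, min_digits=0, min_upper=0, min_special=0):
    # Sketch of the rules the test cases encode; not AWX's real validator.
    if len(password) < min_length:
        return False
    if sum(c.isdigit() for c in password) < min_digits:
        return False
    if sum(c.isupper() for c in password) < min_upper:
        return False
    if sum(not c.isalnum() for c in password) < min_special:
        return False
    return True

# mirrors the ("aa", 2, 0, 0, 0, False) and ("a", 0, 1, 0, 0, True) rows above
assert check_password_rules("aa", min_length=2)
assert not check_password_rules("a", min_digits=1)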
54
awx/main/tests/functional/api/test_workflow_job.py
Normal file
@@ -0,0 +1,54 @@
import pytest


from awx.api.versioning import reverse


@pytest.mark.django_db
@pytest.mark.parametrize(
    "is_admin, status",
    [
        [True, 201],  # if they're a WFJ admin, they get a 201
        [False, 403],  # if they're not a WFJ *nor* org admin, they get a 403
    ],
)
def test_workflow_job_relaunch(workflow_job, post, admin_user, alice, is_admin, status):
    url = reverse("api:workflow_job_relaunch", kwargs={'pk': workflow_job.pk})
    if is_admin:
        post(url, user=admin_user, expect=status)
    else:
        post(url, user=alice, expect=status)


@pytest.mark.django_db
def test_workflow_job_relaunch_failure(workflow_job, post, admin_user):
    workflow_job.is_sliced_job = True
    workflow_job.job_template = None
    workflow_job.save()
    url = reverse("api:workflow_job_relaunch", kwargs={'pk': workflow_job.pk})
    post(url, user=admin_user, expect=400)


@pytest.mark.django_db
def test_workflow_job_relaunch_not_inventory_failure(workflow_job, post, admin_user):
    workflow_job.is_sliced_job = True
    workflow_job.inventory = None
    workflow_job.save()
    url = reverse("api:workflow_job_relaunch", kwargs={'pk': workflow_job.pk})
    post(url, user=admin_user, expect=400)


@pytest.mark.django_db
@pytest.mark.parametrize(
    "is_admin, status",
    [
        [True, 202],  # if they're a WFJ admin, they get a 202
        [False, 403],  # if they're not a WFJ *nor* org admin, they get a 403
    ],
)
def test_workflow_job_cancel(workflow_job, post, admin_user, alice, is_admin, status):
    url = reverse("api:workflow_job_cancel", kwargs={'pk': workflow_job.pk})
    if is_admin:
        post(url, user=admin_user, expect=status)
    else:
        post(url, user=alice, expect=status)
@@ -511,6 +511,14 @@ def group(inventory):
|
||||
return inventory.groups.create(name='single-group')
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def constructed_inventory(organization):
|
||||
"""
|
||||
Creates a new constructed inventory (kind='constructed').
|
||||
"""
|
||||
return Inventory.objects.create(name='dummy1', kind='constructed', organization=organization)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def inventory_source(inventory):
|
||||
# by making it ec2, the credential is not required
|
||||
@@ -735,6 +743,30 @@ def system_job_factory(system_job_template, admin):
|
||||
return factory
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def wfjt(workflow_job_template_factory, organization):
|
||||
objects = workflow_job_template_factory('test_workflow', organization=organization, persisted=True)
|
||||
return objects.workflow_job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def wfjt_with_nodes(workflow_job_template_factory, organization, job_template):
|
||||
objects = workflow_job_template_factory(
|
||||
'test_workflow', organization=organization, workflow_job_template_nodes=[{'unified_job_template': job_template}], persisted=True
|
||||
)
|
||||
return objects.workflow_job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def wfjt_node(wfjt_with_nodes):
|
||||
return wfjt_with_nodes.workflow_job_template_nodes.all()[0]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_job(wfjt):
|
||||
return wfjt.workflow_jobs.create(name='test_workflow')
|
||||
|
||||
|
||||
def dumps(value):
|
||||
return DjangoJSONEncoder().encode(value)
|
||||
|
||||
|
||||
@@ -3,178 +3,209 @@ import pytest
|
||||
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary
|
||||
from django.db.models import Q
|
||||
|
||||
from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary, HostMetric
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.models.events.emit_event_detail')
|
||||
def test_parent_changed(emit):
|
||||
j = Job()
|
||||
j.save()
|
||||
JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
|
||||
assert JobEvent.objects.count() == 1
|
||||
for e in JobEvent.objects.all():
|
||||
assert e.changed is False
|
||||
class TestEvents:
|
||||
def setup_method(self):
|
||||
self.hostnames = []
|
||||
self.host_map = dict()
|
||||
self.inventory = None
|
||||
self.job = None
|
||||
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='runner_on_ok', event_data={'res': {'changed': ['localhost']}}).save()
|
||||
# the `playbook_on_stats` event is where we update the parent changed linkage
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
|
||||
events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
|
||||
assert events.count() == 2
|
||||
for e in events.all():
|
||||
assert e.changed is True
|
||||
@mock.patch('awx.main.models.events.emit_event_detail')
|
||||
def test_parent_changed(self, emit):
|
||||
j = Job()
|
||||
j.save()
|
||||
JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
|
||||
assert JobEvent.objects.count() == 1
|
||||
for e in JobEvent.objects.all():
|
||||
assert e.changed is False
|
||||
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='runner_on_ok', event_data={'res': {'changed': ['localhost']}}).save()
|
||||
# the `playbook_on_stats` event is where we update the parent changed linkage
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
|
||||
events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
|
||||
assert events.count() == 2
|
||||
for e in events.all():
|
||||
assert e.changed is True
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
|
||||
@mock.patch('awx.main.models.events.emit_event_detail')
|
||||
def test_parent_failed(emit, event):
|
||||
j = Job()
|
||||
j.save()
|
||||
JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
|
||||
assert JobEvent.objects.count() == 1
|
||||
for e in JobEvent.objects.all():
|
||||
assert e.failed is False
|
||||
@pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
|
||||
@mock.patch('awx.main.models.events.emit_event_detail')
|
||||
def test_parent_failed(self, emit, event):
|
||||
j = Job()
|
||||
j.save()
|
||||
JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
|
||||
assert JobEvent.objects.count() == 1
|
||||
for e in JobEvent.objects.all():
|
||||
assert e.failed is False
|
||||
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event=event).save()
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event=event).save()
|
||||
|
||||
# the `playbook_on_stats` event is where we update the parent failed linkage
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
|
||||
events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
|
||||
assert events.count() == 2
|
||||
for e in events.all():
|
||||
assert e.failed is True
|
||||
# the `playbook_on_stats` event is where we update the parent failed linkage
|
||||
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
|
||||
events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
|
||||
assert events.count() == 2
|
||||
for e in events.all():
|
||||
assert e.failed is True
|
||||
|
||||
def test_host_summary_generation(self):
|
||||
self._generate_hosts(100)
|
||||
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_host_summary_generation():
|
||||
hostnames = [f'Host {i}' for i in range(100)]
|
||||
inv = Inventory()
|
||||
inv.save()
|
||||
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
|
||||
j = Job(inventory=inv)
|
||||
j.save()
|
||||
host_map = dict((host.name, host.id) for host in inv.hosts.all())
|
||||
JobEvent.create_from_data(
|
||||
job_id=j.pk,
|
||||
assert self.job.job_host_summaries.count() == len(self.hostnames)
|
||||
assert sorted([s.host_name for s in self.job.job_host_summaries.all()]) == sorted(self.hostnames)
|
||||
|
||||
for s in self.job.job_host_summaries.all():
|
||||
assert self.host_map[s.host_name] == s.host_id
|
||||
assert s.ok == len(s.host_name)
|
||||
assert s.changed == 0
|
||||
assert s.dark == 0
|
||||
assert s.failures == 0
|
||||
assert s.ignored == 0
|
||||
assert s.processed == 0
|
||||
assert s.rescued == 0
|
||||
assert s.skipped == 0
|
||||
|
||||
for host in Host.objects.all():
|
||||
assert host.last_job_id == self.job.id
|
||||
assert host.last_job_host_summary.host == host
|
||||
|
||||
def test_host_summary_generation_with_deleted_hosts(self):
|
||||
self._generate_hosts(10)
|
||||
|
||||
# delete half of the hosts during the playbook run
|
||||
for h in self.inventory.hosts.all()[:5]:
|
||||
h.delete()
|
||||
|
||||
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
|
||||
|
||||
ids = sorted([s.host_id or -1 for s in self.job.job_host_summaries.order_by('id').all()])
|
||||
names = sorted([s.host_name for s in self.job.job_host_summaries.all()])
|
||||
assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
|
||||
assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']
|
||||
|
||||
def test_host_summary_generation_with_limit(self):
|
||||
# Make an inventory with 10 hosts, run a playbook with a --limit
|
||||
# pointed at *one* host,
|
||||
# Verify that *only* that host has an associated JobHostSummary and that
|
||||
# *only* that host has an updated value for .last_job.
|
||||
self._generate_hosts(10)
|
||||
|
||||
# by making the playbook_on_stats *only* include Host 1, we're emulating
|
||||
# the behavior of a `--limit=Host 1`
|
||||
matching_host = Host.objects.get(name='Host 1')
|
||||
self._create_job_event(ok={matching_host.name: len(matching_host.name)}) # effectively, limit=Host 1
|
||||
|
||||
# since the playbook_on_stats only references one host,
|
||||
# there should *only* be one JobHostSummary record (and it should
|
||||
# be related to the appropriate Host)
|
||||
assert JobHostSummary.objects.count() == 1
|
||||
for h in Host.objects.all():
|
||||
if h.name == 'Host 1':
|
||||
assert h.last_job_id == self.job.id
|
||||
assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
|
||||
else:
|
||||
# all other hosts in the inventory should remain untouched
|
||||
assert h.last_job_id is None
|
||||
assert h.last_job_host_summary_id is None
|
||||
|
||||
def test_host_metrics_insert(self):
|
||||
self._generate_hosts(10)
|
||||
|
||||
self._create_job_event(
|
||||
ok=dict((hostname, len(hostname)) for hostname in self.hostnames[0:3]),
|
||||
failures=dict((hostname, len(hostname)) for hostname in self.hostnames[3:6]),
|
||||
processed=dict((hostname, len(hostname)) for hostname in self.hostnames[6:9]),
|
||||
skipped=dict((hostname, len(hostname)) for hostname in [self.hostnames[9]]),
|
||||
)
|
||||
|
||||
metrics = HostMetric.objects.all()
|
||||
assert len(metrics) == 10
|
||||
for hm in metrics:
|
||||
assert hm.automated_counter == 1
|
||||
assert hm.last_automation is not None
|
||||
assert hm.deleted is False
|
||||
|
||||
def test_host_metrics_update(self):
|
||||
self._generate_hosts(12)
|
||||
|
||||
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
|
||||
|
||||
# Soft delete 6 host metrics
|
||||
for hm in HostMetric.objects.filter(id__in=[1, 3, 5, 7, 9, 11]):
|
||||
hm.soft_delete()
|
||||
|
||||
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
|
||||
assert len(HostMetric.objects.filter(Q(deleted=True) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
|
||||
|
||||
# hostnames in 'ignored' and 'rescued' stats are ignored
|
||||
self.job = Job(inventory=self.inventory)
|
||||
self.job.save()
|
||||
self._create_job_event(
|
||||
ignored=dict((hostname, len(hostname)) for hostname in self.hostnames[0:6]),
|
||||
rescued=dict((hostname, len(hostname)) for hostname in self.hostnames[6:11]),
|
||||
)
|
||||
|
||||
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
|
||||
assert len(HostMetric.objects.filter(Q(deleted=True) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
|
||||
|
||||
# hostnames in 'changed', 'dark', 'failures', 'ok', 'processed', 'skipped' are processed
|
||||
self.job = Job(inventory=self.inventory)
|
||||
self.job.save()
|
||||
self._create_job_event(
|
||||
changed=dict((hostname, len(hostname)) for hostname in self.hostnames[0:2]),
|
||||
dark=dict((hostname, len(hostname)) for hostname in self.hostnames[2:4]),
|
||||
failures=dict((hostname, len(hostname)) for hostname in self.hostnames[4:6]),
|
||||
ok=dict((hostname, len(hostname)) for hostname in self.hostnames[6:8]),
|
||||
processed=dict((hostname, len(hostname)) for hostname in self.hostnames[8:10]),
|
||||
skipped=dict((hostname, len(hostname)) for hostname in self.hostnames[10:12]),
|
||||
)
|
||||
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
|
||||
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
|
||||
|
||||
def _generate_hosts(self, cnt, id_from=0):
|
||||
self.hostnames = [f'Host {i}' for i in range(id_from, id_from + cnt)]
|
||||
self.inventory = Inventory()
|
||||
self.inventory.save()
|
||||
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=self.inventory.id) for h in self.hostnames])
|
||||
self.job = Job(inventory=self.inventory)
|
||||
self.job.save()
|
||||
|
||||
# host map is a data structure that tracks a mapping of host name --> ID
|
||||
# for the inventory, _regardless_ of whether or not there's a limit
|
||||
# applied to the actual playbook run
|
||||
self.host_map = dict((host.name, host.id) for host in self.inventory.hosts.all())
|
||||
|
||||
def _create_job_event(
|
||||
self,
|
||||
parent_uuid='abc123',
|
||||
event='playbook_on_stats',
|
||||
event_data={
|
||||
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
|
||||
'changed': {},
|
||||
'dark': {},
|
||||
'failures': {},
|
||||
'ignored': {},
|
||||
'processed': {},
|
||||
'rescued': {},
|
||||
'skipped': {},
|
||||
},
|
||||
host_map=host_map,
|
||||
).save()
|
||||
|
||||
assert j.job_host_summaries.count() == len(hostnames)
|
||||
assert sorted([s.host_name for s in j.job_host_summaries.all()]) == sorted(hostnames)
|
||||
|
||||
for s in j.job_host_summaries.all():
|
||||
assert host_map[s.host_name] == s.host_id
|
||||
assert s.ok == len(s.host_name)
|
||||
assert s.changed == 0
|
||||
assert s.dark == 0
|
||||
assert s.failures == 0
|
||||
assert s.ignored == 0
|
||||
assert s.processed == 0
|
||||
assert s.rescued == 0
|
||||
assert s.skipped == 0
|
||||
|
||||
for host in Host.objects.all():
|
||||
assert host.last_job_id == j.id
|
||||
assert host.last_job_host_summary.host == host
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_host_summary_generation_with_deleted_hosts():
|
||||
hostnames = [f'Host {i}' for i in range(10)]
|
||||
inv = Inventory()
|
||||
inv.save()
|
||||
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
|
||||
j = Job(inventory=inv)
|
||||
j.save()
|
||||
host_map = dict((host.name, host.id) for host in inv.hosts.all())
|
||||
|
||||
# delete half of the hosts during the playbook run
|
||||
for h in inv.hosts.all()[:5]:
|
||||
h.delete()
|
||||
|
||||
JobEvent.create_from_data(
|
||||
job_id=j.pk,
|
||||
parent_uuid='abc123',
|
||||
event='playbook_on_stats',
|
||||
event_data={
|
||||
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
|
||||
'changed': {},
|
||||
'dark': {},
|
||||
'failures': {},
|
||||
'ignored': {},
|
||||
'processed': {},
|
||||
'rescued': {},
|
||||
'skipped': {},
|
||||
},
|
||||
host_map=host_map,
|
||||
).save()
|
||||
|
||||
ids = sorted([s.host_id or -1 for s in j.job_host_summaries.order_by('id').all()])
|
||||
names = sorted([s.host_name for s in j.job_host_summaries.all()])
|
||||
assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
|
||||
assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_host_summary_generation_with_limit():
|
||||
# Make an inventory with 10 hosts, run a playbook with a --limit
|
||||
# pointed at *one* host,
|
||||
# Verify that *only* that host has an associated JobHostSummary and that
|
||||
# *only* that host has an updated value for .last_job.
|
||||
hostnames = [f'Host {i}' for i in range(10)]
|
||||
inv = Inventory()
|
||||
inv.save()
|
||||
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
|
||||
j = Job(inventory=inv)
|
||||
j.save()
|
||||
|
||||
# host map is a data structure that tracks a mapping of host name --> ID
|
||||
# for the inventory, _regardless_ of whether or not there's a limit
|
||||
# applied to the actual playbook run
|
||||
host_map = dict((host.name, host.id) for host in inv.hosts.all())
|
||||
|
||||
# by making the playbook_on_stats *only* include Host 1, we're emulating
|
||||
# the behavior of a `--limit=Host 1`
|
||||
matching_host = Host.objects.get(name='Host 1')
|
||||
JobEvent.create_from_data(
|
||||
job_id=j.pk,
|
||||
parent_uuid='abc123',
|
||||
event='playbook_on_stats',
|
||||
event_data={
|
||||
'ok': {matching_host.name: len(matching_host.name)}, # effectively, limit=Host 1
|
||||
'changed': {},
|
||||
'dark': {},
|
||||
'failures': {},
|
||||
'ignored': {},
|
||||
'processed': {},
|
||||
'rescued': {},
|
||||
'skipped': {},
|
||||
},
|
||||
host_map=host_map,
|
||||
).save()
|
||||
|
||||
# since the playbook_on_stats only references one host,
|
||||
# there should *only* be one JobHostSummary record (and it should
|
||||
# be related to the appropriate Host)
|
||||
assert JobHostSummary.objects.count() == 1
|
||||
for h in Host.objects.all():
|
||||
if h.name == 'Host 1':
|
||||
assert h.last_job_id == j.id
|
||||
assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
|
||||
else:
|
||||
# all other hosts in the inventory should remain untouched
|
||||
assert h.last_job_id is None
|
||||
assert h.last_job_host_summary_id is None
|
||||
ok=None,
|
||||
changed=None,
|
||||
dark=None,
|
||||
failures=None,
|
||||
ignored=None,
|
||||
processed=None,
|
||||
rescued=None,
|
||||
skipped=None,
|
||||
):
|
||||
JobEvent.create_from_data(
|
||||
job_id=self.job.pk,
|
||||
parent_uuid=parent_uuid,
|
||||
event=event,
|
||||
event_data={
|
||||
'ok': ok or {},
|
||||
'changed': changed or {},
|
||||
'dark': dark or {},
|
||||
'failures': failures or {},
|
||||
'ignored': ignored or {},
|
||||
'processed': processed or {},
|
||||
'rescued': rescued or {},
|
||||
'skipped': skipped or {},
|
||||
},
|
||||
host_map=self.host_map,
|
||||
).save()
|
||||
|
||||
@@ -20,3 +20,53 @@ def test_host_metrics_generation():
|
||||
date_today = now().strftime('%Y-%m-%d')
|
||||
result = HostMetric.objects.filter(first_automation__startswith=date_today).count()
|
||||
assert result == len(hostnames)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_soft_delete():
|
||||
hostnames = [f'Host to delete {i}' for i in range(2)]
|
||||
current_time = now()
|
||||
HostMetric.objects.bulk_create([HostMetric(hostname=h, last_automation=current_time, automated_counter=42) for h in hostnames])
|
||||
|
||||
hm = HostMetric.objects.get(hostname="Host to delete 0")
|
||||
assert hm.last_deleted is None
|
||||
|
||||
last_deleted = None
|
||||
for _ in range(3):
|
||||
# soft delete 1st
|
||||
# 2nd/3rd delete don't have an effect
|
||||
hm.soft_delete()
|
||||
if last_deleted is None:
|
||||
last_deleted = hm.last_deleted
|
||||
|
||||
assert hm.deleted is True
|
||||
assert hm.deleted_counter == 1
|
||||
assert hm.last_deleted == last_deleted
|
||||
assert hm.automated_counter == 42
|
||||
|
||||
# 2nd record is not touched
|
||||
hm = HostMetric.objects.get(hostname="Host to delete 1")
|
||||
assert hm.deleted is False
|
||||
assert hm.deleted_counter == 0
|
||||
assert hm.last_deleted is None
|
||||
assert hm.automated_counter == 42
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_soft_restore():
|
||||
current_time = now()
|
||||
HostMetric.objects.create(hostname="Host 1", last_automation=current_time, deleted=True)
|
||||
HostMetric.objects.create(hostname="Host 2", last_automation=current_time, deleted=True, last_deleted=current_time)
|
||||
HostMetric.objects.create(hostname="Host 3", last_automation=current_time, deleted=False, last_deleted=current_time)
|
||||
HostMetric.objects.all().update(automated_counter=42, deleted_counter=10)
|
||||
|
||||
# 1. deleted, last_deleted not null
|
||||
for hm in HostMetric.objects.all():
|
||||
for _ in range(3):
|
||||
hm.soft_restore()
|
||||
assert hm.deleted is False
|
||||
assert hm.automated_counter == 42 and hm.deleted_counter == 10
|
||||
if hm.hostname == "Host 1":
|
||||
assert hm.last_deleted is None
|
||||
else:
|
||||
assert hm.last_deleted == current_time
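The two tests above pin down the soft-delete semantics: only the first call changes state, the counters are bumped only on a real transition, and restoring never clears last_deleted. A rough sketch of methods with that behaviour (illustrative only, not the actual HostMetric model code; now() is the timezone helper already imported in this module):

def soft_delete(metric):
    # idempotent: the 2nd/3rd call changes nothing
    if not metric.deleted:
        metric.deleted = True
        metric.deleted_counter = (metric.deleted_counter or 0) + 1
        metric.last_deleted = now()
        metric.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])

def soft_restore(metric):
    # only flips the flag; counters and last_deleted are left as they were
    if metric.deleted:
        metric.deleted = False
        metric.save(update_fields=['deleted'])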
|
||||
|
||||
@@ -169,7 +169,8 @@ class TestInventorySourceInjectors:
|
||||
CLOUD_PROVIDERS constant contains the same names as what are
|
||||
defined within the injectors
|
||||
"""
|
||||
-assert set(CLOUD_PROVIDERS) == set(InventorySource.injectors.keys())
+# slight exception case for constructed, because it has a FQCN but is not a cloud source
+assert set(CLOUD_PROVIDERS) | set(['constructed']) == set(InventorySource.injectors.keys())
|
||||
|
||||
@pytest.mark.parametrize('source,filename', [('ec2', 'aws_ec2.yml'), ('openstack', 'openstack.yml'), ('gce', 'gcp_compute.yml')])
|
||||
def test_plugin_filenames(self, source, filename):
|
||||
|
||||
@@ -123,6 +123,24 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
|
||||
assert set(group_2_2_copy.hosts.all()) == set()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"is_admin, can_copy, status",
|
||||
[
|
||||
[True, True, 200],
|
||||
[False, False, 200],
|
||||
],
|
||||
)
|
||||
def test_workflow_job_template_copy_access(get, admin_user, alice, workflow_job_template, is_admin, can_copy, status):
|
||||
url = reverse('api:workflow_job_template_copy', kwargs={'pk': workflow_job_template.pk})
|
||||
if is_admin:
|
||||
response = get(url, user=admin_user, expect=status)
|
||||
else:
|
||||
workflow_job_template.organization.auditor_role.members.add(alice)
|
||||
response = get(url, user=alice, expect=status)
|
||||
assert response.data['can_copy'] == can_copy
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization):
|
||||
'''
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import pytest
|
||||
|
||||
-from awx.main.models import InstanceGroup
+from awx.main.models import InstanceGroup, Inventory
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
@@ -38,6 +38,16 @@ def test_instance_group_ordering(source_model):
|
||||
assert source_model.instance_groups.through.objects.count() == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True)
|
||||
def test_instance_group_bulk_add(source_model):
|
||||
groups = [InstanceGroup.objects.create(name='host-%d' % i) for i in range(5)]
|
||||
groups.reverse()
|
||||
with pytest.raises(RuntimeError) as err:
|
||||
source_model.instance_groups.add(*groups)
|
||||
assert 'Ordered many-to-many fields do not support multiple objects' in str(err)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True)
|
||||
def test_instance_group_middle_deletion(source_model):
|
||||
@@ -66,3 +76,33 @@ def test_explicit_ordering(source_model):
|
||||
|
||||
assert [g.name for g in source_model.instance_groups.all()] == ['host-4', 'host-3', 'host-2', 'host-1', 'host-0']
|
||||
assert [g.name for g in source_model.instance_groups.order_by('name').all()] == ['host-0', 'host-1', 'host-2', 'host-3', 'host-4']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_input_inventories_ordering():
|
||||
constructed_inventory = Inventory.objects.create(name='my_constructed', kind='constructed')
|
||||
input_inventories = [Inventory.objects.create(name='inv-%d' % i) for i in range(5)]
|
||||
input_inventories.reverse()
|
||||
for inv in input_inventories:
|
||||
constructed_inventory.input_inventories.add(inv)
|
||||
|
||||
assert [g.name for g in constructed_inventory.input_inventories.all()] == ['inv-4', 'inv-3', 'inv-2', 'inv-1', 'inv-0']
|
||||
assert [(row.position, row.input_inventory.name) for row in constructed_inventory.input_inventories.through.objects.all()] == [
|
||||
(0, 'inv-4'),
|
||||
(1, 'inv-3'),
|
||||
(2, 'inv-2'),
|
||||
(3, 'inv-1'),
|
||||
(4, 'inv-0'),
|
||||
]
|
||||
|
||||
constructed_inventory.input_inventories.remove(input_inventories[0])
|
||||
assert [g.name for g in constructed_inventory.input_inventories.all()] == ['inv-3', 'inv-2', 'inv-1', 'inv-0']
|
||||
assert [(row.position, row.input_inventory.name) for row in constructed_inventory.input_inventories.through.objects.all()] == [
|
||||
(0, 'inv-3'),
|
||||
(1, 'inv-2'),
|
||||
(2, 'inv-1'),
|
||||
(3, 'inv-0'),
|
||||
]
|
||||
|
||||
constructed_inventory.input_inventories.clear()
|
||||
assert constructed_inventory.input_inventories.through.objects.count() == 0
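The positional assertions above only make sense if the input_inventories relation goes through a model that records an explicit position and orders on it. A sketch of such a through model follows; the class name and foreign-key layout are illustrative, not the actual AWX schema, though the position and input_inventory fields match what the test reads.

from django.db import models

class InputInventoryMembership(models.Model):
    # illustrative through model: position is maintained as rows are added and removed
    constructed_inventory = models.ForeignKey('main.Inventory', related_name='+', on_delete=models.CASCADE)
    input_inventory = models.ForeignKey('main.Inventory', related_name='+', on_delete=models.CASCADE)
    position = models.PositiveIntegerField(default=0)

    class Meta:
        ordering = ('position',)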
|
||||
|
||||
@@ -94,7 +94,8 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan
|
||||
|
||||
ig_all = instance_group_factory("all", instances=[i1, i2, i3])
|
||||
ig_dup = instance_group_factory("duplicates", instances=[i1])
|
||||
-project.organization.instance_groups.add(ig_all, ig_dup)
+project.organization.instance_groups.add(ig_all)
+project.organization.instance_groups.add(ig_dup)
|
||||
actual_num_instances = Instance.objects.count()
|
||||
list_response = get(reverse('api:instance_list'), user=system_auditor)
|
||||
api_num_instances_auditor = list(list_response.data.items())[0][1]
|
||||
|
||||
@@ -0,0 +1,61 @@
|
||||
import pytest
|
||||
from awx.main.models import Inventory
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_constructed_inventory_post(post, admin_user, organization):
|
||||
inv1 = Inventory.objects.create(name='dummy1', kind='constructed', organization=organization)
|
||||
inv2 = Inventory.objects.create(name='dummy2', kind='constructed', organization=organization)
|
||||
resp = post(
|
||||
url=reverse('api:inventory_input_inventories', kwargs={'pk': inv1.pk}),
|
||||
data={'id': inv2.pk},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_add_constructed_inventory_source(post, admin_user, constructed_inventory):
|
||||
resp = post(
|
||||
url=reverse('api:inventory_inventory_sources_list', kwargs={'pk': constructed_inventory.pk}),
|
||||
data={'name': 'dummy1', 'source': 'constructed'},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_add_constructed_inventory_host(post, admin_user, constructed_inventory):
|
||||
resp = post(
|
||||
url=reverse('api:inventory_hosts_list', kwargs={'pk': constructed_inventory.pk}),
|
||||
data={'name': 'dummy1'},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_add_constructed_inventory_group(post, admin_user, constructed_inventory):
|
||||
resp = post(
|
||||
reverse('api:inventory_groups_list', kwargs={'pk': constructed_inventory.pk}),
|
||||
data={'name': 'group-test'},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_edit_constructed_inventory_source(patch, admin_user, inventory_source_factory):
|
||||
inv_src = inventory_source_factory(name='dummy1', source='constructed')
|
||||
resp = patch(
|
||||
reverse('api:inventory_source_detail', kwargs={'pk': inv_src.pk}),
|
||||
data={'description': inv_src.name},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert resp.status_code == 400
|
||||
@@ -26,12 +26,12 @@ def test_python_and_js_licenses():
|
||||
return (is_gpl, is_lgpl)
|
||||
|
||||
def find_embedded_source_version(path, name):
|
||||
for entry in os.listdir(path):
|
||||
# Check variations of '-' and '_' in filenames due to python
|
||||
for fname in [name, name.replace('-', '_')]:
|
||||
if entry.startswith(fname) and entry.endswith('.tar.gz'):
|
||||
v = entry.split(name + '-')[1].split('.tar.gz')[0]
|
||||
return v
|
||||
files = os.listdir(path)
|
||||
tgz_files = [f for f in files if f.endswith('.tar.gz')]
|
||||
for tgz in tgz_files:
|
||||
pkg_name = tgz.split('-')[0].split('_')[0]
|
||||
if pkg_name == name:
|
||||
return tgz.split('-')[1].split('.tar.gz')[0]
|
||||
return None
|
||||
|
||||
list = {}
|
||||
|
||||
@@ -218,3 +218,31 @@ def test_webhook_notification_pointed_to_a_redirect_launch_endpoint(post, admin,
|
||||
)
|
||||
|
||||
assert n1.send("", n1.messages.get("success").get("body")) == 1
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_update_notification_template(admin, notification_template):
|
||||
notification_template.messages['workflow_approval'] = {
|
||||
"running": {
|
||||
"message": None,
|
||||
"body": None,
|
||||
}
|
||||
}
|
||||
notification_template.save()
|
||||
|
||||
workflow_approval_message = {
|
||||
"approved": {
|
||||
"message": None,
|
||||
"body": None,
|
||||
},
|
||||
"running": {
|
||||
"message": "test-message",
|
||||
"body": None,
|
||||
},
|
||||
}
|
||||
notification_template.messages['workflow_approval'] = workflow_approval_message
|
||||
notification_template.save()
|
||||
|
||||
subevents = sorted(notification_template.messages["workflow_approval"].keys())
|
||||
assert subevents == ["approved", "running"]
|
||||
assert notification_template.messages['workflow_approval'] == workflow_approval_message
|
||||
|
||||
@@ -13,30 +13,6 @@ from rest_framework.exceptions import PermissionDenied
|
||||
from awx.main.models import InventorySource, JobLaunchConfig
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def wfjt(workflow_job_template_factory, organization):
|
||||
objects = workflow_job_template_factory('test_workflow', organization=organization, persisted=True)
|
||||
return objects.workflow_job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def wfjt_with_nodes(workflow_job_template_factory, organization, job_template):
|
||||
objects = workflow_job_template_factory(
|
||||
'test_workflow', organization=organization, workflow_job_template_nodes=[{'unified_job_template': job_template}], persisted=True
|
||||
)
|
||||
return objects.workflow_job_template
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def wfjt_node(wfjt_with_nodes):
|
||||
return wfjt_with_nodes.workflow_job_template_nodes.all()[0]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def workflow_job(wfjt):
|
||||
return wfjt.workflow_jobs.create(name='test_workflow')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestWorkflowJobTemplateAccess:
|
||||
def test_random_user_no_edit(self, wfjt, rando):
|
||||
|
||||
28
awx/main/tests/settings_for_test.py
Normal file
@@ -0,0 +1,28 @@
# Python
from unittest import mock
import uuid

# patch python-ldap
with mock.patch('__main__.__builtins__.dir', return_value=[]):
    import ldap  # NOQA

# Load development settings for base variables.
from awx.settings.development import *  # NOQA

# Some things make decisions based on settings.SETTINGS_MODULE, so this is done for that
SETTINGS_MODULE = 'awx.settings.development'

# Use SQLite for unit tests instead of PostgreSQL. If the lines below are
# commented out, Django will create the test_awx-dev database in PostgreSQL to
# run unit tests.
CACHES = {'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-{}'.format(str(uuid.uuid4()))}}
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),  # noqa
        'TEST': {
            # Test database cannot be :memory: for inventory tests.
            'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3')  # noqa
        },
    }
}
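A quick way to confirm these overrides are active once the module is selected (for example by pointing DJANGO_SETTINGS_MODULE at awx.main.tests.settings_for_test, assuming pytest-django or a plain Django test runner picks it up):

from django.conf import settings

# illustrative sanity check; not part of the settings file itself
assert settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3'
assert settings.CACHES['default']['BACKEND'].endswith('LocMemCache')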
@@ -50,6 +50,7 @@ class TestApiRootView:
|
||||
'activity_stream',
|
||||
'workflow_job_templates',
|
||||
'workflow_jobs',
|
||||
'analytics',
|
||||
]
|
||||
view = ApiVersionRootView()
|
||||
ret = view.get(mocker.MagicMock())
|
||||
|
||||
@@ -6,37 +6,35 @@ import time
|
||||
import pytest
|
||||
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
Inventory,
|
||||
Host,
|
||||
)
|
||||
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
|
||||
|
||||
from django.utils.timezone import now
|
||||
|
||||
from datetime import timedelta
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def hosts(inventory):
|
||||
def ref_time():
|
||||
return now() - timedelta(seconds=5)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def hosts(ref_time):
|
||||
inventory = Inventory(id=5)
|
||||
return [
|
||||
Host(name='host1', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
|
||||
Host(name='host2', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
|
||||
Host(name='host3', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
|
||||
Host(name=u'Iñtërnâtiônàlizætiøn', ansible_facts={"a": 1, "b": 2}, inventory=inventory),
|
||||
Host(name='host1', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
|
||||
Host(name='host2', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
|
||||
Host(name='host3', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
|
||||
Host(name=u'Iñtërnâtiônàlizætiøn', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=ref_time, inventory=inventory),
|
||||
]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def inventory():
|
||||
return Inventory(id=5)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def job(mocker, hosts, inventory):
|
||||
j = Job(inventory=inventory, id=2)
|
||||
j._get_inventory_hosts = mocker.Mock(return_value=hosts)
|
||||
return j
|
||||
|
||||
|
||||
def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
|
||||
def test_start_job_fact_cache(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
for host in hosts:
|
||||
filepath = os.path.join(fact_cache, host.name)
|
||||
@@ -46,25 +44,43 @@ def test_start_job_fact_cache(hosts, job, inventory, tmpdir):
|
||||
assert os.path.getmtime(filepath) <= last_modified
|
||||
|
||||
|
||||
def test_fact_cache_with_invalid_path_traversal(job, inventory, tmpdir, mocker):
|
||||
job._get_inventory_hosts = mocker.Mock(
|
||||
return_value=[
|
||||
Host(
|
||||
name='../foo',
|
||||
ansible_facts={"a": 1, "b": 2},
|
||||
),
|
||||
]
|
||||
)
|
||||
def test_fact_cache_with_invalid_path_traversal(tmpdir):
|
||||
hosts = [
|
||||
Host(
|
||||
name='../foo',
|
||||
ansible_facts={"a": 1, "b": 2},
|
||||
),
|
||||
]
|
||||
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
# a file called "foo" should _not_ be written outside the facts dir
|
||||
assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker, tmpdir):
|
||||
def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
# the hosts fixture was modified 5s ago, which is more than 2s
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=2)
|
||||
assert last_modified is None
|
||||
|
||||
for host in hosts:
|
||||
assert not os.path.exists(os.path.join(fact_cache, host.name))
|
||||
|
||||
|
||||
def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
# the hosts fixture was modified 5s ago, which is less than 7s
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=7)
|
||||
assert last_modified
|
||||
|
||||
for host in hosts:
|
||||
assert os.path.exists(os.path.join(fact_cache, host.name))
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
@@ -80,18 +96,19 @@ def test_finish_job_fact_cache_with_existing_data(job, hosts, inventory, mocker,
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
job.finish_job_fact_cache(fact_cache, last_modified)
|
||||
finish_fact_cache(hosts, fact_cache, last_modified)
|
||||
|
||||
for host in (hosts[0], hosts[2], hosts[3]):
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified is None
|
||||
assert host.ansible_facts_modified == ref_time
|
||||
assert hosts[1].ansible_facts == ansible_facts_new
|
||||
assert hosts[1].ansible_facts_modified > ref_time
|
||||
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpdir):
|
||||
def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
@@ -103,22 +120,23 @@ def test_finish_job_fact_cache_with_bad_data(job, hosts, inventory, mocker, tmpd
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
job.finish_job_fact_cache(fact_cache, last_modified)
|
||||
finish_fact_cache(hosts, fact_cache, last_modified)
|
||||
|
||||
bulk_update.assert_not_called()
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_clear(job, hosts, inventory, mocker, tmpdir):
|
||||
def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = job.start_job_fact_cache(fact_cache, timeout=0)
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
os.remove(os.path.join(fact_cache, hosts[1].name))
|
||||
job.finish_job_fact_cache(fact_cache, last_modified)
|
||||
finish_fact_cache(hosts, fact_cache, last_modified)
|
||||
|
||||
for host in (hosts[0], hosts[2], hosts[3]):
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified is None
|
||||
assert host.ansible_facts_modified == ref_time
|
||||
assert hosts[1].ansible_facts == {}
|
||||
assert hosts[1].ansible_facts_modified > ref_time
|
||||
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
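The two timeout tests above imply that start_fact_cache skips any host whose ansible_facts_modified timestamp is older than the given number of seconds, refuses hostnames that would escape the cache directory, and returns None when nothing was written. A rough sketch of that behaviour under those assumptions (not the actual awx.main.tasks.facts code):

import codecs
import json
import os
from datetime import timedelta

from django.utils.timezone import now

def start_fact_cache_sketch(hosts, destination, timeout=0):
    os.makedirs(destination, mode=0o700, exist_ok=True)
    last_write = None
    for host in hosts:
        if timeout and host.ansible_facts_modified and host.ansible_facts_modified < now() - timedelta(seconds=timeout):
            continue  # facts are older than the timeout window; leave the cache cold
        path = os.path.join(destination, host.name)
        if not os.path.realpath(path).startswith(os.path.realpath(destination) + os.sep):
            continue  # refuse path traversal such as a host named '../foo'
        with codecs.open(path, 'w', encoding='utf-8') as f:
            json.dump(host.ansible_facts, f)
        last_write = os.path.getmtime(path)
    return last_write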
|
||||
|
||||