Compare commits

..

2 Commits

Author      SHA1        Message                                                 Date
Gabe Muniz  3f83647600  Fix tests to fail when over drift over heartbeat time   2023-03-17 00:24:25 -04:00
Gabe Muniz  6461ecc762  Fix race with heartbeat and reaper logic                2023-03-16 19:16:51 -04:00
382 changed files with 10479 additions and 20570 deletions


@@ -3,7 +3,7 @@ name: CI
 env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
   CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-  DEV_DOCKER_OWNER: ${{ github.repository_owner }}
+  DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
   COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
 on:
   pull_request:


@@ -6,10 +6,6 @@ on:
       - opened
       - reopened
 
-permissions:
-  contents: read  # to fetch code
-  issues: write  # to label issues
-
 jobs:
   triage:
     runs-on: ubuntu-latest


@@ -7,10 +7,6 @@ on:
       - reopened
       - synchronize
 
-permissions:
-  contents: read  # to determine modified files (actions/labeler)
-  pull-requests: write  # to add labels to PRs (actions/labeler)
-
 jobs:
   triage:
     runs-on: ubuntu-latest


@@ -8,9 +8,6 @@ on:
   release:
     types: [published]
 
-permissions:
-  contents: read  # to fetch code (actions/checkout)
-
 jobs:
   promote:
     if: endsWith(github.repository, '/awx')

.gitignore

@@ -157,11 +157,7 @@ use_dev_supervisor.txt
 *.unison.tmp
 *.#
 /awx/ui/.ui-built
+/Dockerfile
 /_build/
 /_build_kube_dev/
-/Dockerfile
-/Dockerfile.dev
 /Dockerfile.kube-dev
+awx/ui_next/src
+awx/ui_next/build


@@ -6,7 +6,6 @@ recursive-include awx/templates *.html
 recursive-include awx/api/templates *.md *.html *.yml
 recursive-include awx/ui/build *.html
 recursive-include awx/ui/build *
-recursive-include awx/ui_next/build *
 recursive-include awx/playbooks *.yml
 recursive-include awx/lib/site-packages *
 recursive-include awx/plugins *.ps1

Makefile

@@ -1,6 +1,4 @@
--include awx/ui_next/Makefile
-PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
+PYTHON ?= python3.9
 DOCKER_COMPOSE ?= docker-compose
 OFFICIAL ?= no
 NODE ?= node
@@ -37,15 +35,10 @@ SPLUNK ?= false
 PROMETHEUS ?= false
 # If set to true docker-compose will also start a grafana instance
 GRAFANA ?= false
-# If set to true docker-compose will also start a tacacs+ instance
-TACACS ?= false
 
 VENV_BASE ?= /var/lib/awx/venv
 
-DEV_DOCKER_OWNER ?= ansible
-# Docker will only accept lowercase, so github names like Paul need to be paul
-DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
-DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
+DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
 DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
 
 RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
@@ -91,7 +84,7 @@ clean-schema:
 clean-languages:
     rm -f $(I18N_FLAG_FILE)
-    find ./awx/locale/ -type f -regex '.*\.mo$$' -delete
+    find ./awx/locale/ -type f -regex ".*\.mo$" -delete
 
 ## Remove temporary build files, compiled Python files.
 clean: clean-ui clean-api clean-awxkit clean-dist
@@ -222,6 +215,12 @@ daphne:
     fi; \
     daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer
 
+wsbroadcast:
+    @if [ "$(VENV_BASE)" ]; then \
+        . $(VENV_BASE)/awx/bin/activate; \
+    fi; \
+    $(PYTHON) manage.py run_wsbroadcast
+
 ## Run to start the background task dispatcher for development.
 dispatcher:
     @if [ "$(VENV_BASE)" ]; then \
@@ -229,6 +228,7 @@ dispatcher:
     fi; \
     $(PYTHON) manage.py run_dispatcher
 
+
 ## Run to start the zeromq callback receiver
 receiver:
     @if [ "$(VENV_BASE)" ]; then \
@@ -245,34 +245,6 @@ jupyter:
     fi; \
     $(MANAGEMENT_COMMAND) shell_plus --notebook
 
-## Start the rsyslog configurer process in background in development environment.
-run-rsyslog-configurer:
-    @if [ "$(VENV_BASE)" ]; then \
-        . $(VENV_BASE)/awx/bin/activate; \
-    fi; \
-    $(PYTHON) manage.py run_rsyslog_configurer
-
-## Start cache_clear process in background in development environment.
-run-cache-clear:
-    @if [ "$(VENV_BASE)" ]; then \
-        . $(VENV_BASE)/awx/bin/activate; \
-    fi; \
-    $(PYTHON) manage.py run_cache_clear
-
-## Start the wsrelay process in background in development environment.
-run-wsrelay:
-    @if [ "$(VENV_BASE)" ]; then \
-        . $(VENV_BASE)/awx/bin/activate; \
-    fi; \
-    $(PYTHON) manage.py run_wsrelay
-
-## Start the heartbeat process in background in development environment.
-run-heartbeet:
-    @if [ "$(VENV_BASE)" ]; then \
-        . $(VENV_BASE)/awx/bin/activate; \
-    fi; \
-    $(PYTHON) manage.py run_heartbeet
-
 reports:
     mkdir -p $@
@@ -299,13 +271,13 @@ swagger: reports
 check: black
 
 api-lint:
-    BLACK_ARGS="--check" $(MAKE) black
+    BLACK_ARGS="--check" make black
     flake8 awx
     yamllint -s .
 
-## Run egg_info_dev to generate awx.egg-info for development.
 awx-link:
     [ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
+    cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link
 
 TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
 PYTEST_ARGS ?= -n auto
@@ -324,7 +296,7 @@ github_ci_setup:
     # CI_GITHUB_TOKEN is defined in .github files
     echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
     docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
-    $(MAKE) docker-compose-build
+    make docker-compose-build
 
 ## Runs AWX_DOCKER_CMD inside a new docker container.
 docker-runner:
@@ -374,7 +346,7 @@ test_collection_sanity:
     rm -rf $(COLLECTION_INSTALL)
     if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
     ansible --version
-    COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
+    COLLECTION_VERSION=1.0.0 make install_collection
     cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
 
 test_collection_integration: install_collection
@@ -446,7 +418,7 @@ ui-devel: awx/ui/node_modules
         cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
         cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
         cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
     fi
 
 ui-devel-instrumented: awx/ui/node_modules
     $(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -473,12 +445,11 @@ ui-test-general:
     $(NPM_BIN) run --prefix awx/ui pretest
     $(NPM_BIN) run --prefix awx/ui/ test-general --runInBand
 
-# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
-
 HEADLESS ?= no
 ifeq ($(HEADLESS), yes)
 dist/$(SDIST_TAR_FILE):
 else
-dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
+dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
 endif
     $(PYTHON) -m build -s
     ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
@@ -524,9 +495,9 @@ docker-compose-sources: .git/hooks/pre-commit
         -e enable_ldap=$(LDAP) \
         -e enable_splunk=$(SPLUNK) \
         -e enable_prometheus=$(PROMETHEUS) \
-        -e enable_grafana=$(GRAFANA) \
-        -e enable_tacacs=$(TACACS) \
-        $(EXTRA_SOURCES_ANSIBLE_OPTS)
+        -e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)
 
 docker-compose: awx/projects docker-compose-sources
     $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
@@ -559,25 +530,16 @@ docker-compose-container-group-clean:
     fi
     rm -rf tools/docker-compose-minikube/_sources/
 
-.PHONY: Dockerfile.dev
-## Generate Dockerfile.dev for awx_devel image
-Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-    ansible-playbook tools/ansible/dockerfile.yml \
-        -e dockerfile_name=Dockerfile.dev \
-        -e build_dev=True \
-        -e receptor_image=$(RECEPTOR_IMAGE)
-
-## Build awx_devel image for docker compose development environment
-docker-compose-build: Dockerfile.dev
-    DOCKER_BUILDKIT=1 docker build \
-        -f Dockerfile.dev \
-        -t $(DEVEL_IMAGE_NAME) \
-        --build-arg BUILDKIT_INLINE_CACHE=1 \
-        --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
+## Base development image build
+docker-compose-build:
+    ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
+    DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
+        --build-arg BUILDKIT_INLINE_CACHE=1 \
+        --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
 
 docker-clean:
     -$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-    -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
+    -$(foreach image_id,$(shell docker images --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
 
 docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
     docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -592,7 +554,7 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
     $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
 
 docker-compose-container-group:
-    MINIKUBE_CONTAINER_GROUP=true $(MAKE) docker-compose
+    MINIKUBE_CONTAINER_GROUP=true make docker-compose
 
 clean-elk:
     docker stop tools_kibana_1
@@ -609,36 +571,12 @@ VERSION:
     @echo "awx: $(VERSION)"
 
 PYTHON_VERSION:
-    @echo "$(subst python,,$(PYTHON))"
+    @echo "$(PYTHON)" | sed 's:python::'
 
-.PHONY: version-for-buildyml
-version-for-buildyml:
-    @echo $(firstword $(subst +, ,$(VERSION)))
-# version-for-buildyml prints a special version string for build.yml,
-# chopping off the sha after the '+' sign.
-# tools/ansible/build.yml was doing this: make print-VERSION | cut -d + -f -1
-# This does the same thing in native make without
-# the pipe or the extra processes, and now the pb does `make version-for-buildyml`
-# Example:
-# 22.1.1.dev38+g523c0d9781 becomes 22.1.1.dev38
-
 .PHONY: Dockerfile
-## Generate Dockerfile for awx image
 Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
-    ansible-playbook tools/ansible/dockerfile.yml \
-        -e receptor_image=$(RECEPTOR_IMAGE) \
-        -e headless=$(HEADLESS)
+    ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
 
-## Build awx image for deployment on Kubernetes environment.
-awx-kube-build: Dockerfile
-    DOCKER_BUILDKIT=1 docker build -f Dockerfile \
-        --build-arg VERSION=$(VERSION) \
-        --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
-        --build-arg HEADLESS=$(HEADLESS) \
-        -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
-
-.PHONY: Dockerfile.kube-dev
-## Generate Docker.kube-dev for awx_kube_devel image
 Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
     ansible-playbook tools/ansible/dockerfile.yml \
         -e dockerfile_name=Dockerfile.kube-dev \
@@ -653,6 +591,13 @@ awx-kube-dev-build: Dockerfile.kube-dev
         --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
         -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
 
+## Build awx image for deployment on Kubernetes environment.
+awx-kube-build: Dockerfile
+    DOCKER_BUILDKIT=1 docker build -f Dockerfile \
+        --build-arg VERSION=$(VERSION) \
+        --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
+        --build-arg HEADLESS=$(HEADLESS) \
+        -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
+
 # Translation TASKS
 # --------------------------------------
@@ -672,7 +617,6 @@ messages:
     fi; \
     $(PYTHON) manage.py makemessages -l en_us --keep-pot
 
-.PHONY: print-%
 print-%:
     @echo $($*)
@@ -684,12 +628,12 @@ HELP_FILTER=.PHONY
 ## Display help targets
 help:
     @printf "Available targets:\n"
-    @$(MAKE) -s help/generate | grep -vE "\w($(HELP_FILTER))"
+    @make -s help/generate | grep -vE "\w($(HELP_FILTER))"
 
 ## Display help for all targets
 help/all:
     @printf "Available targets:\n"
-    @$(MAKE) -s help/generate
+    @make -s help/generate
 
 ## Generate help output from MAKEFILE_LIST
 help/generate:
@@ -710,7 +654,3 @@ help/generate:
     } \
     { lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
     @printf "\n"
-
-## Display help for ui-next targets
-help/ui-next:
-    @$(MAKE) -s help MAKEFILE_LIST="awx/ui_next/Makefile"


@@ -5,11 +5,13 @@
 import inspect
 import logging
 import time
+import uuid
 
 # Django
 from django.conf import settings
 from django.contrib.auth import views as auth_views
 from django.contrib.contenttypes.models import ContentType
+from django.core.cache import cache
 from django.core.exceptions import FieldDoesNotExist
 from django.db import connection, transaction
 from django.db.models.fields.related import OneToOneRel
@@ -33,7 +35,7 @@ from rest_framework.negotiation import DefaultContentNegotiation
 # AWX
 from awx.api.filters import FieldLookupBackend
 from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
-from awx.main.access import optimize_queryset
+from awx.main.access import access_registry
 from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
 from awx.main.utils.db import get_all_field_names
 from awx.main.utils.licensing import server_product_name
@@ -362,7 +364,12 @@ class GenericAPIView(generics.GenericAPIView, APIView):
             return self.queryset._clone()
         elif self.model is not None:
             qs = self.model._default_manager
-            qs = optimize_queryset(qs)
+            if self.model in access_registry:
+                access_class = access_registry[self.model]
+                if access_class.select_related:
+                    qs = qs.select_related(*access_class.select_related)
+                if access_class.prefetch_related:
+                    qs = qs.prefetch_related(*access_class.prefetch_related)
             return qs
         else:
             return super(GenericAPIView, self).get_queryset()
@@ -505,9 +512,6 @@ class SubListAPIView(ParentMixin, ListAPIView):
     # And optionally (user must have given access permission on parent object
     # to view sublist):
     #   parent_access = 'read'
-    # filter_read_permission sets whether or not to override the default intersection behavior
-    # implemented here
-    filter_read_permission = True
 
     def get_description_context(self):
         d = super(SubListAPIView, self).get_description_context()
@@ -522,16 +526,12 @@ class SubListAPIView(ParentMixin, ListAPIView):
     def get_queryset(self):
         parent = self.get_parent_object()
         self.check_parent_access(parent)
-        if not self.filter_read_permission:
-            return optimize_queryset(self.get_sublist_queryset(parent))
-        qs = self.request.user.get_queryset(self.model)
-        if hasattr(self, 'parent_key'):
-            # This is vastly preferable for ReverseForeignKey relationships
-            return qs.filter(**{self.parent_key: parent})
-        return qs.distinct() & self.get_sublist_queryset(parent).distinct()
+        qs = self.request.user.get_queryset(self.model).distinct()
+        sublist_qs = self.get_sublist_queryset(parent)
+        return qs & sublist_qs
 
     def get_sublist_queryset(self, parent):
-        return getattrd(parent, self.relationship)
+        return getattrd(parent, self.relationship).distinct()
 
 
 class DestroyAPIView(generics.DestroyAPIView):
@@ -580,6 +580,15 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
         d.update({'parent_key': getattr(self, 'parent_key', None)})
         return d
 
+    def get_queryset(self):
+        if hasattr(self, 'parent_key'):
+            # Prefer this filtering because ForeignKey allows us more assumptions
+            parent = self.get_parent_object()
+            self.check_parent_access(parent)
+            qs = self.request.user.get_queryset(self.model)
+            return qs.filter(**{self.parent_key: parent})
+        return super(SubListCreateAPIView, self).get_queryset()
+
     def create(self, request, *args, **kwargs):
         # If the object ID was not specified, it probably doesn't exist in the
         # DB yet. We want to see if we can create it. The URL may choose to
@@ -958,11 +967,16 @@ class CopyAPIView(GenericAPIView):
         if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
             new_obj.admin_role.members.add(request.user)
         if sub_objs:
+            # store the copied object dict into cache, because it's
+            # often too large for postgres' notification bus
+            # (which has a default maximum message size of 8k)
+            key = 'deep-copy-{}'.format(str(uuid.uuid4()))
+            cache.set(key, sub_objs, timeout=3600)
             permission_check_func = None
             if hasattr(type(self), 'deep_copy_permission_check_func'):
                 permission_check_func = (type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func')
             trigger_delayed_deep_copy(
-                self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, permission_check_func=permission_check_func
+                self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, key, permission_check_func=permission_check_func
             )
         serializer = self._get_copy_return_serializer(new_obj)
         headers = {'Location': new_obj.get_absolute_url(request=request)}
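
A note on the cache handoff added in the copy view above: the copied sub-object map is parked in the shared Django cache and only the key is passed to the background task, because the payload is often larger than postgres' default 8k notification message limit. A minimal sketch of that pattern (the helper names and the consuming side are illustrative assumptions, not code from this diff):

    import uuid

    from django.core.cache import cache


    def stash_deep_copy_payload(sub_objs):
        # Park the potentially large payload in the shared cache and hand back
        # only the key; the key alone is small enough for the dispatcher bus.
        key = 'deep-copy-{}'.format(uuid.uuid4())
        cache.set(key, sub_objs, timeout=3600)
        return key


    def load_deep_copy_payload(key):
        # Hypothetical consuming side: the background task re-reads the payload
        # by key and treats a missing entry as expired or already consumed.
        sub_objs = cache.get(key)
        cache.delete(key)
        return sub_objs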


@@ -25,7 +25,6 @@ __all__ = [
     'UserPermission',
     'IsSystemAdminOrAuditor',
     'WorkflowApprovalPermission',
-    'AnalyticsPermission',
 ]
@@ -251,16 +250,3 @@ class IsSystemAdminOrAuditor(permissions.BasePermission):
 class WebhookKeyPermission(permissions.BasePermission):
     def has_object_permission(self, request, view, obj):
         return request.user.can_access(view.model, 'admin', obj, request.data)
-
-
-class AnalyticsPermission(permissions.BasePermission):
-    """
-    Allows GET/POST/OPTIONS to system admins and system auditors.
-    """
-
-    def has_permission(self, request, view):
-        if not (request.user and request.user.is_authenticated):
-            return False
-        if request.method in ["GET", "POST", "OPTIONS"]:
-            return request.user.is_superuser or request.user.is_system_auditor
-        return request.user.is_superuser
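
For reference, the AnalyticsPermission class removed above follows the standard DRF permission protocol, so a view opts in through permission_classes. A minimal sketch of such a view (the view class is hypothetical, and the import path assumes the class lived in awx.api.permissions before this change):

    from rest_framework.response import Response
    from rest_framework.views import APIView

    from awx.api.permissions import AnalyticsPermission  # import path assumed


    class ExampleAnalyticsView(APIView):
        # DRF calls has_permission() on each listed class before dispatch:
        # system admins and system auditors pass for GET/POST/OPTIONS,
        # anything else requires a superuser.
        permission_classes = (AnalyticsPermission,)

        def get(self, request, format=None):
            return Response({'enabled': True})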


@@ -56,8 +56,6 @@ from awx.main.models import (
     ExecutionEnvironment,
     Group,
     Host,
-    HostMetric,
-    HostMetricSummaryMonthly,
     Instance,
     InstanceGroup,
     InstanceLink,
@@ -158,7 +156,6 @@ SUMMARIZABLE_FK_FIELDS = {
         'kind',
     ),
     'host': DEFAULT_SUMMARY_FIELDS,
-    'constructed_host': DEFAULT_SUMMARY_FIELDS,
     'group': DEFAULT_SUMMARY_FIELDS,
     'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
     'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
@@ -192,11 +189,6 @@ SUMMARIZABLE_FK_FIELDS = {
 }
 
-# These fields can be edited on a constructed inventory's generated source (possibly by using the constructed
-# inventory's special API endpoint, but also by using the inventory sources endpoint).
-CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')
-
 
 def reverse_gfk(content_object, request):
     """
     Computes a reverse for a GenericForeignKey field.
@@ -954,7 +946,7 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
 class UserSerializer(BaseSerializer):
 
-    password = serializers.CharField(required=False, default='', help_text=_('Field used to change the password.'))
+    password = serializers.CharField(required=False, default='', write_only=True, help_text=_('Write-only field used to change the password.'))
     ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
     external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
     is_system_auditor = serializers.BooleanField(default=False)
@@ -981,12 +973,7 @@ class UserSerializer(BaseSerializer):
     def to_representation(self, obj):
         ret = super(UserSerializer, self).to_representation(obj)
-        if self.get_external_account(obj):
-            # If this is an external account it shouldn't have a password field
-            ret.pop('password', None)
-        else:
-            # If its an internal account lets assume there is a password and return $encrypted$ to the user
-            ret['password'] = '$encrypted$'
+        ret.pop('password', None)
         if obj and type(self) is UserSerializer:
             ret['auth'] = obj.social_auth.values('provider', 'uid')
         return ret
@@ -1000,31 +987,13 @@ class UserSerializer(BaseSerializer):
             django_validate_password(value)
         if not self.instance and value in (None, ''):
             raise serializers.ValidationError(_('Password required for new User.'))
-
-        # Check if a password is too long
-        password_max_length = User._meta.get_field('password').max_length
-        if len(value) > password_max_length:
-            raise serializers.ValidationError(_('Password max length is {}'.format(password_max_length)))
-
-        if getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH', 0) and len(value) < getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'):
-            raise serializers.ValidationError(_('Password must be at least {} characters long.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'))))
-
-        if getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS', 0) and sum(c.isdigit() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'):
-            raise serializers.ValidationError(_('Password must contain at least {} digits.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'))))
-
-        if getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER', 0) and sum(c.isupper() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER'):
-            raise serializers.ValidationError(
-                _('Password must contain at least {} uppercase characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER')))
-            )
-
-        if getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL', 0) and sum(not c.isalnum() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL'):
-            raise serializers.ValidationError(
-                _('Password must contain at least {} special characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL')))
-            )
-
         return value
 
     def _update_password(self, obj, new_password):
         # For now we're not raising an error, just not saving password for
         # users managed by LDAP who already have an unusable password set.
-        # Get external password will return something like ldap or enterprise or None if the user isn't external. We only want to allow a password update for a None option
-        if new_password and new_password != '$encrypted$' and not self.get_external_account(obj):
+        if new_password and not self.get_external_account(obj):
             obj.set_password(new_password)
             obj.save(update_fields=['password'])
@@ -1701,8 +1670,13 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
         res.update(
             dict(
                 hosts=self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
+                groups=self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
+                root_groups=self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
                 variable_data=self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
                 script=self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
+                tree=self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
+                inventory_sources=self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
+                update_inventory_sources=self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
                 activity_stream=self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
                 job_templates=self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
                 ad_hoc_commands=self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
@@ -1713,18 +1687,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
                 labels=self.reverse('api:inventory_label_list', kwargs={'pk': obj.pk}),
             )
         )
-        if obj.kind in ('', 'constructed'):
-            # links not relevant for the "old" smart inventory
-            res['groups'] = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk})
-            res['root_groups'] = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk})
-            res['update_inventory_sources'] = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk})
-            res['inventory_sources'] = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk})
-            res['tree'] = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk})
         if obj.organization:
             res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
-        if obj.kind == 'constructed':
-            res['input_inventories'] = self.reverse('api:inventory_input_inventories', kwargs={'pk': obj.pk})
-            res['constructed_url'] = self.reverse('api:constructed_inventory_detail', kwargs={'pk': obj.pk})
         return res
 
     def to_representation(self, obj):
@@ -1766,91 +1730,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
         return super(InventorySerializer, self).validate(attrs)
 
 
-class ConstructedFieldMixin(serializers.Field):
-    def get_attribute(self, instance):
-        if not hasattr(instance, '_constructed_inv_src'):
-            instance._constructed_inv_src = instance.inventory_sources.first()
-        inv_src = instance._constructed_inv_src
-        return super().get_attribute(inv_src)  # yoink
-
-
-class ConstructedCharField(ConstructedFieldMixin, serializers.CharField):
-    pass
-
-
-class ConstructedIntegerField(ConstructedFieldMixin, serializers.IntegerField):
-    pass
-
-
-class ConstructedInventorySerializer(InventorySerializer):
-    source_vars = ConstructedCharField(
-        required=False,
-        default=None,
-        allow_blank=True,
-        help_text=_('The source_vars for the related auto-created inventory source, special to constructed inventory.'),
-    )
-    update_cache_timeout = ConstructedIntegerField(
-        required=False,
-        allow_null=True,
-        min_value=0,
-        default=None,
-        help_text=_('The cache timeout for the related auto-created inventory source, special to constructed inventory'),
-    )
-    limit = ConstructedCharField(
-        required=False,
-        default=None,
-        allow_blank=True,
-        help_text=_('The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory.'),
-    )
-    verbosity = ConstructedIntegerField(
-        required=False,
-        allow_null=True,
-        min_value=0,
-        max_value=2,
-        default=None,
-        help_text=_('The verbosity level for the related auto-created inventory source, special to constructed inventory'),
-    )
-
-    class Meta:
-        model = Inventory
-        fields = ('*', '-host_filter') + CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
-        read_only_fields = ('*', 'kind')
-
-    def pop_inv_src_data(self, data):
-        inv_src_data = {}
-        for field in CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS:
-            if field in data:
-                # values always need to be removed, as they are not valid for Inventory model
-                value = data.pop(field)
-                # null is not valid for any of those fields, taken as not-provided
-                if value is not None:
-                    inv_src_data[field] = value
-        return inv_src_data
-
-    def apply_inv_src_data(self, inventory, inv_src_data):
-        if inv_src_data:
-            update_fields = []
-            inv_src = inventory.inventory_sources.first()
-            for field, value in inv_src_data.items():
-                setattr(inv_src, field, value)
-                update_fields.append(field)
-            if update_fields:
-                inv_src.save(update_fields=update_fields)
-
-    def create(self, validated_data):
-        validated_data['kind'] = 'constructed'
-        inv_src_data = self.pop_inv_src_data(validated_data)
-        inventory = super().create(validated_data)
-        self.apply_inv_src_data(inventory, inv_src_data)
-        return inventory
-
-    def update(self, obj, validated_data):
-        inv_src_data = self.pop_inv_src_data(validated_data)
-        obj = super().update(obj, validated_data)
-        self.apply_inv_src_data(obj, inv_src_data)
-        return obj
-
-
 class InventoryScriptSerializer(InventorySerializer):
     class Meta:
         fields = ()
@@ -1904,9 +1783,6 @@ class HostSerializer(BaseSerializerWithVariables):
                 ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
             )
         )
-        if obj.inventory.kind == 'constructed':
-            res['original_host'] = self.reverse('api:host_detail', kwargs={'pk': obj.instance_id})
-            res['ansible_facts'] = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id})
         if obj.inventory:
             res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
         if obj.last_job:
@@ -1928,10 +1804,6 @@ class HostSerializer(BaseSerializerWithVariables):
         group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
         group_cnt = obj.groups.count()
         d.setdefault('groups', {'count': group_cnt, 'results': group_list})
-        if obj.inventory.kind == 'constructed':
-            summaries_qs = obj.constructed_host_summaries
-        else:
-            summaries_qs = obj.job_host_summaries
         d.setdefault(
             'recent_jobs',
             [
@@ -1942,7 +1814,7 @@ class HostSerializer(BaseSerializerWithVariables):
                     'status': j.job.status,
                     'finished': j.job.finished,
                 }
-                for j in summaries_qs.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
+                for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
             ],
         )
         return d
@@ -1967,8 +1839,8 @@ class HostSerializer(BaseSerializerWithVariables):
         return value
 
     def validate_inventory(self, value):
-        if value.kind in ('constructed', 'smart'):
-            raise serializers.ValidationError({"detail": _("Cannot create Host for Smart or Constructed Inventories")})
+        if value.kind == 'smart':
+            raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
         return value
 
     def validate_variables(self, value):
@@ -2066,8 +1938,8 @@ class GroupSerializer(BaseSerializerWithVariables):
         return value
 
     def validate_inventory(self, value):
-        if value.kind in ('constructed', 'smart'):
-            raise serializers.ValidationError({"detail": _("Cannot create Group for Smart or Constructed Inventories")})
+        if value.kind == 'smart':
+            raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
         return value
 
     def to_representation(self, obj):
@@ -2190,7 +2062,7 @@ class BulkHostCreateSerializer(serializers.Serializer):
         host_data = []
         for r in result:
             item = {k: getattr(r, k) for k in return_keys}
-            if settings.DATABASES and ('sqlite3' not in settings.DATABASES.get('default', {}).get('ENGINE')):
+            if not settings.IS_TESTING_MODE:
                 # sqlite acts different with bulk_create -- it doesn't return the id of the objects
                 # to get it, you have to do an additional query, which is not useful for our tests
                 item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
@@ -2266,7 +2138,6 @@ class InventorySourceOptionsSerializer(BaseSerializer):
             'custom_virtualenv',
             'timeout',
             'verbosity',
-            'limit',
         )
         read_only_fields = ('*', 'custom_virtualenv')
@@ -2373,8 +2244,8 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
         return value
 
     def validate_inventory(self, value):
-        if value and value.kind in ('constructed', 'smart'):
-            raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart or Constructed Inventories")})
+        if value and value.kind == 'smart':
+            raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
         return value
 
     # TODO: remove when old 'credential' fields are removed
@@ -2418,16 +2289,9 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
         def get_field_from_model_or_attrs(fd):
             return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
 
-        if self.instance and self.instance.source == 'constructed':
-            allowed_fields = CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
-            for field in attrs:
-                if attrs[field] != getattr(self.instance, field) and field not in allowed_fields:
-                    raise serializers.ValidationError({"error": _("Cannot change field '{}' on a constructed inventory source.").format(field)})
-        elif get_field_from_model_or_attrs('source') == 'scm':
+        if get_field_from_model_or_attrs('source') == 'scm':
             if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
                 raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
-        elif get_field_from_model_or_attrs('source') == 'constructed':
-            raise serializers.ValidationError({"error": _('constructed not a valid source for inventory')})
         else:
             redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
             if redundant_scm_fields:
@@ -4169,7 +4033,6 @@ class JobHostSummarySerializer(BaseSerializer):
             '-description',
             'job',
             'host',
-            'constructed_host',
             'host_name',
             'changed',
             'dark',
@@ -5523,32 +5386,6 @@ class InstanceHealthCheckSerializer(BaseSerializer):
         fields = read_only_fields
 
 
-class HostMetricSerializer(BaseSerializer):
-    show_capabilities = ['delete']
-
-    class Meta:
-        model = HostMetric
-        fields = (
-            "id",
-            "hostname",
-            "url",
-            "first_automation",
-            "last_automation",
-            "last_deleted",
-            "automated_counter",
-            "deleted_counter",
-            "deleted",
-            "used_in_inventories",
-        )
-
-
-class HostMetricSummaryMonthlySerializer(BaseSerializer):
-    class Meta:
-        model = HostMetricSummaryMonthly
-        read_only_fields = ("id", "date", "license_consumed", "license_capacity", "hosts_added", "hosts_deleted", "indirectly_managed_hosts")
-        fields = read_only_fields
-
-
 class InstanceGroupSerializer(BaseSerializer):
     show_capabilities = ['edit', 'delete']
     capacity = serializers.SerializerMethodField()


@@ -1,18 +0,0 @@
-{% ifmeth GET %}
-# Retrieve {{ model_verbose_name|title|anora }}:
-
-Make GET request to this resource to retrieve a single {{ model_verbose_name }}
-record containing the following fields:
-
-{% include "api/_result_fields_common.md" %}
-{% endifmeth %}
-
-{% ifmeth DELETE %}
-# Delete {{ model_verbose_name|title|anora }}:
-
-Make a DELETE request to this resource to soft-delete this {{ model_verbose_name }}.
-A soft deletion will mark the `deleted` field as true and exclude the host
-metric from license calculations.
-This may be undone later if the same hostname is automated again afterwards.
-{% endifmeth %}


@@ -2,7 +2,6 @@ receptor_user: awx
 receptor_group: awx
 receptor_verify: true
 receptor_tls: true
-receptor_mintls13: false
 receptor_work_commands:
   ansible-runner:
     command: ansible-runner


@@ -1,31 +0,0 @@
-# Copyright (c) 2017 Ansible, Inc.
-# All Rights Reserved.
-
-from django.urls import re_path
-
-import awx.api.views.analytics as analytics
-
-
-urls = [
-    re_path(r'^$', analytics.AnalyticsRootView.as_view(), name='analytics_root_view'),
-    re_path(r'^authorized/$', analytics.AnalyticsAuthorizedView.as_view(), name='analytics_authorized'),
-    re_path(r'^reports/$', analytics.AnalyticsReportsList.as_view(), name='analytics_reports_list'),
-    re_path(r'^report/(?P<slug>[\w-]+)/$', analytics.AnalyticsReportDetail.as_view(), name='analytics_report_detail'),
-    re_path(r'^report_options/$', analytics.AnalyticsReportOptionsList.as_view(), name='analytics_report_options_list'),
-    re_path(r'^adoption_rate/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate'),
-    re_path(r'^adoption_rate_options/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate_options'),
-    re_path(r'^event_explorer/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer'),
-    re_path(r'^event_explorer_options/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer_options'),
-    re_path(r'^host_explorer/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer'),
-    re_path(r'^host_explorer_options/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer_options'),
-    re_path(r'^job_explorer/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer'),
-    re_path(r'^job_explorer_options/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer_options'),
-    re_path(r'^probe_templates/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_explorer'),
-    re_path(r'^probe_templates_options/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_options'),
-    re_path(r'^probe_template_for_hosts/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_explorer'),
-    re_path(r'^probe_template_for_hosts_options/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_options'),
-    re_path(r'^roi_templates/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_explorer'),
-    re_path(r'^roi_templates_options/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_options'),
-]
-
-__all__ = ['urls']


@@ -1,10 +0,0 @@
-# Copyright (c) 2017 Ansible, Inc.
-# All Rights Reserved.
-
-from django.urls import re_path
-
-from awx.api.views import HostMetricList, HostMetricDetail
-
-urls = [re_path(r'^$', HostMetricList.as_view(), name='host_metric_list'), re_path(r'^(?P<pk>[0-9]+)/$', HostMetricDetail.as_view(), name='host_metric_detail')]
-
-__all__ = ['urls']


@@ -6,10 +6,7 @@ from django.urls import re_path
 from awx.api.views.inventory import (
     InventoryList,
     InventoryDetail,
-    ConstructedInventoryDetail,
-    ConstructedInventoryList,
     InventoryActivityStreamList,
-    InventoryInputInventoriesList,
     InventoryJobTemplateList,
     InventoryAccessList,
     InventoryObjectRolesList,
@@ -40,7 +37,6 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/script/$', InventoryScriptView.as_view(), name='inventory_script_view'),
     re_path(r'^(?P<pk>[0-9]+)/tree/$', InventoryTreeView.as_view(), name='inventory_tree_view'),
     re_path(r'^(?P<pk>[0-9]+)/inventory_sources/$', InventoryInventorySourcesList.as_view(), name='inventory_inventory_sources_list'),
-    re_path(r'^(?P<pk>[0-9]+)/input_inventories/$', InventoryInputInventoriesList.as_view(), name='inventory_input_inventories'),
     re_path(r'^(?P<pk>[0-9]+)/update_inventory_sources/$', InventoryInventorySourcesUpdate.as_view(), name='inventory_inventory_sources_update'),
     re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', InventoryActivityStreamList.as_view(), name='inventory_activity_stream_list'),
     re_path(r'^(?P<pk>[0-9]+)/job_templates/$', InventoryJobTemplateList.as_view(), name='inventory_job_template_list'),
@@ -52,10 +48,4 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
 ]
 
-# Constructed inventory special views
-constructed_inventory_urls = [
-    re_path(r'^$', ConstructedInventoryList.as_view(), name='constructed_inventory_list'),
-    re_path(r'^(?P<pk>[0-9]+)/$', ConstructedInventoryDetail.as_view(), name='constructed_inventory_detail'),
-]
-
-__all__ = ['urls', 'constructed_inventory_urls']
+__all__ = ['urls']


@@ -30,7 +30,6 @@ from awx.api.views import (
    OAuth2TokenList,
     ApplicationOAuth2TokenList,
     OAuth2ApplicationDetail,
-    # HostMetricSummaryMonthlyList,  # It will be enabled in future version of the AWX
 )
 
 from awx.api.views.bulk import (
@@ -42,17 +41,15 @@ from awx.api.views.bulk import (
 from awx.api.views.mesh_visualizer import MeshVisualizer
 from awx.api.views.metrics import MetricsView
-from awx.api.views.analytics import AWX_ANALYTICS_API_PREFIX
 
 from .organization import urls as organization_urls
 from .user import urls as user_urls
 from .project import urls as project_urls
 from .project_update import urls as project_update_urls
-from .inventory import urls as inventory_urls, constructed_inventory_urls
+from .inventory import urls as inventory_urls
 from .execution_environments import urls as execution_environment_urls
 from .team import urls as team_urls
 from .host import urls as host_urls
-from .host_metric import urls as host_metric_urls
 from .group import urls as group_urls
 from .inventory_source import urls as inventory_source_urls
 from .inventory_update import urls as inventory_update_urls
@@ -83,7 +80,7 @@ from .oauth2 import urls as oauth2_urls
 from .oauth2_root import urls as oauth2_root_urls
 from .workflow_approval_template import urls as workflow_approval_template_urls
 from .workflow_approval import urls as workflow_approval_urls
-from .analytics import urls as analytics_urls
+
 
 v2_urls = [
     re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -120,11 +117,7 @@ v2_urls = [
     re_path(r'^project_updates/', include(project_update_urls)),
     re_path(r'^teams/', include(team_urls)),
     re_path(r'^inventories/', include(inventory_urls)),
-    re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
     re_path(r'^hosts/', include(host_urls)),
-    re_path(r'^host_metrics/', include(host_metric_urls)),
-    # It will be enabled in future version of the AWX
-    # re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
     re_path(r'^groups/', include(group_urls)),
     re_path(r'^inventory_sources/', include(inventory_source_urls)),
     re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -148,7 +141,6 @@ v2_urls = [
     re_path(r'^unified_job_templates/$', UnifiedJobTemplateList.as_view(), name='unified_job_template_list'),
     re_path(r'^unified_jobs/$', UnifiedJobList.as_view(), name='unified_job_list'),
     re_path(r'^activity_stream/', include(activity_stream_urls)),
-    re_path(rf'^{AWX_ANALYTICS_API_PREFIX}/', include(analytics_urls)),
     re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
     re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
     re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),


@@ -17,6 +17,7 @@ from collections import OrderedDict
 from urllib3.exceptions import ConnectTimeoutError
 
+
 # Django
 from django.conf import settings
 from django.core.exceptions import FieldError, ObjectDoesNotExist
@@ -29,7 +30,7 @@ from django.utils.safestring import mark_safe
 from django.utils.timezone import now
 from django.views.decorators.csrf import csrf_exempt
 from django.template.loader import render_to_string
-from django.http import HttpResponse, HttpResponseRedirect
+from django.http import HttpResponse
 from django.contrib.contenttypes.models import ContentType
 from django.utils.translation import gettext_lazy as _
@@ -62,7 +63,7 @@ from wsgiref.util import FileWrapper
 
 # AWX
 from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
-from awx.main.access import get_user_queryset
+from awx.main.access import get_user_queryset, HostAccess
 from awx.api.generics import (
     APIView,
     BaseUsersList,
@@ -794,7 +795,13 @@ class ExecutionEnvironmentActivityStreamList(SubListAPIView):
     parent_model = models.ExecutionEnvironment
     relationship = 'activitystream_set'
     search_fields = ('changes',)
-    filter_read_permission = False
+
+    def get_queryset(self):
+        parent = self.get_parent_object()
+        self.check_parent_access(parent)
+        qs = self.request.user.get_queryset(self.model)
+        return qs.filter(execution_environment=parent)
 
 
 class ProjectList(ListCreateAPIView):
@@ -1541,41 +1548,6 @@ class HostRelatedSearchMixin(object):
return ret return ret
class HostMetricList(ListAPIView):
name = _("Host Metrics List")
model = models.HostMetric
serializer_class = serializers.HostMetricSerializer
permission_classes = (IsSystemAdminOrAuditor,)
search_fields = ('hostname', 'deleted')
def get_queryset(self):
return self.model.objects.all()
class HostMetricDetail(RetrieveDestroyAPIView):
name = _("Host Metric Detail")
model = models.HostMetric
serializer_class = serializers.HostMetricSerializer
permission_classes = (IsSystemAdminOrAuditor,)
def delete(self, request, *args, **kwargs):
self.get_object().soft_delete()
return Response(status=status.HTTP_204_NO_CONTENT)
# It will be enabled in future version of the AWX
# class HostMetricSummaryMonthlyList(ListAPIView):
# name = _("Host Metrics Summary Monthly")
# model = models.HostMetricSummaryMonthly
# serializer_class = serializers.HostMetricSummaryMonthlySerializer
# permission_classes = (IsSystemAdminOrAuditor,)
# search_fields = ('date',)
#
# def get_queryset(self):
# return self.model.objects.all()
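HostMetricDetail.delete() above calls soft_delete() on the object instead of removing the row. The model itself is not shown in this diff; the sketch below is only a guess at what such a method might do, using field names taken from the host_metric_table collector query further down (hostname, deleted, last_deleted, deleted_counter):

# Hypothetical sketch of a soft-delete model method; field names are assumptions
# based on the host_metric analytics query, not the real HostMetric model.
from django.db import models
from django.utils.timezone import now

class HostMetricSketch(models.Model):
    hostname = models.CharField(max_length=512, unique=True)
    deleted = models.BooleanField(default=False)
    last_deleted = models.DateTimeField(null=True)
    deleted_counter = models.IntegerField(default=0)

    def soft_delete(self):
        # flag the row instead of deleting it, so historical counts survive
        if not self.deleted:
            self.deleted = True
            self.deleted_counter += 1
            self.last_deleted = now()
            self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])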
class HostList(HostRelatedSearchMixin, ListCreateAPIView): class HostList(HostRelatedSearchMixin, ListCreateAPIView):
always_allow_superuser = False always_allow_superuser = False
model = models.Host model = models.Host
@@ -1604,8 +1576,6 @@ class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
def delete(self, request, *args, **kwargs): def delete(self, request, *args, **kwargs):
if self.get_object().inventory.pending_deletion: if self.get_object().inventory.pending_deletion:
return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST) return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST)
if self.get_object().inventory.kind == 'constructed':
return Response({"error": _("Delete constructed inventory hosts from input inventory.")}, status=status.HTTP_400_BAD_REQUEST)
return super(HostDetail, self).delete(request, *args, **kwargs) return super(HostDetail, self).delete(request, *args, **kwargs)
@@ -1613,14 +1583,6 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
model = models.Host model = models.Host
serializer_class = serializers.AnsibleFactsSerializer serializer_class = serializers.AnsibleFactsSerializer
def get(self, request, *args, **kwargs):
obj = self.get_object()
if obj.inventory.kind == 'constructed':
# If this is a constructed inventory host, it is not the source of truth about facts
# redirect to the original input inventory host instead
return HttpResponseRedirect(reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id}, request=self.request))
return super().get(request, *args, **kwargs)
class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView): class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
model = models.Host model = models.Host
@@ -1628,7 +1590,13 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
parent_model = models.Inventory parent_model = models.Inventory
relationship = 'hosts' relationship = 'hosts'
parent_key = 'inventory' parent_key = 'inventory'
filter_read_permission = False
def get_queryset(self):
inventory = self.get_parent_object()
qs = getattrd(inventory, self.relationship).all()
# Apply queryset optimizations
qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
return qs
class HostGroupsList(SubListCreateAttachDetachAPIView): class HostGroupsList(SubListCreateAttachDetachAPIView):
@@ -2569,7 +2537,16 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
serializer_class = serializers.CredentialSerializer serializer_class = serializers.CredentialSerializer
parent_model = models.JobTemplate parent_model = models.JobTemplate
relationship = 'credentials' relationship = 'credentials'
filter_read_permission = False
def get_queryset(self):
# Return the full list of credentials
parent = self.get_parent_object()
self.check_parent_access(parent)
sublist_qs = getattrd(parent, self.relationship)
sublist_qs = sublist_qs.prefetch_related(
'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
)
return sublist_qs
def is_valid_relation(self, parent, sub, created=False): def is_valid_relation(self, parent, sub, created=False):
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]: if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
@@ -2671,10 +2648,7 @@ class JobTemplateCallback(GenericAPIView):
# Permission class should have already validated host_config_key. # Permission class should have already validated host_config_key.
job_template = self.get_object() job_template = self.get_object()
# Attempt to find matching hosts based on remote address. # Attempt to find matching hosts based on remote address.
if job_template.inventory: matching_hosts = self.find_matching_hosts()
matching_hosts = self.find_matching_hosts()
else:
return Response({"msg": _("Cannot start automatically, an inventory is required.")}, status=status.HTTP_400_BAD_REQUEST)
# If the host is not found, update the inventory before trying to # If the host is not found, update the inventory before trying to
# match again. # match again.
inventory_sources_already_updated = [] inventory_sources_already_updated = []
@@ -2759,7 +2733,6 @@ class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
serializer_class = serializers.InstanceGroupSerializer serializer_class = serializers.InstanceGroupSerializer
parent_model = models.JobTemplate parent_model = models.JobTemplate
relationship = 'instance_groups' relationship = 'instance_groups'
filter_read_permission = False
class JobTemplateAccessList(ResourceAccessList): class JobTemplateAccessList(ResourceAccessList):
@@ -2850,7 +2823,16 @@ class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, Su
relationship = '' relationship = ''
enforce_parent_relationship = 'workflow_job_template' enforce_parent_relationship = 'workflow_job_template'
search_fields = ('unified_job_template__name', 'unified_job_template__description') search_fields = ('unified_job_template__name', 'unified_job_template__description')
filter_read_permission = False
'''
Limit the set of WorkflowJobTemplateNodes to the related nodes specified by
'relationship'
'''
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).all()
def is_valid_relation(self, parent, sub, created=False): def is_valid_relation(self, parent, sub, created=False):
if created: if created:
@@ -2925,7 +2907,14 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
parent_model = models.WorkflowJobNode parent_model = models.WorkflowJobNode
relationship = '' relationship = ''
search_fields = ('unified_job_template__name', 'unified_job_template__description') search_fields = ('unified_job_template__name', 'unified_job_template__description')
filter_read_permission = False
#
# Limit the set of WorkflowJobNodes to the related nodes specified by self.relationship
#
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).all()
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList): class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
@@ -3104,8 +3093,11 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
relationship = 'workflow_job_template_nodes' relationship = 'workflow_job_template_nodes'
parent_key = 'workflow_job_template' parent_key = 'workflow_job_template'
search_fields = ('unified_job_template__name', 'unified_job_template__description') search_fields = ('unified_job_template__name', 'unified_job_template__description')
ordering = ('id',) # ensure ordering by id for consistency
filter_read_permission = False def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).order_by('id')
class WorkflowJobTemplateJobsList(SubListAPIView): class WorkflowJobTemplateJobsList(SubListAPIView):
@@ -3197,8 +3189,11 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
relationship = 'workflow_job_nodes' relationship = 'workflow_job_nodes'
parent_key = 'workflow_job' parent_key = 'workflow_job'
search_fields = ('unified_job_template__name', 'unified_job_template__description') search_fields = ('unified_job_template__name', 'unified_job_template__description')
ordering = ('id',) # ensure ordering by id for consistency
filter_read_permission = False def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).order_by('id')
class WorkflowJobCancel(GenericCancelView): class WorkflowJobCancel(GenericCancelView):
@@ -3512,7 +3507,11 @@ class BaseJobHostSummariesList(SubListAPIView):
relationship = 'job_host_summaries' relationship = 'job_host_summaries'
name = _('Job Host Summaries List') name = _('Job Host Summaries List')
search_fields = ('host_name',) search_fields = ('host_name',)
filter_read_permission = False
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
class HostJobHostSummariesList(BaseJobHostSummariesList): class HostJobHostSummariesList(BaseJobHostSummariesList):

View File

@@ -1,296 +0,0 @@
import requests
import logging
import urllib.parse as urlparse
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils import translation
from awx.api.generics import APIView, Response
from awx.api.permissions import AnalyticsPermission
from awx.api.versioning import reverse
from awx.main.utils import get_awx_version
from rest_framework import status
from collections import OrderedDict
AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
AWX_ANALYTICS_API_PREFIX = 'analytics'
ERROR_UPLOAD_NOT_ENABLED = "analytics-upload-not-enabled"
ERROR_MISSING_URL = "missing-url"
ERROR_MISSING_USER = "missing-user"
ERROR_MISSING_PASSWORD = "missing-password"
ERROR_NO_DATA_OR_ENTITLEMENT = "no-data-or-entitlement"
ERROR_NOT_FOUND = "not-found"
ERROR_UNAUTHORIZED = "unauthorized"
ERROR_UNKNOWN = "unknown"
ERROR_UNSUPPORTED_METHOD = "unsupported-method"
logger = logging.getLogger('awx.api.views.analytics')
class MissingSettings(Exception):
"""Settings are not correct Exception"""
pass
class GetNotAllowedMixin(object):
def get(self, request, format=None):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
class AnalyticsRootView(APIView):
permission_classes = (AnalyticsPermission,)
name = _('Automation Analytics')
swagger_topic = 'Automation Analytics'
def get(self, request, format=None):
data = OrderedDict()
data['authorized'] = reverse('api:analytics_authorized')
data['reports'] = reverse('api:analytics_reports_list')
data['report_options'] = reverse('api:analytics_report_options_list')
data['adoption_rate'] = reverse('api:analytics_adoption_rate')
data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
data['event_explorer'] = reverse('api:analytics_event_explorer')
data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
data['host_explorer'] = reverse('api:analytics_host_explorer')
data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
data['job_explorer'] = reverse('api:analytics_job_explorer')
data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
return Response(data)
class AnalyticsGenericView(APIView):
"""
Example:
headers = {
'Content-Type': 'application/json',
}
params = {
'limit': '20',
'offset': '0',
'sort_by': 'name:asc',
}
json_data = {
'limit': '20',
'offset': '0',
'sort_options': 'name',
'sort_order': 'asc',
'tags': [],
'slug': [],
'name': [],
'description': '',
}
response = requests.post(f'{AUTOMATION_ANALYTICS_API_URL}/reports/', params=params,
headers=headers, json=json_data)
return Response(response.json(), status=response.status_code)
"""
permission_classes = (AnalyticsPermission,)
@staticmethod
def _request_headers(request):
headers = {}
for header in ['Content-Type', 'Content-Length', 'Accept-Encoding', 'User-Agent', 'Accept']:
if request.headers.get(header, None):
headers[header] = request.headers.get(header)
headers['X-Rh-Analytics-Source'] = 'controller'
headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
headers['Accept-Language'] = translation.get_language()
return headers
@staticmethod
def _get_analytics_path(request_path):
parts = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')
path_specific = parts[-1]
return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"
def _get_analytics_url(self, request_path):
analytics_path = self._get_analytics_path(request_path)
url = getattr(settings, 'AUTOMATION_ANALYTICS_URL', None)
if not url:
raise MissingSettings(ERROR_MISSING_URL)
url_parts = urlparse.urlsplit(url)
analytics_url = urlparse.urlunsplit([url_parts.scheme, url_parts.netloc, analytics_path, url_parts.query, url_parts.fragment])
return analytics_url
@staticmethod
def _get_setting(setting_name, default, error_message):
setting = getattr(settings, setting_name, default)
if not setting:
raise MissingSettings(error_message)
return setting
@staticmethod
def _error_response(keyword, message=None, remote=True, remote_status_code=None, status_code=status.HTTP_403_FORBIDDEN):
text = {"error": {"remote": remote, "remote_status": remote_status_code, "keyword": keyword}}
if message:
text["error"]["message"] = message
return Response(text, status=status_code)
def _error_response_404(self, response):
try:
json_response = response.json()
# Subscription/entitlement problem or missing tenant data in AA db => HTTP 403
message = json_response.get('error', None)
if message:
return self._error_response(ERROR_NO_DATA_OR_ENTITLEMENT, message, remote=True, remote_status_code=response.status_code)
# Standard 404 problem => HTTP 404
message = json_response.get('detail', None) or response.text
except requests.exceptions.JSONDecodeError:
# Unexpected text => still HTTP 404
message = response.text
return self._error_response(ERROR_NOT_FOUND, message, remote=True, remote_status_code=status.HTTP_404_NOT_FOUND, status_code=status.HTTP_404_NOT_FOUND)
@staticmethod
def _update_response_links(json_response):
if not json_response.get('links', None):
return
for key, value in json_response['links'].items():
if value:
json_response['links'][key] = value.replace(AUTOMATION_ANALYTICS_API_URL_PATH, f"/api/v2/{AWX_ANALYTICS_API_PREFIX}")
def _forward_response(self, response):
try:
content_type = response.headers.get('content-type', '')
if content_type.find('application/json') != -1:
json_response = response.json()
self._update_response_links(json_response)
return Response(json_response, status=response.status_code)
except Exception as e:
logger.error(f"Analytics API: Response error: {e}")
return Response(response.content, status=response.status_code)
def _send_to_analytics(self, request, method):
try:
headers = self._request_headers(request)
self._get_setting('INSIGHTS_TRACKING_STATE', False, ERROR_UPLOAD_NOT_ENABLED)
url = self._get_analytics_url(request.path)
rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
if method not in ["GET", "POST", "OPTIONS"]:
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
response = requests.request(
method,
url,
auth=(rh_user, rh_password),
verify=settings.INSIGHTS_CERT_PATH,
params=request.query_params,
headers=headers,
json=request.data,
timeout=(31, 31),
)
#
# Missing or wrong user/pass
#
if response.status_code == status.HTTP_401_UNAUTHORIZED:
text = (response.text or '').rstrip("\n")
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
#
# Not found, No entitlement or No data in Analytics
#
elif response.status_code == status.HTTP_404_NOT_FOUND:
return self._error_response_404(response)
#
# Success, or any error other than 401/404, is just forwarded
#
else:
return self._forward_response(response)
except MissingSettings as e:
logger.warning(f"Analytics API: Setting missing: {e.args[0]}")
return self._error_response(e.args[0], remote=False)
except requests.exceptions.RequestException as e:
logger.error(f"Analytics API: Request error: {e}")
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as e:
logger.error(f"Analytics API: Error: {e}")
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
class AnalyticsGenericListView(AnalyticsGenericView):
def get(self, request, format=None):
return self._send_to_analytics(request, method="GET")
def post(self, request, format=None):
return self._send_to_analytics(request, method="POST")
def options(self, request, format=None):
return self._send_to_analytics(request, method="OPTIONS")
class AnalyticsGenericDetailView(AnalyticsGenericView):
def get(self, request, slug, format=None):
return self._send_to_analytics(request, method="GET")
def post(self, request, slug, format=None):
return self._send_to_analytics(request, method="POST")
def options(self, request, slug, format=None):
return self._send_to_analytics(request, method="OPTIONS")
class AnalyticsAuthorizedView(AnalyticsGenericListView):
name = _("Authorized")
class AnalyticsReportsList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Reports")
swagger_topic = "Automation Analytics"
class AnalyticsReportDetail(AnalyticsGenericDetailView):
name = _("Report")
class AnalyticsReportOptionsList(AnalyticsGenericListView):
name = _("Report Options")
class AnalyticsAdoptionRateList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Adoption Rate")
class AnalyticsEventExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Event Explorer")
class AnalyticsHostExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Host Explorer")
class AnalyticsJobExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Job Explorer")
class AnalyticsProbeTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Probe Templates")
class AnalyticsProbeTemplateForHostsList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("Probe Template For Hosts")
class AnalyticsRoiTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
name = _("ROI Templates")

View File

@@ -14,7 +14,6 @@ from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import PermissionDenied from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response from rest_framework.response import Response
from rest_framework import status from rest_framework import status
from rest_framework import serializers
# AWX # AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
@@ -32,7 +31,6 @@ from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.serializers import ( from awx.api.serializers import (
InventorySerializer, InventorySerializer,
ConstructedInventorySerializer,
ActivityStreamSerializer, ActivityStreamSerializer,
RoleSerializer, RoleSerializer,
InstanceGroupSerializer, InstanceGroupSerializer,
@@ -81,9 +79,7 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
# Do not allow changes to an Inventory kind. # Do not allow changes to an Inventory kind.
if kind is not None and obj.kind != kind: if kind is not None and obj.kind != kind:
return Response( return Response(dict(error=_('You cannot turn a regular inventory into a "smart" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED)
dict(error=_('You cannot turn a regular inventory into a "smart" or "constructed" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED
)
return super(InventoryDetail, self).update(request, *args, **kwargs) return super(InventoryDetail, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs): def destroy(self, request, *args, **kwargs):
@@ -98,29 +94,6 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST) return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)
class ConstructedInventoryDetail(InventoryDetail):
serializer_class = ConstructedInventorySerializer
class ConstructedInventoryList(InventoryList):
serializer_class = ConstructedInventorySerializer
def get_queryset(self):
r = super().get_queryset()
return r.filter(kind='constructed')
class InventoryInputInventoriesList(SubListAttachDetachAPIView):
model = Inventory
serializer_class = InventorySerializer
parent_model = Inventory
relationship = 'input_inventories'
def is_valid_relation(self, parent, sub, created=False):
if sub.kind == 'constructed':
raise serializers.ValidationError({'error': 'You cannot add a constructed inventory to another constructed inventory.'})
class InventoryActivityStreamList(SubListAPIView): class InventoryActivityStreamList(SubListAPIView):
model = ActivityStream model = ActivityStream
serializer_class = ActivityStreamSerializer serializer_class = ActivityStreamSerializer

View File

@@ -61,6 +61,12 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
model = Organization model = Organization
serializer_class = OrganizationSerializer serializer_class = OrganizationSerializer
def get_queryset(self):
qs = Organization.accessible_objects(self.request.user, 'read_role')
qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
qs = qs.prefetch_related('created_by', 'modified_by')
return qs
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView): class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = Organization model = Organization
@@ -201,7 +207,6 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
serializer_class = InstanceGroupSerializer serializer_class = InstanceGroupSerializer
parent_model = Organization parent_model = Organization
relationship = 'instance_groups' relationship = 'instance_groups'
filter_read_permission = False
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView): class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
@@ -209,7 +214,6 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
serializer_class = CredentialSerializer serializer_class = CredentialSerializer
parent_model = Organization parent_model = Organization
relationship = 'galaxy_credentials' relationship = 'galaxy_credentials'
filter_read_permission = False
def is_valid_relation(self, parent, sub, created=False): def is_valid_relation(self, parent, sub, created=False):
if sub.kind != 'galaxy_api_token': if sub.kind != 'galaxy_api_token':

View File

@@ -98,14 +98,10 @@ class ApiVersionRootView(APIView):
data['tokens'] = reverse('api:o_auth2_token_list', request=request) data['tokens'] = reverse('api:o_auth2_token_list', request=request)
data['metrics'] = reverse('api:metrics_view', request=request) data['metrics'] = reverse('api:metrics_view', request=request)
data['inventory'] = reverse('api:inventory_list', request=request) data['inventory'] = reverse('api:inventory_list', request=request)
data['constructed_inventory'] = reverse('api:constructed_inventory_list', request=request)
data['inventory_sources'] = reverse('api:inventory_source_list', request=request) data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
data['inventory_updates'] = reverse('api:inventory_update_list', request=request) data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
data['groups'] = reverse('api:group_list', request=request) data['groups'] = reverse('api:group_list', request=request)
data['hosts'] = reverse('api:host_list', request=request) data['hosts'] = reverse('api:host_list', request=request)
data['host_metrics'] = reverse('api:host_metric_list', request=request)
# It will be enabled in a future version of AWX
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
data['job_templates'] = reverse('api:job_template_list', request=request) data['job_templates'] = reverse('api:job_template_list', request=request)
data['jobs'] = reverse('api:job_list', request=request) data['jobs'] = reverse('api:job_list', request=request)
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request) data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -126,7 +122,6 @@ class ApiVersionRootView(APIView):
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request) data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request) data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
data['bulk'] = reverse('api:bulk', request=request) data['bulk'] = reverse('api:bulk', request=request)
data['analytics'] = reverse('api:analytics_root_view', request=request)
return Response(data) return Response(data)
@@ -277,9 +272,6 @@ class ApiV2ConfigView(APIView):
pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off' pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'
# Guarding against settings.UI_NEXT being set to a non-boolean value
ui_next_state = settings.UI_NEXT if settings.UI_NEXT in (True, False) else False
data = dict( data = dict(
time_zone=settings.TIME_ZONE, time_zone=settings.TIME_ZONE,
license_info=license_data, license_info=license_data,
@@ -288,7 +280,6 @@ class ApiV2ConfigView(APIView):
analytics_status=pendo_state, analytics_status=pendo_state,
analytics_collectors=all_collectors(), analytics_collectors=all_collectors(),
become_methods=PRIVILEGE_ESCALATION_METHODS, become_methods=PRIVILEGE_ESCALATION_METHODS,
ui_next=ui_next_state,
) )
# If LDAP is enabled, user_ldap_fields will return a list of field # If LDAP is enabled, user_ldap_fields will return a list of field

View File

@@ -5,13 +5,11 @@ import threading
import time import time
import os import os
from concurrent.futures import ThreadPoolExecutor
# Django # Django
from django.conf import LazySettings from django.conf import LazySettings
from django.conf import settings, UserSettingsHolder from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation from django.core.exceptions import ImproperlyConfigured
from django.db import transaction, connection from django.db import transaction, connection
from django.db.utils import Error as DBError, ProgrammingError from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property from django.utils.functional import cached_property
@@ -159,7 +157,7 @@ class EncryptedCacheProxy(object):
obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty) obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
if obj_id is empty: if obj_id is empty:
logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key)) logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None) obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
elif obj_id == SETTING_CACHE_NONE: elif obj_id == SETTING_CACHE_NONE:
obj_id = None obj_id = None
return method(TransientSetting(pk=obj_id, value=value), 'value') return method(TransientSetting(pk=obj_id, value=value), 'value')
@@ -168,6 +166,11 @@ class EncryptedCacheProxy(object):
# a no-op; it just returns the provided value # a no-op; it just returns the provided value
return value return value
def _get_setting_from_db(self, key):
field = self.registry.get_setting_field(key)
if not field.read_only:
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
def __getattr__(self, name): def __getattr__(self, name):
return getattr(self.cache, name) return getattr(self.cache, name)
@@ -183,22 +186,6 @@ def get_settings_to_cache(registry):
return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)]) return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])
# Will first attempt to get the setting from the database in synchronous mode.
# If called from an async context, it will attempt to get the setting from the database in a thread.
def _get_setting_from_db(registry, key):
def get_settings_from_db_sync(registry, key):
field = registry.get_setting_field(key)
if not field.read_only or key == 'INSTALL_UUID':
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
try:
return get_settings_from_db_sync(registry, key)
except SynchronousOnlyOperation:
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(get_settings_from_db_sync, registry, key)
return future.result()
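The helper above works around Django's SynchronousOnlyOperation: the ORM refuses to run inside an active event loop, so the query is retried on a worker thread, which has no running loop. A generic, hedged sketch of that fallback pattern (not AWX source; assumes a configured Django project):

# Generic illustration of the same workaround.
from concurrent.futures import ThreadPoolExecutor
from django.core.exceptions import SynchronousOnlyOperation

def call_orm_safely(func, *args, **kwargs):
    """Run a blocking ORM call, falling back to a thread in async contexts."""
    try:
        return func(*args, **kwargs)
    except SynchronousOnlyOperation:
        # a worker thread has no running event loop, so the ORM call is allowed there
        with ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(func, *args, **kwargs).result()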
def get_cache_value(value): def get_cache_value(value):
"""Returns the proper special cache setting for a value """Returns the proper special cache setting for a value
based on instance type. based on instance type.
@@ -358,7 +345,7 @@ class SettingsWrapper(UserSettingsHolder):
setting_id = None setting_id = None
# this value is read-only, however we *do* want to fetch its value from the database # this value is read-only, however we *do* want to fetch its value from the database
if not field.read_only or name == 'INSTALL_UUID': if not field.read_only or name == 'INSTALL_UUID':
setting = _get_setting_from_db(self.registry, name) setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
if setting: if setting:
if getattr(field, 'encrypted', False): if getattr(field, 'encrypted', False):
value = decrypt_field(setting, 'value') value = decrypt_field(setting, 'value')

View File

@@ -94,7 +94,9 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):
@pytest.mark.django_db @pytest.mark.django_db
def test_setting_singleton_update(api_request, dummy_setting): def test_setting_singleton_update(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'): with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.handle_setting_changes'
):
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3}) api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})) response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
assert response.data['FOO_BAR'] == 3 assert response.data['FOO_BAR'] == 3
@@ -110,7 +112,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
# sure that the _Forbidden validator doesn't get used for the # sure that the _Forbidden validator doesn't get used for the
# fields. See also https://github.com/ansible/awx/issues/4099. # fields. See also https://github.com/ansible/awx/issues/4099.
with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch( with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache' 'awx.conf.views.handle_setting_changes'
): ):
api_request( api_request(
'patch', 'patch',
@@ -124,7 +126,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
@pytest.mark.django_db @pytest.mark.django_db
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting): def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch( with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache' 'awx.conf.views.handle_setting_changes'
): ):
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5}) api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})) response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -134,7 +136,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy
@pytest.mark.django_db @pytest.mark.django_db
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting): def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch( with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache' 'awx.conf.views.handle_setting_changes'
): ):
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'}) api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$') assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
@@ -153,14 +155,16 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_settin
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate( with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
'foobar', func_raising_exception 'foobar', func_raising_exception
), mock.patch('awx.conf.views.clear_setting_cache'): ), mock.patch('awx.conf.views.handle_setting_changes'):
response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23}) response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
assert response.status_code == 400 assert response.status_code == 400
@pytest.mark.django_db @pytest.mark.django_db
def test_setting_singleton_delete(api_request, dummy_setting): def test_setting_singleton_delete(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'): with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.handle_setting_changes'
):
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})) api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})) response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
assert not response.data['FOO_BAR'] assert not response.data['FOO_BAR']
@@ -169,7 +173,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
@pytest.mark.django_db @pytest.mark.django_db
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting): def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch( with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache' 'awx.conf.views.handle_setting_changes'
): ):
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})) api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})) response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))

View File

@@ -26,11 +26,10 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
from awx.api.permissions import IsSystemAdminOrAuditor from awx.api.permissions import IsSystemAdminOrAuditor
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.utils import camelcase_to_underscore from awx.main.utils import camelcase_to_underscore
from awx.main.tasks.system import clear_setting_cache from awx.main.tasks.system import handle_setting_changes
from awx.conf.models import Setting from awx.conf.models import Setting
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
from awx.conf import settings_registry from awx.conf import settings_registry
from awx.main.utils.external_logging import reconfigure_rsyslog
SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name')) SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
@@ -119,10 +118,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
setting.save(update_fields=['value']) setting.save(update_fields=['value'])
settings_change_list.append(key) settings_change_list.append(key)
if settings_change_list: if settings_change_list:
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list)) connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
# notify rsyslog; no data is needed, so the payload is empty
reconfigure_rsyslog.delay()
def destroy(self, request, *args, **kwargs): def destroy(self, request, *args, **kwargs):
instance = self.get_object() instance = self.get_object()
@@ -137,10 +133,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
setting.delete() setting.delete()
settings_change_list.append(setting.key) settings_change_list.append(setting.key)
if settings_change_list: if settings_change_list:
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list)) connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
# notify rsyslog; no data is needed, so the payload is empty
reconfigure_rsyslog.delay()
# When TOWER_URL_BASE is deleted from the API, reset it to the hostname # When TOWER_URL_BASE is deleted from the API, reset it to the hostname
# used to make the request as a default. # used to make the request as a default.
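Both sides of this hunk defer the settings-change task with connection.on_commit(...). The point of that indirection is ordering: the dispatcher task must not run until the transaction that wrote the new Setting rows has committed, otherwise other processes could reload stale values. A hedged sketch of the pattern, reusing the clear_setting_cache task imported on one side of this diff (simplified, not AWX source):

# Illustrative sketch of the on_commit pattern used above.
from django.db import connection, transaction
from awx.main.tasks.system import clear_setting_cache   # task referenced in this hunk

def save_settings(changed):
    """changed: mapping of Setting instances to their new values."""
    with transaction.atomic():
        keys = []
        for setting, value in changed.items():
            setting.value = value
            setting.save(update_fields=['value'])
            keys.append(setting.key)
        if keys:
            # fire the cache-clearing task only after the transaction commits
            connection.on_commit(lambda: clear_setting_cache.delay(keys))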

View File

@@ -2952,19 +2952,3 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
for cls in BaseAccess.__subclasses__(): for cls in BaseAccess.__subclasses__():
access_registry[cls.model] = cls access_registry[cls.model] = cls
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
def optimize_queryset(queryset):
"""
A utility method in case you already have a queryset and just want to
apply the standard optimizations for that model.
In other words, use if you do not want to start from filtered_queryset for some reason.
"""
if not queryset.model or queryset.model not in access_registry:
return queryset
access_class = access_registry[queryset.model]
if access_class.select_related:
queryset = queryset.select_related(*access_class.select_related)
if access_class.prefetch_related:
queryset = queryset.prefetch_related(*access_class.prefetch_related)
return queryset
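The optimize_queryset helper above is a convenience for callers that already hold a queryset and only want the standard select_related/prefetch_related hints from the registered access class. A short, hypothetical usage sketch (call site and filter are assumptions):

# Hypothetical call site; assumes the helper above and the registered Host access class.
from awx.main.models import Host

qs = Host.objects.filter(inventory_id=42)      # any pre-built queryset
qs = optimize_queryset(qs)                     # applies HostAccess.select_related / prefetch_related
hosts = list(qs)                               # evaluated with the usual join/prefetch hints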

View File

@@ -4,11 +4,11 @@ import logging
# AWX # AWX
from awx.main.analytics.subsystem_metrics import Metrics from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
logger = logging.getLogger('awx.main.scheduler') logger = logging.getLogger('awx.main.scheduler')
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def send_subsystem_metrics(): def send_subsystem_metrics():
Metrics().send_metrics() Metrics().send_metrics()

View File

@@ -65,7 +65,7 @@ class FixedSlidingWindow:
return sum(self.buckets.values()) or 0 return sum(self.buckets.values()) or 0
class RelayWebsocketStatsManager: class BroadcastWebsocketStatsManager:
def __init__(self, event_loop, local_hostname): def __init__(self, event_loop, local_hostname):
self._local_hostname = local_hostname self._local_hostname = local_hostname
@@ -74,7 +74,7 @@ class RelayWebsocketStatsManager:
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
def new_remote_host_stats(self, remote_hostname): def new_remote_host_stats(self, remote_hostname):
self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname) self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
return self._stats[remote_hostname] return self._stats[remote_hostname]
def delete_remote_host_stats(self, remote_hostname): def delete_remote_host_stats(self, remote_hostname):
@@ -107,7 +107,7 @@ class RelayWebsocketStatsManager:
return parser.text_string_to_metric_families(stats_str.decode('UTF-8')) return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
class RelayWebsocketStats: class BroadcastWebsocketStats:
def __init__(self, local_hostname, remote_hostname): def __init__(self, local_hostname, remote_hostname):
self._local_hostname = local_hostname self._local_hostname = local_hostname
self._remote_hostname = remote_hostname self._remote_hostname = remote_hostname

View File

@@ -6,7 +6,7 @@ import platform
import distro import distro
from django.db import connection from django.db import connection
from django.db.models import Count, Min from django.db.models import Count
from django.conf import settings from django.conf import settings
from django.contrib.sessions.models import Session from django.contrib.sessions.models import Session
from django.utils.timezone import now, timedelta from django.utils.timezone import now, timedelta
@@ -35,7 +35,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
""" """
def trivial_slicing(key, since, until, last_gather, **kwargs): def trivial_slicing(key, since, until, last_gather):
if since is not None: if since is not None:
return [(since, until)] return [(since, until)]
@@ -48,7 +48,7 @@ def trivial_slicing(key, since, until, last_gather, **kwargs):
return [(last_entry, until)] return [(last_entry, until)]
def four_hour_slicing(key, since, until, last_gather, **kwargs): def four_hour_slicing(key, since, until, last_gather):
if since is not None: if since is not None:
last_entry = since last_entry = since
else: else:
@@ -69,54 +69,6 @@ def four_hour_slicing(key, since, until, last_gather, **kwargs):
start = end start = end
def host_metric_slicing(key, since, until, last_gather, **kwargs):
"""
Slicing doesn't start 4 weeks ago; instead the whole table is sent monthly or on the first run
"""
from awx.main.models.inventory import HostMetric
if since is not None:
return [(since, until)]
from awx.conf.models import Setting
# Check if full sync should be done
full_sync_enabled = kwargs.get('full_sync_enabled', False)
last_entry = None
if not full_sync_enabled:
#
# If not, try incremental sync first
#
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
last_entry = last_entries.get(key)
if not last_entry:
#
# If not done before, switch to full sync
#
full_sync_enabled = True
if full_sync_enabled:
#
# Find the lowest date for full sync
#
min_dates = HostMetric.objects.aggregate(min_last_automation=Min('last_automation'), min_last_deleted=Min('last_deleted'))
if min_dates['min_last_automation'] and min_dates['min_last_deleted']:
last_entry = min(min_dates['min_last_automation'], min_dates['min_last_deleted'])
elif min_dates['min_last_automation'] or min_dates['min_last_deleted']:
last_entry = min_dates['min_last_automation'] or min_dates['min_last_deleted']
if not last_entry:
# empty table
return []
start, end = last_entry, None
while start < until:
end = min(start + timedelta(days=30), until)
yield (start, end)
start = end
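The while-loop at the end of host_metric_slicing above emits consecutive windows of at most 30 days between the oldest relevant entry and until. A standalone illustration of that loop with made-up dates (not AWX source):

# Standalone illustration of the 30-day windowing above; dates are made up.
from datetime import datetime, timedelta, timezone

def thirty_day_slices(first_entry, until):
    start = first_entry
    while start < until:
        end = min(start + timedelta(days=30), until)
        yield (start, end)
        start = end

until = datetime(2023, 3, 1, tzinfo=timezone.utc)
for start, end in thirty_day_slices(until - timedelta(days=75), until):
    print(start.date(), '->', end.date())   # three windows: 30 + 30 + 15 days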
def _identify_lower(key, since, until, last_gather): def _identify_lower(key, since, until, last_gather):
from awx.conf.models import Setting from awx.conf.models import Setting
@@ -131,7 +83,7 @@ def _identify_lower(key, since, until, last_gather):
return lower, last_entries return lower, last_entries
@register('config', '1.6', description=_('General platform configuration.')) @register('config', '1.4', description=_('General platform configuration.'))
def config(since, **kwargs): def config(since, **kwargs):
license_info = get_license() license_info = get_license()
install_type = 'traditional' install_type = 'traditional'
@@ -155,13 +107,10 @@ def config(since, **kwargs):
'subscription_name': license_info.get('subscription_name'), 'subscription_name': license_info.get('subscription_name'),
'sku': license_info.get('sku'), 'sku': license_info.get('sku'),
'support_level': license_info.get('support_level'), 'support_level': license_info.get('support_level'),
'usage': license_info.get('usage'),
'product_name': license_info.get('product_name'), 'product_name': license_info.get('product_name'),
'valid_key': license_info.get('valid_key'), 'valid_key': license_info.get('valid_key'),
'satellite': license_info.get('satellite'), 'satellite': license_info.get('satellite'),
'pool_id': license_info.get('pool_id'), 'pool_id': license_info.get('pool_id'),
'subscription_id': license_info.get('subscription_id'),
'account_number': license_info.get('account_number'),
'current_instances': license_info.get('current_instances'), 'current_instances': license_info.get('current_instances'),
'automated_instances': license_info.get('automated_instances'), 'automated_instances': license_info.get('automated_instances'),
'automated_since': license_info.get('automated_since'), 'automated_since': license_info.get('automated_since'),
@@ -170,7 +119,6 @@ def config(since, **kwargs):
'compliant': license_info.get('compliant'), 'compliant': license_info.get('compliant'),
'date_warning': license_info.get('date_warning'), 'date_warning': license_info.get('date_warning'),
'date_expired': license_info.get('date_expired'), 'date_expired': license_info.get('date_expired'),
'subscription_usage_model': getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', ''), # 1.5+
'free_instances': license_info.get('free_instances', 0), 'free_instances': license_info.get('free_instances', 0),
'total_licensed_instances': license_info.get('instance_count', 0), 'total_licensed_instances': license_info.get('instance_count', 0),
'license_expiry': license_info.get('time_remaining', 0), 'license_expiry': license_info.get('time_remaining', 0),
@@ -588,25 +536,3 @@ def workflow_job_template_node_table(since, full_path, **kwargs):
) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id ) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER''' ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path) return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
@register(
'host_metric_table', '1.0', format='csv', description=_('Host Metric data, incremental/full sync'), expensive=host_metric_slicing, full_sync_interval=30
)
def host_metric_table(since, full_path, until, **kwargs):
host_metric_query = '''COPY (SELECT main_hostmetric.id,
main_hostmetric.hostname,
main_hostmetric.first_automation,
main_hostmetric.last_automation,
main_hostmetric.last_deleted,
main_hostmetric.deleted,
main_hostmetric.automated_counter,
main_hostmetric.deleted_counter,
main_hostmetric.used_in_inventories
FROM main_hostmetric
WHERE (main_hostmetric.last_automation > '{}' AND main_hostmetric.last_automation <= '{}') OR
(main_hostmetric.last_deleted > '{}' AND main_hostmetric.last_deleted <= '{}')
ORDER BY main_hostmetric.id ASC) TO STDOUT WITH CSV HEADER'''.format(
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
)
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)

View File

@@ -52,7 +52,7 @@ def all_collectors():
} }
def register(key, version, description=None, format='json', expensive=None, full_sync_interval=None): def register(key, version, description=None, format='json', expensive=None):
""" """
A decorator used to register a function as a metric collector. A decorator used to register a function as a metric collector.
@@ -71,7 +71,6 @@ def register(key, version, description=None, format='json', expensive=None, full
f.__awx_analytics_description__ = description f.__awx_analytics_description__ = description
f.__awx_analytics_type__ = format f.__awx_analytics_type__ = format
f.__awx_expensive__ = expensive f.__awx_expensive__ = expensive
f.__awx_full_sync_interval__ = full_sync_interval
return f return f
return decorate return decorate
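The register decorator above only attaches metadata attributes to the collector function; gather() below reads them back (for example __awx_expensive__, and on one side of this diff __awx_full_sync_interval__) to decide how to slice the collection window. A small, hedged sketch of defining a collector and inspecting the attributes named in this diff (collector name and return value are made up):

# Illustrative collector; the attribute names checked here appear in this diff.
@register('example_counts', '1.0', description='example collector', expensive=None)
def example_counts(since, **kwargs):
    return {'demo': True}

assert example_counts.__awx_analytics_type__ == 'json'    # default format
assert example_counts.__awx_expensive__ is None           # trivial slicing will be used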
@@ -260,19 +259,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
# These slicer functions may return a generator. The `since` parameter is # These slicer functions may return a generator. The `since` parameter is
# allowed to be None, and will fall back to LAST_ENTRIES[key] or to # allowed to be None, and will fall back to LAST_ENTRIES[key] or to
# LAST_GATHER (truncated appropriately to match the 4-week limit). # LAST_GATHER (truncated appropriately to match the 4-week limit).
#
# Alternatively, a full table sync can be forced if an interval is given
kwargs = dict()
full_sync_enabled = False
if func.__awx_full_sync_interval__:
last_full_sync = last_entries.get(f"{key}_full")
full_sync_enabled = not last_full_sync or last_full_sync < now() - timedelta(days=func.__awx_full_sync_interval__)
kwargs['full_sync_enabled'] = full_sync_enabled
if func.__awx_expensive__: if func.__awx_expensive__:
slices = func.__awx_expensive__(key, since, until, last_gather, **kwargs) slices = func.__awx_expensive__(key, since, until, last_gather)
else: else:
slices = collectors.trivial_slicing(key, since, until, last_gather, **kwargs) slices = collectors.trivial_slicing(key, since, until, last_gather)
for start, end in slices: for start, end in slices:
files = func(start, full_path=gather_dir, until=end) files = func(start, full_path=gather_dir, until=end)
@@ -311,12 +301,6 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
succeeded = False succeeded = False
logger.exception("Could not generate metric {}".format(filename)) logger.exception("Could not generate metric {}".format(filename))
# update full sync timestamp if successfully shipped
if full_sync_enabled and collection_type != 'dry-run' and succeeded:
with disable_activity_stream():
last_entries[f"{key}_full"] = now()
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
if collection_type != 'dry-run': if collection_type != 'dry-run':
if succeeded: if succeeded:
for fpath in tarfiles: for fpath in tarfiles:
@@ -375,7 +359,9 @@ def ship(path):
s.headers = get_awx_http_client_headers() s.headers = get_awx_http_client_headers()
s.headers.pop('Content-Type') s.headers.pop('Content-Type')
with set_environ(**settings.AWX_TASK_ENV): with set_environ(**settings.AWX_TASK_ENV):
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)) response = s.post(
url, files=files, verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)
)
# Accept 2XX status_codes # Accept 2XX status_codes
if response.status_code >= 300: if response.status_code >= 300:
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text)) logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))

View File

@@ -9,7 +9,7 @@ from django.apps import apps
from awx.main.consumers import emit_channel_notification from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing from awx.main.utils import is_testing
root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.analytics') logger = logging.getLogger('awx.main.analytics')
@@ -264,6 +264,13 @@ class Metrics:
data[field] = self.METRICS[field].decode(self.conn) data[field] = self.METRICS[field].decode(self.conn)
return data return data
def store_metrics(self, data_json):
# called when receiving metrics from other instances
data = json.loads(data_json)
if self.instance_name != data['instance']:
logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])
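store_metrics() above writes a remote node's serialized metrics under a per-instance Redis key derived from root_key; the send_metrics hunk further down does the equivalent for the local node before broadcasting. A small sketch of that key scheme (illustration only; the hostname and Redis connection are assumptions, not AWX source):

# Illustration of the per-instance key layout implied above.
import json
import redis

root_key = "awx_metrics"
conn = redis.Redis()

def publish_local_metrics(instance_name, serialized_metrics):
    # keep a local copy under "<root_key>_instance_<hostname>" so readers on this
    # node can load it, then return the payload to broadcast to other nodes
    conn.set(f"{root_key}_instance_{instance_name}", serialized_metrics)
    return json.dumps({'instance': instance_name, 'metrics': serialized_metrics})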
def should_pipe_execute(self): def should_pipe_execute(self):
if self.metrics_have_changed is False: if self.metrics_have_changed is False:
return False return False
@@ -298,15 +305,13 @@ class Metrics:
try: try:
current_time = time.time() current_time = time.time()
if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval: if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval:
serialized_metrics = self.serialize_local_metrics()
payload = { payload = {
'instance': self.instance_name, 'instance': self.instance_name,
'metrics': serialized_metrics, 'metrics': self.serialize_local_metrics(),
} }
# store the serialized data locally as well, so that load_other_metrics will read it # store a local copy as well
self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics) self.store_metrics(json.dumps(payload))
emit_channel_notification("metrics", payload) emit_channel_notification("metrics", payload)
self.previous_send_metrics.set(current_time) self.previous_send_metrics.set(current_time)
self.previous_send_metrics.store_value(self.conn) self.previous_send_metrics.store_value(self.conn)
finally: finally:

View File

@@ -10,7 +10,7 @@ from rest_framework import serializers
# AWX # AWX
from awx.conf import fields, register, register_validate from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment from awx.main.models import ExecutionEnvironment
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
logger = logging.getLogger('awx.main.conf') logger = logging.getLogger('awx.main.conf')
@@ -795,42 +795,6 @@ register(
category_slug='bulk', category_slug='bulk',
) )
register(
'UI_NEXT',
field_class=fields.BooleanField,
default=False,
label=_('Enable Preview of New User Interface'),
help_text=_('Enable preview of new user interface.'),
category=_('System'),
category_slug='system',
)
register(
'SUBSCRIPTION_USAGE_MODEL',
field_class=fields.ChoiceField,
choices=[
('', _('Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting')),
(
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS,
_('Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes'),
),
],
default='',
allow_blank=True,
label=_('Defines subscription usage model and shows Host Metrics'),
category=_('System'),
category_slug='system',
)
register(
'CLEANUP_HOST_METRICS_LAST_TS',
field_class=fields.DateTimeField,
label=_('Last cleanup date for HostMetrics'),
allow_null=True,
category=_('System'),
category_slug='system',
)
def logging_validate(serializer, attrs): def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'): if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):


@@ -38,8 +38,6 @@ STANDARD_INVENTORY_UPDATE_ENV = {
'ANSIBLE_INVENTORY_EXPORT': 'True', 'ANSIBLE_INVENTORY_EXPORT': 'True',
# Redirecting output to stderr allows JSON parsing to still work with -vvv # Redirecting output to stderr allows JSON parsing to still work with -vvv
'ANSIBLE_VERBOSE_TO_STDERR': 'True', 'ANSIBLE_VERBOSE_TO_STDERR': 'True',
# if ansible-inventory --limit is used for an inventory import, unmatched should be a failure
'ANSIBLE_HOST_PATTERN_MISMATCH': 'error',
} }
CAN_CANCEL = ('new', 'pending', 'waiting', 'running') CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL ACTIVE_STATES = CAN_CANCEL
@@ -65,7 +63,7 @@ ENV_BLOCKLIST = frozenset(
'INVENTORY_HOSTVARS', 'INVENTORY_HOSTVARS',
'AWX_HOST', 'AWX_HOST',
'PROJECT_REVISION', 'PROJECT_REVISION',
'SUPERVISOR_CONFIG_PATH', 'SUPERVISOR_WEB_CONFIG_PATH',
) )
) )
@@ -108,9 +106,3 @@ JOB_VARIABLE_PREFIXES = [
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = ( ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
'\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m' '\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
) )
# Values for setting SUBSCRIPTION_USAGE_MODEL
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'
# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')


@@ -3,7 +3,6 @@ import logging
import time import time
import hmac import hmac
import asyncio import asyncio
import redis
from django.core.serializers.json import DjangoJSONEncoder from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings from django.conf import settings
@@ -81,7 +80,7 @@ class WebsocketSecretAuthHelper:
WebsocketSecretAuthHelper.verify_secret(secret) WebsocketSecretAuthHelper.verify_secret(secret)
class RelayConsumer(AsyncJsonWebsocketConsumer): class BroadcastConsumer(AsyncJsonWebsocketConsumer):
async def connect(self): async def connect(self):
try: try:
WebsocketSecretAuthHelper.is_authorized(self.scope) WebsocketSecretAuthHelper.is_authorized(self.scope)
@@ -101,21 +100,6 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
async def internal_message(self, event): async def internal_message(self, event):
await self.send(event['text']) await self.send(event['text'])
async def receive_json(self, data):
(group, message) = unwrap_broadcast_msg(data)
if group == "metrics":
message = json.loads(message['text'])
conn = redis.Redis.from_url(settings.BROKER_URL)
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
else:
await self.channel_layer.group_send(group, message)
async def consumer_subscribe(self, event):
await self.send_json(event)
async def consumer_unsubscribe(self, event):
await self.send_json(event)
class EventConsumer(AsyncJsonWebsocketConsumer): class EventConsumer(AsyncJsonWebsocketConsumer):
async def connect(self): async def connect(self):
@@ -144,11 +128,6 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name, self.channel_name,
) )
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
)
@database_sync_to_async @database_sync_to_async
def user_can_see_object_id(self, user_access, oid): def user_can_see_object_id(self, user_access, oid):
# At this point user is a channels.auth.UserLazyObject object # At this point user is a channels.auth.UserLazyObject object
@@ -197,20 +176,9 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name, self.channel_name,
) )
if len(old_groups):
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
)
new_groups_exclusive = new_groups - current_groups new_groups_exclusive = new_groups - current_groups
for group_name in new_groups_exclusive: for group_name in new_groups_exclusive:
await self.channel_layer.group_add(group_name, self.channel_name) await self.channel_layer.group_add(group_name, self.channel_name)
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
)
self.scope['session']['groups'] = new_groups self.scope['session']['groups'] = new_groups
await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)}) await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})
@@ -232,11 +200,9 @@ def _dump_payload(payload):
return None return None
def unwrap_broadcast_msg(payload: dict):
return (payload['group'], payload['message'])
def emit_channel_notification(group, payload): def emit_channel_notification(group, payload):
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa
payload_dumped = _dump_payload(payload) payload_dumped = _dump_payload(payload)
if payload_dumped is None: if payload_dumped is None:
return return
@@ -246,6 +212,16 @@ def emit_channel_notification(group, payload):
run_sync( run_sync(
channel_layer.group_send( channel_layer.group_send(
group, group,
{"type": "internal.message", "text": payload_dumped, "needs_relay": True}, {"type": "internal.message", "text": payload_dumped},
)
)
run_sync(
channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{
"type": "internal.message",
"text": wrap_broadcast_msg(group, payload_dumped),
},
) )
) )


@@ -54,12 +54,6 @@ aim_inputs = {
'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'), 'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
}, },
{'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']}, {'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
{
'id': 'object_property',
'label': _('Object Property'),
'type': 'string',
'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
},
{ {
'id': 'reason', 'id': 'reason',
'label': _('Reason'), 'label': _('Reason'),
@@ -80,7 +74,6 @@ def aim_backend(**kwargs):
app_id = kwargs['app_id'] app_id = kwargs['app_id']
object_query = kwargs['object_query'] object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format'] object_query_format = kwargs['object_query_format']
object_property = kwargs.get('object_property', '')
reason = kwargs.get('reason', None) reason = kwargs.get('reason', None)
if webservice_id == '': if webservice_id == '':
webservice_id = 'AIMWebService' webservice_id = 'AIMWebService'
@@ -105,18 +98,7 @@ def aim_backend(**kwargs):
allow_redirects=False, allow_redirects=False,
) )
raise_for_status(res) raise_for_status(res)
# CCP returns the property name capitalized, username is camel case return res.json()['Content']
# so we need to handle that case
if object_property == '':
object_property = 'Content'
elif object_property.lower() == 'username':
object_property = 'UserName'
elif object_property not in res:
raise KeyError('Property {} not found in object'.format(object_property))
else:
object_property = object_property.capitalize()
return res.json()[object_property]
aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend) aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)


@@ -35,14 +35,8 @@ dsv_inputs = {
'type': 'string', 'type': 'string',
'help_text': _('The secret path e.g. /test/secret1'), 'help_text': _('The secret path e.g. /test/secret1'),
}, },
{
'id': 'secret_field',
'label': _('Secret Field'),
'help_text': _('The field to extract from the secret'),
'type': 'string',
},
], ],
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'], 'required': ['tenant', 'client_id', 'client_secret', 'path'],
} }
if settings.DEBUG: if settings.DEBUG:
@@ -58,5 +52,5 @@ if settings.DEBUG:
dsv_plugin = CredentialPlugin( dsv_plugin = CredentialPlugin(
'Thycotic DevOps Secrets Vault', 'Thycotic DevOps Secrets Vault',
dsv_inputs, dsv_inputs,
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
) )


@@ -1,7 +1,7 @@
from .plugin import CredentialPlugin from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret from thycotic.secrets.server import PasswordGrantAuthorizer, SecretServer, ServerSecret
tss_inputs = { tss_inputs = {
'fields': [ 'fields': [
@@ -17,12 +17,6 @@ tss_inputs = {
'help_text': _('The (Application) user username'), 'help_text': _('The (Application) user username'),
'type': 'string', 'type': 'string',
}, },
{
'id': 'domain',
'label': _('Domain'),
'help_text': _('The (Application) user domain'),
'type': 'string',
},
{ {
'id': 'password', 'id': 'password',
'label': _('Password'), 'label': _('Password'),
@@ -50,10 +44,7 @@ tss_inputs = {
def tss_backend(**kwargs): def tss_backend(**kwargs):
if 'domain' in kwargs: authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
else:
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
secret_server = SecretServer(kwargs['server_url'], authorizer) secret_server = SecretServer(kwargs['server_url'], authorizer)
secret_dict = secret_server.get_secret(kwargs['secret_id']) secret_dict = secret_server.get_secret(kwargs['secret_id'])
secret = ServerSecret(**secret_dict) secret = ServerSecret(**secret_dict)


@@ -63,7 +63,7 @@ class RecordedQueryLog(object):
if not os.path.isdir(self.dest): if not os.path.isdir(self.dest):
os.makedirs(self.dest) os.makedirs(self.dest)
progname = ' '.join(sys.argv) progname = ' '.join(sys.argv)
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'): for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
if match in progname: if match in progname:
progname = match progname = match
break break


@@ -1,14 +1,12 @@
import os
import psycopg2 import psycopg2
import select import select
from contextlib import contextmanager from contextlib import contextmanager
from awx.settings.application_name import get_application_name
from django.conf import settings from django.conf import settings
from django.db import connection as pg_connection from django.db import connection as pg_connection
NOT_READY = ([], [], []) NOT_READY = ([], [], [])
@@ -16,29 +14,6 @@ def get_local_queuename():
return settings.CLUSTER_HOST_ID return settings.CLUSTER_HOST_ID
def get_task_queuename():
if os.getenv('AWX_COMPONENT') != 'web':
return settings.CLUSTER_HOST_ID
from awx.main.models.ha import Instance
random_task_instance = (
Instance.objects.filter(
node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
node_state=Instance.States.READY,
enabled=True,
)
.only('hostname')
.order_by('?')
.first()
)
if random_task_instance is None:
raise ValueError('No task instances are READY and Enabled.')
return random_task_instance.hostname
class PubSub(object): class PubSub(object):
def __init__(self, conn): def __init__(self, conn):
self.conn = conn self.conn = conn
@@ -85,11 +60,10 @@ def pg_bus_conn(new_connection=False):
''' '''
if new_connection: if new_connection:
conf = settings.DATABASES['default'].copy() conf = settings.DATABASES['default']
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy() conn = psycopg2.connect(
# Modify the application name to distinguish from other connections the process might use dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener') )
conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
# Django connection.cursor().connection doesn't have autocommit=True on by default # Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True) conn.set_session(autocommit=True)
else: else:


@@ -6,7 +6,7 @@ from django.conf import settings
from django.db import connection from django.db import connection
import redis import redis
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
from . import pg_bus_conn from . import pg_bus_conn
@@ -21,7 +21,7 @@ class Control(object):
if service not in self.services: if service not in self.services:
raise RuntimeError('{} must be in {}'.format(service, self.services)) raise RuntimeError('{} must be in {}'.format(service, self.services))
self.service = service self.service = service
self.queuename = host or get_task_queuename() self.queuename = host or get_local_queuename()
def status(self, *args, **kwargs): def status(self, *args, **kwargs):
r = redis.Redis.from_url(settings.BROKER_URL) r = redis.Redis.from_url(settings.BROKER_URL)


@@ -10,7 +10,6 @@ from django_guid import set_guid
from django_guid.utils import generate_guid from django_guid.utils import generate_guid
from awx.main.dispatch.worker import TaskWorker from awx.main.dispatch.worker import TaskWorker
from awx.main.utils.db import set_connection_name
logger = logging.getLogger('awx.main.dispatch.periodic') logger = logging.getLogger('awx.main.dispatch.periodic')
@@ -22,9 +21,6 @@ class Scheduler(Scheduler):
def run(): def run():
ppid = os.getppid() ppid = os.getppid()
logger.warning('periodic beat started') logger.warning('periodic beat started')
set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes
while True: while True:
if os.getppid() != ppid: if os.getppid() != ppid:
# if the parent PID changes, this process has been orphaned # if the parent PID changes, this process has been orphaned


@@ -79,11 +79,9 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
else: else:
hostname = instance.hostname hostname = instance.hostname
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
base_Q = Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id) jobs = UnifiedJob.objects.filter(
if ref_time: Q(status='running', modified__lte=ref_time) & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
jobs = UnifiedJob.objects.filter(base_Q & Q(started__lte=ref_time)) )
else:
jobs = UnifiedJob.objects.filter(base_Q)
if excluded_uuids: if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids) jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
for j in jobs: for j in jobs:


@@ -18,7 +18,6 @@ from django.conf import settings
from awx.main.dispatch.pool import WorkerPool from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch import pg_bus_conn from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime from awx.main.utils.common import log_excess_runtime
from awx.main.utils.db import set_connection_name
if 'run_callback_receiver' in sys.argv: if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver') logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -220,7 +219,6 @@ class BaseWorker(object):
def work_loop(self, queue, finished, idx, *args): def work_loop(self, queue, finished, idx, *args):
ppid = os.getppid() ppid = os.getppid()
signal_handler = WorkerSignalHandler() signal_handler = WorkerSignalHandler()
set_connection_name('worker') # set application_name to distinguish from other dispatcher processes
while not signal_handler.kill_now: while not signal_handler.kill_now:
# if the parent PID changes, this process has been orphaned # if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too # via e.g., segfault or sigkill, we should exit too


@@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
`awx.main.dispatch.publish`. `awx.main.dispatch.publish`.
""" """
@staticmethod @classmethod
def resolve_callable(task): def resolve_callable(cls, task):
""" """
Transform a dotted notation task into an imported, callable function, e.g., Transform a dotted notation task into an imported, callable function, e.g.,
@@ -46,8 +46,7 @@ class TaskWorker(BaseWorker):
return _call return _call
@staticmethod def run_callable(self, body):
def run_callable(body):
""" """
Given some AMQP message, import the correct Python code and run it. Given some AMQP message, import the correct Python code and run it.
""" """


@@ -954,16 +954,6 @@ class OrderedManyToManyDescriptor(ManyToManyDescriptor):
def get_queryset(self): def get_queryset(self):
return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name) return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name)
def add(self, *objects):
if len(objects) > 1:
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
return super().add(*objects)
def remove(self, *objects):
if len(objects) > 1:
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
return super().remove(*objects)
return OrderedManyRelatedManager return OrderedManyRelatedManager
return add_custom_queryset_to_many_related_manager( return add_custom_queryset_to_many_related_manager(
@@ -981,12 +971,13 @@ class OrderedManyToManyField(models.ManyToManyField):
by a special `position` column on the M2M table by a special `position` column on the M2M table
""" """
def _update_m2m_position(self, sender, instance, action, **kwargs): def _update_m2m_position(self, sender, **kwargs):
if action in ('post_add', 'post_remove'): if kwargs.get('action') in ('post_add', 'post_remove'):
descriptor = getattr(instance, self.name) order_with_respect_to = None
order_with_respect_to = descriptor.source_field_name for field in sender._meta.local_fields:
if isinstance(field, models.ForeignKey) and isinstance(kwargs['instance'], field.related_model):
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: instance.pk})): order_with_respect_to = field.name
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: kwargs['instance'].pk})):
if ig.position != i: if ig.position != i:
ig.position = i ig.position = i
ig.save() ig.save()


@@ -1,22 +0,0 @@
from awx.main.models import HostMetric
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
"""
Run soft-deleting of HostMetrics
"""
help = 'Run soft-deleting of HostMetrics'
def add_arguments(self, parser):
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
def handle(self, *args, **options):
months_ago = options.get('months-ago') or None
if not months_ago:
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
HostMetric.cleanup_task(months_ago)


@@ -1,6 +1,5 @@
from awx.main.tasks.system import clear_setting_cache
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
class Command(BaseCommand): class Command(BaseCommand):
@@ -32,7 +31,5 @@ class Command(BaseCommand):
else: else:
raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth') raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')
clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])
def handle(self, **options): def handle(self, **options):
self._enable_disable_auth(options.get('enable'), options.get('disable')) self._enable_disable_auth(options.get('enable'), options.get('disable'))


@@ -1,230 +1,53 @@
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
import datetime import datetime
from django.core.serializers.json import DjangoJSONEncoder from django.core.serializers.json import DjangoJSONEncoder
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly from awx.main.models.inventory import HostMetric
from awx.main.analytics.collectors import config
import json import json
import sys
import tempfile
import tarfile
import csv
CSV_PREFERRED_ROW_COUNT = 500000
BATCHED_FETCH_COUNT = 10000
class Command(BaseCommand): class Command(BaseCommand):
help = 'This is for offline licensing usage' help = 'This is for offline licensing usage'
def host_metric_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
list_of_queryset = list(
result.values(
'id',
'hostname',
'first_automation',
'last_automation',
'last_deleted',
'automated_counter',
'deleted_counter',
'deleted',
'used_in_inventories',
).order_by('first_automation')[offset : offset + limit]
)
return list_of_queryset
def host_metric_summary_monthly_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
list_of_queryset = list(
result.values(
'id',
'date',
'license_consumed',
'license_capacity',
'hosts_added',
'hosts_deleted',
'indirectly_managed_hosts',
).order_by(
'date'
)[offset : offset + limit]
)
return list_of_queryset
def paginated_db_retrieval(self, type, filter_kwargs, rows_per_file):
offset = 0
list_of_queryset = []
while True:
if type == 'host_metric':
result = HostMetric.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_queryset(result, offset, rows_per_file)
elif type == 'host_metric_summary_monthly':
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_summary_monthly_queryset(result, offset, rows_per_file)
if not list_of_queryset:
break
else:
yield list_of_queryset
offset += len(list_of_queryset)
def controlled_db_retrieval(self, type, filter_kwargs, offset=0, fetch_count=BATCHED_FETCH_COUNT):
if type == 'host_metric':
result = HostMetric.objects.filter(**filter_kwargs)
return self.host_metric_queryset(result, offset, fetch_count)
elif type == 'host_metric_summary_monthly':
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
return self.host_metric_summary_monthly_queryset(result, offset, fetch_count)
def write_to_csv(self, csv_file, list_of_queryset, always_header, first_write=False, mode='a'):
with open(csv_file, mode, newline='') as output_file:
try:
keys = list_of_queryset[0].keys() if list_of_queryset else []
dict_writer = csv.DictWriter(output_file, keys)
if always_header or first_write:
dict_writer.writeheader()
dict_writer.writerows(list_of_queryset)
except Exception as e:
print(e)
def csv_for_tar(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
for index, list_of_queryset in enumerate(self.paginated_db_retrieval(type, filter_kwargs, rows_per_file)):
csv_file = f'{temp_dir}/{type}{index+1}.csv'
arcname_file = f'{type}{index+1}.csv'
first_write = True if index == 0 else False
self.write_to_csv(csv_file, list_of_queryset, always_header, first_write, 'w')
yield csv_file, arcname_file
def csv_for_tar_batched_fetch(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
csv_iteration = 1
offset = 0
rows_written_per_csv = 0
to_fetch = BATCHED_FETCH_COUNT
while True:
list_of_queryset = self.controlled_db_retrieval(type, filter_kwargs, offset, to_fetch)
if not list_of_queryset:
break
csv_file = f'{temp_dir}/{type}{csv_iteration}.csv'
arcname_file = f'{type}{csv_iteration}.csv'
self.write_to_csv(csv_file, list_of_queryset, always_header)
offset += to_fetch
rows_written_per_csv += to_fetch
always_header = False
remaining_rows_per_csv = rows_per_file - rows_written_per_csv
if not remaining_rows_per_csv:
yield csv_file, arcname_file
rows_written_per_csv = 0
always_header = True
to_fetch = BATCHED_FETCH_COUNT
csv_iteration += 1
elif remaining_rows_per_csv < BATCHED_FETCH_COUNT:
to_fetch = remaining_rows_per_csv
if rows_written_per_csv:
yield csv_file, arcname_file
def config_for_tar(self, options, temp_dir):
config_json = json.dumps(config(options.get('since')))
config_file = f'{temp_dir}/config.json'
arcname_file = 'config.json'
with open(config_file, 'w') as f:
f.write(config_json)
return config_file, arcname_file
def output_json(self, options, filter_kwargs):
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in self.csv_for_tar(temp_dir, options.get('json', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, True):
csv_file = csv_detail[0]
with open(csv_file) as f:
reader = csv.DictReader(f)
rows = list(reader)
json_result = json.dumps(rows, cls=DjangoJSONEncoder)
print(json_result)
def output_csv(self, options, filter_kwargs):
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in self.csv_for_tar(temp_dir, options.get('csv', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, False):
csv_file = csv_detail[0]
with open(csv_file) as f:
sys.stdout.write(f.read())
def output_tarball(self, options, filter_kwargs):
always_header = True
rows_per_file = options['rows_per_file'] or CSV_PREFERRED_ROW_COUNT
tar = tarfile.open("./host_metrics.tar.gz", "w:gz")
if rows_per_file <= BATCHED_FETCH_COUNT:
csv_function = self.csv_for_tar
else:
csv_function = self.csv_for_tar_batched_fetch
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in csv_function(temp_dir, 'host_metric', filter_kwargs, rows_per_file, always_header):
tar.add(csv_detail[0], arcname=csv_detail[1])
for csv_detail in csv_function(temp_dir, 'host_metric_summary_monthly', filter_kwargs, rows_per_file, always_header):
tar.add(csv_detail[0], arcname=csv_detail[1])
config_file, arcname_file = self.config_for_tar(options, temp_dir)
tar.add(config_file, arcname=arcname_file)
tar.close()
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD') parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
parser.add_argument('--json', type=str, const='host_metric', nargs='?', help='Select output as JSON for host_metric or host_metric_summary_monthly') parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
parser.add_argument('--csv', type=str, const='host_metric', nargs='?', help='Select output as CSV for host_metric or host_metric_summary_monthly') parser.add_argument('--json', action='store_true', help='Select output as JSON')
parser.add_argument('--tarball', action='store_true', help=f'Package CSV files into a tar with up to {CSV_PREFERRED_ROW_COUNT} rows')
parser.add_argument('--rows_per_file', type=int, help=f'Split rows in chunks of {CSV_PREFERRED_ROW_COUNT}')
def handle(self, *args, **options): def handle(self, *args, **options):
since = options.get('since') since = options.get('since')
until = options.get('until')
if since is None and until is None:
print("No Arguments received")
return None
if since is not None and since.tzinfo is None: if since is not None and since.tzinfo is None:
since = since.replace(tzinfo=datetime.timezone.utc) since = since.replace(tzinfo=datetime.timezone.utc)
if until is not None and until.tzinfo is None:
until = until.replace(tzinfo=datetime.timezone.utc)
filter_kwargs = {} filter_kwargs = {}
if since is not None: if since is not None:
filter_kwargs['last_automation__gte'] = since filter_kwargs['last_automation__gte'] = since
if until is not None:
filter_kwargs['last_automation__lte'] = until
filter_kwargs_host_metrics_summary = {} result = HostMetric.objects.filter(**filter_kwargs)
if since is not None:
filter_kwargs_host_metrics_summary['date__gte'] = since
if options['rows_per_file'] and options.get('rows_per_file') > CSV_PREFERRED_ROW_COUNT:
print(f"rows_per_file exceeds the allowable limit of {CSV_PREFERRED_ROW_COUNT}.")
return
# if --json flag is set, output the result in json format # if --json flag is set, output the result in json format
if options['json']: if options['json']:
self.output_json(options, filter_kwargs) list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
elif options['csv']: json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
self.output_csv(options, filter_kwargs) print(json_result)
elif options['tarball']:
self.output_tarball(options, filter_kwargs)
# --json flag is not set, output in plain text # --json flag is not set, output in plain text
else: else:
print(f"Printing up to {BATCHED_FETCH_COUNT} automated hosts:") print(f"Total Number of hosts automated: {len(result)}")
result = HostMetric.objects.filter(**filter_kwargs) for item in result:
list_of_queryset = self.host_metric_queryset(result, 0, BATCHED_FETCH_COUNT)
for item in list_of_queryset:
print( print(
"Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format( "Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
hostname=item['hostname'], first_automation=item['first_automation'], last_automation=item['last_automation'] hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
) )
) )
return return


@@ -458,19 +458,12 @@ class Command(BaseCommand):
# TODO: We disable variable overwrite here in case user-defined inventory variables get # TODO: We disable variable overwrite here in case user-defined inventory variables get
# mangled. But we still need to figure out a better way of processing multiple inventory # mangled. But we still need to figure out a better way of processing multiple inventory
# update variables mixing with each other. # update variables mixing with each other.
# issue for this: https://github.com/ansible/awx/issues/11623 all_obj = self.inventory
db_variables = all_obj.variables_dict
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars: db_variables.update(self.all_group.variables)
# NOTE: we had to add a exception case to not merge variables if db_variables != all_obj.variables_dict:
# to make constructed inventory coherent all_obj.variables = json.dumps(db_variables)
db_variables = self.all_group.variables all_obj.save(update_fields=['variables'])
else:
db_variables = self.inventory.variables_dict
db_variables.update(self.all_group.variables)
if db_variables != self.inventory.variables_dict:
self.inventory.variables = json.dumps(db_variables)
self.inventory.save(update_fields=['variables'])
logger.debug('Inventory variables updated from "all" group') logger.debug('Inventory variables updated from "all" group')
else: else:
logger.debug('Inventory variables unmodified') logger.debug('Inventory variables unmodified')
@@ -529,32 +522,16 @@ class Command(BaseCommand):
def _update_db_host_from_mem_host(self, db_host, mem_host): def _update_db_host_from_mem_host(self, db_host, mem_host):
# Update host variables. # Update host variables.
db_variables = db_host.variables_dict db_variables = db_host.variables_dict
mem_variables = mem_host.variables
update_fields = []
# Update host instance_id.
instance_id = self._get_instance_id(mem_variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')
if self.inventory.kind == 'constructed':
# remove towervars so the constructed hosts do not have extra variables
for prefix in ('host', 'tower'):
for var in ('remote_{}_enabled', 'remote_{}_id'):
mem_variables.pop(var.format(prefix), None)
if self.overwrite_vars: if self.overwrite_vars:
db_variables = mem_variables db_variables = mem_host.variables
else: else:
db_variables.update(mem_variables) db_variables.update(mem_host.variables)
update_fields = []
if db_variables != db_host.variables_dict: if db_variables != db_host.variables_dict:
db_host.variables = json.dumps(db_variables) db_host.variables = json.dumps(db_variables)
update_fields.append('variables') update_fields.append('variables')
# Update host enabled flag. # Update host enabled flag.
enabled = self._get_enabled(mem_variables) enabled = self._get_enabled(mem_host.variables)
if enabled is not None and db_host.enabled != enabled: if enabled is not None and db_host.enabled != enabled:
db_host.enabled = enabled db_host.enabled = enabled
update_fields.append('enabled') update_fields.append('enabled')
@@ -563,6 +540,12 @@ class Command(BaseCommand):
old_name = db_host.name old_name = db_host.name
db_host.name = mem_host.name db_host.name = mem_host.name
update_fields.append('name') update_fields.append('name')
# Update host instance_id.
instance_id = self._get_instance_id(mem_host.variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')
# Update host and display message(s) on what changed. # Update host and display message(s) on what changed.
if update_fields: if update_fields:
db_host.save(update_fields=update_fields) db_host.save(update_fields=update_fields)
@@ -671,19 +654,13 @@ class Command(BaseCommand):
mem_host = self.all_group.all_hosts[mem_host_name] mem_host = self.all_group.all_hosts[mem_host_name]
import_vars = mem_host.variables import_vars = mem_host.variables
host_desc = import_vars.pop('_awx_description', 'imported') host_desc = import_vars.pop('_awx_description', 'imported')
host_attrs = dict(description=host_desc) host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
enabled = self._get_enabled(mem_host.variables) enabled = self._get_enabled(mem_host.variables)
if enabled is not None: if enabled is not None:
host_attrs['enabled'] = enabled host_attrs['enabled'] = enabled
if self.instance_id_var: if self.instance_id_var:
instance_id = self._get_instance_id(mem_host.variables) instance_id = self._get_instance_id(mem_host.variables)
host_attrs['instance_id'] = instance_id host_attrs['instance_id'] = instance_id
if self.inventory.kind == 'constructed':
# remove towervars so the constructed hosts do not have extra variables
for prefix in ('host', 'tower'):
for var in ('remote_{}_enabled', 'remote_{}_id'):
import_vars.pop(var.format(prefix), None)
host_attrs['variables'] = json.dumps(import_vars)
try: try:
sanitize_jinja(mem_host_name) sanitize_jinja(mem_host_name)
except ValueError as e: except ValueError as e:


@@ -44,18 +44,16 @@ class Command(BaseCommand):
for x in ig.instances.all(): for x in ig.instances.all():
color = '\033[92m' color = '\033[92m'
end_color = '\033[0m'
if x.capacity == 0 and x.node_type != 'hop': if x.capacity == 0 and x.node_type != 'hop':
color = '\033[91m' color = '\033[91m'
if not x.enabled: if not x.enabled:
color = '\033[90m[DISABLED] ' color = '\033[90m[DISABLED] '
if no_color: if no_color:
color = '' color = ''
end_color = ''
capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else '' capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
version = f" version={x.version or '?'}" if x.node_type != 'hop' else '' version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else '' heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}') print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
print() print()


@@ -1,32 +0,0 @@
import logging
import json
from django.core.management.base import BaseCommand
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker
logger = logging.getLogger('awx.main.cache_clear')
class Command(BaseCommand):
"""
Cache Clear
Runs as a management command and starts a daemon that listens for a pg_notify message to clear the cache.
"""
help = 'Launch the cache clear daemon'
def handle(self, *arg, **options):
try:
with pg_bus_conn(new_connection=True) as conn:
conn.listen("tower_settings_change")
for e in conn.events(yield_timeouts=True):
if e is not None:
body = json.loads(e.payload)
logger.info(f"Cache clear request received. Clearing now, payload: {e.payload}")
TaskWorker.run_callable(body)
except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in cache clear main loop')
raise


@@ -8,7 +8,7 @@ from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
from django.db import connection as django_connection from django.db import connection as django_connection
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.control import Control from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -76,7 +76,7 @@ class Command(BaseCommand):
consumer = None consumer = None
try: try:
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()] queues = ['tower_broadcast_all', get_local_queuename()]
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4)) consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
consumer.run() consumer.run()
except KeyboardInterrupt: except KeyboardInterrupt:


@@ -1,74 +0,0 @@
import json
import logging
import os
import time
import signal
import sys
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.main.dispatch import pg_bus_conn
logger = logging.getLogger('awx.main.commands.run_heartbeet')
class Command(BaseCommand):
help = 'Launch the web server beacon (heartbeet)'
def print_banner(self):
heartbeet = r"""
********** **********
************* *************
*****************************
***********HEART***********
*************************
*******************
*************** _._
*********** /`._ `'. __
******* \ .\| \ _'` `)
*** (``_) \| ).'` /`- /
* `\ `;\_ `\\//`-'` /
\ `'.'.| / __/`
`'--v_|/`'`
__||-._
/'` `-`` `'\\
/ .'` )
\ BEET ' )
\. /
'. /'`
`) |
//
'(.
`\`.
``"""
print(heartbeet)
def construct_payload(self, action='online'):
payload = {
'hostname': settings.CLUSTER_HOST_ID,
'ip': os.environ.get('MY_POD_IP'),
'action': action,
}
return json.dumps(payload)
def notify_listener_and_exit(self, *args):
with pg_bus_conn(new_connection=False) as conn:
conn.notify('web_heartbeet', self.construct_payload(action='offline'))
sys.exit(0)
def do_hearbeat_loop(self):
with pg_bus_conn(new_connection=True) as conn:
while True:
logger.debug('Sending heartbeat')
conn.notify('web_heartbeet', self.construct_payload())
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)
def handle(self, *arg, **options):
self.print_banner()
signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
signal.signal(signal.SIGINT, self.notify_listener_and_exit)
# Note: We don't really try any reconnect logic to pg_notify here,
# just let supervisor restart if we fail.
self.do_hearbeat_loop()


@@ -1,41 +0,0 @@
import logging
import json
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.cache import cache
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker
from awx.main.utils.external_logging import reconfigure_rsyslog
logger = logging.getLogger('awx.main.rsyslog_configurer')
class Command(BaseCommand):
"""
Rsyslog Configurer
Runs as a management command and starts rsyslog configurer daemon. Daemon listens
for pg_notify then calls reconfigure_rsyslog
"""
help = 'Launch the rsyslog_configurer daemon'
def handle(self, *arg, **options):
try:
with pg_bus_conn(new_connection=True) as conn:
conn.listen("rsyslog_configurer")
# reconfigure rsyslog on start up
reconfigure_rsyslog()
for e in conn.events(yield_timeouts=True):
if e is not None:
logger.info("Change in logging settings found. Restarting rsyslogd")
# clear the cache of relevant settings then restart
setting_keys = [k for k in dir(settings) if k.startswith('LOG_AGGREGATOR')]
cache.delete_many(setting_keys)
settings._awx_conf_memoizedcache.clear()
body = json.loads(e.payload)
TaskWorker.run_callable(body)
except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in rsyslog_configurer main loop')
raise


@@ -13,13 +13,13 @@ from django.db import connection
from django.db.migrations.executor import MigrationExecutor from django.db.migrations.executor import MigrationExecutor
from awx.main.analytics.broadcast_websocket import ( from awx.main.analytics.broadcast_websocket import (
RelayWebsocketStatsManager, BroadcastWebsocketStatsManager,
safe_name, safe_name,
) )
from awx.main.wsrelay import WebSocketRelayManager from awx.main.wsbroadcast import BroadcastWebsocketManager
logger = logging.getLogger('awx.main.wsrelay') logger = logging.getLogger('awx.main.wsbroadcast')
class Command(BaseCommand): class Command(BaseCommand):
@@ -98,9 +98,8 @@ class Command(BaseCommand):
try: try:
executor = MigrationExecutor(connection) executor = MigrationExecutor(connection)
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes())) migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
connection.close() # Because of async nature, main loop will use new connection, so close this
except Exception as exc: except Exception as exc:
logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...') logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
time.sleep(10) time.sleep(10)
return return
@@ -131,9 +130,9 @@ class Command(BaseCommand):
if options.get('status'): if options.get('status'):
try: try:
stats_all = RelayWebsocketStatsManager.get_stats_sync() stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
except redis.exceptions.ConnectionError as e: except redis.exceptions.ConnectionError as e:
print(f"Unable to get Relay Websocket Status. Failed to connect to redis {e}") print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
return return
data = {} data = {}
@@ -152,19 +151,22 @@ class Command(BaseCommand):
host_stats = Command.get_connection_status(hostnames, data) host_stats = Command.get_connection_status(hostnames, data)
lines = Command._format_lines(host_stats) lines = Command._format_lines(host_stats)
print(f'Relay websocket connection status from "{my_hostname}" to:') print(f'Broadcast websocket connection status from "{my_hostname}" to:')
print('\n'.join(lines)) print('\n'.join(lines))
host_stats = Command.get_connection_stats(hostnames, data) host_stats = Command.get_connection_stats(hostnames, data)
lines = Command._format_lines(host_stats) lines = Command._format_lines(host_stats)
print(f'\nRelay websocket connection stats from "{my_hostname}" to:') print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:')
print('\n'.join(lines)) print('\n'.join(lines))
return return
try: try:
websocket_relay_manager = WebSocketRelayManager() broadcast_websocket_mgr = BroadcastWebsocketManager()
asyncio.run(websocket_relay_manager.run()) task = broadcast_websocket_mgr.start()
loop = asyncio.get_event_loop()
loop.run_until_complete(task)
except KeyboardInterrupt: except KeyboardInterrupt:
logger.info('Terminating Websocket Relayer') logger.debug('Terminating Websocket Broadcaster')


@@ -79,11 +79,6 @@ class HostManager(models.Manager):
return qs return qs
class HostMetricActiveManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted=False)
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping): def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
# Create IG mapping by union of all groups their instances are members of # Create IG mapping by union of all groups their instances are members of
ig_ig_mapping = {} ig_ig_mapping = {}


@@ -1,18 +0,0 @@
# Generated by Django 3.2.16 on 2023-03-16 15:16
from django.db import migrations
from awx.main.migrations._credentialtypes import migrate_credential_type
from awx.main.models import CredentialType
class Migration(migrations.Migration):
def update_cyberark_plugin_names(apps, schema_editor):
CredentialType.setup_tower_managed_defaults(apps)
migrate_credential_type(apps, 'aim')
migrate_credential_type(apps, 'conjur')
dependencies = [
('main', '0178_instance_group_admin_migration'),
]
operations = [migrations.RunPython(update_cyberark_plugin_names)]


@@ -1,43 +0,0 @@
# Generated by Django 3.2.16 on 2023-02-03 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0179_change_cyberark_plugin_names'),
]
operations = [
migrations.AlterField(model_name='hostmetric', name='hostname', field=models.CharField(max_length=512, primary_key=False, serialize=True, unique=True)),
migrations.AddField(
model_name='hostmetric',
name='last_deleted',
field=models.DateTimeField(db_index=True, null=True, help_text='When the host was last deleted'),
),
migrations.AddField(
model_name='hostmetric',
name='automated_counter',
field=models.BigIntegerField(default=0, help_text='How many times was the host automated'),
),
migrations.AddField(
model_name='hostmetric',
name='deleted_counter',
field=models.IntegerField(default=0, help_text='How many times was the host deleted'),
),
migrations.AddField(
model_name='hostmetric',
name='deleted',
field=models.BooleanField(
default=False, help_text='Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption'
),
),
migrations.AddField(
model_name='hostmetric',
name='used_in_inventories',
field=models.IntegerField(null=True, help_text='How many inventories contain this host'),
),
migrations.AddField(
model_name='hostmetric', name='id', field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
),
]


@@ -1,33 +0,0 @@
# Generated by Django 3.2.16 on 2023-02-10 12:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0180_add_hostmetric_fields'),
]
operations = [
migrations.CreateModel(
name='HostMetricSummaryMonthly',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(unique=True)),
('license_consumed', models.BigIntegerField(default=0, help_text='How many unique hosts are consumed from the license')),
('license_capacity', models.BigIntegerField(default=0, help_text="License capacity as max. number of unique hosts")),
(
'hosts_added',
models.IntegerField(default=0, help_text='How many hosts were added in the associated month, consuming more license capacity'),
),
(
'hosts_deleted',
models.IntegerField(default=0, help_text='How many hosts were deleted in the associated month, freeing the license capacity'),
),
(
'indirectly_managed_hosts',
models.IntegerField(default=0, help_text='Manually entered number indirectly managed hosts for a certain month'),
),
],
),
]


@@ -1,138 +0,0 @@
# Generated by Django 3.2.16 on 2022-12-07 14:20
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0181_hostmetricsummarymonthly'),
]
operations = [
migrations.CreateModel(
name='InventoryConstructedInventoryMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
(
'constructed_inventory',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory', related_name='constructed_inventory_memberships'),
),
('input_inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory')),
],
),
migrations.AddField(
model_name='inventory',
name='input_inventories',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
through_fields=('constructed_inventory', 'input_inventory'),
help_text='Only valid for constructed inventories, this links to the inventories that will be used.',
related_name='destination_inventories',
through='main.InventoryConstructedInventoryMembership',
to='main.Inventory',
),
),
migrations.AlterField(
model_name='inventory',
name='kind',
field=models.CharField(
blank=True,
choices=[
('', 'Hosts have a direct link to this inventory.'),
('smart', 'Hosts for inventory generated using the host_filter property.'),
('constructed', 'Parse list of source inventories with the constructed inventory plugin.'),
],
default='',
help_text='Kind of inventory being represented.',
max_length=32,
),
),
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
],
default=None,
max_length=32,
),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
],
default=None,
max_length=32,
),
),
migrations.AddField(
model_name='inventorysource',
name='limit',
field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
),
migrations.AddField(
model_name='inventoryupdate',
name='limit',
field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
),
migrations.AlterField(
model_name='inventorysource',
name='host_filter',
field=models.TextField(
blank=True,
default='',
help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
),
),
migrations.AlterField(
model_name='inventoryupdate',
name='host_filter',
field=models.TextField(
blank=True,
default='',
help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
),
),
migrations.AddField(
model_name='jobhostsummary',
name='constructed_host',
field=models.ForeignKey(
default=None,
editable=False,
help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name='constructed_host_summaries',
to='main.host',
),
),
]


@@ -1,9 +1,6 @@
import logging
from awx.main.models import CredentialType from awx.main.models import CredentialType
from django.db.models import Q from django.db.models import Q
logger = logging.getLogger('awx.main.migrations')
DEPRECATED_CRED_KIND = { DEPRECATED_CRED_KIND = {
'rax': { 'rax': {
@@ -79,14 +76,3 @@ def add_tower_verify_field(apps, schema_editor):
def remove_become_methods(apps, schema_editor): def remove_become_methods(apps, schema_editor):
# this is no longer necessary; schemas are defined in code # this is no longer necessary; schemas are defined in code
pass pass
def migrate_credential_type(apps, namespace):
ns_types = apps.get_model('main', 'CredentialType').objects.filter(namespace=namespace).order_by('created')
if ns_types.count() == 2:
original, renamed = ns_types.all()
logger.info(f'There are credential types to migrate in the "{namespace}" namespace: {original.name}')
apps.get_model('main', 'Credential').objects.filter(credential_type_id=original.id).update(credential_type_id=renamed.id)
logger.info(f'Removing old credential type: {renamed.name}')
original.delete()


@@ -16,9 +16,7 @@ from awx.main.models.inventory import ( # noqa
Group, Group,
Host, Host,
HostMetric, HostMetric,
HostMetricSummaryMonthly,
Inventory, Inventory,
InventoryConstructedInventoryMembership,
InventorySource, InventorySource,
InventoryUpdate, InventoryUpdate,
SmartInventoryMembership, SmartInventoryMembership,


@@ -7,7 +7,6 @@ from collections import defaultdict
from django.conf import settings from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist from django.core.exceptions import ObjectDoesNotExist
from django.db import models, DatabaseError from django.db import models, DatabaseError
from django.db.models.functions import Cast
from django.utils.dateparse import parse_datetime from django.utils.dateparse import parse_datetime
from django.utils.text import Truncator from django.utils.text import Truncator
from django.utils.timezone import utc, now from django.utils.timezone import utc, now
@@ -537,38 +536,25 @@ class JobEvent(BasePlaybookEvent):
return return
job = self.job job = self.job
from awx.main.models import Host, JobHostSummary # circular import from awx.main.models import Host, JobHostSummary, HostMetric # circular import
if self.job.inventory.kind == 'constructed':
all_hosts = Host.objects.filter(id__in=self.job.inventory.hosts.values_list(Cast('instance_id', output_field=models.IntegerField()))).only(
'id', 'name'
)
constructed_host_map = self.host_map
host_map = {host.name: host.id for host in all_hosts}
else:
all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
constructed_host_map = {}
host_map = self.host_map
all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
existing_host_ids = set(h.id for h in all_hosts) existing_host_ids = set(h.id for h in all_hosts)
summaries = dict() summaries = dict()
updated_hosts_list = list() updated_hosts_list = list()
for host in hostnames: for host in hostnames:
updated_hosts_list.append(host.lower()) updated_hosts_list.append(host.lower())
host_id = host_map.get(host) host_id = self.host_map.get(host, None)
if host_id not in existing_host_ids: if host_id not in existing_host_ids:
host_id = None host_id = None
constructed_host_id = constructed_host_map.get(host)
host_stats = {} host_stats = {}
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'): for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
try: try:
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0) host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
except AttributeError: # in case event_data[stat] isn't a dict. except AttributeError: # in case event_data[stat] isn't a dict.
pass pass
summary = JobHostSummary( summary = JobHostSummary(created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats)
created=now(), modified=now(), job_id=job.id, host_id=host_id, constructed_host_id=constructed_host_id, host_name=host, **host_stats
)
summary.failed = bool(summary.dark or summary.failures) summary.failed = bool(summary.dark or summary.failures)
summaries[(host_id, host)] = summary summaries[(host_id, host)] = summary
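
Illustrative sketch (not part of the diff): hosts inside a constructed inventory keep the primary key of the original host in instance_id, which is why the branch above resolves event hostnames through two maps. The names and ids below are made up.

constructed_hosts = [
    {'name': 'web01', 'id': 901, 'instance_id': '42'},  # 901 = copy in the constructed inventory, 42 = original Host pk
]
# host_map points at the real hosts, constructed_host_map at their constructed copies
host_map = {h['name']: int(h['instance_id']) for h in constructed_hosts}
constructed_host_map = {h['name']: h['id'] for h in constructed_hosts}
assert host_map['web01'] == 42 and constructed_host_map['web01'] == 901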
@@ -589,26 +575,12 @@ class JobEvent(BasePlaybookEvent):
Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100) Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
# Create/update Host Metrics # bulk-create
self._update_host_metrics(updated_hosts_list) current_time = now()
HostMetric.objects.bulk_create(
@staticmethod [HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
def _update_host_metrics(updated_hosts_list):
from awx.main.models import HostMetric # circular import
# bulk-create
current_time = now()
HostMetric.objects.bulk_create(
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
)
# bulk-update
batch_start, batch_size = 0, 1000
while batch_start <= len(updated_hosts_list):
batched_host_list = updated_hosts_list[batch_start : (batch_start + batch_size)]
HostMetric.objects.filter(hostname__in=batched_host_list).update(
last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
) )
batch_start += batch_size HostMetric.objects.filter(hostname__in=updated_hosts_list).update(last_automation=current_time)
@property @property
def job_verbosity(self): def job_verbosity(self):
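
A standalone sketch (not part of the diff) of the batched upsert pattern used by _update_host_metrics above: insert missing HostMetric rows first, then bump counters in fixed-size batches. The wrapper function name is hypothetical.

from django.db import models
from django.utils.timezone import now

def record_automation(HostMetric, hostnames, batch_size=1000):
    current_time = now()
    # create rows only for hostnames never seen before; existing rows are untouched
    HostMetric.objects.bulk_create(
        [HostMetric(hostname=h, last_automation=current_time) for h in hostnames],
        ignore_conflicts=True,
        batch_size=100,
    )
    # update in slices so a very large job does not build one enormous IN clause
    for start in range(0, len(hostnames), batch_size):
        batch = hostnames[start:start + batch_size]
        HostMetric.objects.filter(hostname__in=batch).update(
            last_automation=current_time,
            automated_counter=models.F('automated_counter') + 1,
            deleted=False,
        )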

View File

@@ -9,8 +9,6 @@ import re
import copy import copy
import os.path import os.path
from urllib.parse import urljoin from urllib.parse import urljoin
import dateutil.relativedelta
import yaml import yaml
# Django # Django
@@ -19,7 +17,6 @@ from django.db import models, connection
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.db import transaction from django.db import transaction
from django.core.exceptions import ValidationError from django.core.exceptions import ValidationError
from django.urls import resolve
from django.utils.timezone import now from django.utils.timezone import now
from django.db.models import Q from django.db.models import Q
@@ -35,7 +32,7 @@ from awx.main.fields import (
SmartFilterField, SmartFilterField,
OrderedManyToManyField, OrderedManyToManyField,
) )
from awx.main.managers import HostManager, HostMetricActiveManager from awx.main.managers import HostManager
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
@@ -52,25 +49,15 @@ from awx.main.models.notifications import (
from awx.main.models.credential.injectors import _openstack_data from awx.main.models.credential.injectors import _openstack_data
from awx.main.utils import _inventory_updates from awx.main.utils import _inventory_updates
from awx.main.utils.safe_yaml import sanitize_jinja from awx.main.utils.safe_yaml import sanitize_jinja
from awx.main.utils.execution_environments import to_container_path, get_control_plane_execution_environment from awx.main.utils.execution_environments import to_container_path
from awx.main.utils.licensing import server_product_name from awx.main.utils.licensing import server_product_name
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership', 'HostMetric', 'HostMetricSummaryMonthly'] __all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
logger = logging.getLogger('awx.main.models.inventory') logger = logging.getLogger('awx.main.models.inventory')
class InventoryConstructedInventoryMembership(models.Model):
constructed_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE, related_name='constructed_inventory_memberships')
input_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)
class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin): class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
""" """
an inventory source contains lists and hosts. an inventory source contains lists and hosts.
@@ -80,7 +67,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
KIND_CHOICES = [ KIND_CHOICES = [
('', _('Hosts have a direct link to this inventory.')), ('', _('Hosts have a direct link to this inventory.')),
('smart', _('Hosts for inventory generated using the host_filter property.')), ('smart', _('Hosts for inventory generated using the host_filter property.')),
('constructed', _('Parse list of source inventories with the constructed inventory plugin.')),
] ]
class Meta: class Meta:
@@ -153,14 +139,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
default=None, default=None,
help_text=_('Filter that will be applied to the hosts of this inventory.'), help_text=_('Filter that will be applied to the hosts of this inventory.'),
) )
input_inventories = OrderedManyToManyField(
'Inventory',
blank=True,
through_fields=('constructed_inventory', 'input_inventory'),
related_name='destination_inventories',
help_text=_('Only valid for constructed inventories, this links to the inventories that will be used.'),
through='InventoryConstructedInventoryMembership',
)
instance_groups = OrderedManyToManyField( instance_groups = OrderedManyToManyField(
'InstanceGroup', 'InstanceGroup',
blank=True, blank=True,
@@ -209,14 +187,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
) )
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
if request is not None:
# circular import
from awx.api.urls.inventory import constructed_inventory_urls
route = resolve(request.path_info)
if any(route.url_name == url.name for url in constructed_inventory_urls):
return reverse('api:constructed_inventory_detail', kwargs={'pk': self.pk}, request=request)
return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
variables_dict = VarsDictProperty('variables') variables_dict = VarsDictProperty('variables')
@@ -368,12 +338,13 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
for host in hosts: for host in hosts:
data['_meta']['hostvars'][host.name] = host.variables_dict data['_meta']['hostvars'][host.name] = host.variables_dict
if towervars: if towervars:
for prefix in ('host', 'tower'): tower_dict = dict(
tower_dict = { remote_tower_enabled=str(host.enabled).lower(),
f'remote_{prefix}_enabled': str(host.enabled).lower(), remote_tower_id=host.id,
f'remote_{prefix}_id': host.id, remote_host_enabled=str(host.enabled).lower(),
} remote_host_id=host.id,
data['_meta']['hostvars'][host.name].update(tower_dict) )
data['_meta']['hostvars'][host.name].update(tower_dict)
return data return data
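
Illustration only (not part of the diff): for an enabled host with id 42, the towervars block above adds both the legacy 'tower' names and the 'host' names to that host's hostvars.

expected_hostvars_update = {
    'remote_host_enabled': 'true',
    'remote_host_id': 42,
    'remote_tower_enabled': 'true',
    'remote_tower_id': 42,
}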
@@ -460,24 +431,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
connection.on_commit(on_commit) connection.on_commit(on_commit)
def _enforce_constructed_source(self):
"""
Constructed inventories should always have exactly one inventory source of the constructed type;
this method enforces that requirement.
"""
if self.kind == 'constructed':
if not self.inventory_sources.exists():
self.inventory_sources.create(
source='constructed', name=f'Auto-created source for: {self.name}'[:512], overwrite=True, overwrite_vars=True, update_on_launch=True
)
def save(self, *args, **kwargs): def save(self, *args, **kwargs):
self._update_host_smart_inventory_memeberships() self._update_host_smart_inventory_memeberships()
super(Inventory, self).save(*args, **kwargs) super(Inventory, self).save(*args, **kwargs)
if self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite': if self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite':
# Minimal update of host_count for smart inventory host filter changes # Minimal update of host_count for smart inventory host filter changes
self.update_computed_fields() self.update_computed_fields()
self._enforce_constructed_source()
def delete(self, *args, **kwargs): def delete(self, *args, **kwargs):
self._update_host_smart_inventory_memeberships() self._update_host_smart_inventory_memeberships()
@@ -861,64 +820,9 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
class HostMetric(models.Model): class HostMetric(models.Model):
hostname = models.CharField(unique=True, max_length=512) hostname = models.CharField(primary_key=True, max_length=512)
first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against')) first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against'))
last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against')) last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against'))
last_deleted = models.DateTimeField(null=True, db_index=True, help_text=_('When the host was last deleted'))
automated_counter = models.BigIntegerField(default=0, help_text=_('How many times was the host automated'))
deleted_counter = models.IntegerField(default=0, help_text=_('How many times was the host deleted'))
deleted = models.BooleanField(
default=False, help_text=_('Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption')
)
used_in_inventories = models.IntegerField(null=True, help_text=_('How many inventories contain this host'))
objects = models.Manager()
active_objects = HostMetricActiveManager()
def get_absolute_url(self, request=None):
return reverse('api:host_metric_detail', kwargs={'pk': self.pk}, request=request)
def soft_delete(self):
if not self.deleted:
self.deleted_counter = (self.deleted_counter or 0) + 1
self.last_deleted = now()
self.deleted = True
self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])
def soft_restore(self):
if self.deleted:
self.deleted = False
self.save(update_fields=['deleted'])
@classmethod
def cleanup_task(cls, months_ago):
try:
months_ago = int(months_ago)
if months_ago <= 0:
raise ValueError()
last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
)
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
except (TypeError, ValueError):
logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")
class HostMetricSummaryMonthly(models.Model):
"""
HostMetric summaries computed by scheduled task <TODO> monthly
"""
date = models.DateField(unique=True)
license_consumed = models.BigIntegerField(default=0, help_text=_("How many unique hosts are consumed from the license"))
license_capacity = models.BigIntegerField(default=0, help_text=_("License capacity as max. number of unique hosts"))
hosts_added = models.IntegerField(default=0, help_text=_("How many hosts were added in the associated month, consuming more license capacity"))
hosts_deleted = models.IntegerField(default=0, help_text=_("How many hosts were deleted in the associated month, freeing the license capacity"))
indirectly_managed_hosts = models.IntegerField(default=0, help_text=("Manually entered number of indirectly managed hosts for a certain month"))
class InventorySourceOptions(BaseModel): class InventorySourceOptions(BaseModel):
@@ -930,7 +834,6 @@ class InventorySourceOptions(BaseModel):
SOURCE_CHOICES = [ SOURCE_CHOICES = [
('file', _('File, Directory or Script')), ('file', _('File, Directory or Script')),
('constructed', _('Template additional groups and hostvars at runtime')),
('scm', _('Sourced from a Project')), ('scm', _('Sourced from a Project')),
('ec2', _('Amazon EC2')), ('ec2', _('Amazon EC2')),
('gce', _('Google Compute Engine')), ('gce', _('Google Compute Engine')),
@@ -1010,7 +913,7 @@ class InventorySourceOptions(BaseModel):
host_filter = models.TextField( host_filter = models.TextField(
blank=True, blank=True,
default='', default='',
help_text=_('This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.'), help_text=_('Regex where only matching hosts will be imported.'),
) )
overwrite = models.BooleanField( overwrite = models.BooleanField(
default=False, default=False,
@@ -1030,21 +933,6 @@ class InventorySourceOptions(BaseModel):
blank=True, blank=True,
default=1, default=1,
) )
limit = models.TextField(
blank=True,
default='',
help_text=_("Enter host, group or pattern match"),
)
def resolve_execution_environment(self):
"""
Project updates, themselves, will use the control plane execution environment.
Jobs using the project can use the default_environment, but the project updates
are not flexible enough to allow customizing the image they use.
"""
if self.inventory.kind == 'constructed':
return get_control_plane_execution_environment()
return super().resolve_execution_environment()
@staticmethod @staticmethod
def cloud_credential_validation(source, cred): def cloud_credential_validation(source, cred):
@@ -1479,8 +1367,8 @@ class PluginFileInjector(object):
def build_env(self, inventory_update, env, private_data_dir, private_data_files): def build_env(self, inventory_update, env, private_data_dir, private_data_files):
injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files) injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
env.update(injector_env) env.update(injector_env)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection # Preserves current behavior for Ansible change in default planned for 2.10
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto' env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
return env return env
def _get_shared_env(self, inventory_update, private_data_dir, private_data_files): def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
@@ -1664,18 +1552,5 @@ class insights(PluginFileInjector):
use_fqcn = True use_fqcn = True
class constructed(PluginFileInjector):
plugin_name = 'constructed'
namespace = 'ansible'
collection = 'builtin'
def build_env(self, *args, **kwargs):
env = super().build_env(*args, **kwargs)
# Enable script inventory plugin so we pick up the script files from source inventories
env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
return env
for cls in PluginFileInjector.__subclasses__(): for cls in PluginFileInjector.__subclasses__():
InventorySourceOptions.injectors[cls.__name__] = cls InventorySourceOptions.injectors[cls.__name__] = cls
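
A minimal sketch (not part of the diff) of the subclass-registry pattern used above, where every PluginFileInjector subclass is looked up by its class name; the classes here are stand-ins.

class PluginFileInjector:
    plugin_name = None

class ec2(PluginFileInjector):
    plugin_name = 'aws_ec2'

class gce(PluginFileInjector):
    plugin_name = 'gcp_compute'

# the class name doubles as the inventory source name, so lookups stay a plain dict access
injectors = {cls.__name__: cls for cls in PluginFileInjector.__subclasses__()}
assert injectors['ec2'] is ec2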

View File

@@ -2,8 +2,12 @@
# All Rights Reserved. # All Rights Reserved.
# Python # Python
import codecs
import datetime
import logging import logging
import os
import time import time
import json
from urllib.parse import urljoin from urllib.parse import urljoin
@@ -11,9 +15,11 @@ from urllib.parse import urljoin
from django.conf import settings from django.conf import settings
from django.core.exceptions import ValidationError from django.core.exceptions import ValidationError
from django.db import models from django.db import models
from django.db.models.functions import Cast from django.db.models.query import QuerySet
# from django.core.cache import cache # from django.core.cache import cache
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.core.exceptions import FieldDoesNotExist from django.core.exceptions import FieldDoesNotExist
@@ -22,7 +28,6 @@ from rest_framework.exceptions import ParseError
# AWX # AWX
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.constants import HOST_FACTS_FIELDS
from awx.main.models.base import ( from awx.main.models.base import (
BaseModel, BaseModel,
CreatedModifiedModel, CreatedModifiedModel,
@@ -39,7 +44,7 @@ from awx.main.models.notifications import (
NotificationTemplate, NotificationTemplate,
JobNotificationMixin, JobNotificationMixin,
) )
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
@@ -55,6 +60,8 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES
logger = logging.getLogger('awx.main.models.jobs') logger = logging.getLogger('awx.main.models.jobs')
analytics_logger = logging.getLogger('awx.analytics.job_events')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob'] __all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']
@@ -571,7 +578,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
default=None, default=None,
on_delete=models.SET_NULL, on_delete=models.SET_NULL,
) )
hosts = models.ManyToManyField('Host', related_name='jobs', editable=False, through='JobHostSummary', through_fields=('job', 'host')) hosts = models.ManyToManyField(
'Host',
related_name='jobs',
editable=False,
through='JobHostSummary',
)
artifacts = JSONBlob( artifacts = JSONBlob(
default=dict, default=dict,
blank=True, blank=True,
@@ -819,9 +831,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
for name in JOB_VARIABLE_PREFIXES: for name in JOB_VARIABLE_PREFIXES:
r['{}_job_template_id'.format(name)] = self.job_template.pk r['{}_job_template_id'.format(name)] = self.job_template.pk
r['{}_job_template_name'.format(name)] = self.job_template.name r['{}_job_template_name'.format(name)] = self.job_template.name
if self.execution_node:
for name in JOB_VARIABLE_PREFIXES:
r['{}_execution_node'.format(name)] = self.execution_node
return r return r
''' '''
@@ -836,26 +845,109 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
def get_notification_friendly_name(self): def get_notification_friendly_name(self):
return "Job" return "Job"
def get_hosts_for_fact_cache(self): def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
""" """Return value is an iterable for the relevant hosts for this job"""
Builds the queryset to use for writing or finalizing the fact cache if not self.inventory:
these need to be the 'real' hosts associated with the job. return []
For constructed inventories, that means the original (input inventory) hosts host_queryset = self.inventory.hosts.only(*only)
when slicing, that means only returning hosts in that slice if filters:
""" host_queryset = host_queryset.filter(**filters)
Host = JobHostSummary._meta.get_field('host').related_model host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
if not self.inventory_id: if isinstance(host_queryset, QuerySet):
return Host.objects.none() return host_queryset.iterator()
return host_queryset
if self.inventory.kind == 'constructed': @log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
id_field = Host._meta.get_field('id') def start_job_fact_cache(self, destination, log_data, timeout=None):
host_qs = Host.objects.filter(id__in=self.inventory.hosts.exclude(instance_id='').values_list(Cast('instance_id', output_field=id_field))) self.log_lifecycle("start_job_fact_cache")
log_data['job_id'] = self.id
log_data['written_ct'] = 0
os.makedirs(destination, mode=0o700)
if timeout is None:
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
if timeout > 0:
# exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT seconds`
timeout = now() - datetime.timedelta(seconds=timeout)
hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
else: else:
host_qs = self.inventory.hosts hosts = self._get_inventory_hosts()
host_qs = host_qs.only(*HOST_FACTS_FIELDS) last_filepath_written = None
host_qs = self.inventory.get_sliced_hosts(host_qs, self.job_slice_number, self.job_slice_count) for host in hosts:
return host_qs filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
try:
with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1
last_filepath_written = filepath
except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
# make note of the time we wrote the last file so we can check if any file changed later
if last_filepath_written:
return os.path.getmtime(last_filepath_written)
return None
@log_excess_runtime(
logger,
debug_cutoff=0.01,
msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True,
)
def finish_job_fact_cache(self, destination, facts_write_time, log_data):
self.log_lifecycle("finish_job_fact_cache")
log_data['job_id'] = self.id
log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0
hosts_to_update = []
for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run...
modified = os.path.getmtime(filepath)
if (not facts_write_time) or modified > facts_write_time:
with codecs.open(filepath, 'r', encoding='utf-8') as f:
try:
ansible_facts = json.load(f)
except ValueError:
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info(
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict(
inventory_id=host.inventory.id,
host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
job_id=self.id,
),
)
log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else:
# if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {}
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1
if len(hosts_to_update) > 100:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = []
if hosts_to_update:
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
class LaunchTimeConfigBase(BaseModel): class LaunchTimeConfigBase(BaseModel):
@@ -1077,15 +1169,6 @@ class JobHostSummary(CreatedModifiedModel):
editable=False, editable=False,
) )
host = models.ForeignKey('Host', related_name='job_host_summaries', null=True, default=None, on_delete=models.SET_NULL, editable=False) host = models.ForeignKey('Host', related_name='job_host_summaries', null=True, default=None, on_delete=models.SET_NULL, editable=False)
constructed_host = models.ForeignKey(
'Host',
related_name='constructed_host_summaries',
null=True,
default=None,
on_delete=models.SET_NULL,
editable=False,
help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
)
host_name = models.CharField( host_name = models.CharField(
max_length=1024, max_length=1024,

View File

@@ -284,7 +284,7 @@ class JobNotificationMixin(object):
'workflow_url', 'workflow_url',
'scm_branch', 'scm_branch',
'artifacts', 'artifacts',
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark', 'processed', 'rescued', 'ignored']}, {'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark' 'processed', 'rescued', 'ignored']},
{ {
'summary_fields': [ 'summary_fields': [
{ {

View File

@@ -32,7 +32,7 @@ from polymorphic.models import PolymorphicModel
# AWX # AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.control import Control as ControlDispatcher from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.registrar import activity_stream_registrar from awx.main.registrar import activity_stream_registrar
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
@@ -1567,7 +1567,7 @@ class UnifiedJob(
return r return r
def get_queue_name(self): def get_queue_name(self):
return self.controller_node or self.execution_node or get_task_queuename() return self.controller_node or self.execution_node or get_local_queuename()
@property @property
def is_container_group_task(self): def is_container_group_task(self):

View File

@@ -28,7 +28,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
websocket_urlpatterns = [ websocket_urlpatterns = [
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()), re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
] ]
application = AWXProtocolTypeRouter( application = AWXProtocolTypeRouter(

View File

@@ -8,7 +8,7 @@ from django.conf import settings
from awx import MODE from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
logger = logging.getLogger('awx.main.scheduler') logger = logging.getLogger('awx.main.scheduler')
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
manager().schedule() manager().schedule()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def task_manager(): def task_manager():
run_manager(TaskManager, "task") run_manager(TaskManager, "task")
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def dependency_manager(): def dependency_manager():
run_manager(DependencyManager, "dependency") run_manager(DependencyManager, "dependency")
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def workflow_manager(): def workflow_manager():
run_manager(WorkflowManager, "workflow") run_manager(WorkflowManager, "workflow")

View File

@@ -1,117 +0,0 @@
import codecs
import datetime
import os
import json
import logging
# Django
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now
# AWX
from awx.main.utils.common import log_excess_runtime
from awx.main.models.inventory import Host
logger = logging.getLogger('awx.main.tasks.facts')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
log_data['inventory_id'] = inventory_id
log_data['written_ct'] = 0
try:
os.makedirs(destination, mode=0o700)
except FileExistsError:
pass
if timeout is None:
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
if isinstance(hosts, QuerySet):
hosts = hosts.iterator()
last_filepath_written = None
for host in hosts:
if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
continue # facts are expired - do not write them
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
try:
with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1
last_filepath_written = filepath
except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
# make note of the time we wrote the last file so we can check if any file changed later
if last_filepath_written:
return os.path.getmtime(last_filepath_written)
return None
@log_excess_runtime(
logger,
debug_cutoff=0.01,
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True,
)
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
log_data['inventory_id'] = inventory_id
log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0
if isinstance(hosts, QuerySet):
hosts = hosts.iterator()
hosts_to_update = []
for host in hosts:
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run...
modified = os.path.getmtime(filepath)
if (not facts_write_time) or modified > facts_write_time:
with codecs.open(filepath, 'r', encoding='utf-8') as f:
try:
ansible_facts = json.load(f)
except ValueError:
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info(
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict(
inventory_id=host.inventory.id,
host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
job_id=job_id,
),
)
log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else:
# if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {}
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1
if len(hosts_to_update) > 100:
Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = []
if hosts_to_update:
Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
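
A sketch (not part of the diff) of how the two helpers above bracket a playbook run: facts are written to disk beforehand and folded back into the hosts afterwards. The wrapper function and directory layout are illustrative; the helper signatures follow the calls shown elsewhere in this diff.

import os

def run_with_fact_cache(job, private_data_dir):
    destination = os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache')
    write_time = start_fact_cache(
        job.get_hosts_for_fact_cache(), destination, inventory_id=job.inventory_id
    )
    try:
        ...  # run the playbook via ansible-runner here
    finally:
        finish_fact_cache(
            job.get_hosts_for_fact_cache(),
            destination,
            facts_write_time=write_time,
            job_id=job.id,
            inventory_id=job.inventory_id,
        )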

View File

@@ -29,7 +29,7 @@ from gitdb.exc import BadName as BadGitName
# AWX # AWX
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
from awx.main.constants import ( from awx.main.constants import (
PRIVILEGE_ESCALATION_METHODS, PRIVILEGE_ESCALATION_METHODS,
STANDARD_INVENTORY_UPDATE_ENV, STANDARD_INVENTORY_UPDATE_ENV,
@@ -37,7 +37,6 @@ from awx.main.constants import (
MAX_ISOLATED_PATH_COLON_DELIMITER, MAX_ISOLATED_PATH_COLON_DELIMITER,
CONTAINER_VOLUMES_MOUNT_TYPES, CONTAINER_VOLUMES_MOUNT_TYPES,
ACTIVE_STATES, ACTIVE_STATES,
HOST_FACTS_FIELDS,
) )
from awx.main.models import ( from awx.main.models import (
Instance, Instance,
@@ -64,7 +63,6 @@ from awx.main.tasks.callback import (
) )
from awx.main.tasks.signals import with_signal_handling, signal_callback from awx.main.tasks.signals import with_signal_handling, signal_callback
from awx.main.tasks.receptor import AWXReceptorJob from awx.main.tasks.receptor import AWXReceptorJob
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.utils.ansible import read_ansible_config from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path
@@ -317,22 +315,17 @@ class BaseTask(object):
return env return env
def write_inventory_file(self, inventory, private_data_dir, file_name, script_params):
script_data = inventory.get_script_data(**script_params)
for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items():
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.runner_callback.host_map[hostname] = hv.get('remote_tower_id', '')
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
return self.write_private_data_file(private_data_dir, file_name, file_content, sub_dir='inventory', file_permissions=0o700)
def build_inventory(self, instance, private_data_dir): def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True) script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'): if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
return self.write_inventory_file(instance.inventory, private_data_dir, 'hosts', script_params) # maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)
def build_args(self, instance, private_data_dir, passwords): def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError raise NotImplementedError
@@ -457,9 +450,6 @@ class BaseTask(object):
instance.ansible_version = ansible_version_info instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version']) instance.save(update_fields=['ansible_version'])
def should_use_fact_cache(self):
return False
@with_path_cleanup @with_path_cleanup
@with_signal_handling @with_signal_handling
def run(self, pk, **kwargs): def run(self, pk, **kwargs):
@@ -558,8 +548,7 @@ class BaseTask(object):
params['module'] = self.build_module_name(self.instance) params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance) params['module_args'] = self.build_module_args(self.instance)
# TODO: refactor into a better BasTask method if getattr(self.instance, 'use_fact_cache', False):
if self.should_use_fact_cache():
# Enable Ansible fact cache. # Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile' params['fact_cache_type'] = 'jsonfile'
else: else:
@@ -806,7 +795,7 @@ class SourceControlMixin(BaseTask):
self.release_lock(project) self.release_lock(project)
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
class RunJob(SourceControlMixin, BaseTask): class RunJob(SourceControlMixin, BaseTask):
""" """
Run a job using ansible-playbook. Run a job using ansible-playbook.
@@ -1014,9 +1003,6 @@ class RunJob(SourceControlMixin, BaseTask):
return args return args
def should_use_fact_cache(self):
return self.instance.use_fact_cache
def build_playbook_path_relative_to_cwd(self, job, private_data_dir): def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook return job.playbook
@@ -1082,11 +1068,8 @@ class RunJob(SourceControlMixin, BaseTask):
# Fetch "cached" fact data from prior runs and put on the disk # Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it # where ansible expects to find it
if self.should_use_fact_cache(): if job.use_fact_cache:
job.log_lifecycle("start_job_fact_cache") self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))
self.facts_write_time = start_fact_cache(
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
)
def build_project_dir(self, job, private_data_dir): def build_project_dir(self, job, private_data_dir):
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch) self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
@@ -1100,14 +1083,10 @@ class RunJob(SourceControlMixin, BaseTask):
# actual `run()` call; this _usually_ means something failed in # actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method # the pre_run_hook method
return return
if self.should_use_fact_cache(): if job.use_fact_cache:
job.log_lifecycle("finish_job_fact_cache") job.finish_job_fact_cache(
finish_fact_cache(
job.get_hosts_for_fact_cache(),
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
facts_write_time=self.facts_write_time, self.facts_write_time,
job_id=job.id,
inventory_id=job.inventory_id,
) )
def final_run_hook(self, job, status, private_data_dir): def final_run_hook(self, job, status, private_data_dir):
@@ -1121,7 +1100,7 @@ class RunJob(SourceControlMixin, BaseTask):
update_inventory_computed_fields.delay(inventory.id) update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask): class RunProjectUpdate(BaseTask):
model = ProjectUpdate model = ProjectUpdate
event_model = ProjectUpdateEvent event_model = ProjectUpdateEvent
@@ -1443,7 +1422,7 @@ class RunProjectUpdate(BaseTask):
return params return params
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask): class RunInventoryUpdate(SourceControlMixin, BaseTask):
model = InventoryUpdate model = InventoryUpdate
event_model = InventoryUpdateEvent event_model = InventoryUpdateEvent
@@ -1490,6 +1469,8 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
if injector is not None: if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files) env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm': if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict: for env_k in inventory_update.source_vars_dict:
@@ -1542,22 +1523,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
args = ['ansible-inventory', '--list', '--export'] args = ['ansible-inventory', '--list', '--export']
# special case for constructed inventories: we pass source inventories from the database
# these must come in order, and _before_ the constructed inventory itself
if inventory_update.inventory.kind == 'constructed':
inventory_update.log_lifecycle("start_job_fact_cache")
for input_inventory in inventory_update.inventory.input_inventories.all():
args.append('-i')
script_params = dict(hostvars=True, towervars=True)
source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)
args.append(to_container_path(source_inv_path, private_data_dir))
# Include any facts from input inventories so they can be used in filters
start_fact_cache(
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
inventory_id=input_inventory.id,
)
# Add arguments for the source inventory file/script/thing # Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir) rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path) container_location = os.path.join(CONTAINER_ROOT, rel_path)
@@ -1565,11 +1530,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
args.append('-i') args.append('-i')
args.append(container_location) args.append(container_location)
# Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596
# limit should be usable in ansible-inventory 2.15+
if inventory_update.limit:
args.append('--limit')
args.append(inventory_update.limit)
args.append('--output') args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json')) args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
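
Illustration only (not part of the diff): for a constructed inventory with two input inventories, the argument building above would produce a command roughly like the list below; the ids and container paths are made up.

args = [
    'ansible-inventory', '--list', '--export',
    '-i', '/runner/inventory/hosts_7',        # input inventories first, in order
    '-i', '/runner/inventory/hosts_9',
    '-i', '/runner/inventory/constructed',    # then the constructed source itself
    '--limit', 'web*',                        # only added when the update has a limit
    '--output', '/runner/artifacts/42/output.json',
]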
@@ -1585,9 +1545,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
return args return args
def should_use_fact_cache(self):
return bool(self.instance.source == 'constructed')
def build_inventory(self, inventory_update, private_data_dir): def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory return None # what runner expects in order to not deal with inventory
@@ -1706,7 +1663,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc()) raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask): class RunAdHocCommand(BaseTask):
""" """
Run an ad hoc command using ansible. Run an ad hoc command using ansible.
@@ -1859,7 +1816,7 @@ class RunAdHocCommand(BaseTask):
return d return d
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
class RunSystemJob(BaseTask): class RunSystemJob(BaseTask):
model = SystemJob model = SystemJob
event_model = SystemJobEvent event_model = SystemJobEvent

View File

@@ -28,7 +28,7 @@ from awx.main.utils.common import (
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob from awx.main.models import Instance, InstanceLink, UnifiedJob
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
# Receptorctl # Receptorctl
@@ -639,7 +639,7 @@ class AWXReceptorJob:
# #
RECEPTOR_CONFIG_STARTER = ( RECEPTOR_CONFIG_STARTER = (
{'local-only': None}, {'local-only': None},
{'log-level': 'info'}, {'log-level': 'debug'},
{'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}}, {'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
{'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}}, {'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
{'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}}, {'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
@@ -668,7 +668,6 @@ RECEPTOR_CONFIG_STARTER = (
'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt', 'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
'cert': '/etc/receptor/tls/receptor.crt', 'cert': '/etc/receptor/tls/receptor.crt',
'key': '/etc/receptor/tls/receptor.key', 'key': '/etc/receptor/tls/receptor.key',
'mintls13': False,
} }
}, },
) )
@@ -713,7 +712,7 @@ def write_receptor_config():
links.update(link_state=InstanceLink.States.ESTABLISHED) links.update(link_state=InstanceLink.States.ESTABLISHED)
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def remove_deprovisioned_node(hostname): def remove_deprovisioned_node(hostname):
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)

View File

@@ -47,11 +47,10 @@ from awx.main.models import (
Inventory, Inventory,
SmartInventoryMembership, SmartInventoryMembership,
Job, Job,
HostMetric,
) )
from awx.main.constants import ACTIVE_STATES from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename, reaper from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import ( from awx.main.utils.common import (
get_type_for_model, get_type_for_model,
ignore_inventory_computed_fields, ignore_inventory_computed_fields,
@@ -60,6 +59,7 @@ from awx.main.utils.common import (
ScheduleTaskManager, ScheduleTaskManager,
) )
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.reload import stop_local_services from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock from awx.main.utils.pglock import advisory_lock
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
@@ -115,6 +115,9 @@ def dispatch_startup():
m = Metrics() m = Metrics()
m.reset_values() m.reset_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown(): def inform_cluster_of_shutdown():
try: try:
@@ -129,7 +132,7 @@ def inform_cluster_of_shutdown():
logger.exception('Encountered problem with normal shutdown signal.') logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def apply_cluster_membership_policies(): def apply_cluster_membership_policies():
from awx.main.signals import disable_activity_stream from awx.main.signals import disable_activity_stream
@@ -241,10 +244,8 @@ def apply_cluster_membership_policies():
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute)) logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_settings_change') @task(queue='tower_broadcast_all')
def clear_setting_cache(setting_keys): def handle_setting_changes(setting_keys):
# log that cache is being cleared
logger.info(f"clear_setting_cache of keys {setting_keys}")
orig_len = len(setting_keys) orig_len = len(setting_keys)
for i in range(orig_len): for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]): for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
@@ -253,6 +254,9 @@ def clear_setting_cache(setting_keys):
logger.debug('cache delete_many(%r)', cache_keys) logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys) cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all') @task(queue='tower_broadcast_all')
def delete_project_files(project_path): def delete_project_files(project_path):
@@ -282,7 +286,7 @@ def profile_sql(threshold=1, minutes=1):
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes)) logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None): def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list): if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list") raise TypeError("notification_list should be of type list")
@@ -313,7 +317,7 @@ def send_notifications(notification_list, job_id=None):
logger.exception('Error saving notification {} result.'.format(notification.id)) logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def gather_analytics(): def gather_analytics():
from awx.conf.models import Setting from awx.conf.models import Setting
from rest_framework.fields import DateTimeField from rest_framework.fields import DateTimeField
@@ -326,7 +330,7 @@ def gather_analytics():
analytics.gather() analytics.gather()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def purge_old_stdout_files(): def purge_old_stdout_files():
nowtime = time.time() nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT): for f in os.listdir(settings.JOBOUTPUT_ROOT):
@@ -374,26 +378,12 @@ def handle_removed_image(remove_images=None):
_cleanup_images_and_files(remove_images=remove_images, file_pattern='') _cleanup_images_and_files(remove_images=remove_images, file_pattern='')
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def cleanup_images_and_files(): def cleanup_images_and_files():
_cleanup_images_and_files() _cleanup_images_and_files()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def cleanup_host_metrics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None
cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
HostMetric.cleanup_task(months_ago)
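
A standalone sketch (not part of the diff) of the once-per-interval gate used by cleanup_host_metrics above; the helper name is hypothetical.

import datetime
from django.utils.timezone import now

def should_run_cleanup(last_time, interval_days=30):
    # last_time is None on the first run, otherwise the timestamp of the previous cleanup
    if last_time is None:
        return True
    return (now() - last_time).total_seconds() > interval_days * 86400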
@task(queue=get_task_queuename)
def cluster_node_health_check(node): def cluster_node_health_check(node):
""" """
Used for the health check endpoint, refreshes the status of the instance, but must be run on target node Used for the health check endpoint, refreshes the status of the instance, but must be run on target node
@@ -412,7 +402,7 @@ def cluster_node_health_check(node):
this_inst.local_health_check() this_inst.local_health_check()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def execution_node_health_check(node): def execution_node_health_check(node):
if node == '': if node == '':
logger.warning('Remote health check incorrectly called with blank string') logger.warning('Remote health check incorrectly called with blank string')
@@ -506,7 +496,7 @@ def inspect_execution_nodes(instance_list):
execution_node_health_check.apply_async([hostname]) execution_node_health_check.apply_async([hostname])
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks']) @task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None): def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
logger.debug("Cluster node heartbeat task.") logger.debug("Cluster node heartbeat task.")
nowtime = now() nowtime = now()
@@ -596,7 +586,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time)) reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def awx_receptor_workunit_reaper(): def awx_receptor_workunit_reaper():
""" """
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
@@ -632,7 +622,7 @@ def awx_receptor_workunit_reaper():
administrative_workunit_reaper(receptor_work_list) administrative_workunit_reaper(receptor_work_list)
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def awx_k8s_reaper(): def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK: if not settings.RECEPTOR_RELEASE_WORK:
return return
@@ -652,7 +642,7 @@ def awx_k8s_reaper():
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group)) logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def awx_periodic_scheduler(): def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired: with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False: if acquired is False:
@@ -718,7 +708,7 @@ def schedule_manager_success_or_error(instance):
ScheduleWorkflowManager().schedule() ScheduleWorkflowManager().schedule()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def handle_work_success(task_actual): def handle_work_success(task_actual):
try: try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -730,7 +720,7 @@ def handle_work_success(task_actual):
schedule_manager_success_or_error(instance) schedule_manager_success_or_error(instance)
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def handle_work_error(task_actual): def handle_work_error(task_actual):
try: try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -770,7 +760,7 @@ def handle_work_error(task_actual):
schedule_manager_success_or_error(instance) schedule_manager_success_or_error(instance)
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id): def update_inventory_computed_fields(inventory_id):
""" """
Signal handler and wrapper around inventory.update_computed_fields to Signal handler and wrapper around inventory.update_computed_fields to
@@ -811,7 +801,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
return False return False
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def update_host_smart_inventory_memberships(): def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False) smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([]) changed_inventories = set([])
@@ -827,7 +817,7 @@ def update_host_smart_inventory_memberships():
smart_inventory.update_computed_fields() smart_inventory.update_computed_fields()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5): def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user # Delete inventory as user
if user_id is None: if user_id is None:
@@ -892,9 +882,16 @@ def _reconstruct_relationships(copy_mapping):
new_obj.save() new_obj.save()
@task(queue=get_task_queuename) @task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None): def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk)) logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None) model = getattr(importlib.import_module(model_module), model_name, None)
if model is None: if model is None:
@@ -906,28 +903,6 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
except ObjectDoesNotExist: except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.") logger.warning("Object or user no longer exists.")
return return
o2m_to_preserve = {}
fields_to_preserve = set(getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []))
for field in model._meta.get_fields():
if field.name in fields_to_preserve:
if field.one_to_many:
try:
field_val = getattr(obj, field.name)
except AttributeError:
continue
o2m_to_preserve[field.name] = field_val
sub_obj_list = []
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
sub_model = type(sub_obj)
sub_obj_list.append((sub_model.__module__, sub_model.__name__, sub_obj.pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream(): with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {} copy_mapping = {}
for sub_obj_setup in sub_obj_list: for sub_obj_setup in sub_obj_list:
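One side of this hunk snapshots the related sub-objects in the API view, stores the snapshot in the cache under a one-off key, and passes only that key to the task; the other side rebuilds the list inside the task itself. A rough sketch of the hand-off pattern, using a plain dict as a stand-in for the shared cache and hypothetical function names:

import uuid

SHARED_CACHE = {}  # stand-in for a real shared cache such as django.core.cache

def prepare_copy(sub_object_triples):
    # Request path: snapshot (module, class name, pk) triples and return the cache key.
    key = str(uuid.uuid4())
    SHARED_CACHE[key] = list(sub_object_triples)
    return key

def run_copy_task(key):
    # Worker path: bail out cleanly if the snapshot is missing or has expired.
    snapshot = SHARED_CACHE.get(key)
    if snapshot is None:
        print('deep copy failed unexpectedly: snapshot missing')
        return
    for module_name, class_name, pk in snapshot:
        pass  # copy each related object here

The point of the indirection is that the expensive relationship walk happens once in the request path, and the worker fails soft if the cached snapshot is no longer available.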

View File

@@ -1,8 +1,9 @@
{ {
"ANSIBLE_JINJA2_NATIVE": "True", "ANSIBLE_JINJA2_NATIVE": "True",
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AZURE_CLIENT_ID": "fooo", "AZURE_CLIENT_ID": "fooo",
"AZURE_CLOUD_ENVIRONMENT": "fooo", "AZURE_CLOUD_ENVIRONMENT": "fooo",
"AZURE_SECRET": "fooo", "AZURE_SECRET": "fooo",
"AZURE_SUBSCRIPTION_ID": "fooo", "AZURE_SUBSCRIPTION_ID": "fooo",
"AZURE_TENANT": "fooo" "AZURE_TENANT": "fooo"
} }

View File

@@ -1,4 +1,5 @@
{ {
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"TOWER_HOST": "https://foo.invalid", "TOWER_HOST": "https://foo.invalid",
"TOWER_PASSWORD": "fooo", "TOWER_PASSWORD": "fooo",
"TOWER_USERNAME": "fooo", "TOWER_USERNAME": "fooo",
@@ -9,4 +10,4 @@
"CONTROLLER_USERNAME": "fooo", "CONTROLLER_USERNAME": "fooo",
"CONTROLLER_OAUTH_TOKEN": "", "CONTROLLER_OAUTH_TOKEN": "",
"CONTROLLER_VERIFY_SSL": "False" "CONTROLLER_VERIFY_SSL": "False"
} }

View File

@@ -1,7 +1,8 @@
{ {
"ANSIBLE_JINJA2_NATIVE": "True", "ANSIBLE_JINJA2_NATIVE": "True",
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AWS_ACCESS_KEY_ID": "fooo", "AWS_ACCESS_KEY_ID": "fooo",
"AWS_SECRET_ACCESS_KEY": "fooo", "AWS_SECRET_ACCESS_KEY": "fooo",
"AWS_SECURITY_TOKEN": "fooo", "AWS_SECURITY_TOKEN": "fooo",
"AWS_SESSION_TOKEN": "fooo" "AWS_SESSION_TOKEN": "fooo"
} }

View File

@@ -1,5 +1,6 @@
{ {
"ANSIBLE_JINJA2_NATIVE": "True", "ANSIBLE_JINJA2_NATIVE": "True",
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}", "GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
"GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}", "GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}",
"GCP_AUTH_KIND": "serviceaccount", "GCP_AUTH_KIND": "serviceaccount",

View File

@@ -1,4 +1,5 @@
{ {
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"INSIGHTS_USER": "fooo", "INSIGHTS_USER": "fooo",
"INSIGHTS_PASSWORD": "fooo" "INSIGHTS_PASSWORD": "fooo"
} }

View File

@@ -1,3 +1,4 @@
{ {
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"OS_CLIENT_CONFIG_FILE": "{{ file_reference }}" "OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
} }

View File

@@ -1,6 +1,7 @@
{ {
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"OVIRT_INI_PATH": "{{ file_reference }}", "OVIRT_INI_PATH": "{{ file_reference }}",
"OVIRT_PASSWORD": "fooo", "OVIRT_PASSWORD": "fooo",
"OVIRT_URL": "https://foo.invalid", "OVIRT_URL": "https://foo.invalid",
"OVIRT_USERNAME": "fooo" "OVIRT_USERNAME": "fooo"
} }

View File

@@ -1,5 +1,6 @@
{ {
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"FOREMAN_PASSWORD": "fooo", "FOREMAN_PASSWORD": "fooo",
"FOREMAN_SERVER": "https://foo.invalid", "FOREMAN_SERVER": "https://foo.invalid",
"FOREMAN_USER": "fooo" "FOREMAN_USER": "fooo"
} }

View File

@@ -1,6 +1,7 @@
{ {
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"VMWARE_HOST": "https://foo.invalid", "VMWARE_HOST": "https://foo.invalid",
"VMWARE_PASSWORD": "fooo", "VMWARE_PASSWORD": "fooo",
"VMWARE_USER": "fooo", "VMWARE_USER": "fooo",
"VMWARE_VALIDATE_CERTS": "False" "VMWARE_VALIDATE_CERTS": "False"
} }
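Each of the injector env files above gains "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never", which tells Ansible not to rewrite invalid characters in inventory group names. As a rough illustration only (an assumption about the mechanism, not AWX internals), such env dictionaries end up overlaid on the environment of the inventory process:

import json
import os
import subprocess

def run_inventory_with_injected_env(env_file_path, inventory_path):
    # Load the rendered injector env file and overlay it on the current environment.
    with open(env_file_path) as f:
        injected = json.load(f)
    env = {**os.environ, **{k: str(v) for k, v in injected.items()}}
    return subprocess.run(
        ['ansible-inventory', '-i', inventory_path, '--list'],
        env=env,
        capture_output=True,
        text=True,
    )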

View File

@@ -1,86 +0,0 @@
import pytest
import requests
from awx.api.views.analytics import AnalyticsGenericView, MissingSettings, AUTOMATION_ANALYTICS_API_URL_PATH
from django.test.utils import override_settings
from awx.main.utils import get_awx_version
from django.utils import translation
class TestAnalyticsGenericView:
@pytest.mark.parametrize(
"existing_headers,expected_headers",
[
({}, {}),
({'Hey': 'There'}, {}), # We don't forward just any headers
({'Content-Type': 'text/html', 'Content-Length': '12'}, {'Content-Type': 'text/html', 'Content-Length': '12'}),
# Requests will auto-add the following headers (so we don't need to test them): 'Accept-Encoding', 'User-Agent', 'Accept'
],
)
def test__request_headers(self, existing_headers, expected_headers):
expected_headers['X-Rh-Analytics-Source'] = 'controller'
expected_headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
expected_headers['Accept-Language'] = translation.get_language()
request = requests.session()
request.headers.update(existing_headers)
assert set(expected_headers.items()).issubset(set(AnalyticsGenericView._request_headers(request).items()))
@pytest.mark.parametrize(
"path,expected_path",
[
('A/B', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/A/B'),
('B', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/B'),
('/a/b/c/analytics/reports/my_slug', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/reports/my_slug'),
('/a/b/c/analytics/', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/'),
('/a/b/c/analytics', f'{AUTOMATION_ANALYTICS_API_URL_PATH}//a/b/c/analytics'), # Because there is no ending / on analytics we get a weird condition
('/a/b/c/analytics/', f'{AUTOMATION_ANALYTICS_API_URL_PATH}/'),
],
)
@pytest.mark.django_db
def test__get_analytics_path(self, path, expected_path):
assert AnalyticsGenericView._get_analytics_path(path) == expected_path
@pytest.mark.django_db
def test__get_analytics_url_no_url(self):
with override_settings(AUTOMATION_ANALYTICS_URL=None):
with pytest.raises(MissingSettings):
agw = AnalyticsGenericView()
agw._get_analytics_url('A')
@pytest.mark.parametrize(
"request_path,ending_url",
[
('A', 'A'),
('A/B', 'A/B'),
('A/B/analytics/', ''),  # we split on analytics, but because there is nothing after it the tail is empty
('A/B/analytics/report', 'report'),
('A/B/analytics/report/slug', 'report/slug'),
],
)
@pytest.mark.django_db
def test__get_analytics_url(self, request_path, ending_url):
base_url = 'http://testing'
with override_settings(AUTOMATION_ANALYTICS_URL=base_url):
agw = AnalyticsGenericView()
assert agw._get_analytics_url(request_path) == f'{base_url}{AUTOMATION_ANALYTICS_API_URL_PATH}/{ending_url}'
@pytest.mark.parametrize(
"setting_name,setting_value,raises",
[
('INSIGHTS_TRACKING_STATE', None, True),
('INSIGHTS_TRACKING_STATE', False, True),
('INSIGHTS_TRACKING_STATE', True, False),
('INSIGHTS_TRACKING_STATE', 'Steve', False),
('INSIGHTS_TRACKING_STATE', 1, False),
('INSIGHTS_TRACKING_STATE', '', True),
],
)
@pytest.mark.django_db
def test__get_setting(self, setting_name, setting_value, raises):
with override_settings(**{setting_name: setting_value}):
if raises:
with pytest.raises(MissingSettings):
AnalyticsGenericView._get_setting(setting_name, False, None)
else:
assert AnalyticsGenericView._get_setting(setting_name, False, None) == setting_value
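The parametrized cases above pin down how an incoming request path is mapped onto the remote Analytics API path, including the odd double-slash case when the path has no trailing slash after 'analytics'. A toy reimplementation inferred purely from those cases (the base-path constant here is a placeholder, not the real value):

AUTOMATION_ANALYTICS_API_URL_PATH = '/api/placeholder/v1'  # placeholder value

def analytics_path(request_path):
    # Keep only what follows the last 'analytics/' segment; when that marker is
    # absent the whole path is passed through, which produces the '//' oddity.
    parts = request_path.split('analytics/')
    tail = parts[-1] if len(parts) > 1 else request_path
    return f'{AUTOMATION_ANALYTICS_API_URL_PATH}/{tail}'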

View File

@@ -1,23 +0,0 @@
import pytest
from awx.settings.application_name import get_service_name, set_application_name
@pytest.mark.parametrize(
'argv,result',
(
([], None),
(['-m'], None),
(['-m', 'python'], None),
(['-m', 'python', 'manage'], None),
(['-m', 'python', 'manage', 'a'], 'a'),
(['-m', 'python', 'manage', 'b', 'a'], 'b'),
(['-m', 'python', 'manage', 'run_something', 'b', 'a'], 'something'),
),
)
def test_get_service_name(argv, result):
assert get_service_name(argv) == result
@pytest.mark.parametrize('DATABASES,CLUSTER_ID,function', (({}, 12, ''), ({'default': {'ENGINE': 'sqllite3'}}, 12, '')))
def test_set_application_name(DATABASES, CLUSTER_ID, function):
set_application_name(DATABASES, CLUSTER_ID, function)
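Read together, the cases above say the service name is the first argument after 'manage', with a leading 'run_' prefix stripped. A small reconstruction of that behaviour (inferred from the test table, not the code in awx.settings.application_name):

def get_service_name(argv):
    # Take the argument that follows 'manage'; strip a 'run_' prefix if present.
    try:
        idx = argv.index('manage')
    except ValueError:
        return None
    rest = argv[idx + 1:]
    if not rest:
        return None
    name = rest[0]
    return name[len('run_'):] if name.startswith('run_') else name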

View File

@@ -594,108 +594,3 @@ class TestControlledBySCM:
rando, rando,
expect=403, expect=403,
) )
@pytest.mark.django_db
class TestConstructedInventory:
@pytest.fixture
def constructed_inventory(self, organization):
return Inventory.objects.create(name='constructed-test-inventory', kind='constructed', organization=organization)
def test_get_constructed_inventory(self, constructed_inventory, admin_user, get):
inv_src = constructed_inventory.inventory_sources.first()
inv_src.update_cache_timeout = 53
inv_src.save(update_fields=['update_cache_timeout'])
r = get(url=reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk}), user=admin_user, expect=200)
assert r.data['update_cache_timeout'] == 53
def test_patch_constructed_inventory(self, constructed_inventory, admin_user, patch):
inv_src = constructed_inventory.inventory_sources.first()
assert inv_src.update_cache_timeout == 0
assert inv_src.limit == ''
r = patch(
url=reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk}),
data=dict(update_cache_timeout=54, limit='foobar'),
user=admin_user,
expect=200,
)
assert r.data['update_cache_timeout'] == 54
inv_src = constructed_inventory.inventory_sources.first()
assert inv_src.update_cache_timeout == 54
assert inv_src.limit == 'foobar'
def test_patch_constructed_inventory_generated_source_limits_editable_fields(self, constructed_inventory, admin_user, project, patch):
inv_src = constructed_inventory.inventory_sources.first()
r = patch(
url=inv_src.get_absolute_url(),
data={
'source': 'scm',
'source_project': project.pk,
'source_path': '',
'source_vars': 'plugin: a.b.c',
},
expect=400,
user=admin_user,
)
assert str(r.data['error'][0]) == "Cannot change field 'source' on a constructed inventory source."
# Make sure it didn't get updated before we got the error
inv_src_after_err = constructed_inventory.inventory_sources.first()
assert inv_src.id == inv_src_after_err.id
assert inv_src.source == inv_src_after_err.source
assert inv_src.source_project == inv_src_after_err.source_project
assert inv_src.source_path == inv_src_after_err.source_path
assert inv_src.source_vars == inv_src_after_err.source_vars
def test_patch_constructed_inventory_generated_source_allows_source_vars_edit(self, constructed_inventory, admin_user, patch):
inv_src = constructed_inventory.inventory_sources.first()
patch(
url=inv_src.get_absolute_url(),
data={
'source_vars': 'plugin: a.b.c',
},
expect=200,
user=admin_user,
)
inv_src_after_patch = constructed_inventory.inventory_sources.first()
# sanity checks
assert inv_src.id == inv_src_after_patch.id
assert inv_src.source == 'constructed'
assert inv_src_after_patch.source == 'constructed'
assert inv_src.source_vars == ''
assert inv_src_after_patch.source_vars == 'plugin: a.b.c'
def test_create_constructed_inventory(self, constructed_inventory, admin_user, post, organization):
r = post(
url=reverse('api:constructed_inventory_list'),
data=dict(name='constructed-inventory-just-created', kind='constructed', organization=organization.id, update_cache_timeout=55, limit='foobar'),
user=admin_user,
expect=201,
)
pk = r.data['id']
constructed_inventory = Inventory.objects.get(pk=pk)
inv_src = constructed_inventory.inventory_sources.first()
assert inv_src.update_cache_timeout == 55
assert inv_src.limit == 'foobar'
def test_get_absolute_url_for_constructed_inventory(self, constructed_inventory, admin_user, get):
"""
If we are using the normal inventory API endpoint to look at a
constructed inventory, then we should get a normal inventory API route
back. If we are accessing it via the special constructed inventory
endpoint, then we should get that back.
"""
url_const = reverse('api:constructed_inventory_detail', kwargs={'pk': constructed_inventory.pk})
url_inv = reverse('api:inventory_detail', kwargs={'pk': constructed_inventory.pk})
const_r = get(url=url_const, user=admin_user, expect=200)
inv_r = get(url=url_inv, user=admin_user, expect=200)
assert const_r.data['url'] == url_const
assert inv_r.data['url'] == url_inv
assert inv_r.data['url'] != const_r.data['url']
assert inv_r.data['related']['constructed_url'] == url_const
assert const_r.data['related']['constructed_url'] == url_const

View File

@@ -3,7 +3,7 @@ import pytest
# AWX # AWX
from awx.api.serializers import JobTemplateSerializer from awx.api.serializers import JobTemplateSerializer
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate, Organization, Project, Inventory from awx.main.models import Job, JobTemplate, CredentialType, WorkflowJobTemplate, Organization, Project
from awx.main.migrations import _save_password_keys as save_password_keys from awx.main.migrations import _save_password_keys as save_password_keys
# Django # Django
@@ -353,19 +353,3 @@ def test_job_template_branch_prompt_error(project, inventory, post, admin_user):
expect=400, expect=400,
) )
assert 'Project does not allow overriding branch' in str(r.data['ask_scm_branch_on_launch']) assert 'Project does not allow overriding branch' in str(r.data['ask_scm_branch_on_launch'])
@pytest.mark.django_db
def test_job_template_missing_inventory(project, inventory, admin_user, post):
jt = JobTemplate.objects.create(
name='test-jt', inventory=inventory, ask_inventory_on_launch=True, project=project, playbook='helloworld.yml', host_config_key='abcd'
)
Inventory.objects.get(pk=inventory.pk).delete()
r = post(
url=reverse('api:job_template_callback', kwargs={'pk': jt.pk}),
data={'host_config_key': 'abcd'},
user=admin_user,
expect=400,
)
assert r.status_code == 400
assert "Cannot start automatically, an inventory is required." in str(r.data)

View File

@@ -153,13 +153,3 @@ def test_post_org_approval_notification(get, post, admin, notification_template,
response = get(url, admin) response = get(url, admin)
assert response.status_code == 200 assert response.status_code == 200
assert len(response.data['results']) == 1 assert len(response.data['results']) == 1
@pytest.mark.django_db
def test_post_wfj_notification(get, post, admin, workflow_job, notification):
workflow_job.notifications.add(notification)
workflow_job.save()
url = reverse("api:workflow_job_notifications_list", kwargs={'pk': workflow_job.pk})
response = get(url, admin)
assert response.status_code == 200
assert len(response.data['results']) == 1

View File

@@ -329,21 +329,3 @@ def test_galaxy_credential_association(alice, admin, organization, post, get):
'Public Galaxy 4', 'Public Galaxy 4',
'Public Galaxy 5', 'Public Galaxy 5',
] ]
@pytest.mark.django_db
def test_org_admin_credential_count(org_admin, admin, organization, post, get):
galaxy = CredentialType.defaults['galaxy_api_token']()
galaxy.save()
for i in range(3):
cred = Credential.objects.create(credential_type=galaxy, name=f'test_{i}', inputs={'url': 'https://galaxy.ansible.com/'})
url = reverse('api:organization_galaxy_credentials_list', kwargs={'pk': organization.pk})
post(url, {'associate': True, 'id': cred.pk}, user=admin, expect=204)
# org admin should see all associated galaxy credentials
resp = get(url, user=org_admin)
assert resp.data['count'] == 3
# removing one to validate new count
post(url, {'disassociate': True, 'id': Credential.objects.get(name='test_1').pk}, user=admin, expect=204)
resp_new = get(url, user=org_admin)
assert resp_new.data['count'] == 2

View File

@@ -1,75 +0,0 @@
import pytest
from django.test.utils import override_settings
from rest_framework.serializers import ValidationError
from awx.api.serializers import UserSerializer
from django.contrib.auth.models import User
@pytest.mark.parametrize(
"password,min_length,min_digits,min_upper,min_special,expect_error",
[
# Test length
("a", 1, 0, 0, 0, False),
("a", 2, 0, 0, 0, True),
("aa", 2, 0, 0, 0, False),
("aaabcDEF123$%^", 2, 0, 0, 0, False),
# Test digits
("a", 0, 1, 0, 0, True),
("1", 0, 1, 0, 0, False),
("1", 0, 2, 0, 0, True),
("12", 0, 2, 0, 0, False),
("12abcDEF123$%^", 0, 2, 0, 0, False),
# Test upper
("a", 0, 0, 1, 0, True),
("A", 0, 0, 1, 0, False),
("A", 0, 0, 2, 0, True),
("AB", 0, 0, 2, 0, False),
("ABabcDEF123$%^", 0, 0, 2, 0, False),
# Test special
("a", 0, 0, 0, 1, True),
("!", 0, 0, 0, 1, False),
("!", 0, 0, 0, 2, True),
("!@", 0, 0, 0, 2, False),
("!@abcDEF123$%^", 0, 0, 0, 2, False),
],
)
@pytest.mark.django_db
def test_validate_password_rules(password, min_length, min_digits, min_upper, min_special, expect_error):
user_serializer = UserSerializer()
# First test password with no params, this should always pass
try:
user_serializer.validate_password(password)
except ValidationError:
assert False, f"Password {password} should not have validation issue if no params are used"
with override_settings(
LOCAL_PASSWORD_MIN_LENGTH=min_length, LOCAL_PASSWORD_MIN_DIGITS=min_digits, LOCAL_PASSWORD_MIN_UPPER=min_upper, LOCAL_PASSWORD_MIN_SPECIAL=min_special
):
if expect_error:
with pytest.raises(ValidationError):
user_serializer.validate_password(password)
else:
try:
user_serializer.validate_password(password)
except ValidationError:
assert False, "validate_password raised an unexpected exception"
@pytest.mark.django_db
def test_validate_password_too_long():
password_max_length = User._meta.get_field('password').max_length
password = "x" * password_max_length
user_serializer = UserSerializer()
try:
user_serializer.validate_password(password)
except ValidationError:
assert False, f"Password {password} should not have raised a validation error"
password = f"{password}x"
with pytest.raises(ValidationError):
user_serializer.validate_password(password)
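The table above encodes four independent minimums: length, digits, uppercase letters and special characters. A hypothetical checker that reproduces those expectations (not the UserSerializer.validate_password implementation):

import re

def violates_password_rules(password, min_length=0, min_digits=0, min_upper=0, min_special=0):
    # True when any configured minimum is not met.
    return (
        len(password) < min_length
        or len(re.findall(r'[0-9]', password)) < min_digits
        or len(re.findall(r'[A-Z]', password)) < min_upper
        or len(re.findall(r'[^a-zA-Z0-9]', password)) < min_special
    )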

View File

@@ -1,54 +0,0 @@
import pytest
from awx.api.versioning import reverse
@pytest.mark.django_db
@pytest.mark.parametrize(
"is_admin, status",
[
[True, 201],
[False, 403],
], # if they're a WFJ admin, they get a 201 # if they're not a WFJ *nor* org admin, they get a 403
)
def test_workflow_job_relaunch(workflow_job, post, admin_user, alice, is_admin, status):
url = reverse("api:workflow_job_relaunch", kwargs={'pk': workflow_job.pk})
if is_admin:
post(url, user=admin_user, expect=status)
else:
post(url, user=alice, expect=status)
@pytest.mark.django_db
def test_workflow_job_relaunch_failure(workflow_job, post, admin_user):
workflow_job.is_sliced_job = True
workflow_job.job_template = None
workflow_job.save()
url = reverse("api:workflow_job_relaunch", kwargs={'pk': workflow_job.pk})
post(url, user=admin_user, expect=400)
@pytest.mark.django_db
def test_workflow_job_relaunch_not_inventory_failure(workflow_job, post, admin_user):
workflow_job.is_sliced_job = True
workflow_job.inventory = None
workflow_job.save()
url = reverse("api:workflow_job_relaunch", kwargs={'pk': workflow_job.pk})
post(url, user=admin_user, expect=400)
@pytest.mark.django_db
@pytest.mark.parametrize(
"is_admin, status",
[
[True, 202],
[False, 403],
], # if they're a WFJ admin, they get a 202 # if they're not a WFJ *nor* org admin, they get a 403
)
def test_workflow_job_cancel(workflow_job, post, admin_user, alice, is_admin, status):
url = reverse("api:workflow_job_cancel", kwargs={'pk': workflow_job.pk})
if is_admin:
post(url, user=admin_user, expect=status)
else:
post(url, user=alice, expect=status)

View File

@@ -511,14 +511,6 @@ def group(inventory):
return inventory.groups.create(name='single-group') return inventory.groups.create(name='single-group')
@pytest.fixture
def constructed_inventory(organization):
"""
creates a new constructed inventory
"""
return Inventory.objects.create(name='dummy1', kind='constructed', organization=organization)
@pytest.fixture @pytest.fixture
def inventory_source(inventory): def inventory_source(inventory):
# by making it ec2, the credential is not required # by making it ec2, the credential is not required
@@ -743,30 +735,6 @@ def system_job_factory(system_job_template, admin):
return factory return factory
@pytest.fixture
def wfjt(workflow_job_template_factory, organization):
objects = workflow_job_template_factory('test_workflow', organization=organization, persisted=True)
return objects.workflow_job_template
@pytest.fixture
def wfjt_with_nodes(workflow_job_template_factory, organization, job_template):
objects = workflow_job_template_factory(
'test_workflow', organization=organization, workflow_job_template_nodes=[{'unified_job_template': job_template}], persisted=True
)
return objects.workflow_job_template
@pytest.fixture
def wfjt_node(wfjt_with_nodes):
return wfjt_with_nodes.workflow_job_template_nodes.all()[0]
@pytest.fixture
def workflow_job(wfjt):
return wfjt.workflow_jobs.create(name='test_workflow')
def dumps(value): def dumps(value):
return DjangoJSONEncoder().encode(value) return DjangoJSONEncoder().encode(value)

View File

@@ -3,209 +3,178 @@ import pytest
from django.utils.timezone import now from django.utils.timezone import now
from django.db.models import Q from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary
from awx.main.models import Job, JobEvent, Inventory, Host, JobHostSummary, HostMetric
@pytest.mark.django_db @pytest.mark.django_db
class TestEvents: @mock.patch('awx.main.models.events.emit_event_detail')
def setup_method(self): def test_parent_changed(emit):
self.hostnames = [] j = Job()
self.host_map = dict() j.save()
self.inventory = None JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
self.job = None assert JobEvent.objects.count() == 1
for e in JobEvent.objects.all():
assert e.changed is False
@mock.patch('awx.main.models.events.emit_event_detail') JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='runner_on_ok', event_data={'res': {'changed': ['localhost']}}).save()
def test_parent_changed(self, emit): # the `playbook_on_stats` event is where we update the parent changed linkage
j = Job() JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
j.save() events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save() assert events.count() == 2
assert JobEvent.objects.count() == 1 for e in events.all():
for e in JobEvent.objects.all(): assert e.changed is True
assert e.changed is False
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='runner_on_ok', event_data={'res': {'changed': ['localhost']}}).save()
# the `playbook_on_stats` event is where we update the parent changed linkage
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
events = JobEvent.objects.filter(event__in=['playbook_on_task_start', 'runner_on_ok'])
assert events.count() == 2
for e in events.all():
assert e.changed is True
@pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS) @pytest.mark.django_db
@mock.patch('awx.main.models.events.emit_event_detail') @pytest.mark.parametrize('event', JobEvent.FAILED_EVENTS)
def test_parent_failed(self, emit, event): @mock.patch('awx.main.models.events.emit_event_detail')
j = Job() def test_parent_failed(emit, event):
j.save() j = Job()
JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save() j.save()
assert JobEvent.objects.count() == 1 JobEvent.create_from_data(job_id=j.pk, uuid='abc123', event='playbook_on_task_start').save()
for e in JobEvent.objects.all(): assert JobEvent.objects.count() == 1
assert e.failed is False for e in JobEvent.objects.all():
assert e.failed is False
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event=event).save() JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event=event).save()
# the `playbook_on_stats` event is where we update the parent failed linkage # the `playbook_on_stats` event is where we update the parent failed linkage
JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save() JobEvent.create_from_data(job_id=j.pk, parent_uuid='abc123', event='playbook_on_stats').save()
events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event]) events = JobEvent.objects.filter(event__in=['playbook_on_task_start', event])
assert events.count() == 2 assert events.count() == 2
for e in events.all(): for e in events.all():
assert e.failed is True assert e.failed is True
def test_host_summary_generation(self):
self._generate_hosts(100)
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
assert self.job.job_host_summaries.count() == len(self.hostnames) @pytest.mark.django_db
assert sorted([s.host_name for s in self.job.job_host_summaries.all()]) == sorted(self.hostnames) def test_host_summary_generation():
hostnames = [f'Host {i}' for i in range(100)]
for s in self.job.job_host_summaries.all(): inv = Inventory()
assert self.host_map[s.host_name] == s.host_id inv.save()
assert s.ok == len(s.host_name) Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
assert s.changed == 0 j = Job(inventory=inv)
assert s.dark == 0 j.save()
assert s.failures == 0 host_map = dict((host.name, host.id) for host in inv.hosts.all())
assert s.ignored == 0 JobEvent.create_from_data(
assert s.processed == 0 job_id=j.pk,
assert s.rescued == 0
assert s.skipped == 0
for host in Host.objects.all():
assert host.last_job_id == self.job.id
assert host.last_job_host_summary.host == host
def test_host_summary_generation_with_deleted_hosts(self):
self._generate_hosts(10)
# delete half of the hosts during the playbook run
for h in self.inventory.hosts.all()[:5]:
h.delete()
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
ids = sorted([s.host_id or -1 for s in self.job.job_host_summaries.order_by('id').all()])
names = sorted([s.host_name for s in self.job.job_host_summaries.all()])
assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']
def test_host_summary_generation_with_limit(self):
# Make an inventory with 10 hosts, run a playbook with a --limit
# pointed at *one* host,
# Verify that *only* that host has an associated JobHostSummary and that
# *only* that host has an updated value for .last_job.
self._generate_hosts(10)
# by making the playbook_on_stats *only* include Host 1, we're emulating
# the behavior of a `--limit=Host 1`
matching_host = Host.objects.get(name='Host 1')
self._create_job_event(ok={matching_host.name: len(matching_host.name)}) # effectively, limit=Host 1
# since the playbook_on_stats only references one host,
# there should *only* be one JobHostSummary record (and it should
# be related to the appropriate Host)
assert JobHostSummary.objects.count() == 1
for h in Host.objects.all():
if h.name == 'Host 1':
assert h.last_job_id == self.job.id
assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
else:
# all other hosts in the inventory should remain untouched
assert h.last_job_id is None
assert h.last_job_host_summary_id is None
def test_host_metrics_insert(self):
self._generate_hosts(10)
self._create_job_event(
ok=dict((hostname, len(hostname)) for hostname in self.hostnames[0:3]),
failures=dict((hostname, len(hostname)) for hostname in self.hostnames[3:6]),
processed=dict((hostname, len(hostname)) for hostname in self.hostnames[6:9]),
skipped=dict((hostname, len(hostname)) for hostname in [self.hostnames[9]]),
)
metrics = HostMetric.objects.all()
assert len(metrics) == 10
for hm in metrics:
assert hm.automated_counter == 1
assert hm.last_automation is not None
assert hm.deleted is False
def test_host_metrics_update(self):
self._generate_hosts(12)
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
# Soft delete 6 host metrics
for hm in HostMetric.objects.filter(id__in=[1, 3, 5, 7, 9, 11]):
hm.soft_delete()
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
assert len(HostMetric.objects.filter(Q(deleted=True) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
# hostnames in 'ignored' and 'rescued' stats are ignored
self.job = Job(inventory=self.inventory)
self.job.save()
self._create_job_event(
ignored=dict((hostname, len(hostname)) for hostname in self.hostnames[0:6]),
rescued=dict((hostname, len(hostname)) for hostname in self.hostnames[6:11]),
)
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
assert len(HostMetric.objects.filter(Q(deleted=True) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
# hostnames in 'changed', 'dark', 'failures', 'ok', 'processed', 'skipped' are processed
self.job = Job(inventory=self.inventory)
self.job.save()
self._create_job_event(
changed=dict((hostname, len(hostname)) for hostname in self.hostnames[0:2]),
dark=dict((hostname, len(hostname)) for hostname in self.hostnames[2:4]),
failures=dict((hostname, len(hostname)) for hostname in self.hostnames[4:6]),
ok=dict((hostname, len(hostname)) for hostname in self.hostnames[6:8]),
processed=dict((hostname, len(hostname)) for hostname in self.hostnames[8:10]),
skipped=dict((hostname, len(hostname)) for hostname in self.hostnames[10:12]),
)
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
def _generate_hosts(self, cnt, id_from=0):
self.hostnames = [f'Host {i}' for i in range(id_from, id_from + cnt)]
self.inventory = Inventory()
self.inventory.save()
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=self.inventory.id) for h in self.hostnames])
self.job = Job(inventory=self.inventory)
self.job.save()
# host map is a data structure that tracks a mapping of host name --> ID
# for the inventory, _regardless_ of whether or not there's a limit
# applied to the actual playbook run
self.host_map = dict((host.name, host.id) for host in self.inventory.hosts.all())
def _create_job_event(
self,
parent_uuid='abc123', parent_uuid='abc123',
event='playbook_on_stats', event='playbook_on_stats',
ok=None, event_data={
changed=None, 'ok': dict((hostname, len(hostname)) for hostname in hostnames),
dark=None, 'changed': {},
failures=None, 'dark': {},
ignored=None, 'failures': {},
processed=None, 'ignored': {},
rescued=None, 'processed': {},
skipped=None, 'rescued': {},
): 'skipped': {},
JobEvent.create_from_data( },
job_id=self.job.pk, host_map=host_map,
parent_uuid=parent_uuid, ).save()
event=event,
event_data={ assert j.job_host_summaries.count() == len(hostnames)
'ok': ok or {}, assert sorted([s.host_name for s in j.job_host_summaries.all()]) == sorted(hostnames)
'changed': changed or {},
'dark': dark or {}, for s in j.job_host_summaries.all():
'failures': failures or {}, assert host_map[s.host_name] == s.host_id
'ignored': ignored or {}, assert s.ok == len(s.host_name)
'processed': processed or {}, assert s.changed == 0
'rescued': rescued or {}, assert s.dark == 0
'skipped': skipped or {}, assert s.failures == 0
}, assert s.ignored == 0
host_map=self.host_map, assert s.processed == 0
).save() assert s.rescued == 0
assert s.skipped == 0
for host in Host.objects.all():
assert host.last_job_id == j.id
assert host.last_job_host_summary.host == host
@pytest.mark.django_db
def test_host_summary_generation_with_deleted_hosts():
hostnames = [f'Host {i}' for i in range(10)]
inv = Inventory()
inv.save()
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
j = Job(inventory=inv)
j.save()
host_map = dict((host.name, host.id) for host in inv.hosts.all())
# delete half of the hosts during the playbook run
for h in inv.hosts.all()[:5]:
h.delete()
JobEvent.create_from_data(
job_id=j.pk,
parent_uuid='abc123',
event='playbook_on_stats',
event_data={
'ok': dict((hostname, len(hostname)) for hostname in hostnames),
'changed': {},
'dark': {},
'failures': {},
'ignored': {},
'processed': {},
'rescued': {},
'skipped': {},
},
host_map=host_map,
).save()
ids = sorted([s.host_id or -1 for s in j.job_host_summaries.order_by('id').all()])
names = sorted([s.host_name for s in j.job_host_summaries.all()])
assert ids == [-1, -1, -1, -1, -1, 6, 7, 8, 9, 10]
assert names == ['Host 0', 'Host 1', 'Host 2', 'Host 3', 'Host 4', 'Host 5', 'Host 6', 'Host 7', 'Host 8', 'Host 9']
@pytest.mark.django_db
def test_host_summary_generation_with_limit():
# Make an inventory with 10 hosts, run a playbook with a --limit
# pointed at *one* host,
# Verify that *only* that host has an associated JobHostSummary and that
# *only* that host has an updated value for .last_job.
hostnames = [f'Host {i}' for i in range(10)]
inv = Inventory()
inv.save()
Host.objects.bulk_create([Host(created=now(), modified=now(), name=h, inventory_id=inv.id) for h in hostnames])
j = Job(inventory=inv)
j.save()
# host map is a data structure that tracks a mapping of host name --> ID
# for the inventory, _regardless_ of whether or not there's a limit
# applied to the actual playbook run
host_map = dict((host.name, host.id) for host in inv.hosts.all())
# by making the playbook_on_stats *only* include Host 1, we're emulating
# the behavior of a `--limit=Host 1`
matching_host = Host.objects.get(name='Host 1')
JobEvent.create_from_data(
job_id=j.pk,
parent_uuid='abc123',
event='playbook_on_stats',
event_data={
'ok': {matching_host.name: len(matching_host.name)}, # effectively, limit=Host 1
'changed': {},
'dark': {},
'failures': {},
'ignored': {},
'processed': {},
'rescued': {},
'skipped': {},
},
host_map=host_map,
).save()
# since the playbook_on_stats only references one host,
# there should *only* be one JobHostSummary record (and it should
# be related to the appropriate Host)
assert JobHostSummary.objects.count() == 1
for h in Host.objects.all():
if h.name == 'Host 1':
assert h.last_job_id == j.id
assert h.last_job_host_summary_id == JobHostSummary.objects.first().id
else:
# all other hosts in the inventory should remain untouched
assert h.last_job_id is None
assert h.last_job_host_summary_id is None

View File

@@ -20,53 +20,3 @@ def test_host_metrics_generation():
date_today = now().strftime('%Y-%m-%d') date_today = now().strftime('%Y-%m-%d')
result = HostMetric.objects.filter(first_automation__startswith=date_today).count() result = HostMetric.objects.filter(first_automation__startswith=date_today).count()
assert result == len(hostnames) assert result == len(hostnames)
@pytest.mark.django_db
def test_soft_delete():
hostnames = [f'Host to delete {i}' for i in range(2)]
current_time = now()
HostMetric.objects.bulk_create([HostMetric(hostname=h, last_automation=current_time, automated_counter=42) for h in hostnames])
hm = HostMetric.objects.get(hostname="Host to delete 0")
assert hm.last_deleted is None
last_deleted = None
for _ in range(3):
# soft delete 1st
# 2nd/3rd delete don't have an effect
hm.soft_delete()
if last_deleted is None:
last_deleted = hm.last_deleted
assert hm.deleted is True
assert hm.deleted_counter == 1
assert hm.last_deleted == last_deleted
assert hm.automated_counter == 42
# 2nd record is not touched
hm = HostMetric.objects.get(hostname="Host to delete 1")
assert hm.deleted is False
assert hm.deleted_counter == 0
assert hm.last_deleted is None
assert hm.automated_counter == 42
@pytest.mark.django_db
def test_soft_restore():
current_time = now()
HostMetric.objects.create(hostname="Host 1", last_automation=current_time, deleted=True)
HostMetric.objects.create(hostname="Host 2", last_automation=current_time, deleted=True, last_deleted=current_time)
HostMetric.objects.create(hostname="Host 3", last_automation=current_time, deleted=False, last_deleted=current_time)
HostMetric.objects.all().update(automated_counter=42, deleted_counter=10)
# 1. deleted, last_deleted not null
for hm in HostMetric.objects.all():
for _ in range(3):
hm.soft_restore()
assert hm.deleted is False
assert hm.automated_counter == 42 and hm.deleted_counter == 10
if hm.hostname == "Host 1":
assert hm.last_deleted is None
else:
assert hm.last_deleted == current_time
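The soft-delete and soft-restore tests above assert idempotency: only the first call flips the flag, bumps the counter and stamps the timestamp, and repeated calls are no-ops. A stripped-down illustration of that state transition (a plain object, not the HostMetric model):

class SoftDeletable:
    def __init__(self):
        self.deleted = False
        self.deleted_counter = 0
        self.last_deleted = None

    def soft_delete(self, now):
        # Only the first transition has any effect; later calls change nothing.
        if not self.deleted:
            self.deleted = True
            self.deleted_counter += 1
            self.last_deleted = now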

View File

@@ -169,8 +169,7 @@ class TestInventorySourceInjectors:
CLOUD_PROVIDERS constant contains the same names as what are CLOUD_PROVIDERS constant contains the same names as what are
defined within the injectors defined within the injectors
""" """
# slight exception case for constructed, because it has a FQCN but is not a cloud source assert set(CLOUD_PROVIDERS) == set(InventorySource.injectors.keys())
assert set(CLOUD_PROVIDERS) | set(['constructed']) == set(InventorySource.injectors.keys())
@pytest.mark.parametrize('source,filename', [('ec2', 'aws_ec2.yml'), ('openstack', 'openstack.yml'), ('gce', 'gcp_compute.yml')]) @pytest.mark.parametrize('source,filename', [('ec2', 'aws_ec2.yml'), ('openstack', 'openstack.yml'), ('gce', 'gcp_compute.yml')])
def test_plugin_filenames(self, source, filename): def test_plugin_filenames(self, source, filename):

View File

@@ -123,24 +123,6 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
assert set(group_2_2_copy.hosts.all()) == set() assert set(group_2_2_copy.hosts.all()) == set()
@pytest.mark.django_db
@pytest.mark.parametrize(
"is_admin, can_copy, status",
[
[True, True, 200],
[False, False, 200],
],
)
def test_workflow_job_template_copy_access(get, admin_user, alice, workflow_job_template, is_admin, can_copy, status):
url = reverse('api:workflow_job_template_copy', kwargs={'pk': workflow_job_template.pk})
if is_admin:
response = get(url, user=admin_user, expect=status)
else:
workflow_job_template.organization.auditor_role.members.add(alice)
response = get(url, user=alice, expect=status)
assert response.data['can_copy'] == can_copy
@pytest.mark.django_db @pytest.mark.django_db
def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization): def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization):
''' '''

View File

@@ -347,8 +347,8 @@ class TestJobReaper(object):
'status, execution_node, controller_node, modified, fail', 'status, execution_node, controller_node, modified, fail',
[ [
('running', '', '', None, False), # running, not assigned to the instance ('running', '', '', None, False), # running, not assigned to the instance
('running', 'awx', '', None, True), # running, has the instance as its execution_node ('running', 'awx', '', minute, True), # running, has the instance as its execution_node
('running', '', 'awx', None, True), # running, has the instance as its controller_node ('running', '', 'awx', minute, True), # running, has the instance as its controller_node
('waiting', '', '', None, False), # waiting, not assigned to the instance ('waiting', '', '', None, False), # waiting, not assigned to the instance
('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago ('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago
('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago ('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago
@@ -370,7 +370,7 @@ class TestJobReaper(object):
# we have to edit the modification time _without_ calling save() # we have to edit the modification time _without_ calling save()
# (because .save() overwrites it to _now_) # (because .save() overwrites it to _now_)
Job.objects.filter(id=j.id).update(modified=modified) Job.objects.filter(id=j.id).update(modified=modified)
reaper.reap(i) reaper.reap(i, ref_time=now)
reaper.reap_waiting(i) reaper.reap_waiting(i)
job = Job.objects.first() job = Job.objects.first()
if fail: if fail:
@@ -381,14 +381,14 @@ class TestJobReaper(object):
assert job.status == status assert job.status == status
@pytest.mark.parametrize( @pytest.mark.parametrize(
'excluded_uuids, fail, started', 'excluded_uuids, fail, modified',
[ [
(['abc123'], False, None), (['abc123'], False, None),
([], False, None), ([], False, None),
([], True, minute), ([], True, minute),
], ],
) )
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail, started): def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail, modified):
"""Modified Test to account for ref_time in reap()""" """Modified Test to account for ref_time in reap()"""
i = Instance(hostname='awx') i = Instance(hostname='awx')
i.save() i.save()
@@ -400,8 +400,8 @@ class TestJobReaper(object):
celery_task_id='abc123', celery_task_id='abc123',
) )
j.save() j.save()
if started: if modified:
Job.objects.filter(id=j.id).update(started=started) Job.objects.filter(id=j.id).update(modified=modified)
# if the UUID is excluded, don't reap it # if the UUID is excluded, don't reap it
reaper.reap(i, excluded_uuids=excluded_uuids, ref_time=now) reaper.reap(i, excluded_uuids=excluded_uuids, ref_time=now)
@@ -419,23 +419,6 @@ class TestJobReaper(object):
i.save() i.save()
j = WorkflowJob(status='running', execution_node='awx') j = WorkflowJob(status='running', execution_node='awx')
j.save() j.save()
reaper.reap(i) reaper.reap(i, ref_time=now)
assert WorkflowJob.objects.first().status == 'running' assert WorkflowJob.objects.first().status == 'running'
def test_should_not_reap_new(self):
"""
This test is designed specifically to ensure that jobs that are launched after the dispatcher has provided a list of UUIDs aren't reaped.
It is very racy and this test is designed with that in mind
"""
i = Instance(hostname='awx')
# ref_time is set to 10 seconds in the past to mimic someone launching a job in the heartbeat window.
ref_time = tz_now() - datetime.timedelta(seconds=10)
# creating job at current time
job = Job.objects.create(status='running', controller_node=i.hostname)
reaper.reap(i, ref_time=ref_time)
# explicitly refreshing from db to ensure an up-to-date cache
job.refresh_from_db()
assert job.started > ref_time
assert job.status == 'running'
assert job.job_explanation == ''
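The test_should_not_reap_new test above spells out the race the ref_time argument guards against: a job launched after the dispatcher produced its snapshot of running task UUIDs must be left alone. A compact sketch of that guard under assumed names (should_reap is hypothetical, not the reaper API):

def should_reap(job_uuid, job_started, ref_time, excluded_uuids=()):
    # Spare jobs a live dispatcher still claims, and jobs newer than the snapshot.
    if job_uuid in excluded_uuids:
        return False
    return job_started <= ref_time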

View File

@@ -1,6 +1,6 @@
import pytest import pytest
from awx.main.models import InstanceGroup, Inventory from awx.main.models import InstanceGroup
@pytest.fixture(scope='function') @pytest.fixture(scope='function')
@@ -38,16 +38,6 @@ def test_instance_group_ordering(source_model):
assert source_model.instance_groups.through.objects.count() == 0 assert source_model.instance_groups.through.objects.count() == 0
@pytest.mark.django_db
@pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True)
def test_instance_group_bulk_add(source_model):
groups = [InstanceGroup.objects.create(name='host-%d' % i) for i in range(5)]
groups.reverse()
with pytest.raises(RuntimeError) as err:
source_model.instance_groups.add(*groups)
assert 'Ordered many-to-many fields do not support multiple objects' in str(err)
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True) @pytest.mark.parametrize('source_model', ['job_template', 'inventory', 'organization'], indirect=True)
def test_instance_group_middle_deletion(source_model): def test_instance_group_middle_deletion(source_model):
@@ -76,33 +66,3 @@ def test_explicit_ordering(source_model):
assert [g.name for g in source_model.instance_groups.all()] == ['host-4', 'host-3', 'host-2', 'host-1', 'host-0'] assert [g.name for g in source_model.instance_groups.all()] == ['host-4', 'host-3', 'host-2', 'host-1', 'host-0']
assert [g.name for g in source_model.instance_groups.order_by('name').all()] == ['host-0', 'host-1', 'host-2', 'host-3', 'host-4'] assert [g.name for g in source_model.instance_groups.order_by('name').all()] == ['host-0', 'host-1', 'host-2', 'host-3', 'host-4']
@pytest.mark.django_db
def test_input_inventories_ordering():
constructed_inventory = Inventory.objects.create(name='my_constructed', kind='constructed')
input_inventories = [Inventory.objects.create(name='inv-%d' % i) for i in range(5)]
input_inventories.reverse()
for inv in input_inventories:
constructed_inventory.input_inventories.add(inv)
assert [g.name for g in constructed_inventory.input_inventories.all()] == ['inv-4', 'inv-3', 'inv-2', 'inv-1', 'inv-0']
assert [(row.position, row.input_inventory.name) for row in constructed_inventory.input_inventories.through.objects.all()] == [
(0, 'inv-4'),
(1, 'inv-3'),
(2, 'inv-2'),
(3, 'inv-1'),
(4, 'inv-0'),
]
constructed_inventory.input_inventories.remove(input_inventories[0])
assert [g.name for g in constructed_inventory.input_inventories.all()] == ['inv-3', 'inv-2', 'inv-1', 'inv-0']
assert [(row.position, row.input_inventory.name) for row in constructed_inventory.input_inventories.through.objects.all()] == [
(0, 'inv-3'),
(1, 'inv-2'),
(2, 'inv-1'),
(3, 'inv-0'),
]
constructed_inventory.input_inventories.clear()
assert constructed_inventory.input_inventories.through.objects.count() == 0

View File

@@ -94,8 +94,7 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan
ig_all = instance_group_factory("all", instances=[i1, i2, i3]) ig_all = instance_group_factory("all", instances=[i1, i2, i3])
ig_dup = instance_group_factory("duplicates", instances=[i1]) ig_dup = instance_group_factory("duplicates", instances=[i1])
project.organization.instance_groups.add(ig_all) project.organization.instance_groups.add(ig_all, ig_dup)
project.organization.instance_groups.add(ig_dup)
actual_num_instances = Instance.objects.count() actual_num_instances = Instance.objects.count()
list_response = get(reverse('api:instance_list'), user=system_auditor) list_response = get(reverse('api:instance_list'), user=system_auditor)
api_num_instances_auditor = list(list_response.data.items())[0][1] api_num_instances_auditor = list(list_response.data.items())[0][1]

View File

@@ -1,61 +0,0 @@
import pytest
from awx.main.models import Inventory
from awx.api.versioning import reverse
@pytest.mark.django_db
def test_constructed_inventory_post(post, admin_user, organization):
inv1 = Inventory.objects.create(name='dummy1', kind='constructed', organization=organization)
inv2 = Inventory.objects.create(name='dummy2', kind='constructed', organization=organization)
resp = post(
url=reverse('api:inventory_input_inventories', kwargs={'pk': inv1.pk}),
data={'id': inv2.pk},
user=admin_user,
expect=400,
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_add_constructed_inventory_source(post, admin_user, constructed_inventory):
resp = post(
url=reverse('api:inventory_inventory_sources_list', kwargs={'pk': constructed_inventory.pk}),
data={'name': 'dummy1', 'source': 'constructed'},
user=admin_user,
expect=400,
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_add_constructed_inventory_host(post, admin_user, constructed_inventory):
resp = post(
url=reverse('api:inventory_hosts_list', kwargs={'pk': constructed_inventory.pk}),
data={'name': 'dummy1'},
user=admin_user,
expect=400,
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_add_constructed_inventory_group(post, admin_user, constructed_inventory):
resp = post(
reverse('api:inventory_groups_list', kwargs={'pk': constructed_inventory.pk}),
data={'name': 'group-test'},
user=admin_user,
expect=400,
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_edit_constructed_inventory_source(patch, admin_user, inventory_source_factory):
inv_src = inventory_source_factory(name='dummy1', source='constructed')
resp = patch(
reverse('api:inventory_source_detail', kwargs={'pk': inv_src.pk}),
data={'description': inv_src.name},
user=admin_user,
expect=400,
)
assert resp.status_code == 400

View File

@@ -26,12 +26,12 @@ def test_python_and_js_licenses():
return (is_gpl, is_lgpl) return (is_gpl, is_lgpl)
def find_embedded_source_version(path, name): def find_embedded_source_version(path, name):
files = os.listdir(path) for entry in os.listdir(path):
tgz_files = [f for f in files if f.endswith('.tar.gz')] # Check variations of '-' and '_' in filenames due to python
for tgz in tgz_files: for fname in [name, name.replace('-', '_')]:
pkg_name = tgz.split('-')[0].split('_')[0] if entry.startswith(fname) and entry.endswith('.tar.gz'):
if pkg_name == name: v = entry.split(name + '-')[1].split('.tar.gz')[0]
return tgz.split('-')[1].split('.tar.gz')[0] return v
return None return None
list = {} list = {}

View File

@@ -218,31 +218,3 @@ def test_webhook_notification_pointed_to_a_redirect_launch_endpoint(post, admin,
) )
assert n1.send("", n1.messages.get("success").get("body")) == 1 assert n1.send("", n1.messages.get("success").get("body")) == 1
@pytest.mark.django_db
def test_update_notification_template(admin, notification_template):
notification_template.messages['workflow_approval'] = {
"running": {
"message": None,
"body": None,
}
}
notification_template.save()
workflow_approval_message = {
"approved": {
"message": None,
"body": None,
},
"running": {
"message": "test-message",
"body": None,
},
}
notification_template.messages['workflow_approval'] = workflow_approval_message
notification_template.save()
subevents = sorted(notification_template.messages["workflow_approval"].keys())
assert subevents == ["approved", "running"]
assert notification_template.messages['workflow_approval'] == workflow_approval_message

Some files were not shown because too many files have changed in this diff.