mirror of https://github.com/ansible/awx.git (synced 2026-02-05 19:44:43 -03:30)

Compare commits: 22.3.0 ... avoid_reap (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 3f83647600 | |
| | 6461ecc762 | |
2 .github/ISSUE_TEMPLATE/bug_report.yml (vendored)

@@ -19,8 +19,6 @@ body:
required: true
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
required: true
- label: I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `security@ansible.com` instead.)
required: true

- type: textarea
id: summary
2 .github/workflows/ci.yml (vendored)

@@ -3,7 +3,7 @@ name: CI
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEV_DOCKER_OWNER: ${{ github.repository_owner }}
DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
on:
pull_request:
4 .github/workflows/label_issue.yml (vendored)

@@ -6,10 +6,6 @@ on:
- opened
- reopened

permissions:
contents: read # to fetch code
issues: write # to label issues

jobs:
triage:
runs-on: ubuntu-latest
4 .github/workflows/label_pr.yml (vendored)

@@ -7,10 +7,6 @@ on:
- reopened
- synchronize

permissions:
contents: read # to determine modified files (actions/labeler)
pull-requests: write # to add labels to PRs (actions/labeler)

jobs:
triage:
runs-on: ubuntu-latest
3 .github/workflows/promote.yml (vendored)

@@ -8,9 +8,6 @@ on:
release:
types: [published]

permissions:
contents: read # to fetch code (actions/checkout)

jobs:
promote:
if: endsWith(github.repository, '/awx')
6 .gitignore (vendored)

@@ -157,11 +157,7 @@ use_dev_supervisor.txt
*.unison.tmp
*.#
/awx/ui/.ui-built
/Dockerfile
/_build/
/_build_kube_dev/
/Dockerfile
/Dockerfile.dev
/Dockerfile.kube-dev

awx/ui_next/src
awx/ui_next/build
@@ -6,7 +6,6 @@ recursive-include awx/templates *.html
recursive-include awx/api/templates *.md *.html *.yml
recursive-include awx/ui/build *.html
recursive-include awx/ui/build *
recursive-include awx/ui_next/build *
recursive-include awx/playbooks *.yml
recursive-include awx/lib/site-packages *
recursive-include awx/plugins *.ps1
138 Makefile

@@ -1,6 +1,4 @@
-include awx/ui_next/Makefile

PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
PYTHON ?= python3.9
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no
NODE ?= node

@@ -37,15 +35,10 @@ SPLUNK ?= false
PROMETHEUS ?= false
# If set to true docker-compose will also start a grafana instance
GRAFANA ?= false
# If set to true docker-compose will also start a tacacs+ instance
TACACS ?= false

VENV_BASE ?= /var/lib/awx/venv

DEV_DOCKER_OWNER ?= ansible
# Docker will only accept lowercase, so github names like Paul need to be paul
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)

RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
@@ -91,7 +84,7 @@ clean-schema:

clean-languages:
rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex '.*\.mo$$' -delete
find ./awx/locale/ -type f -regex ".*\.mo$" -delete

## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist

@@ -222,6 +215,12 @@ daphne:
fi; \
daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer

wsbroadcast:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_wsbroadcast

## Run to start the background task dispatcher for development.
dispatcher:
@if [ "$(VENV_BASE)" ]; then \

@@ -229,6 +228,7 @@ dispatcher:
fi; \
$(PYTHON) manage.py run_dispatcher

## Run to start the zeromq callback receiver
receiver:
@if [ "$(VENV_BASE)" ]; then \
@@ -245,34 +245,6 @@ jupyter:
fi; \
$(MANAGEMENT_COMMAND) shell_plus --notebook

## Start the rsyslog configurer process in background in development environment.
run-rsyslog-configurer:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_rsyslog_configurer

## Start cache_clear process in background in development environment.
run-cache-clear:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_cache_clear

## Start the wsrelay process in background in development environment.
run-wsrelay:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_wsrelay

## Start the heartbeat process in background in development environment.
run-heartbeet:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_heartbeet

reports:
mkdir -p $@
@@ -299,13 +271,13 @@ swagger: reports
check: black

api-lint:
BLACK_ARGS="--check" $(MAKE) black
BLACK_ARGS="--check" make black
flake8 awx
yamllint -s .

## Run egg_info_dev to generate awx.egg-info for development.
awx-link:
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
cp -f /tmp/awx.egg-link /var/lib/awx/venv/awx/lib/$(PYTHON)/site-packages/awx.egg-link

TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests awx/sso/tests
PYTEST_ARGS ?= -n auto

@@ -324,7 +296,7 @@ github_ci_setup:
# CI_GITHUB_TOKEN is defined in .github files
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
$(MAKE) docker-compose-build
make docker-compose-build

## Runs AWX_DOCKER_CMD inside a new docker container.
docker-runner:
@@ -374,7 +346,7 @@ test_collection_sanity:
rm -rf $(COLLECTION_INSTALL)
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
ansible --version
COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
COLLECTION_VERSION=1.0.0 make install_collection
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)

test_collection_integration: install_collection

@@ -446,7 +418,7 @@ ui-devel: awx/ui/node_modules
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
fi
fi

ui-devel-instrumented: awx/ui/node_modules
$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented

@@ -473,12 +445,11 @@ ui-test-general:
$(NPM_BIN) run --prefix awx/ui pretest
$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand

# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
endif
$(PYTHON) -m build -s
ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
@@ -524,9 +495,9 @@ docker-compose-sources: .git/hooks/pre-commit
-e enable_ldap=$(LDAP) \
-e enable_splunk=$(SPLUNK) \
-e enable_prometheus=$(PROMETHEUS) \
-e enable_grafana=$(GRAFANA) \
-e enable_tacacs=$(TACACS) \
$(EXTRA_SOURCES_ANSIBLE_OPTS)
-e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)

docker-compose: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
@@ -559,25 +530,16 @@ docker-compose-container-group-clean:
fi
rm -rf tools/docker-compose-minikube/_sources/

.PHONY: Dockerfile.dev
## Generate Dockerfile.dev for awx_devel image
Dockerfile.dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
ansible-playbook tools/ansible/dockerfile.yml \
-e dockerfile_name=Dockerfile.dev \
-e build_dev=True \
-e receptor_image=$(RECEPTOR_IMAGE)

## Build awx_devel image for docker compose development environment
docker-compose-build: Dockerfile.dev
DOCKER_BUILDKIT=1 docker build \
-f Dockerfile.dev \
-t $(DEVEL_IMAGE_NAME) \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
## Base development image build
docker-compose-build:
ansible-playbook tools/ansible/dockerfile.yml -e build_dev=True -e receptor_image=$(RECEPTOR_IMAGE)
DOCKER_BUILDKIT=1 docker build -t $(DEVEL_IMAGE_NAME) \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .

docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -592,7 +554,7 @@ docker-compose-cluster-elk: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate

docker-compose-container-group:
MINIKUBE_CONTAINER_GROUP=true $(MAKE) docker-compose
MINIKUBE_CONTAINER_GROUP=true make docker-compose

clean-elk:
docker stop tools_kibana_1
@@ -609,36 +571,12 @@ VERSION:
@echo "awx: $(VERSION)"

PYTHON_VERSION:
@echo "$(subst python,,$(PYTHON))"

.PHONY: version-for-buildyml
version-for-buildyml:
@echo $(firstword $(subst +, ,$(VERSION)))
# version-for-buildyml prints a special version string for build.yml,
# chopping off the sha after the '+' sign.
# tools/ansible/build.yml was doing this: make print-VERSION | cut -d + -f -1
# This does the same thing in native make without
# the pipe or the extra processes, and now the pb does `make version-for-buildyml`
# Example:
# 22.1.1.dev38+g523c0d9781 becomes 22.1.1.dev38
@echo "$(PYTHON)" | sed 's:python::'

.PHONY: Dockerfile
## Generate Dockerfile for awx image
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
ansible-playbook tools/ansible/dockerfile.yml \
-e receptor_image=$(RECEPTOR_IMAGE) \
-e headless=$(HEADLESS)
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)

## Build awx image for deployment on Kubernetes environment.
awx-kube-build: Dockerfile
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .

.PHONY: Dockerfile.kube-dev
## Generate Docker.kube-dev for awx_kube_devel image
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
ansible-playbook tools/ansible/dockerfile.yml \
-e dockerfile_name=Dockerfile.kube-dev \
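A quick aside on the `version-for-buildyml` comments above: the target replaces the old `make print-VERSION | cut -d + -f -1` pipeline with native make text functions. As a minimal illustration (plain Python, not part of the diff), the transformation is just:

```python
# Same effect as $(firstword $(subst +, ,$(VERSION))): keep only the part before '+'.
version = "22.1.1.dev38+g523c0d9781"
print(version.split("+", 1)[0])  # -> 22.1.1.dev38
```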
@@ -653,6 +591,13 @@ awx-kube-dev-build: Dockerfile.kube-dev
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .

## Build awx image for deployment on Kubernetes environment.
awx-kube-build: Dockerfile
DOCKER_BUILDKIT=1 docker build -f Dockerfile \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .

# Translation TASKS
# --------------------------------------
@@ -660,12 +605,10 @@ awx-kube-dev-build: Dockerfile.kube-dev
## generate UI .pot file, an empty template of strings yet to be translated
pot: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean

## generate UI .po files for each locale (will update translated strings for `en`)
po: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean

## generate API django .pot .po
messages:

@@ -674,7 +617,6 @@ messages:
fi; \
$(PYTHON) manage.py makemessages -l en_us --keep-pot

.PHONY: print-%
print-%:
@echo $($*)
@@ -686,12 +628,12 @@ HELP_FILTER=.PHONY
## Display help targets
help:
@printf "Available targets:\n"
@$(MAKE) -s help/generate | grep -vE "\w($(HELP_FILTER))"
@make -s help/generate | grep -vE "\w($(HELP_FILTER))"

## Display help for all targets
help/all:
@printf "Available targets:\n"
@$(MAKE) -s help/generate
@make -s help/generate

## Generate help output from MAKEFILE_LIST
help/generate:

@@ -712,7 +654,3 @@ help/generate:
} \
{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
@printf "\n"

## Display help for ui-next targets
help/ui-next:
@$(MAKE) -s help MAKEFILE_LIST="awx/ui_next/Makefile"
@@ -347,7 +347,7 @@ class FieldLookupBackend(BaseFilterBackend):
args.append(Q(**{k: v}))
for role_name in role_filters:
if not hasattr(queryset.model, 'accessible_pk_qs'):
raise ParseError(_('Cannot apply role_level filter to this list because its model does not use roles for access control.'))
raise ParseError(_('Cannot apply role_level filter to this list because its model ' 'does not use roles for access control.'))
args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
if or_filters:
q = Q()
@@ -5,11 +5,13 @@
import inspect
import logging
import time
import uuid

# Django
from django.conf import settings
from django.contrib.auth import views as auth_views
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, transaction
from django.db.models.fields.related import OneToOneRel

@@ -33,7 +35,7 @@ from rest_framework.negotiation import DefaultContentNegotiation
# AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
from awx.main.access import optimize_queryset
from awx.main.access import access_registry
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name

@@ -169,7 +171,7 @@ class APIView(views.APIView):
self.__init_request_error__ = exc
except UnsupportedMediaType as exc:
exc.detail = _(
'You did not use correct Content-Type in your HTTP request. If you are using our REST API, the Content-Type must be application/json'
'You did not use correct Content-Type in your HTTP request. ' 'If you are using our REST API, the Content-Type must be application/json'
)
self.__init_request_error__ = exc
return drf_request
@@ -362,7 +364,12 @@ class GenericAPIView(generics.GenericAPIView, APIView):
return self.queryset._clone()
elif self.model is not None:
qs = self.model._default_manager
qs = optimize_queryset(qs)
if self.model in access_registry:
access_class = access_registry[self.model]
if access_class.select_related:
qs = qs.select_related(*access_class.select_related)
if access_class.prefetch_related:
qs = qs.prefetch_related(*access_class.prefetch_related)
return qs
else:
return super(GenericAPIView, self).get_queryset()
@@ -505,9 +512,6 @@ class SubListAPIView(ParentMixin, ListAPIView):
# And optionally (user must have given access permission on parent object
# to view sublist):
# parent_access = 'read'
# filter_read_permission sets whether or not to override the default intersection behavior
# implemented here
filter_read_permission = True

def get_description_context(self):
d = super(SubListAPIView, self).get_description_context()

@@ -522,16 +526,12 @@ class SubListAPIView(ParentMixin, ListAPIView):
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
if not self.filter_read_permission:
return optimize_queryset(self.get_sublist_queryset(parent))
qs = self.request.user.get_queryset(self.model)
if hasattr(self, 'parent_key'):
# This is vastly preferable for ReverseForeignKey relationships
return qs.filter(**{self.parent_key: parent})
return qs.distinct() & self.get_sublist_queryset(parent).distinct()
qs = self.request.user.get_queryset(self.model).distinct()
sublist_qs = self.get_sublist_queryset(parent)
return qs & sublist_qs

def get_sublist_queryset(self, parent):
return getattrd(parent, self.relationship)
return getattrd(parent, self.relationship).distinct()


class DestroyAPIView(generics.DestroyAPIView):
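The two `get_queryset` variants above differ in how sublists are scoped: one prefers a direct `filter(**{self.parent_key: parent})` when the relationship is a reverse foreign key and otherwise intersects the user-visible queryset with the parent's related queryset (both made `distinct()`), while `filter_read_permission = False` bypasses the intersection entirely. A rough, self-contained sketch of the two strategies with hypothetical models (not AWX code):

```python
# Hypothetical Django-style querysets: Book has a ForeignKey "author" to Author.

def sublist_via_parent_key(visible_books, author):
    # Cheap path: one extra WHERE clause on the already-permission-filtered queryset.
    return visible_books.filter(author=author)

def sublist_via_intersection(visible_books, author_books):
    # Generic fallback: "what the user may read" AND "what belongs to the parent".
    return visible_books.distinct() & author_books.distinct()
```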
@@ -580,6 +580,15 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
d.update({'parent_key': getattr(self, 'parent_key', None)})
return d

def get_queryset(self):
if hasattr(self, 'parent_key'):
# Prefer this filtering because ForeignKey allows us more assumptions
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(**{self.parent_key: parent})
return super(SubListCreateAPIView, self).get_queryset()

def create(self, request, *args, **kwargs):
# If the object ID was not specified, it probably doesn't exist in the
# DB yet. We want to see if we can create it. The URL may choose to
@@ -958,11 +967,16 @@ class CopyAPIView(GenericAPIView):
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
new_obj.admin_role.members.add(request.user)
if sub_objs:
# store the copied object dict into cache, because it's
# often too large for postgres' notification bus
# (which has a default maximum message size of 8k)
key = 'deep-copy-{}'.format(str(uuid.uuid4()))
cache.set(key, sub_objs, timeout=3600)
permission_check_func = None
if hasattr(type(self), 'deep_copy_permission_check_func'):
permission_check_func = (type(self).__module__, type(self).__name__, 'deep_copy_permission_check_func')
trigger_delayed_deep_copy(
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, permission_check_func=permission_check_func
self.model.__module__, self.model.__name__, obj.pk, new_obj.pk, request.user.pk, key, permission_check_func=permission_check_func
)
serializer = self._get_copy_return_serializer(new_obj)
headers = {'Location': new_obj.get_absolute_url(request=request)}
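As the inline comment in the hunk explains, the serialized sub-objects of a deep copy can exceed PostgreSQL's notification payload limit (roughly 8 kB by default), so they are stashed in the cache under a random key and the background task receives only that key. A hedged sketch of the pattern with simplified signatures (not the exact AWX task interface):

```python
import uuid
from django.core.cache import cache

def schedule_deep_copy(sub_objs, dispatch):
    # Park the potentially large payload in the cache for up to an hour ...
    key = 'deep-copy-{}'.format(uuid.uuid4())
    cache.set(key, sub_objs, timeout=3600)
    # ... and push only the small cache key through the task queue / pg_notify.
    dispatch(cache_key=key)

def run_deep_copy(cache_key):
    sub_objs = cache.get(cache_key) or []
    # ... perform the copy using sub_objs, then clean up.
    cache.delete(cache_key)
```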
@@ -71,7 +71,7 @@ class Metadata(metadata.SimpleMetadata):
'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'),
'summary_fields': _(
'Data structure with name/description for related resources. The output for some objects may be limited for performance reasons.'
'Data structure with name/description for related resources. ' 'The output for some objects may be limited for performance reasons.'
),
'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'),
@@ -25,7 +25,6 @@ __all__ = [
'UserPermission',
'IsSystemAdminOrAuditor',
'WorkflowApprovalPermission',
'AnalyticsPermission',
]


@@ -251,16 +250,3 @@ class IsSystemAdminOrAuditor(permissions.BasePermission):
class WebhookKeyPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return request.user.can_access(view.model, 'admin', obj, request.data)


class AnalyticsPermission(permissions.BasePermission):
"""
Allows GET/POST/OPTIONS to system admins and system auditors.
"""

def has_permission(self, request, view):
if not (request.user and request.user.is_authenticated):
return False
if request.method in ["GET", "POST", "OPTIONS"]:
return request.user.is_superuser or request.user.is_system_auditor
return request.user.is_superuser
@@ -56,8 +56,6 @@ from awx.main.models import (
ExecutionEnvironment,
Group,
Host,
HostMetric,
HostMetricSummaryMonthly,
Instance,
InstanceGroup,
InstanceLink,

@@ -158,7 +156,6 @@ SUMMARIZABLE_FK_FIELDS = {
'kind',
),
'host': DEFAULT_SUMMARY_FIELDS,
'constructed_host': DEFAULT_SUMMARY_FIELDS,
'group': DEFAULT_SUMMARY_FIELDS,
'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),

@@ -192,11 +189,6 @@ SUMMARIZABLE_FK_FIELDS = {
}


# These fields can be edited on a constructed inventory's generated source (possibly by using the constructed
# inventory's special API endpoint, but also by using the inventory sources endpoint).
CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')


def reverse_gfk(content_object, request):
"""
Computes a reverse for a GenericForeignKey field.
@@ -220,7 +212,7 @@ class CopySerializer(serializers.Serializer):
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_('The original object is already named {}, a copy from it cannot have the same name.'.format(name)))
raise serializers.ValidationError(_('The original object is already named {}, a copy from' ' it cannot have the same name.'.format(name)))
return attrs


@@ -760,7 +752,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this unified job have been saved to the database.'), read_only=True
help_text=_('Indicates whether all of the events generated by this ' 'unified job have been saved to the database.'), read_only=True
)

class Meta:

@@ -954,7 +946,7 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer):


class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', help_text=_('Field used to change the password.'))
password = serializers.CharField(required=False, default='', write_only=True, help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
@@ -981,12 +973,7 @@ class UserSerializer(BaseSerializer):

def to_representation(self, obj):
ret = super(UserSerializer, self).to_representation(obj)
if self.get_external_account(obj):
# If this is an external account it shouldn't have a password field
ret.pop('password', None)
else:
# If its an internal account lets assume there is a password and return $encrypted$ to the user
ret['password'] = '$encrypted$'
ret.pop('password', None)
if obj and type(self) is UserSerializer:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
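The practical difference between the two `to_representation` variants above is what an API client sees for the `password` key: one echoes a `$encrypted$` placeholder for internal accounts and drops the key for externally managed ones, the other drops it for everyone. A small illustration of the resulting payloads (values invented):

```python
# External (e.g. LDAP-managed) account: no password key in either variant.
external_user = {"id": 42, "username": "ldap.user", "ldap_dn": "uid=ldap.user,dc=example"}

# Internal account, placeholder variant:
internal_user_masked = {"id": 7, "username": "admin", "password": "$encrypted$"}

# Internal account, field-dropped variant:
internal_user_hidden = {"id": 7, "username": "admin"}
```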
@@ -1000,31 +987,13 @@ class UserSerializer(BaseSerializer):
django_validate_password(value)
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))

# Check if a password is too long
password_max_length = User._meta.get_field('password').max_length
if len(value) > password_max_length:
raise serializers.ValidationError(_('Password max length is {}'.format(password_max_length)))
if getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH', 0) and len(value) < getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'):
raise serializers.ValidationError(_('Password must be at least {} characters long.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_LENGTH'))))
if getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS', 0) and sum(c.isdigit() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'):
raise serializers.ValidationError(_('Password must contain at least {} digits.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_DIGITS'))))
if getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER', 0) and sum(c.isupper() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER'):
raise serializers.ValidationError(
_('Password must contain at least {} uppercase characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_UPPER')))
)
if getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL', 0) and sum(not c.isalnum() for c in value) < getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL'):
raise serializers.ValidationError(
_('Password must contain at least {} special characters.'.format(getattr(settings, 'LOCAL_PASSWORD_MIN_SPECIAL')))
)

return value

def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
# Get external password will return something like ldap or enterprise or None if the user isn't external. We only want to allow a password update for a None option
if new_password and new_password != '$encrypted$' and not self.get_external_account(obj):
if new_password and not self.get_external_account(obj):
obj.set_password(new_password)
obj.save(update_fields=['password'])
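The longer `validate_password` variant enforces optional complexity rules driven by the settings named in the code (`LOCAL_PASSWORD_MIN_LENGTH`, `LOCAL_PASSWORD_MIN_DIGITS`, `LOCAL_PASSWORD_MIN_UPPER`, `LOCAL_PASSWORD_MIN_SPECIAL`); each rule is skipped when its setting is 0 or unset. A hedged, standalone restatement of those checks (illustrative only, not the serializer code):

```python
# Rule values of 0 disable the corresponding check, mirroring the getattr(..., 0) guards above.
RULES = {'min_length': 12, 'min_digits': 1, 'min_upper': 1, 'min_special': 1}

def password_problems(value, rules=RULES):
    problems = []
    if rules['min_length'] and len(value) < rules['min_length']:
        problems.append('too short')
    if rules['min_digits'] and sum(c.isdigit() for c in value) < rules['min_digits']:
        problems.append('needs more digits')
    if rules['min_upper'] and sum(c.isupper() for c in value) < rules['min_upper']:
        problems.append('needs more uppercase characters')
    if rules['min_special'] and sum(not c.isalnum() for c in value) < rules['min_special']:
        problems.append('needs more special characters')
    return problems

print(password_problems('CorrectHorse9!'))  # -> []
```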
@@ -1579,7 +1548,7 @@ class ProjectPlaybooksSerializer(ProjectSerializer):


class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, not comprehensive.'))
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, ' 'not comprehensive.'))

class Meta:
model = Project
@@ -1701,8 +1670,13 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
res.update(
dict(
hosts=self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups=self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups=self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data=self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script=self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree=self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources=self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources=self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream=self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates=self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands=self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),

@@ -1713,18 +1687,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
labels=self.reverse('api:inventory_label_list', kwargs={'pk': obj.pk}),
)
)
if obj.kind in ('', 'constructed'):
# links not relevant for the "old" smart inventory
res['groups'] = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk})
res['root_groups'] = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk})
res['update_inventory_sources'] = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk})
res['inventory_sources'] = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk})
res['tree'] = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
if obj.kind == 'constructed':
res['input_inventories'] = self.reverse('api:inventory_input_inventories', kwargs={'pk': obj.pk})
res['constructed_url'] = self.reverse('api:constructed_inventory_detail', kwargs={'pk': obj.pk})
return res

def to_representation(self, obj):
@@ -1766,91 +1730,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
return super(InventorySerializer, self).validate(attrs)


class ConstructedFieldMixin(serializers.Field):
def get_attribute(self, instance):
if not hasattr(instance, '_constructed_inv_src'):
instance._constructed_inv_src = instance.inventory_sources.first()
inv_src = instance._constructed_inv_src
return super().get_attribute(inv_src) # yoink


class ConstructedCharField(ConstructedFieldMixin, serializers.CharField):
pass


class ConstructedIntegerField(ConstructedFieldMixin, serializers.IntegerField):
pass


class ConstructedInventorySerializer(InventorySerializer):
source_vars = ConstructedCharField(
required=False,
default=None,
allow_blank=True,
help_text=_('The source_vars for the related auto-created inventory source, special to constructed inventory.'),
)
update_cache_timeout = ConstructedIntegerField(
required=False,
allow_null=True,
min_value=0,
default=None,
help_text=_('The cache timeout for the related auto-created inventory source, special to constructed inventory'),
)
limit = ConstructedCharField(
required=False,
default=None,
allow_blank=True,
help_text=_('The limit to restrict the returned hosts for the related auto-created inventory source, special to constructed inventory.'),
)
verbosity = ConstructedIntegerField(
required=False,
allow_null=True,
min_value=0,
max_value=2,
default=None,
help_text=_('The verbosity level for the related auto-created inventory source, special to constructed inventory'),
)

class Meta:
model = Inventory
fields = ('*', '-host_filter') + CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
read_only_fields = ('*', 'kind')

def pop_inv_src_data(self, data):
inv_src_data = {}
for field in CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS:
if field in data:
# values always need to be removed, as they are not valid for Inventory model
value = data.pop(field)
# null is not valid for any of those fields, taken as not-provided
if value is not None:
inv_src_data[field] = value
return inv_src_data

def apply_inv_src_data(self, inventory, inv_src_data):
if inv_src_data:
update_fields = []
inv_src = inventory.inventory_sources.first()
for field, value in inv_src_data.items():
setattr(inv_src, field, value)
update_fields.append(field)
if update_fields:
inv_src.save(update_fields=update_fields)

def create(self, validated_data):
validated_data['kind'] = 'constructed'
inv_src_data = self.pop_inv_src_data(validated_data)
inventory = super().create(validated_data)
self.apply_inv_src_data(inventory, inv_src_data)
return inventory

def update(self, obj, validated_data):
inv_src_data = self.pop_inv_src_data(validated_data)
obj = super().update(obj, validated_data)
self.apply_inv_src_data(obj, inv_src_data)
return obj


class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
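The `ConstructedInventorySerializer` shown above (present on only one side of this comparison) proxies `source_vars`, `update_cache_timeout`, `limit`, and `verbosity` onto the constructed inventory's auto-created inventory source: the values are popped off the Inventory payload and then written to the first related inventory source. A condensed sketch of that split, with plain dicts standing in for the real models:

```python
CONSTRUCTED_SOURCE_FIELDS = ('source_vars', 'update_cache_timeout', 'limit', 'verbosity')

def split_constructed_payload(data):
    """Pop source-only fields off the Inventory payload; None values count as 'not provided'."""
    source_data = {}
    for field in CONSTRUCTED_SOURCE_FIELDS:
        if field in data:
            value = data.pop(field)  # never valid on the Inventory model itself
            if value is not None:
                source_data[field] = value
    return data, source_data

inventory_fields, source_fields = split_constructed_payload(
    {'name': 'filtered-hosts', 'limit': 'os_linux', 'verbosity': None}
)
# inventory_fields -> {'name': 'filtered-hosts'}
# source_fields    -> {'limit': 'os_linux'}
```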
@@ -1904,9 +1783,6 @@ class HostSerializer(BaseSerializerWithVariables):
ansible_facts=self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
)
)
if obj.inventory.kind == 'constructed':
res['original_host'] = self.reverse('api:host_detail', kwargs={'pk': obj.instance_id})
res['ansible_facts'] = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:

@@ -1928,10 +1804,6 @@ class HostSerializer(BaseSerializerWithVariables):
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
if obj.inventory.kind == 'constructed':
summaries_qs = obj.constructed_host_summaries
else:
summaries_qs = obj.job_host_summaries
d.setdefault(
'recent_jobs',
[

@@ -1942,7 +1814,7 @@ class HostSerializer(BaseSerializerWithVariables):
'status': j.job.status,
'finished': j.job.finished,
}
for j in summaries_qs.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created').defer('job__extra_vars', 'job__artifacts')[:5]
],
)
return d
@@ -1967,8 +1839,8 @@ class HostSerializer(BaseSerializerWithVariables):
return value

def validate_inventory(self, value):
if value.kind in ('constructed', 'smart'):
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart or Constructed Inventories")})
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value

def validate_variables(self, value):

@@ -2066,8 +1938,8 @@ class GroupSerializer(BaseSerializerWithVariables):
return value

def validate_inventory(self, value):
if value.kind in ('constructed', 'smart'):
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart or Constructed Inventories")})
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value

def to_representation(self, obj):
@@ -2190,7 +2062,7 @@ class BulkHostCreateSerializer(serializers.Serializer):
host_data = []
for r in result:
item = {k: getattr(r, k) for k in return_keys}
if settings.DATABASES and ('sqlite3' not in settings.DATABASES.get('default', {}).get('ENGINE')):
if not settings.IS_TESTING_MODE:
# sqlite acts different with bulk_create -- it doesn't return the id of the objects
# to get it, you have to do an additional query, which is not useful for our tests
item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
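The guard in the hunk above exists because, as the inline comment notes, SQLite behaves differently with `bulk_create`: it has historically not handed back the new primary keys, so building `api:host_detail` URLs from `r.id` would need an extra query in the SQLite-backed test suite. A short hedged note on the difference (comments only, since the behavior depends on the database backend and Django version):

```python
# On PostgreSQL, Django's bulk_create can populate primary keys on the returned
# objects (the backend supports returning rows from a bulk insert), so
#     hosts = Host.objects.bulk_create([Host(name='a'), Host(name='b')])
# leaves every hosts[i].pk set and detail URLs can be built immediately.
# On SQLite that has traditionally not been the case (pk may remain None),
# which is why the serializer skips the 'url' key while running tests.
```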
@@ -2266,7 +2138,6 @@ class InventorySourceOptionsSerializer(BaseSerializer):
'custom_virtualenv',
'timeout',
'verbosity',
'limit',
)
read_only_fields = ('*', 'custom_virtualenv')

@@ -2373,8 +2244,8 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
return value

def validate_inventory(self, value):
if value and value.kind in ('constructed', 'smart'):
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart or Constructed Inventories")})
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value

# TODO: remove when old 'credential' fields are removed
@@ -2418,16 +2289,9 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)

if self.instance and self.instance.source == 'constructed':
allowed_fields = CONSTRUCTED_INVENTORY_SOURCE_EDITABLE_FIELDS
for field in attrs:
if attrs[field] != getattr(self.instance, field) and field not in allowed_fields:
raise serializers.ValidationError({"error": _("Cannot change field '{}' on a constructed inventory source.").format(field)})
elif get_field_from_model_or_attrs('source') == 'scm':
if get_field_from_model_or_attrs('source') == 'scm':
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
elif get_field_from_model_or_attrs('source') == 'constructed':
raise serializers.ValidationError({"error": _('constructed not a valid source for inventory')})
else:
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
if redundant_scm_fields:
@@ -2905,7 +2769,7 @@ class CredentialSerializer(BaseSerializer):
):
if getattr(self.instance, related_objects).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality of the resources using it.')
_('You cannot change the credential type of the credential, as it may break the functionality' ' of the resources using it.')
)

return credential_type

@@ -2925,7 +2789,7 @@ class CredentialSerializerCreate(CredentialSerializer):
default=None,
write_only=True,
allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.'),
help_text=_('Write-only field used to add user to owner role. If provided, ' 'do not give either team or organization. Only valid for creation.'),
)
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),

@@ -2933,14 +2797,14 @@ class CredentialSerializerCreate(CredentialSerializer):
default=None,
write_only=True,
allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.'),
help_text=_('Write-only field used to add team to owner role. If provided, ' 'do not give either user or organization. Only valid for creation.'),
)
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False,
default=None,
allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, do not give either user or team.'),
help_text=_('Inherit permissions from organization roles. If provided on creation, ' 'do not give either user or team.'),
)

class Meta:
@@ -2962,7 +2826,7 @@ class CredentialSerializerCreate(CredentialSerializer):
if len(owner_fields) > 1:
received = ", ".join(sorted(owner_fields))
raise serializers.ValidationError(
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, received {} fields.".format(received))}
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, " "received {} fields.".format(received))}
)

if attrs.get('team'):

@@ -3622,7 +3486,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _("Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes.").format(
return _("Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
@@ -4169,7 +4033,6 @@ class JobHostSummarySerializer(BaseSerializer):
'-description',
'job',
'host',
'constructed_host',
'host_name',
'changed',
'dark',

@@ -4536,7 +4399,7 @@ class JobLaunchSerializer(BaseSerializer):
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(
_('Removing {} credential at launch time without replacement is not supported. Provided list lacked credential(s): {}.').format(
_('Removing {} credential at launch time without replacement is not supported. ' 'Provided list lacked credential(s): {}.').format(
cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])
)
)

@@ -5019,7 +4882,7 @@ class NotificationTemplateSerializer(BaseSerializer):
for subevent in event_messages:
if subevent not in ('running', 'approved', 'timed_out', 'denied'):
error_list.append(
_("Workflow Approval event '{}' invalid, must be one of 'running', 'approved', 'timed_out', or 'denied'").format(subevent)
_("Workflow Approval event '{}' invalid, must be one of " "'running', 'approved', 'timed_out', or 'denied'").format(subevent)
)
continue
subevent_messages = event_messages[subevent]
@@ -5523,32 +5386,6 @@ class InstanceHealthCheckSerializer(BaseSerializer):
fields = read_only_fields


class HostMetricSerializer(BaseSerializer):
show_capabilities = ['delete']

class Meta:
model = HostMetric
fields = (
"id",
"hostname",
"url",
"first_automation",
"last_automation",
"last_deleted",
"automated_counter",
"deleted_counter",
"deleted",
"used_in_inventories",
)


class HostMetricSummaryMonthlySerializer(BaseSerializer):
class Meta:
model = HostMetricSummaryMonthly
read_only_fields = ("id", "date", "license_consumed", "license_capacity", "hosts_added", "hosts_deleted", "indirectly_managed_hosts")
fields = read_only_fields


class InstanceGroupSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
capacity = serializers.SerializerMethodField()
@@ -5559,7 +5396,7 @@ class InstanceGroupSerializer(BaseSerializer):
instances = serializers.SerializerMethodField()
is_container_group = serializers.BooleanField(
required=False,
help_text=_('Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster.'),
help_text=_('Indicates whether instances in this group are containerized.' 'Containerized groups have a designated Openshift or Kubernetes cluster.'),
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text

@@ -5570,7 +5407,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False,
initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to this group when new instances come online."),
help_text=_("Minimum percentage of all instances that will be automatically assigned to " "this group when new instances come online."),
)
policy_instance_minimum = serializers.IntegerField(
default=0,

@@ -5578,7 +5415,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False,
initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to this group when new instances come online."),
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
)
max_concurrent_jobs = serializers.IntegerField(
default=0,
@@ -1,18 +0,0 @@
{% ifmeth GET %}
# Retrieve {{ model_verbose_name|title|anora }}:

Make GET request to this resource to retrieve a single {{ model_verbose_name }}
record containing the following fields:

{% include "api/_result_fields_common.md" %}
{% endifmeth %}

{% ifmeth DELETE %}
# Delete {{ model_verbose_name|title|anora }}:

Make a DELETE request to this resource to soft-delete this {{ model_verbose_name }}.

A soft deletion will mark the `deleted` field as true and exclude the host
metric from license calculations.
This may be undone later if the same hostname is automated again afterwards.
{% endifmeth %}
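The help text in the hunk above documents the soft-delete contract for host metrics: a DELETE marks the `deleted` field true and excludes the host from license counting, and the record can be revived if the same hostname is automated again. A hedged example of exercising the endpoint with Python `requests` (base URL and token are placeholders; access is gated by `IsSystemAdminOrAuditor` per the views near the end of this diff):

```python
import requests

base = "https://awx.example.com/api/v2/host_metrics"   # placeholder AWX URL
headers = {"Authorization": "Bearer <token>"}           # placeholder credentials

# Soft-delete host metric 42: the detail view responds 204 No Content.
resp = requests.delete(f"{base}/42/", headers=headers)
print(resp.status_code)  # expected: 204

# The record still exists; its `deleted` flag should now be true.
print(requests.get(f"{base}/42/", headers=headers).json().get("deleted"))
```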
@@ -2,7 +2,6 @@ receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
receptor_work_commands:
ansible-runner:
command: ansible-runner
@@ -1,31 +0,0 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

import awx.api.views.analytics as analytics


urls = [
re_path(r'^$', analytics.AnalyticsRootView.as_view(), name='analytics_root_view'),
re_path(r'^authorized/$', analytics.AnalyticsAuthorizedView.as_view(), name='analytics_authorized'),
re_path(r'^reports/$', analytics.AnalyticsReportsList.as_view(), name='analytics_reports_list'),
re_path(r'^report/(?P<slug>[\w-]+)/$', analytics.AnalyticsReportDetail.as_view(), name='analytics_report_detail'),
re_path(r'^report_options/$', analytics.AnalyticsReportOptionsList.as_view(), name='analytics_report_options_list'),
re_path(r'^adoption_rate/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate'),
re_path(r'^adoption_rate_options/$', analytics.AnalyticsAdoptionRateList.as_view(), name='analytics_adoption_rate_options'),
re_path(r'^event_explorer/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer'),
re_path(r'^event_explorer_options/$', analytics.AnalyticsEventExplorerList.as_view(), name='analytics_event_explorer_options'),
re_path(r'^host_explorer/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer'),
re_path(r'^host_explorer_options/$', analytics.AnalyticsHostExplorerList.as_view(), name='analytics_host_explorer_options'),
re_path(r'^job_explorer/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer'),
re_path(r'^job_explorer_options/$', analytics.AnalyticsJobExplorerList.as_view(), name='analytics_job_explorer_options'),
re_path(r'^probe_templates/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_explorer'),
re_path(r'^probe_templates_options/$', analytics.AnalyticsProbeTemplatesList.as_view(), name='analytics_probe_templates_options'),
re_path(r'^probe_template_for_hosts/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_explorer'),
re_path(r'^probe_template_for_hosts_options/$', analytics.AnalyticsProbeTemplateForHostsList.as_view(), name='analytics_probe_template_for_hosts_options'),
re_path(r'^roi_templates/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_explorer'),
re_path(r'^roi_templates_options/$', analytics.AnalyticsRoiTemplatesList.as_view(), name='analytics_roi_templates_options'),
]

__all__ = ['urls']
@@ -1,10 +0,0 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import HostMetricList, HostMetricDetail

urls = [re_path(r'^$', HostMetricList.as_view(), name='host_metric_list'), re_path(r'^(?P<pk>[0-9]+)/$', HostMetricDetail.as_view(), name='host_metric_detail')]

__all__ = ['urls']
@@ -6,10 +6,7 @@ from django.urls import re_path
from awx.api.views.inventory import (
InventoryList,
InventoryDetail,
ConstructedInventoryDetail,
ConstructedInventoryList,
InventoryActivityStreamList,
InventoryInputInventoriesList,
InventoryJobTemplateList,
InventoryAccessList,
InventoryObjectRolesList,

@@ -40,7 +37,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/script/$', InventoryScriptView.as_view(), name='inventory_script_view'),
re_path(r'^(?P<pk>[0-9]+)/tree/$', InventoryTreeView.as_view(), name='inventory_tree_view'),
re_path(r'^(?P<pk>[0-9]+)/inventory_sources/$', InventoryInventorySourcesList.as_view(), name='inventory_inventory_sources_list'),
re_path(r'^(?P<pk>[0-9]+)/input_inventories/$', InventoryInputInventoriesList.as_view(), name='inventory_input_inventories'),
re_path(r'^(?P<pk>[0-9]+)/update_inventory_sources/$', InventoryInventorySourcesUpdate.as_view(), name='inventory_inventory_sources_update'),
re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', InventoryActivityStreamList.as_view(), name='inventory_activity_stream_list'),
re_path(r'^(?P<pk>[0-9]+)/job_templates/$', InventoryJobTemplateList.as_view(), name='inventory_job_template_list'),

@@ -52,10 +48,4 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
]

# Constructed inventory special views
constructed_inventory_urls = [
re_path(r'^$', ConstructedInventoryList.as_view(), name='constructed_inventory_list'),
re_path(r'^(?P<pk>[0-9]+)/$', ConstructedInventoryDetail.as_view(), name='constructed_inventory_detail'),
]

__all__ = ['urls', 'constructed_inventory_urls']
__all__ = ['urls']
@@ -30,7 +30,6 @@ from awx.api.views import (
OAuth2TokenList,
ApplicationOAuth2TokenList,
OAuth2ApplicationDetail,
# HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
)

from awx.api.views.bulk import (

@@ -42,17 +41,15 @@ from awx.api.views.bulk import (
from awx.api.views.mesh_visualizer import MeshVisualizer

from awx.api.views.metrics import MetricsView
from awx.api.views.analytics import AWX_ANALYTICS_API_PREFIX

from .organization import urls as organization_urls
from .user import urls as user_urls
from .project import urls as project_urls
from .project_update import urls as project_update_urls
from .inventory import urls as inventory_urls, constructed_inventory_urls
from .inventory import urls as inventory_urls
from .execution_environments import urls as execution_environment_urls
from .team import urls as team_urls
from .host import urls as host_urls
from .host_metric import urls as host_metric_urls
from .group import urls as group_urls
from .inventory_source import urls as inventory_source_urls
from .inventory_update import urls as inventory_update_urls
@@ -83,7 +80,7 @@ from .oauth2 import urls as oauth2_urls
from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls
from .analytics import urls as analytics_urls


v2_urls = [
re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),

@@ -120,11 +117,7 @@ v2_urls = [
re_path(r'^project_updates/', include(project_update_urls)),
re_path(r'^teams/', include(team_urls)),
re_path(r'^inventories/', include(inventory_urls)),
re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
re_path(r'^hosts/', include(host_urls)),
re_path(r'^host_metrics/', include(host_metric_urls)),
# It will be enabled in future version of the AWX
# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
re_path(r'^groups/', include(group_urls)),
re_path(r'^inventory_sources/', include(inventory_source_urls)),
re_path(r'^inventory_updates/', include(inventory_update_urls)),

@@ -148,7 +141,6 @@ v2_urls = [
re_path(r'^unified_job_templates/$', UnifiedJobTemplateList.as_view(), name='unified_job_template_list'),
re_path(r'^unified_jobs/$', UnifiedJobList.as_view(), name='unified_job_list'),
re_path(r'^activity_stream/', include(activity_stream_urls)),
re_path(rf'^{AWX_ANALYTICS_API_PREFIX}/', include(analytics_urls)),
re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
@@ -17,6 +17,7 @@ from collections import OrderedDict
|
||||
|
||||
from urllib3.exceptions import ConnectTimeoutError
|
||||
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import FieldError, ObjectDoesNotExist
|
||||
@@ -29,7 +30,7 @@ from django.utils.safestring import mark_safe
|
||||
from django.utils.timezone import now
|
||||
from django.views.decorators.csrf import csrf_exempt
|
||||
from django.template.loader import render_to_string
|
||||
from django.http import HttpResponse, HttpResponseRedirect
|
||||
from django.http import HttpResponse
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
@@ -62,7 +63,7 @@ from wsgiref.util import FileWrapper
|
||||
|
||||
# AWX
|
||||
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
|
||||
from awx.main.access import get_user_queryset
|
||||
from awx.main.access import get_user_queryset, HostAccess
|
||||
from awx.api.generics import (
|
||||
APIView,
|
||||
BaseUsersList,
|
||||
@@ -565,7 +566,7 @@ class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
|
||||
if self.relationship not in ask_mapping:
|
||||
return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
|
||||
elif sub.passwords_needed:
|
||||
return {"msg": _("Credential that requires user input on launch cannot be used in saved launch configuration.")}
|
||||
return {"msg": _("Credential that requires user input on launch " "cannot be used in saved launch configuration.")}
|
||||
|
||||
ask_field_name = ask_mapping[self.relationship]
|
||||
|
||||
@@ -794,7 +795,13 @@ class ExecutionEnvironmentActivityStreamList(SubListAPIView):
|
||||
parent_model = models.ExecutionEnvironment
|
||||
relationship = 'activitystream_set'
|
||||
search_fields = ('changes',)
|
||||
filter_read_permission = False
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(execution_environment=parent)
|
||||
|
||||
|
||||
class ProjectList(ListCreateAPIView):
|
||||
@@ -1541,41 +1548,6 @@ class HostRelatedSearchMixin(object):
|
||||
return ret
|
||||
|
||||
|
||||
class HostMetricList(ListAPIView):
|
||||
name = _("Host Metrics List")
|
||||
model = models.HostMetric
|
||||
serializer_class = serializers.HostMetricSerializer
|
||||
permission_classes = (IsSystemAdminOrAuditor,)
|
||||
search_fields = ('hostname', 'deleted')
|
||||
|
||||
def get_queryset(self):
|
||||
return self.model.objects.all()
|
||||
|
||||
|
||||
class HostMetricDetail(RetrieveDestroyAPIView):
|
||||
name = _("Host Metric Detail")
|
||||
model = models.HostMetric
|
||||
serializer_class = serializers.HostMetricSerializer
|
||||
permission_classes = (IsSystemAdminOrAuditor,)
|
||||
|
||||
def delete(self, request, *args, **kwargs):
|
||||
self.get_object().soft_delete()
|
||||
|
||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
# It will be enabled in future version of the AWX
|
||||
# class HostMetricSummaryMonthlyList(ListAPIView):
|
||||
# name = _("Host Metrics Summary Monthly")
|
||||
# model = models.HostMetricSummaryMonthly
|
||||
# serializer_class = serializers.HostMetricSummaryMonthlySerializer
|
||||
# permission_classes = (IsSystemAdminOrAuditor,)
|
||||
# search_fields = ('date',)
|
||||
#
|
||||
# def get_queryset(self):
|
||||
# return self.model.objects.all()
|
||||
|
||||
|
||||
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
|
||||
always_allow_superuser = False
|
||||
model = models.Host
|
||||
@@ -1604,8 +1576,6 @@ class HostDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
def delete(self, request, *args, **kwargs):
|
||||
if self.get_object().inventory.pending_deletion:
|
||||
return Response({"error": _("The inventory for this host is already being deleted.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
if self.get_object().inventory.kind == 'constructed':
|
||||
return Response({"error": _("Delete constructed inventory hosts from input inventory.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
return super(HostDetail, self).delete(request, *args, **kwargs)
|
||||
|
||||
|
||||
@@ -1613,14 +1583,6 @@ class HostAnsibleFactsDetail(RetrieveAPIView):
|
||||
model = models.Host
|
||||
serializer_class = serializers.AnsibleFactsSerializer
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
obj = self.get_object()
|
||||
if obj.inventory.kind == 'constructed':
|
||||
# If this is a constructed inventory host, it is not the source of truth about facts
|
||||
# redirect to the original input inventory host instead
|
||||
return HttpResponseRedirect(reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.instance_id}, request=self.request))
|
||||
return super().get(request, *args, **kwargs)
|
||||
|
||||
|
||||
class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIView):
|
||||
model = models.Host
|
||||
@@ -1628,7 +1590,13 @@ class InventoryHostsList(HostRelatedSearchMixin, SubListCreateAttachDetachAPIVie
|
||||
parent_model = models.Inventory
|
||||
relationship = 'hosts'
|
||||
parent_key = 'inventory'
|
||||
filter_read_permission = False
|
||||
|
||||
def get_queryset(self):
|
||||
inventory = self.get_parent_object()
|
||||
qs = getattrd(inventory, self.relationship).all()
|
||||
# Apply queryset optimizations
|
||||
qs = qs.select_related(*HostAccess.select_related).prefetch_related(*HostAccess.prefetch_related)
|
||||
return qs
|
||||
|
||||
|
||||
class HostGroupsList(SubListCreateAttachDetachAPIView):
|
||||
@@ -2501,7 +2469,7 @@ class JobTemplateSurveySpec(GenericAPIView):
|
||||
return Response(
|
||||
dict(
|
||||
error=_(
|
||||
"$encrypted$ is a reserved keyword for password question defaults, survey question {idx} is type {survey_item[type]}."
|
||||
"$encrypted$ is a reserved keyword for password question defaults, " "survey question {idx} is type {survey_item[type]}."
|
||||
).format(**context)
|
||||
),
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
@@ -2569,7 +2537,16 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
|
||||
serializer_class = serializers.CredentialSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'credentials'
|
||||
filter_read_permission = False
|
||||
|
||||
def get_queryset(self):
|
||||
# Return the full list of credentials
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
sublist_qs = getattrd(parent, self.relationship)
|
||||
sublist_qs = sublist_qs.prefetch_related(
|
||||
'created_by', 'modified_by', 'admin_role', 'use_role', 'read_role', 'admin_role__parents', 'admin_role__members'
|
||||
)
|
||||
return sublist_qs
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.unique_hash() in [cred.unique_hash() for cred in parent.credentials.all()]:
|
||||
@@ -2671,10 +2648,7 @@ class JobTemplateCallback(GenericAPIView):
|
||||
# Permission class should have already validated host_config_key.
|
||||
job_template = self.get_object()
|
||||
# Attempt to find matching hosts based on remote address.
|
||||
if job_template.inventory:
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
else:
|
||||
return Response({"msg": _("Cannot start automatically, an inventory is required.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
matching_hosts = self.find_matching_hosts()
|
||||
# If the host is not found, update the inventory before trying to
|
||||
# match again.
|
||||
inventory_sources_already_updated = []
|
||||
@@ -2759,7 +2733,6 @@ class JobTemplateInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = serializers.InstanceGroupSerializer
|
||||
parent_model = models.JobTemplate
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class JobTemplateAccessList(ResourceAccessList):
|
||||
@@ -2850,7 +2823,16 @@ class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, Su
|
||||
relationship = ''
|
||||
enforce_parent_relationship = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
filter_read_permission = False
|
||||
|
||||
'''
|
||||
Limit the set of WorkflowJobTemplateNodes to the related nodes of specified by
|
||||
'relationship'
|
||||
'''
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if created:
|
||||
@@ -2925,7 +2907,14 @@ class WorkflowJobNodeChildrenBaseList(SubListAPIView):
|
||||
parent_model = models.WorkflowJobNode
|
||||
relationship = ''
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
filter_read_permission = False
|
||||
|
||||
#
|
||||
# Limit the set of WorkflowJobNodes to the related nodes of specified by self.relationship
|
||||
#
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).all()
|
||||
|
||||
|
||||
class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList):
|
||||
@@ -3104,8 +3093,11 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
|
||||
relationship = 'workflow_job_template_nodes'
|
||||
parent_key = 'workflow_job_template'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
ordering = ('id',) # assure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
|
||||
|
||||
class WorkflowJobTemplateJobsList(SubListAPIView):
|
||||
@@ -3197,8 +3189,11 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
|
||||
relationship = 'workflow_job_nodes'
|
||||
parent_key = 'workflow_job'
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
ordering = ('id',) # assure ordering by id for consistency
|
||||
filter_read_permission = False
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
|
||||
|
||||
class WorkflowJobCancel(GenericCancelView):
|
||||
@@ -3333,6 +3328,7 @@ class JobLabelList(SubListAPIView):
|
||||
serializer_class = serializers.LabelSerializer
|
||||
parent_model = models.Job
|
||||
relationship = 'labels'
|
||||
parent_key = 'job'
|
||||
|
||||
|
||||
class WorkflowJobLabelList(JobLabelList):
|
||||
@@ -3511,7 +3507,11 @@ class BaseJobHostSummariesList(SubListAPIView):
|
||||
relationship = 'job_host_summaries'
|
||||
name = _('Job Host Summaries List')
|
||||
search_fields = ('host_name',)
|
||||
filter_read_permission = False
|
||||
|
||||
def get_queryset(self):
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).select_related('job', 'job__job_template', 'host')
|
||||
|
||||
|
||||
class HostJobHostSummariesList(BaseJobHostSummariesList):
|
||||
@@ -4055,7 +4055,7 @@ class UnifiedJobStdout(RetrieveAPIView):
|
||||
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
|
||||
except models.StdoutMaxBytesExceeded as e:
|
||||
response_message = _(
|
||||
"Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes."
|
||||
"Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes."
|
||||
).format(text_size=e.total, supported_size=e.supported)
|
||||
if request.accepted_renderer.format == 'json':
|
||||
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
|
||||
|
||||
@@ -1,296 +0,0 @@
import requests
|
||||
import logging
|
||||
import urllib.parse as urlparse
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils import translation
|
||||
|
||||
from awx.api.generics import APIView, Response
|
||||
from awx.api.permissions import AnalyticsPermission
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import get_awx_version
|
||||
from rest_framework import status
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
|
||||
AWX_ANALYTICS_API_PREFIX = 'analytics'
|
||||
|
||||
ERROR_UPLOAD_NOT_ENABLED = "analytics-upload-not-enabled"
|
||||
ERROR_MISSING_URL = "missing-url"
|
||||
ERROR_MISSING_USER = "missing-user"
|
||||
ERROR_MISSING_PASSWORD = "missing-password"
|
||||
ERROR_NO_DATA_OR_ENTITLEMENT = "no-data-or-entitlement"
|
||||
ERROR_NOT_FOUND = "not-found"
|
||||
ERROR_UNAUTHORIZED = "unauthorized"
|
||||
ERROR_UNKNOWN = "unknown"
|
||||
ERROR_UNSUPPORTED_METHOD = "unsupported-method"
|
||||
|
||||
logger = logging.getLogger('awx.api.views.analytics')
|
||||
|
||||
|
||||
class MissingSettings(Exception):
|
||||
"""Settings are not correct Exception"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class GetNotAllowedMixin(object):
|
||||
def get(self, request, format=None):
|
||||
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
||||
|
||||
|
||||
class AnalyticsRootView(APIView):
|
||||
permission_classes = (AnalyticsPermission,)
|
||||
name = _('Automation Analytics')
|
||||
swagger_topic = 'Automation Analytics'
|
||||
|
||||
def get(self, request, format=None):
|
||||
data = OrderedDict()
|
||||
data['authorized'] = reverse('api:analytics_authorized')
|
||||
data['reports'] = reverse('api:analytics_reports_list')
|
||||
data['report_options'] = reverse('api:analytics_report_options_list')
|
||||
data['adoption_rate'] = reverse('api:analytics_adoption_rate')
|
||||
data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
|
||||
data['event_explorer'] = reverse('api:analytics_event_explorer')
|
||||
data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
|
||||
data['host_explorer'] = reverse('api:analytics_host_explorer')
|
||||
data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
|
||||
data['job_explorer'] = reverse('api:analytics_job_explorer')
|
||||
data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
|
||||
data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
|
||||
data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
|
||||
data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
|
||||
data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
|
||||
data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
|
||||
data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
|
||||
return Response(data)
|
||||
|
||||
|
||||
class AnalyticsGenericView(APIView):
|
||||
"""
|
||||
Example:
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
}
|
||||
|
||||
params = {
|
||||
'limit': '20',
|
||||
'offset': '0',
|
||||
'sort_by': 'name:asc',
|
||||
}
|
||||
|
||||
json_data = {
|
||||
'limit': '20',
|
||||
'offset': '0',
|
||||
'sort_options': 'name',
|
||||
'sort_order': 'asc',
|
||||
'tags': [],
|
||||
'slug': [],
|
||||
'name': [],
|
||||
'description': '',
|
||||
}
|
||||
|
||||
response = requests.post(f'{AUTOMATION_ANALYTICS_API_URL}/reports/', params=params,
|
||||
headers=headers, json=json_data)
|
||||
|
||||
return Response(response.json(), status=response.status_code)
|
||||
"""
|
||||
|
||||
permission_classes = (AnalyticsPermission,)
|
||||
|
||||
@staticmethod
|
||||
def _request_headers(request):
|
||||
headers = {}
|
||||
for header in ['Content-Type', 'Content-Length', 'Accept-Encoding', 'User-Agent', 'Accept']:
|
||||
if request.headers.get(header, None):
|
||||
headers[header] = request.headers.get(header)
|
||||
headers['X-Rh-Analytics-Source'] = 'controller'
|
||||
headers['X-Rh-Analytics-Source-Version'] = get_awx_version()
|
||||
headers['Accept-Language'] = translation.get_language()
|
||||
|
||||
return headers
|
||||
|
||||
@staticmethod
|
||||
def _get_analytics_path(request_path):
|
||||
parts = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')
|
||||
path_specific = parts[-1]
|
||||
return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"
|
||||
|
||||
def _get_analytics_url(self, request_path):
|
||||
analytics_path = self._get_analytics_path(request_path)
|
||||
url = getattr(settings, 'AUTOMATION_ANALYTICS_URL', None)
|
||||
if not url:
|
||||
raise MissingSettings(ERROR_MISSING_URL)
|
||||
url_parts = urlparse.urlsplit(url)
|
||||
analytics_url = urlparse.urlunsplit([url_parts.scheme, url_parts.netloc, analytics_path, url_parts.query, url_parts.fragment])
|
||||
return analytics_url
|
||||
|
||||
@staticmethod
|
||||
def _get_setting(setting_name, default, error_message):
|
||||
setting = getattr(settings, setting_name, default)
|
||||
if not setting:
|
||||
raise MissingSettings(error_message)
|
||||
return setting
|
||||
|
||||
@staticmethod
|
||||
def _error_response(keyword, message=None, remote=True, remote_status_code=None, status_code=status.HTTP_403_FORBIDDEN):
|
||||
text = {"error": {"remote": remote, "remote_status": remote_status_code, "keyword": keyword}}
|
||||
if message:
|
||||
text["error"]["message"] = message
|
||||
return Response(text, status=status_code)
|
||||
|
||||
def _error_response_404(self, response):
|
||||
try:
|
||||
json_response = response.json()
|
||||
# Subscription/entitlement problem or missing tenant data in AA db => HTTP 403
|
||||
message = json_response.get('error', None)
|
||||
if message:
|
||||
return self._error_response(ERROR_NO_DATA_OR_ENTITLEMENT, message, remote=True, remote_status_code=response.status_code)
|
||||
|
||||
# Standard 404 problem => HTTP 404
|
||||
message = json_response.get('detail', None) or response.text
|
||||
except requests.exceptions.JSONDecodeError:
|
||||
# Unexpected text => still HTTP 404
|
||||
message = response.text
|
||||
|
||||
return self._error_response(ERROR_NOT_FOUND, message, remote=True, remote_status_code=status.HTTP_404_NOT_FOUND, status_code=status.HTTP_404_NOT_FOUND)
|
||||
|
||||
@staticmethod
|
||||
def _update_response_links(json_response):
|
||||
if not json_response.get('links', None):
|
||||
return
|
||||
|
||||
for key, value in json_response['links'].items():
|
||||
if value:
|
||||
json_response['links'][key] = value.replace(AUTOMATION_ANALYTICS_API_URL_PATH, f"/api/v2/{AWX_ANALYTICS_API_PREFIX}")
|
||||
|
||||
def _forward_response(self, response):
|
||||
try:
|
||||
content_type = response.headers.get('content-type', '')
|
||||
if content_type.find('application/json') != -1:
|
||||
json_response = response.json()
|
||||
self._update_response_links(json_response)
|
||||
|
||||
return Response(json_response, status=response.status_code)
|
||||
except Exception as e:
|
||||
logger.error(f"Analytics API: Response error: {e}")
|
||||
|
||||
return Response(response.content, status=response.status_code)
|
||||
|
||||
def _send_to_analytics(self, request, method):
|
||||
try:
|
||||
headers = self._request_headers(request)
|
||||
|
||||
self._get_setting('INSIGHTS_TRACKING_STATE', False, ERROR_UPLOAD_NOT_ENABLED)
|
||||
url = self._get_analytics_url(request.path)
|
||||
rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
|
||||
rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
|
||||
|
||||
if method not in ["GET", "POST", "OPTIONS"]:
|
||||
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
else:
|
||||
response = requests.request(
|
||||
method,
|
||||
url,
|
||||
auth=(rh_user, rh_password),
|
||||
verify=settings.INSIGHTS_CERT_PATH,
|
||||
params=request.query_params,
|
||||
headers=headers,
|
||||
json=request.data,
|
||||
timeout=(31, 31),
|
||||
)
|
||||
#
|
||||
# Missing or wrong user/pass
|
||||
#
|
||||
if response.status_code == status.HTTP_401_UNAUTHORIZED:
|
||||
text = (response.text or '').rstrip("\n")
|
||||
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
|
||||
#
|
||||
# Not found, No entitlement or No data in Analytics
|
||||
#
|
||||
elif response.status_code == status.HTTP_404_NOT_FOUND:
|
||||
return self._error_response_404(response)
|
||||
#
|
||||
# Success or not a 401/404 errors are just forwarded
|
||||
#
|
||||
else:
|
||||
return self._forward_response(response)
|
||||
|
||||
except MissingSettings as e:
|
||||
logger.warning(f"Analytics API: Setting missing: {e.args[0]}")
|
||||
return self._error_response(e.args[0], remote=False)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Analytics API: Request error: {e}")
|
||||
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
except Exception as e:
|
||||
logger.error(f"Analytics API: Error: {e}")
|
||||
return self._error_response(ERROR_UNKNOWN, str(e), remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||
|
||||
|
||||
class AnalyticsGenericListView(AnalyticsGenericView):
|
||||
def get(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="GET")
|
||||
|
||||
def post(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="POST")
|
||||
|
||||
def options(self, request, format=None):
|
||||
return self._send_to_analytics(request, method="OPTIONS")
|
||||
|
||||
|
||||
class AnalyticsGenericDetailView(AnalyticsGenericView):
|
||||
def get(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="GET")
|
||||
|
||||
def post(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="POST")
|
||||
|
||||
def options(self, request, slug, format=None):
|
||||
return self._send_to_analytics(request, method="OPTIONS")
|
||||
|
||||
|
||||
class AnalyticsAuthorizedView(AnalyticsGenericListView):
|
||||
name = _("Authorized")
|
||||
|
||||
|
||||
class AnalyticsReportsList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Reports")
|
||||
swagger_topic = "Automation Analytics"
|
||||
|
||||
|
||||
class AnalyticsReportDetail(AnalyticsGenericDetailView):
|
||||
name = _("Report")
|
||||
|
||||
|
||||
class AnalyticsReportOptionsList(AnalyticsGenericListView):
|
||||
name = _("Report Options")
|
||||
|
||||
|
||||
class AnalyticsAdoptionRateList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Adoption Rate")
|
||||
|
||||
|
||||
class AnalyticsEventExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Event Explorer")
|
||||
|
||||
|
||||
class AnalyticsHostExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Host Explorer")
|
||||
|
||||
|
||||
class AnalyticsJobExplorerList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Job Explorer")
|
||||
|
||||
|
||||
class AnalyticsProbeTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Probe Templates")
|
||||
|
||||
|
||||
class AnalyticsProbeTemplateForHostsList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("Probe Template For Hosts")
|
||||
|
||||
|
||||
class AnalyticsRoiTemplatesList(GetNotAllowedMixin, AnalyticsGenericListView):
|
||||
name = _("ROI Templates")
|
||||
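For orientation, a minimal sketch of the path mapping performed by _get_analytics_path() in the deleted module above. The constant names are copied from that code; analytics_path() itself is an illustrative stand-in, not part of the module.

AUTOMATION_ANALYTICS_API_URL_PATH = "/api/tower-analytics/v1"
AWX_ANALYTICS_API_PREFIX = 'analytics'

def analytics_path(request_path):
    # keep everything after the 'analytics/' prefix and graft it onto the remote API path
    path_specific = request_path.split(f'{AWX_ANALYTICS_API_PREFIX}/')[-1]
    return f"{AUTOMATION_ANALYTICS_API_URL_PATH}/{path_specific}"

# e.g. '/api/v2/analytics/reports/' -> '/api/tower-analytics/v1/reports/'
assert analytics_path('/api/v2/analytics/reports/') == '/api/tower-analytics/v1/reports/'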
@@ -14,7 +14,6 @@ from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
from rest_framework import serializers
|
||||
|
||||
# AWX
|
||||
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
|
||||
@@ -32,7 +31,6 @@ from awx.api.views.labels import LabelSubListCreateAttachDetachView
|
||||
|
||||
from awx.api.serializers import (
|
||||
InventorySerializer,
|
||||
ConstructedInventorySerializer,
|
||||
ActivityStreamSerializer,
|
||||
RoleSerializer,
|
||||
InstanceGroupSerializer,
|
||||
@@ -81,9 +79,7 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
|
||||
|
||||
# Do not allow changes to an Inventory kind.
|
||||
if kind is not None and obj.kind != kind:
|
||||
return Response(
|
||||
dict(error=_('You cannot turn a regular inventory into a "smart" or "constructed" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED
|
||||
)
|
||||
return Response(dict(error=_('You cannot turn a regular inventory into a "smart" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
||||
return super(InventoryDetail, self).update(request, *args, **kwargs)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
@@ -98,29 +94,6 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
|
||||
return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)


class ConstructedInventoryDetail(InventoryDetail):
serializer_class = ConstructedInventorySerializer


class ConstructedInventoryList(InventoryList):
serializer_class = ConstructedInventorySerializer

def get_queryset(self):
r = super().get_queryset()
return r.filter(kind='constructed')


class InventoryInputInventoriesList(SubListAttachDetachAPIView):
model = Inventory
serializer_class = InventorySerializer
parent_model = Inventory
relationship = 'input_inventories'

def is_valid_relation(self, parent, sub, created=False):
if sub.kind == 'constructed':
raise serializers.ValidationError({'error': 'You cannot add a constructed inventory to another constructed inventory.'})


class InventoryActivityStreamList(SubListAPIView):
|
||||
model = ActivityStream
|
||||
serializer_class = ActivityStreamSerializer
|
||||
|
||||
@@ -50,7 +50,7 @@ class UnifiedJobDeletionMixin(object):
|
||||
return Response({"error": _("Job has not finished processing events.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||
else:
|
||||
# if it has been > 1 minute, events are probably lost
|
||||
logger.warning('Allowing deletion of {} through the API without all events processed.'.format(obj.log_format))
|
||||
logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
|
||||
|
||||
# Manually cascade delete events if unpartitioned job
|
||||
if obj.has_unpartitioned_events:
|
||||
|
||||
@@ -61,6 +61,12 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||
model = Organization
|
||||
serializer_class = OrganizationSerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = Organization.accessible_objects(self.request.user, 'read_role')
|
||||
qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
|
||||
qs = qs.prefetch_related('created_by', 'modified_by')
|
||||
return qs
|
||||
|
||||
|
||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||
model = Organization
|
||||
@@ -201,7 +207,6 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
|
||||
serializer_class = InstanceGroupSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'instance_groups'
|
||||
filter_read_permission = False
|
||||
|
||||
|
||||
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
@@ -209,7 +214,6 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
|
||||
serializer_class = CredentialSerializer
|
||||
parent_model = Organization
|
||||
relationship = 'galaxy_credentials'
|
||||
filter_read_permission = False
|
||||
|
||||
def is_valid_relation(self, parent, sub, created=False):
|
||||
if sub.kind != 'galaxy_api_token':
|
||||
|
||||
@@ -98,14 +98,10 @@ class ApiVersionRootView(APIView):
|
||||
data['tokens'] = reverse('api:o_auth2_token_list', request=request)
|
||||
data['metrics'] = reverse('api:metrics_view', request=request)
|
||||
data['inventory'] = reverse('api:inventory_list', request=request)
|
||||
data['constructed_inventory'] = reverse('api:constructed_inventory_list', request=request)
|
||||
data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
|
||||
data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
|
||||
data['groups'] = reverse('api:group_list', request=request)
|
||||
data['hosts'] = reverse('api:host_list', request=request)
|
||||
data['host_metrics'] = reverse('api:host_metric_list', request=request)
|
||||
# It will be enabled in future version of the AWX
|
||||
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
|
||||
data['job_templates'] = reverse('api:job_template_list', request=request)
|
||||
data['jobs'] = reverse('api:job_list', request=request)
|
||||
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
|
||||
@@ -126,7 +122,6 @@ class ApiVersionRootView(APIView):
|
||||
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
|
||||
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
|
||||
data['bulk'] = reverse('api:bulk', request=request)
|
||||
data['analytics'] = reverse('api:analytics_root_view', request=request)
|
||||
return Response(data)
|
||||
|
||||
|
||||
@@ -277,9 +272,6 @@ class ApiV2ConfigView(APIView):
|
||||
|
||||
pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'
|
||||
|
||||
# Guarding against settings.UI_NEXT being set to a non-boolean value
|
||||
ui_next_state = settings.UI_NEXT if settings.UI_NEXT in (True, False) else False
|
||||
|
||||
data = dict(
|
||||
time_zone=settings.TIME_ZONE,
|
||||
license_info=license_data,
|
||||
@@ -288,7 +280,6 @@ class ApiV2ConfigView(APIView):
|
||||
analytics_status=pendo_state,
|
||||
analytics_collectors=all_collectors(),
|
||||
become_methods=PRIVILEGE_ESCALATION_METHODS,
|
||||
ui_next=ui_next_state,
|
||||
)
|
||||
|
||||
# If LDAP is enabled, user_ldap_fields will return a list of field
|
||||
|
||||
@@ -114,7 +114,7 @@ class WebhookReceiverBase(APIView):
|
||||
# Ensure that the full contents of the request are captured for multiple uses.
|
||||
request.body
|
||||
|
||||
logger.debug("headers: {}\ndata: {}\n".format(request.headers, request.data))
|
||||
logger.debug("headers: {}\n" "data: {}\n".format(request.headers, request.data))
|
||||
obj = self.get_object()
|
||||
self.check_signature(obj)
|
||||
|
||||
|
||||
@@ -5,13 +5,11 @@ import threading
|
||||
import time
|
||||
import os
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
# Django
|
||||
from django.conf import LazySettings
|
||||
from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db import transaction, connection
|
||||
from django.db.utils import Error as DBError, ProgrammingError
|
||||
from django.utils.functional import cached_property
|
||||
@@ -159,7 +157,7 @@ class EncryptedCacheProxy(object):
|
||||
obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
|
||||
if obj_id is empty:
|
||||
logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
|
||||
obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None)
|
||||
obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
|
||||
elif obj_id == SETTING_CACHE_NONE:
|
||||
obj_id = None
|
||||
return method(TransientSetting(pk=obj_id, value=value), 'value')
|
||||
@@ -168,6 +166,11 @@ class EncryptedCacheProxy(object):
|
||||
# a no-op; it just returns the provided value
|
||||
return value
|
||||
|
||||
def _get_setting_from_db(self, key):
|
||||
field = self.registry.get_setting_field(key)
|
||||
if not field.read_only:
|
||||
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.cache, name)
|
||||
|
||||
@@ -183,22 +186,6 @@ def get_settings_to_cache(registry):
|
||||
return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])
|
||||
|
||||
|
||||
# Will first attempt to get the setting from the database in synchronous mode.
|
||||
# If call from async context, it will attempt to get the setting from the database in a thread.
|
||||
def _get_setting_from_db(registry, key):
|
||||
def get_settings_from_db_sync(registry, key):
|
||||
field = registry.get_setting_field(key)
|
||||
if not field.read_only or key == 'INSTALL_UUID':
|
||||
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
|
||||
|
||||
try:
|
||||
return get_settings_from_db_sync(registry, key)
|
||||
except SynchronousOnlyOperation:
|
||||
with ThreadPoolExecutor(max_workers=1) as executor:
|
||||
future = executor.submit(get_settings_from_db_sync, registry, key)
|
||||
return future.result()
|
||||
|
||||
|
||||
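A minimal sketch of the sync/async fallback used by the module-level _get_setting_from_db() removed above: run the ORM read normally, and if Django raises SynchronousOnlyOperation (i.e. we are in an async context), run the same read in a worker thread. read_in_sync_or_thread() is an illustrative generalization, not the actual helper.

from concurrent.futures import ThreadPoolExecutor

from django.core.exceptions import SynchronousOnlyOperation

def read_in_sync_or_thread(query_func, *args, **kwargs):
    # query_func is any callable that performs a synchronous ORM query
    try:
        return query_func(*args, **kwargs)
    except SynchronousOnlyOperation:
        # fall back to a worker thread where the blocking query is allowed
        with ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(query_func, *args, **kwargs).result()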
def get_cache_value(value):
|
||||
"""Returns the proper special cache setting for a value
|
||||
based on instance type.
|
||||
@@ -358,7 +345,7 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
setting_id = None
|
||||
# this value is read-only, however we *do* want to fetch its value from the database
|
||||
if not field.read_only or name == 'INSTALL_UUID':
|
||||
setting = _get_setting_from_db(self.registry, name)
|
||||
setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
|
||||
if setting:
|
||||
if getattr(field, 'encrypted', False):
|
||||
value = decrypt_field(setting, 'value')
|
||||
|
||||
@@ -94,7 +94,9 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
assert response.data['FOO_BAR'] == 3
|
||||
@@ -110,7 +112,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
|
||||
# sure that the _Forbidden validator doesn't get used for the
|
||||
# fields. See also https://github.com/ansible/awx/issues/4099.
|
||||
with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
api_request(
|
||||
'patch',
|
||||
@@ -124,7 +126,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
@@ -134,7 +136,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
|
||||
assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
|
||||
@@ -153,14 +155,16 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_settin
|
||||
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
|
||||
'foobar', func_raising_exception
|
||||
), mock.patch('awx.conf.views.clear_setting_cache'):
|
||||
), mock.patch('awx.conf.views.handle_setting_changes'):
|
||||
response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
|
||||
assert response.status_code == 400
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_delete(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
assert not response.data['FOO_BAR']
|
||||
@@ -169,7 +173,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
|
||||
@pytest.mark.django_db
|
||||
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
|
||||
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
|
||||
'awx.conf.views.clear_setting_cache'
|
||||
'awx.conf.views.handle_setting_changes'
|
||||
):
|
||||
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
|
||||
|
||||
@@ -35,7 +35,7 @@ class TestStringListBooleanField:
|
||||
field = StringListBooleanField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
|
||||
|
||||
@pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
|
||||
def test_to_representation_valid(self, value_in, value_known):
|
||||
@@ -48,7 +48,7 @@ class TestStringListBooleanField:
|
||||
field = StringListBooleanField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_representation(value)
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
|
||||
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
|
||||
|
||||
|
||||
class TestListTuplesField:
|
||||
@@ -67,7 +67,7 @@ class TestListTuplesField:
|
||||
field = ListTuplesField()
|
||||
with pytest.raises(ValidationError) as e:
|
||||
field.to_internal_value(value)
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 but got {} instead.".format(t)
|
||||
assert e.value.detail[0] == "Expected a list of tuples of max length 2 " "but got {} instead.".format(t)
|
||||
|
||||
|
||||
class TestStringListPathField:
|
||||
|
||||
@@ -26,11 +26,10 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
|
||||
from awx.api.permissions import IsSystemAdminOrAuditor
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.utils import camelcase_to_underscore
|
||||
from awx.main.tasks.system import clear_setting_cache
|
||||
from awx.main.tasks.system import handle_setting_changes
|
||||
from awx.conf.models import Setting
|
||||
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
|
||||
from awx.conf import settings_registry
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
|
||||
|
||||
SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
|
||||
@@ -119,10 +118,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.save(update_fields=['value'])
|
||||
settings_change_list.append(key)
|
||||
if settings_change_list:
|
||||
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
|
||||
# call notify to rsyslog. no data is need so payload is empty
|
||||
reconfigure_rsyslog.delay()
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
instance = self.get_object()
|
||||
@@ -137,10 +133,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
|
||||
setting.delete()
|
||||
settings_change_list.append(setting.key)
|
||||
if settings_change_list:
|
||||
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
|
||||
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
|
||||
# call notify to rsyslog. no data is need so payload is empty
|
||||
reconfigure_rsyslog.delay()
|
||||
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
|
||||
|
||||
# When TOWER_URL_BASE is deleted from the API, reset it to the hostname
|
||||
# used to make the request as a default.
|
||||
|
||||
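The settings views above converge on a single pattern for propagating changes: collect the changed keys, then hand them to the dispatcher task only after the transaction commits. A minimal sketch, where handle_setting_changes is the task imported from awx.main.tasks.system in this diff and notify_setting_changes() is illustrative:

from django.db import connection

def notify_setting_changes(settings_change_list):
    if settings_change_list:
        # defer until the DB transaction that modified the settings has committed
        connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))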
@@ -2234,7 +2234,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
if not node_access.can_add({'reference_obj': node}):
|
||||
wj_add_perm = False
|
||||
if not wj_add_perm and self.save_messages:
|
||||
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job resources required for relaunch.')
|
||||
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job ' 'resources required for relaunch.')
|
||||
return wj_add_perm
|
||||
|
||||
def can_cancel(self, obj):
|
||||
@@ -2952,19 +2952,3 @@ class WorkflowApprovalTemplateAccess(BaseAccess):
|
||||
for cls in BaseAccess.__subclasses__():
|
||||
access_registry[cls.model] = cls
|
||||
access_registry[UnpartitionedJobEvent] = UnpartitionedJobEventAccess
|
||||
|
||||
|
||||
def optimize_queryset(queryset):
|
||||
"""
|
||||
A utility method in case you already have a queryset and just want to
|
||||
apply the standard optimizations for that model.
|
||||
In other words, use if you do not want to start from filtered_queryset for some reason.
|
||||
"""
|
||||
if not queryset.model or queryset.model not in access_registry:
|
||||
return queryset
|
||||
access_class = access_registry[queryset.model]
|
||||
if access_class.select_related:
|
||||
queryset = queryset.select_related(*access_class.select_related)
|
||||
if access_class.prefetch_related:
|
||||
queryset = queryset.prefetch_related(*access_class.prefetch_related)
|
||||
return queryset
|
||||
|
||||
@@ -4,11 +4,11 @@ import logging
# AWX
from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename

logger = logging.getLogger('awx.main.scheduler')


@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def send_subsystem_metrics():
Metrics().send_metrics()

@@ -65,7 +65,7 @@ class FixedSlidingWindow:
|
||||
return sum(self.buckets.values()) or 0
|
||||
|
||||
|
||||
class RelayWebsocketStatsManager:
|
||||
class BroadcastWebsocketStatsManager:
|
||||
def __init__(self, event_loop, local_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
|
||||
@@ -74,7 +74,7 @@ class RelayWebsocketStatsManager:
|
||||
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
|
||||
|
||||
def new_remote_host_stats(self, remote_hostname):
|
||||
self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname)
|
||||
self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
|
||||
return self._stats[remote_hostname]
|
||||
|
||||
def delete_remote_host_stats(self, remote_hostname):
|
||||
@@ -107,7 +107,7 @@ class RelayWebsocketStatsManager:
|
||||
return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
|
||||
|
||||
|
||||
class RelayWebsocketStats:
|
||||
class BroadcastWebsocketStats:
|
||||
def __init__(self, local_hostname, remote_hostname):
|
||||
self._local_hostname = local_hostname
|
||||
self._remote_hostname = remote_hostname
|
||||
|
||||
@@ -6,7 +6,7 @@ import platform
|
||||
import distro
|
||||
|
||||
from django.db import connection
|
||||
from django.db.models import Count, Min
|
||||
from django.db.models import Count
|
||||
from django.conf import settings
|
||||
from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now, timedelta
|
||||
@@ -35,7 +35,7 @@ data _since_ the last report date - i.e., new data in the last 24 hours)
|
||||
"""
|
||||
|
||||
|
||||
def trivial_slicing(key, since, until, last_gather, **kwargs):
|
||||
def trivial_slicing(key, since, until, last_gather):
|
||||
if since is not None:
|
||||
return [(since, until)]
|
||||
|
||||
@@ -48,7 +48,7 @@ def trivial_slicing(key, since, until, last_gather, **kwargs):
|
||||
return [(last_entry, until)]
|
||||
|
||||
|
||||
def four_hour_slicing(key, since, until, last_gather, **kwargs):
|
||||
def four_hour_slicing(key, since, until, last_gather):
|
||||
if since is not None:
|
||||
last_entry = since
|
||||
else:
|
||||
@@ -69,54 +69,6 @@ def four_hour_slicing(key, since, until, last_gather, **kwargs):
|
||||
start = end
|
||||
|
||||
|
||||
def host_metric_slicing(key, since, until, last_gather, **kwargs):
|
||||
"""
|
||||
Slicing doesn't start 4 weeks ago, but sends whole table monthly or first time
|
||||
"""
|
||||
from awx.main.models.inventory import HostMetric
|
||||
|
||||
if since is not None:
|
||||
return [(since, until)]
|
||||
|
||||
from awx.conf.models import Setting
|
||||
|
||||
# Check if full sync should be done
|
||||
full_sync_enabled = kwargs.get('full_sync_enabled', False)
|
||||
last_entry = None
|
||||
if not full_sync_enabled:
|
||||
#
|
||||
# If not, try incremental sync first
|
||||
#
|
||||
last_entries = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_ENTRIES').first()
|
||||
last_entries = json.loads((last_entries.value if last_entries is not None else '') or '{}', object_hook=datetime_hook)
|
||||
last_entry = last_entries.get(key)
|
||||
if not last_entry:
|
||||
#
|
||||
# If not done before, switch to full sync
|
||||
#
|
||||
full_sync_enabled = True
|
||||
|
||||
if full_sync_enabled:
|
||||
#
|
||||
# Find the lowest date for full sync
|
||||
#
|
||||
min_dates = HostMetric.objects.aggregate(min_last_automation=Min('last_automation'), min_last_deleted=Min('last_deleted'))
|
||||
if min_dates['min_last_automation'] and min_dates['min_last_deleted']:
|
||||
last_entry = min(min_dates['min_last_automation'], min_dates['min_last_deleted'])
|
||||
elif min_dates['min_last_automation'] or min_dates['min_last_deleted']:
|
||||
last_entry = min_dates['min_last_automation'] or min_dates['min_last_deleted']
|
||||
|
||||
if not last_entry:
|
||||
# empty table
|
||||
return []
|
||||
|
||||
start, end = last_entry, None
|
||||
while start < until:
|
||||
end = min(start + timedelta(days=30), until)
|
||||
yield (start, end)
|
||||
start = end
|
||||
|
||||
|
||||
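The tail of host_metric_slicing() above walks forward in 30-day windows until it reaches the upper bound. A standalone sketch of that loop with an example span; thirty_day_slices() is illustrative:

from datetime import datetime, timedelta

def thirty_day_slices(last_entry, until):
    # yield successive (start, end) windows of at most 30 days, mirroring the loop above
    start = last_entry
    while start < until:
        end = min(start + timedelta(days=30), until)
        yield (start, end)
        start = end

# a 75-day span yields three windows: 30, 30 and 15 days
windows = list(thirty_day_slices(datetime(2023, 1, 1), datetime(2023, 3, 17)))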
def _identify_lower(key, since, until, last_gather):
|
||||
from awx.conf.models import Setting
|
||||
|
||||
@@ -131,7 +83,7 @@ def _identify_lower(key, since, until, last_gather):
|
||||
return lower, last_entries
|
||||
|
||||
|
||||
@register('config', '1.6', description=_('General platform configuration.'))
|
||||
@register('config', '1.4', description=_('General platform configuration.'))
|
||||
def config(since, **kwargs):
|
||||
license_info = get_license()
|
||||
install_type = 'traditional'
|
||||
@@ -155,13 +107,10 @@ def config(since, **kwargs):
|
||||
'subscription_name': license_info.get('subscription_name'),
|
||||
'sku': license_info.get('sku'),
|
||||
'support_level': license_info.get('support_level'),
|
||||
'usage': license_info.get('usage'),
|
||||
'product_name': license_info.get('product_name'),
|
||||
'valid_key': license_info.get('valid_key'),
|
||||
'satellite': license_info.get('satellite'),
|
||||
'pool_id': license_info.get('pool_id'),
|
||||
'subscription_id': license_info.get('subscription_id'),
|
||||
'account_number': license_info.get('account_number'),
|
||||
'current_instances': license_info.get('current_instances'),
|
||||
'automated_instances': license_info.get('automated_instances'),
|
||||
'automated_since': license_info.get('automated_since'),
|
||||
@@ -170,7 +119,6 @@ def config(since, **kwargs):
|
||||
'compliant': license_info.get('compliant'),
|
||||
'date_warning': license_info.get('date_warning'),
|
||||
'date_expired': license_info.get('date_expired'),
|
||||
'subscription_usage_model': getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', ''), # 1.5+
|
||||
'free_instances': license_info.get('free_instances', 0),
|
||||
'total_licensed_instances': license_info.get('instance_count', 0),
|
||||
'license_expiry': license_info.get('time_remaining', 0),
|
||||
@@ -588,25 +536,3 @@ def workflow_job_template_node_table(since, full_path, **kwargs):
|
||||
) always_nodes ON main_workflowjobtemplatenode.id = always_nodes.from_workflowjobtemplatenode_id
|
||||
ORDER BY main_workflowjobtemplatenode.id ASC) TO STDOUT WITH CSV HEADER'''
|
||||
return _copy_table(table='workflow_job_template_node', query=workflow_job_template_node_query, path=full_path)
|
||||
|
||||
|
||||
@register(
|
||||
'host_metric_table', '1.0', format='csv', description=_('Host Metric data, incremental/full sync'), expensive=host_metric_slicing, full_sync_interval=30
|
||||
)
|
||||
def host_metric_table(since, full_path, until, **kwargs):
|
||||
host_metric_query = '''COPY (SELECT main_hostmetric.id,
|
||||
main_hostmetric.hostname,
|
||||
main_hostmetric.first_automation,
|
||||
main_hostmetric.last_automation,
|
||||
main_hostmetric.last_deleted,
|
||||
main_hostmetric.deleted,
|
||||
main_hostmetric.automated_counter,
|
||||
main_hostmetric.deleted_counter,
|
||||
main_hostmetric.used_in_inventories
|
||||
FROM main_hostmetric
|
||||
WHERE (main_hostmetric.last_automation > '{}' AND main_hostmetric.last_automation <= '{}') OR
|
||||
(main_hostmetric.last_deleted > '{}' AND main_hostmetric.last_deleted <= '{}')
|
||||
ORDER BY main_hostmetric.id ASC) TO STDOUT WITH CSV HEADER'''.format(
|
||||
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
|
||||
)
|
||||
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
|
||||
|
||||
@@ -52,7 +52,7 @@ def all_collectors():
|
||||
}
|
||||
|
||||
|
||||
def register(key, version, description=None, format='json', expensive=None, full_sync_interval=None):
|
||||
def register(key, version, description=None, format='json', expensive=None):
|
||||
"""
|
||||
A decorator used to register a function as a metric collector.
|
||||
|
||||
@@ -71,7 +71,6 @@ def register(key, version, description=None, format='json', expensive=None, full
|
||||
f.__awx_analytics_description__ = description
|
||||
f.__awx_analytics_type__ = format
|
||||
f.__awx_expensive__ = expensive
|
||||
f.__awx_full_sync_interval__ = full_sync_interval
|
||||
return f
|
||||
|
||||
return decorate
|
||||
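Given the register() signature above, a collector opting into periodic full-table resync would be declared roughly as below. example_table is hypothetical; register, trivial_slicing and _ are the module's own names as shown in this diff, so the snippet assumes it lives inside that module.

@register('example_table', '1.0', format='csv', description=_('Example data, incremental/full sync'), expensive=trivial_slicing, full_sync_interval=30)
def example_table(since, full_path, until, **kwargs):
    # would write a CSV under full_path covering the (since, until] window
    ...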
@@ -260,19 +259,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
# These slicer functions may return a generator. The `since` parameter is
|
||||
# allowed to be None, and will fall back to LAST_ENTRIES[key] or to
|
||||
# LAST_GATHER (truncated appropriately to match the 4-week limit).
|
||||
#
|
||||
# Or it can force full table sync if interval is given
|
||||
kwargs = dict()
|
||||
full_sync_enabled = False
|
||||
if func.__awx_full_sync_interval__:
|
||||
last_full_sync = last_entries.get(f"{key}_full")
|
||||
full_sync_enabled = not last_full_sync or last_full_sync < now() - timedelta(days=func.__awx_full_sync_interval__)
|
||||
|
||||
kwargs['full_sync_enabled'] = full_sync_enabled
|
||||
if func.__awx_expensive__:
|
||||
slices = func.__awx_expensive__(key, since, until, last_gather, **kwargs)
|
||||
slices = func.__awx_expensive__(key, since, until, last_gather)
|
||||
else:
|
||||
slices = collectors.trivial_slicing(key, since, until, last_gather, **kwargs)
|
||||
slices = collectors.trivial_slicing(key, since, until, last_gather)
|
||||
|
||||
for start, end in slices:
|
||||
files = func(start, full_path=gather_dir, until=end)
|
||||
@@ -311,12 +301,6 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
succeeded = False
|
||||
logger.exception("Could not generate metric {}".format(filename))
|
||||
|
||||
# update full sync timestamp if successfully shipped
|
||||
if full_sync_enabled and collection_type != 'dry-run' and succeeded:
|
||||
with disable_activity_stream():
|
||||
last_entries[f"{key}_full"] = now()
|
||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||
|
||||
if collection_type != 'dry-run':
|
||||
if succeeded:
|
||||
for fpath in tarfiles:
|
||||
@@ -375,7 +359,9 @@ def ship(path):
|
||||
s.headers = get_awx_http_client_headers()
|
||||
s.headers.pop('Content-Type')
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||
response = s.post(
|
||||
url, files=files, verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31)
|
||||
)
|
||||
# Accept 2XX status_codes
|
||||
if response.status_code >= 300:
|
||||
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
|
||||
|
||||
@@ -9,7 +9,7 @@ from django.apps import apps
from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing

root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.analytics')


@@ -264,6 +264,13 @@ class Metrics:
|
||||
data[field] = self.METRICS[field].decode(self.conn)
|
||||
return data
|
||||
|
||||
def store_metrics(self, data_json):
|
||||
# called when receiving metrics from other instances
|
||||
data = json.loads(data_json)
|
||||
if self.instance_name != data['instance']:
|
||||
logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
|
||||
self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])
|
||||
|
||||
def should_pipe_execute(self):
|
||||
if self.metrics_have_changed is False:
|
||||
return False
|
||||
@@ -298,15 +305,13 @@ class Metrics:
|
||||
try:
|
||||
current_time = time.time()
|
||||
if current_time - self.previous_send_metrics.decode(self.conn) > self.send_metrics_interval:
|
||||
serialized_metrics = self.serialize_local_metrics()
|
||||
payload = {
|
||||
'instance': self.instance_name,
|
||||
'metrics': serialized_metrics,
|
||||
'metrics': self.serialize_local_metrics(),
|
||||
}
|
||||
# store the serialized data locally as well, so that load_other_metrics will read it
|
||||
self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
|
||||
# store a local copy as well
|
||||
self.store_metrics(json.dumps(payload))
|
||||
emit_channel_notification("metrics", payload)
|
||||
|
||||
self.previous_send_metrics.set(current_time)
|
||||
self.previous_send_metrics.store_value(self.conn)
|
||||
finally:
|
||||
|
||||
@@ -10,7 +10,7 @@ from rest_framework import serializers
# AWX
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS


logger = logging.getLogger('awx.main.conf')

@@ -795,42 +795,6 @@ register(
category_slug='bulk',
)

register(
'UI_NEXT',
field_class=fields.BooleanField,
default=False,
label=_('Enable Preview of New User Interface'),
help_text=_('Enable preview of new user interface.'),
category=_('System'),
category_slug='system',
)

register(
'SUBSCRIPTION_USAGE_MODEL',
field_class=fields.ChoiceField,
choices=[
('', _('Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting')),
(
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS,
_('Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes'),
),
],
default='',
allow_blank=True,
label=_('Defines subscription usage model and shows Host Metrics'),
category=_('System'),
category_slug='system',
)

register(
'CLEANUP_HOST_METRICS_LAST_TS',
field_class=fields.DateTimeField,
label=_('Last cleanup date for HostMetrics'),
allow_null=True,
category=_('System'),
category_slug='system',
)


def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

@@ -38,8 +38,6 @@ STANDARD_INVENTORY_UPDATE_ENV = {
'ANSIBLE_INVENTORY_EXPORT': 'True',
# Redirecting output to stderr allows JSON parsing to still work with -vvv
'ANSIBLE_VERBOSE_TO_STDERR': 'True',
# if ansible-inventory --limit is used for an inventory import, unmatched should be a failure
'ANSIBLE_HOST_PATTERN_MISMATCH': 'error',
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
@@ -65,7 +63,7 @@ ENV_BLOCKLIST = frozenset(
'INVENTORY_HOSTVARS',
'AWX_HOST',
'PROJECT_REVISION',
'SUPERVISOR_CONFIG_PATH',
'SUPERVISOR_WEB_CONFIG_PATH',
)
)

@@ -108,9 +106,3 @@ JOB_VARIABLE_PREFIXES = [
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
'\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
)

# Values for setting SUBSCRIPTION_USAGE_MODEL
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'

# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')

@@ -3,7 +3,6 @@ import logging
import time
import hmac
import asyncio
import redis

from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
@@ -81,7 +80,7 @@ class WebsocketSecretAuthHelper:
WebsocketSecretAuthHelper.verify_secret(secret)


class RelayConsumer(AsyncJsonWebsocketConsumer):
class BroadcastConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
try:
WebsocketSecretAuthHelper.is_authorized(self.scope)
@@ -101,21 +100,6 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
async def internal_message(self, event):
await self.send(event['text'])

async def receive_json(self, data):
(group, message) = unwrap_broadcast_msg(data)
if group == "metrics":
message = json.loads(message['text'])
conn = redis.Redis.from_url(settings.BROKER_URL)
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
else:
await self.channel_layer.group_send(group, message)

async def consumer_subscribe(self, event):
await self.send_json(event)

async def consumer_unsubscribe(self, event):
await self.send_json(event)


class EventConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
@@ -144,11 +128,6 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name,
)

await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
)

@database_sync_to_async
def user_can_see_object_id(self, user_access, oid):
# At this point user is a channels.auth.UserLazyObject object
@@ -197,20 +176,9 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name,
)

if len(old_groups):
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
)

new_groups_exclusive = new_groups - current_groups
for group_name in new_groups_exclusive:
await self.channel_layer.group_add(group_name, self.channel_name)

await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
)
self.scope['session']['groups'] = new_groups
await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})

@@ -232,11 +200,9 @@ def _dump_payload(payload):
return None


def unwrap_broadcast_msg(payload: dict):
return (payload['group'], payload['message'])


def emit_channel_notification(group, payload):
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa

payload_dumped = _dump_payload(payload)
if payload_dumped is None:
return
@@ -246,6 +212,16 @@ def emit_channel_notification(group, payload):
run_sync(
channel_layer.group_send(
group,
{"type": "internal.message", "text": payload_dumped, "needs_relay": True},
{"type": "internal.message", "text": payload_dumped},
)
)

run_sync(
channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{
"type": "internal.message",
"text": wrap_broadcast_msg(group, payload_dumped),
},
)
)

@@ -54,12 +54,6 @@ aim_inputs = {
'help_text': _('Lookup query for the object. Ex: Safe=TestSafe;Object=testAccountName123'),
},
{'id': 'object_query_format', 'label': _('Object Query Format'), 'type': 'string', 'default': 'Exact', 'choices': ['Exact', 'Regexp']},
{
'id': 'object_property',
'label': _('Object Property'),
'type': 'string',
'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
},
{
'id': 'reason',
'label': _('Reason'),
@@ -80,7 +74,6 @@ def aim_backend(**kwargs):
app_id = kwargs['app_id']
object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format']
object_property = kwargs.get('object_property', '')
reason = kwargs.get('reason', None)
if webservice_id == '':
webservice_id = 'AIMWebService'
@@ -105,18 +98,7 @@ def aim_backend(**kwargs):
allow_redirects=False,
)
raise_for_status(res)
# CCP returns the property name capitalized, username is camel case
# so we need to handle that case
if object_property == '':
object_property = 'Content'
elif object_property.lower() == 'username':
object_property = 'UserName'
elif object_property not in res:
raise KeyError('Property {} not found in object'.format(object_property))
else:
object_property = object_property.capitalize()

return res.json()[object_property]
return res.json()['Content']


aim_plugin = CredentialPlugin('CyberArk Central Credential Provider Lookup', inputs=aim_inputs, backend=aim_backend)

@@ -35,14 +35,8 @@ dsv_inputs = {
'type': 'string',
'help_text': _('The secret path e.g. /test/secret1'),
},
{
'id': 'secret_field',
'label': _('Secret Field'),
'help_text': _('The field to extract from the secret'),
'type': 'string',
},
],
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
'required': ['tenant', 'client_id', 'client_secret', 'path'],
}

if settings.DEBUG:
@@ -58,5 +52,5 @@ if settings.DEBUG:
dsv_plugin = CredentialPlugin(
'Thycotic DevOps Secrets Vault',
dsv_inputs,
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path']),
)

@@ -1,7 +1,7 @@
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _

from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
from thycotic.secrets.server import PasswordGrantAuthorizer, SecretServer, ServerSecret

tss_inputs = {
'fields': [
@@ -17,12 +17,6 @@ tss_inputs = {
'help_text': _('The (Application) user username'),
'type': 'string',
},
{
'id': 'domain',
'label': _('Domain'),
'help_text': _('The (Application) user domain'),
'type': 'string',
},
{
'id': 'password',
'label': _('Password'),
@@ -50,10 +44,7 @@ tss_inputs = {


def tss_backend(**kwargs):
if 'domain' in kwargs:
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
else:
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
secret_server = SecretServer(kwargs['server_url'], authorizer)
secret_dict = secret_server.get_secret(kwargs['secret_id'])
secret = ServerSecret(**secret_dict)

@@ -63,7 +63,7 @@ class RecordedQueryLog(object):
if not os.path.isdir(self.dest):
os.makedirs(self.dest)
progname = ' '.join(sys.argv)
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'):
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
if match in progname:
progname = match
break
@@ -87,7 +87,7 @@ class RecordedQueryLog(object):
)
log.commit()
log.execute(
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) VALUES (?, ?, ?, ?, ?, ?, ?);',
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) ' 'VALUES (?, ?, ?, ?, ?, ?, ?);',
(os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt),
)
log.commit()

@@ -1,14 +1,12 @@
import os
import psycopg2
import select

from contextlib import contextmanager

from awx.settings.application_name import get_application_name

from django.conf import settings
from django.db import connection as pg_connection


NOT_READY = ([], [], [])


@@ -16,29 +14,6 @@ def get_local_queuename():
return settings.CLUSTER_HOST_ID


def get_task_queuename():
if os.getenv('AWX_COMPONENT') != 'web':
return settings.CLUSTER_HOST_ID

from awx.main.models.ha import Instance

random_task_instance = (
Instance.objects.filter(
node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
node_state=Instance.States.READY,
enabled=True,
)
.only('hostname')
.order_by('?')
.first()
)

if random_task_instance is None:
raise ValueError('No task instances are READY and Enabled.')

return random_task_instance.hostname


class PubSub(object):
def __init__(self, conn):
self.conn = conn
@@ -85,11 +60,10 @@ def pg_bus_conn(new_connection=False):
'''

if new_connection:
conf = settings.DATABASES['default'].copy()
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
conf = settings.DATABASES['default']
conn = psycopg2.connect(
dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {})
)
# Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True)
else:

@@ -6,7 +6,7 @@ from django.conf import settings
from django.db import connection
import redis

from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename

from . import pg_bus_conn

@@ -21,7 +21,7 @@ class Control(object):
if service not in self.services:
raise RuntimeError('{} must be in {}'.format(service, self.services))
self.service = service
self.queuename = host or get_task_queuename()
self.queuename = host or get_local_queuename()

def status(self, *args, **kwargs):
r = redis.Redis.from_url(settings.BROKER_URL)

@@ -10,7 +10,6 @@ from django_guid import set_guid
from django_guid.utils import generate_guid

from awx.main.dispatch.worker import TaskWorker
from awx.main.utils.db import set_connection_name

logger = logging.getLogger('awx.main.dispatch.periodic')

@@ -22,9 +21,6 @@ class Scheduler(Scheduler):
def run():
ppid = os.getppid()
logger.warning('periodic beat started')

set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes

while True:
if os.getppid() != ppid:
# if the parent PID changes, this process has been orphaned

@@ -79,11 +79,9 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
else:
hostname = instance.hostname
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
base_Q = Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
if ref_time:
jobs = UnifiedJob.objects.filter(base_Q & Q(started__lte=ref_time))
else:
jobs = UnifiedJob.objects.filter(base_Q)
jobs = UnifiedJob.objects.filter(
Q(status='running', modified__lte=ref_time) & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
)
if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
for j in jobs:

@@ -18,7 +18,6 @@ from django.conf import settings
|
||||
from awx.main.dispatch.pool import WorkerPool
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.utils.common import log_excess_runtime
|
||||
from awx.main.utils.db import set_connection_name
|
||||
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
@@ -220,7 +219,6 @@ class BaseWorker(object):
|
||||
def work_loop(self, queue, finished, idx, *args):
|
||||
ppid = os.getppid()
|
||||
signal_handler = WorkerSignalHandler()
|
||||
set_connection_name('worker') # set application_name to distinguish from other dispatcher processes
|
||||
while not signal_handler.kill_now:
|
||||
# if the parent PID changes, this process has been orphaned
|
||||
# via e.g., segfault or sigkill, we should exit too
|
||||
|
||||
@@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
|
||||
`awx.main.dispatch.publish`.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def resolve_callable(task):
|
||||
@classmethod
|
||||
def resolve_callable(cls, task):
|
||||
"""
|
||||
Transform a dotted notation task into an imported, callable function, e.g.,
|
||||
|
||||
@@ -46,8 +46,7 @@ class TaskWorker(BaseWorker):
|
||||
|
||||
return _call
|
||||
|
||||
@staticmethod
|
||||
def run_callable(body):
|
||||
def run_callable(self, body):
|
||||
"""
|
||||
Given some AMQP message, import the correct Python code and run it.
|
||||
"""
|
||||
|
||||
@@ -800,7 +800,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
|
||||
def validate_env_var_allowed(self, env_var):
|
||||
if env_var.startswith('ANSIBLE_'):
|
||||
raise django_exceptions.ValidationError(
|
||||
_('Environment variable {} may affect Ansible configuration so its use is not allowed in credentials.').format(env_var),
|
||||
_('Environment variable {} may affect Ansible configuration so its ' 'use is not allowed in credentials.').format(env_var),
|
||||
code='invalid',
|
||||
params={'value': env_var},
|
||||
)
|
||||
@@ -954,16 +954,6 @@ class OrderedManyToManyDescriptor(ManyToManyDescriptor):
|
||||
def get_queryset(self):
|
||||
return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name)
|
||||
|
||||
def add(self, *objects):
|
||||
if len(objects) > 1:
|
||||
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
|
||||
return super().add(*objects)
|
||||
|
||||
def remove(self, *objects):
|
||||
if len(objects) > 1:
|
||||
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
|
||||
return super().remove(*objects)
|
||||
|
||||
return OrderedManyRelatedManager
|
||||
|
||||
return add_custom_queryset_to_many_related_manager(
|
||||
@@ -981,12 +971,13 @@ class OrderedManyToManyField(models.ManyToManyField):
|
||||
by a special `position` column on the M2M table
|
||||
"""
|
||||
|
||||
def _update_m2m_position(self, sender, instance, action, **kwargs):
|
||||
if action in ('post_add', 'post_remove'):
|
||||
descriptor = getattr(instance, self.name)
|
||||
order_with_respect_to = descriptor.source_field_name
|
||||
|
||||
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: instance.pk})):
|
||||
def _update_m2m_position(self, sender, **kwargs):
|
||||
if kwargs.get('action') in ('post_add', 'post_remove'):
|
||||
order_with_respect_to = None
|
||||
for field in sender._meta.local_fields:
|
||||
if isinstance(field, models.ForeignKey) and isinstance(kwargs['instance'], field.related_model):
|
||||
order_with_respect_to = field.name
|
||||
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: kwargs['instance'].pk})):
|
||||
if ig.position != i:
|
||||
ig.position = i
|
||||
ig.save()
|
||||
|
||||
@@ -23,7 +23,7 @@ class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
|
||||
|
||||
def init_logging(self):
|
||||
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
from awx.main.models import HostMetric
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Run soft-deleting of HostMetrics
|
||||
"""
|
||||
|
||||
help = 'Run soft-deleting of HostMetrics'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
months_ago = options.get('months-ago') or None
|
||||
|
||||
if not months_ago:
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
|
||||
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
@@ -152,7 +152,7 @@ class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
|
||||
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
|
||||
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
|
||||
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
|
||||
|
||||
@@ -44,7 +44,7 @@ class Command(BaseCommand):
|
||||
'- To list all (now deprecated) custom virtual environments run:',
|
||||
'awx-manage list_custom_venvs',
|
||||
'',
|
||||
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
|
||||
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
|
||||
'awx-manage export_custom_venv /path/to/venv',
|
||||
'',
|
||||
'- Run these commands with `-q` to remove tool tips.',
|
||||
|
||||
@@ -13,7 +13,7 @@ class Command(BaseCommand):
|
||||
Deprovision a cluster node
|
||||
"""
|
||||
|
||||
help = 'Remove instance from the database. Specify `--hostname` to use this command.'
|
||||
help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
from awx.main.tasks.system import clear_setting_cache
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.conf import settings
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -32,7 +31,5 @@ class Command(BaseCommand):
|
||||
else:
|
||||
raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')
|
||||
|
||||
clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])
|
||||
|
||||
def handle(self, **options):
|
||||
self._enable_disable_auth(options.get('enable'), options.get('disable'))
|
||||
|
||||
@@ -1,230 +1,53 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
import datetime
|
||||
from django.core.serializers.json import DjangoJSONEncoder
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.main.analytics.collectors import config
|
||||
from awx.main.models.inventory import HostMetric
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
import tarfile
|
||||
import csv
|
||||
|
||||
CSV_PREFERRED_ROW_COUNT = 500000
|
||||
BATCHED_FETCH_COUNT = 10000
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'This is for offline licensing usage'
|
||||
|
||||
def host_metric_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
|
||||
list_of_queryset = list(
|
||||
result.values(
|
||||
'id',
|
||||
'hostname',
|
||||
'first_automation',
|
||||
'last_automation',
|
||||
'last_deleted',
|
||||
'automated_counter',
|
||||
'deleted_counter',
|
||||
'deleted',
|
||||
'used_in_inventories',
|
||||
).order_by('first_automation')[offset : offset + limit]
|
||||
)
|
||||
|
||||
return list_of_queryset
|
||||
|
||||
def host_metric_summary_monthly_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
|
||||
list_of_queryset = list(
|
||||
result.values(
|
||||
'id',
|
||||
'date',
|
||||
'license_consumed',
|
||||
'license_capacity',
|
||||
'hosts_added',
|
||||
'hosts_deleted',
|
||||
'indirectly_managed_hosts',
|
||||
).order_by(
|
||||
'date'
|
||||
)[offset : offset + limit]
|
||||
)
|
||||
|
||||
return list_of_queryset
|
||||
|
||||
def paginated_db_retrieval(self, type, filter_kwargs, rows_per_file):
|
||||
offset = 0
|
||||
list_of_queryset = []
|
||||
while True:
|
||||
if type == 'host_metric':
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
list_of_queryset = self.host_metric_queryset(result, offset, rows_per_file)
|
||||
elif type == 'host_metric_summary_monthly':
|
||||
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
|
||||
list_of_queryset = self.host_metric_summary_monthly_queryset(result, offset, rows_per_file)
|
||||
|
||||
if not list_of_queryset:
|
||||
break
|
||||
else:
|
||||
yield list_of_queryset
|
||||
|
||||
offset += len(list_of_queryset)
|
||||
|
||||
def controlled_db_retrieval(self, type, filter_kwargs, offset=0, fetch_count=BATCHED_FETCH_COUNT):
|
||||
if type == 'host_metric':
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
return self.host_metric_queryset(result, offset, fetch_count)
|
||||
elif type == 'host_metric_summary_monthly':
|
||||
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
|
||||
return self.host_metric_summary_monthly_queryset(result, offset, fetch_count)
|
||||
|
||||
def write_to_csv(self, csv_file, list_of_queryset, always_header, first_write=False, mode='a'):
|
||||
with open(csv_file, mode, newline='') as output_file:
|
||||
try:
|
||||
keys = list_of_queryset[0].keys() if list_of_queryset else []
|
||||
dict_writer = csv.DictWriter(output_file, keys)
|
||||
if always_header or first_write:
|
||||
dict_writer.writeheader()
|
||||
dict_writer.writerows(list_of_queryset)
|
||||
|
||||
except Exception as e:
|
||||
print(e)
|
||||
|
||||
def csv_for_tar(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
|
||||
for index, list_of_queryset in enumerate(self.paginated_db_retrieval(type, filter_kwargs, rows_per_file)):
|
||||
csv_file = f'{temp_dir}/{type}{index+1}.csv'
|
||||
arcname_file = f'{type}{index+1}.csv'
|
||||
|
||||
first_write = True if index == 0 else False
|
||||
|
||||
self.write_to_csv(csv_file, list_of_queryset, always_header, first_write, 'w')
|
||||
yield csv_file, arcname_file
|
||||
|
||||
def csv_for_tar_batched_fetch(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
|
||||
csv_iteration = 1
|
||||
|
||||
offset = 0
|
||||
rows_written_per_csv = 0
|
||||
to_fetch = BATCHED_FETCH_COUNT
|
||||
|
||||
while True:
|
||||
list_of_queryset = self.controlled_db_retrieval(type, filter_kwargs, offset, to_fetch)
|
||||
|
||||
if not list_of_queryset:
|
||||
break
|
||||
|
||||
csv_file = f'{temp_dir}/{type}{csv_iteration}.csv'
|
||||
arcname_file = f'{type}{csv_iteration}.csv'
|
||||
self.write_to_csv(csv_file, list_of_queryset, always_header)
|
||||
|
||||
offset += to_fetch
|
||||
rows_written_per_csv += to_fetch
|
||||
always_header = False
|
||||
|
||||
remaining_rows_per_csv = rows_per_file - rows_written_per_csv
|
||||
|
||||
if not remaining_rows_per_csv:
|
||||
yield csv_file, arcname_file
|
||||
|
||||
rows_written_per_csv = 0
|
||||
always_header = True
|
||||
to_fetch = BATCHED_FETCH_COUNT
|
||||
csv_iteration += 1
|
||||
elif remaining_rows_per_csv < BATCHED_FETCH_COUNT:
|
||||
to_fetch = remaining_rows_per_csv
|
||||
|
||||
if rows_written_per_csv:
|
||||
yield csv_file, arcname_file
|
||||
|
||||
def config_for_tar(self, options, temp_dir):
|
||||
config_json = json.dumps(config(options.get('since')))
|
||||
config_file = f'{temp_dir}/config.json'
|
||||
arcname_file = 'config.json'
|
||||
with open(config_file, 'w') as f:
|
||||
f.write(config_json)
|
||||
return config_file, arcname_file
|
||||
|
||||
def output_json(self, options, filter_kwargs):
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
for csv_detail in self.csv_for_tar(temp_dir, options.get('json', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, True):
|
||||
csv_file = csv_detail[0]
|
||||
|
||||
with open(csv_file) as f:
|
||||
reader = csv.DictReader(f)
|
||||
rows = list(reader)
|
||||
json_result = json.dumps(rows, cls=DjangoJSONEncoder)
|
||||
print(json_result)
|
||||
|
||||
def output_csv(self, options, filter_kwargs):
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
for csv_detail in self.csv_for_tar(temp_dir, options.get('csv', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, False):
|
||||
csv_file = csv_detail[0]
|
||||
with open(csv_file) as f:
|
||||
sys.stdout.write(f.read())
|
||||
|
||||
def output_tarball(self, options, filter_kwargs):
|
||||
always_header = True
|
||||
rows_per_file = options['rows_per_file'] or CSV_PREFERRED_ROW_COUNT
|
||||
|
||||
tar = tarfile.open("./host_metrics.tar.gz", "w:gz")
|
||||
|
||||
if rows_per_file <= BATCHED_FETCH_COUNT:
|
||||
csv_function = self.csv_for_tar
|
||||
else:
|
||||
csv_function = self.csv_for_tar_batched_fetch
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
for csv_detail in csv_function(temp_dir, 'host_metric', filter_kwargs, rows_per_file, always_header):
|
||||
tar.add(csv_detail[0], arcname=csv_detail[1])
|
||||
|
||||
for csv_detail in csv_function(temp_dir, 'host_metric_summary_monthly', filter_kwargs, rows_per_file, always_header):
|
||||
tar.add(csv_detail[0], arcname=csv_detail[1])
|
||||
|
||||
config_file, arcname_file = self.config_for_tar(options, temp_dir)
|
||||
tar.add(config_file, arcname=arcname_file)
|
||||
|
||||
tar.close()
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
|
||||
parser.add_argument('--json', type=str, const='host_metric', nargs='?', help='Select output as JSON for host_metric or host_metric_summary_monthly')
|
||||
parser.add_argument('--csv', type=str, const='host_metric', nargs='?', help='Select output as CSV for host_metric or host_metric_summary_monthly')
|
||||
parser.add_argument('--tarball', action='store_true', help=f'Package CSV files into a tar with upto {CSV_PREFERRED_ROW_COUNT} rows')
|
||||
parser.add_argument('--rows_per_file', type=int, help=f'Split rows in chunks of {CSV_PREFERRED_ROW_COUNT}')
|
||||
parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
|
||||
parser.add_argument('--json', action='store_true', help='Select output as JSON')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
since = options.get('since')
|
||||
until = options.get('until')
|
||||
|
||||
if since is None and until is None:
|
||||
print("No Arguments received")
|
||||
return None
|
||||
|
||||
if since is not None and since.tzinfo is None:
|
||||
since = since.replace(tzinfo=datetime.timezone.utc)
|
||||
|
||||
if until is not None and until.tzinfo is None:
|
||||
until = until.replace(tzinfo=datetime.timezone.utc)
|
||||
|
||||
filter_kwargs = {}
|
||||
if since is not None:
|
||||
filter_kwargs['last_automation__gte'] = since
|
||||
if until is not None:
|
||||
filter_kwargs['last_automation__lte'] = until
|
||||
|
||||
filter_kwargs_host_metrics_summary = {}
|
||||
if since is not None:
|
||||
filter_kwargs_host_metrics_summary['date__gte'] = since
|
||||
|
||||
if options['rows_per_file'] and options.get('rows_per_file') > CSV_PREFERRED_ROW_COUNT:
|
||||
print(f"rows_per_file exceeds the allowable limit of {CSV_PREFERRED_ROW_COUNT}.")
|
||||
return
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
|
||||
# if --json flag is set, output the result in json format
|
||||
if options['json']:
|
||||
self.output_json(options, filter_kwargs)
|
||||
elif options['csv']:
|
||||
self.output_csv(options, filter_kwargs)
|
||||
elif options['tarball']:
|
||||
self.output_tarball(options, filter_kwargs)
|
||||
list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
|
||||
json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
|
||||
print(json_result)
|
||||
|
||||
# --json flag is not set, output in plain text
|
||||
else:
|
||||
print(f"Printing up to {BATCHED_FETCH_COUNT} automated hosts:")
|
||||
result = HostMetric.objects.filter(**filter_kwargs)
|
||||
list_of_queryset = self.host_metric_queryset(result, 0, BATCHED_FETCH_COUNT)
|
||||
for item in list_of_queryset:
|
||||
print(f"Total Number of hosts automated: {len(result)}")
|
||||
for item in result:
|
||||
print(
|
||||
"Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
|
||||
hostname=item['hostname'], first_automation=item['first_automation'], last_automation=item['last_automation']
|
||||
hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
@@ -458,19 +458,12 @@ class Command(BaseCommand):
|
||||
# TODO: We disable variable overwrite here in case user-defined inventory variables get
|
||||
# mangled. But we still need to figure out a better way of processing multiple inventory
|
||||
# update variables mixing with each other.
|
||||
# issue for this: https://github.com/ansible/awx/issues/11623
|
||||
|
||||
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
|
||||
# NOTE: we had to add a exception case to not merge variables
|
||||
# to make constructed inventory coherent
|
||||
db_variables = self.all_group.variables
|
||||
else:
|
||||
db_variables = self.inventory.variables_dict
|
||||
db_variables.update(self.all_group.variables)
|
||||
|
||||
if db_variables != self.inventory.variables_dict:
|
||||
self.inventory.variables = json.dumps(db_variables)
|
||||
self.inventory.save(update_fields=['variables'])
|
||||
all_obj = self.inventory
|
||||
db_variables = all_obj.variables_dict
|
||||
db_variables.update(self.all_group.variables)
|
||||
if db_variables != all_obj.variables_dict:
|
||||
all_obj.variables = json.dumps(db_variables)
|
||||
all_obj.save(update_fields=['variables'])
|
||||
logger.debug('Inventory variables updated from "all" group')
|
||||
else:
|
||||
logger.debug('Inventory variables unmodified')
|
||||
@@ -529,32 +522,16 @@ class Command(BaseCommand):
|
||||
def _update_db_host_from_mem_host(self, db_host, mem_host):
|
||||
# Update host variables.
|
||||
db_variables = db_host.variables_dict
|
||||
mem_variables = mem_host.variables
|
||||
update_fields = []
|
||||
|
||||
# Update host instance_id.
|
||||
instance_id = self._get_instance_id(mem_variables)
|
||||
if instance_id != db_host.instance_id:
|
||||
old_instance_id = db_host.instance_id
|
||||
db_host.instance_id = instance_id
|
||||
update_fields.append('instance_id')
|
||||
|
||||
if self.inventory.kind == 'constructed':
|
||||
# remove towervars so the constructed hosts do not have extra variables
|
||||
for prefix in ('host', 'tower'):
|
||||
for var in ('remote_{}_enabled', 'remote_{}_id'):
|
||||
mem_variables.pop(var.format(prefix), None)
|
||||
|
||||
if self.overwrite_vars:
|
||||
db_variables = mem_variables
|
||||
db_variables = mem_host.variables
|
||||
else:
|
||||
db_variables.update(mem_variables)
|
||||
|
||||
db_variables.update(mem_host.variables)
|
||||
update_fields = []
|
||||
if db_variables != db_host.variables_dict:
|
||||
db_host.variables = json.dumps(db_variables)
|
||||
update_fields.append('variables')
|
||||
# Update host enabled flag.
|
||||
enabled = self._get_enabled(mem_variables)
|
||||
enabled = self._get_enabled(mem_host.variables)
|
||||
if enabled is not None and db_host.enabled != enabled:
|
||||
db_host.enabled = enabled
|
||||
update_fields.append('enabled')
|
||||
@@ -563,6 +540,12 @@ class Command(BaseCommand):
|
||||
old_name = db_host.name
|
||||
db_host.name = mem_host.name
|
||||
update_fields.append('name')
|
||||
# Update host instance_id.
|
||||
instance_id = self._get_instance_id(mem_host.variables)
|
||||
if instance_id != db_host.instance_id:
|
||||
old_instance_id = db_host.instance_id
|
||||
db_host.instance_id = instance_id
|
||||
update_fields.append('instance_id')
|
||||
# Update host and display message(s) on what changed.
|
||||
if update_fields:
|
||||
db_host.save(update_fields=update_fields)
|
||||
@@ -671,19 +654,13 @@ class Command(BaseCommand):
|
||||
mem_host = self.all_group.all_hosts[mem_host_name]
|
||||
import_vars = mem_host.variables
|
||||
host_desc = import_vars.pop('_awx_description', 'imported')
|
||||
host_attrs = dict(description=host_desc)
|
||||
host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
|
||||
enabled = self._get_enabled(mem_host.variables)
|
||||
if enabled is not None:
|
||||
host_attrs['enabled'] = enabled
|
||||
if self.instance_id_var:
|
||||
instance_id = self._get_instance_id(mem_host.variables)
|
||||
host_attrs['instance_id'] = instance_id
|
||||
if self.inventory.kind == 'constructed':
|
||||
# remove towervars so the constructed hosts do not have extra variables
|
||||
for prefix in ('host', 'tower'):
|
||||
for var in ('remote_{}_enabled', 'remote_{}_id'):
|
||||
import_vars.pop(var.format(prefix), None)
|
||||
host_attrs['variables'] = json.dumps(import_vars)
|
||||
try:
|
||||
sanitize_jinja(mem_host_name)
|
||||
except ValueError as e:
|
||||
|
||||
@@ -22,7 +22,7 @@ class Command(BaseCommand):
|
||||
'# Discovered Virtual Environments:',
|
||||
'\n'.join(venvs),
|
||||
'',
|
||||
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
|
||||
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
|
||||
'awx-manage export_custom_venv /path/to/venv',
|
||||
'',
|
||||
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',
|
||||
|
||||
@@ -44,18 +44,16 @@ class Command(BaseCommand):
|
||||
|
||||
for x in ig.instances.all():
|
||||
color = '\033[92m'
|
||||
end_color = '\033[0m'
|
||||
if x.capacity == 0 and x.node_type != 'hop':
|
||||
color = '\033[91m'
|
||||
if not x.enabled:
|
||||
color = '\033[90m[DISABLED] '
|
||||
if no_color:
|
||||
color = ''
|
||||
end_color = ''
|
||||
|
||||
capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
|
||||
version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
|
||||
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
|
||||
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')
|
||||
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}\033[0m')
|
||||
|
||||
print()
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
import logging
|
||||
import json
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.dispatch.worker.task import TaskWorker
|
||||
|
||||
logger = logging.getLogger('awx.main.cache_clear')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Cache Clear
|
||||
Runs as a management command and starts a daemon that listens for a pg_notify message to clear the cache.
|
||||
"""
|
||||
|
||||
help = 'Launch the cache clear daemon'
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
try:
|
||||
with pg_bus_conn(new_connection=True) as conn:
|
||||
conn.listen("tower_settings_change")
|
||||
for e in conn.events(yield_timeouts=True):
|
||||
if e is not None:
|
||||
body = json.loads(e.payload)
|
||||
logger.info(f"Cache clear request received. Clearing now, payload: {e.payload}")
|
||||
TaskWorker.run_callable(body)
|
||||
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in cache clear main loop')
|
||||
raise
|
||||
@@ -8,7 +8,7 @@ from django.core.cache import cache as django_cache
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.db import connection as django_connection
|
||||
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch import get_local_queuename
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
||||
@@ -76,7 +76,7 @@ class Command(BaseCommand):
|
||||
consumer = None
|
||||
|
||||
try:
|
||||
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
||||
queues = ['tower_broadcast_all', get_local_queuename()]
|
||||
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
import signal
|
||||
import sys
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
|
||||
logger = logging.getLogger('awx.main.commands.run_heartbeet')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Launch the web server beacon (heartbeet)'
|
||||
|
||||
def print_banner(self):
|
||||
heartbeet = r"""
|
||||
********** **********
|
||||
************* *************
|
||||
*****************************
|
||||
***********HEART***********
|
||||
*************************
|
||||
*******************
|
||||
*************** _._
|
||||
*********** /`._ `'. __
|
||||
******* \ .\| \ _'` `)
|
||||
*** (``_) \| ).'` /`- /
|
||||
* `\ `;\_ `\\//`-'` /
|
||||
\ `'.'.| / __/`
|
||||
`'--v_|/`'`
|
||||
__||-._
|
||||
/'` `-`` `'\\
|
||||
/ .'` )
|
||||
\ BEET ' )
|
||||
\. /
|
||||
'. /'`
|
||||
`) |
|
||||
//
|
||||
'(.
|
||||
`\`.
|
||||
``"""
|
||||
print(heartbeet)
|
||||
|
||||
def construct_payload(self, action='online'):
|
||||
payload = {
|
||||
'hostname': settings.CLUSTER_HOST_ID,
|
||||
'ip': os.environ.get('MY_POD_IP'),
|
||||
'action': action,
|
||||
}
|
||||
return json.dumps(payload)
|
||||
|
||||
def notify_listener_and_exit(self, *args):
|
||||
with pg_bus_conn(new_connection=False) as conn:
|
||||
conn.notify('web_heartbeet', self.construct_payload(action='offline'))
|
||||
sys.exit(0)
|
||||
|
||||
def do_hearbeat_loop(self):
|
||||
with pg_bus_conn(new_connection=True) as conn:
|
||||
while True:
|
||||
logger.debug('Sending heartbeat')
|
||||
conn.notify('web_heartbeet', self.construct_payload())
|
||||
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
self.print_banner()
|
||||
signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
|
||||
signal.signal(signal.SIGINT, self.notify_listener_and_exit)
|
||||
|
||||
# Note: We don't really try any reconnect logic to pg_notify here,
|
||||
# just let supervisor restart if we fail.
|
||||
self.do_hearbeat_loop()
|
||||
@@ -1,41 +0,0 @@
|
||||
import logging
|
||||
import json
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from django.core.cache import cache
|
||||
from awx.main.dispatch import pg_bus_conn
|
||||
from awx.main.dispatch.worker.task import TaskWorker
|
||||
from awx.main.utils.external_logging import reconfigure_rsyslog
|
||||
|
||||
logger = logging.getLogger('awx.main.rsyslog_configurer')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Rsyslog Configurer
|
||||
Runs as a management command and starts rsyslog configurer daemon. Daemon listens
|
||||
for pg_notify then calls reconfigure_rsyslog
|
||||
"""
|
||||
|
||||
help = 'Launch the rsyslog_configurer daemon'
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
try:
|
||||
with pg_bus_conn(new_connection=True) as conn:
|
||||
conn.listen("rsyslog_configurer")
|
||||
# reconfigure rsyslog on start up
|
||||
reconfigure_rsyslog()
|
||||
for e in conn.events(yield_timeouts=True):
|
||||
if e is not None:
|
||||
logger.info("Change in logging settings found. Restarting rsyslogd")
|
||||
# clear the cache of relevant settings then restart
|
||||
setting_keys = [k for k in dir(settings) if k.startswith('LOG_AGGREGATOR')]
|
||||
cache.delete_many(setting_keys)
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
body = json.loads(e.payload)
|
||||
TaskWorker.run_callable(body)
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in rsyslog_configurer main loop')
|
||||
raise
|
||||
@@ -13,13 +13,13 @@ from django.db import connection
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
|
||||
from awx.main.analytics.broadcast_websocket import (
|
||||
RelayWebsocketStatsManager,
|
||||
BroadcastWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.wsrelay import WebSocketRelayManager
|
||||
from awx.main.wsbroadcast import BroadcastWebsocketManager
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.wsrelay')
|
||||
logger = logging.getLogger('awx.main.wsbroadcast')
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -98,9 +98,8 @@ class Command(BaseCommand):
|
||||
try:
|
||||
executor = MigrationExecutor(connection)
|
||||
migrating = bool(executor.migration_plan(executor.loader.graph.leaf_nodes()))
|
||||
connection.close() # Because of async nature, main loop will use new connection, so close this
|
||||
except Exception as exc:
|
||||
logger.warning(f'Error on startup of run_wsrelay (error: {exc}), retry in 10s...')
|
||||
logger.info(f'Error on startup of run_wsbroadcast (error: {exc}), retry in 10s...')
|
||||
time.sleep(10)
|
||||
return
|
||||
|
||||
@@ -131,9 +130,9 @@ class Command(BaseCommand):
|
||||
|
||||
if options.get('status'):
|
||||
try:
|
||||
stats_all = RelayWebsocketStatsManager.get_stats_sync()
|
||||
stats_all = BroadcastWebsocketStatsManager.get_stats_sync()
|
||||
except redis.exceptions.ConnectionError as e:
|
||||
print(f"Unable to get Relay Websocket Status. Failed to connect to redis {e}")
|
||||
print(f"Unable to get Broadcast Websocket Status. Failed to connect to redis {e}")
|
||||
return
|
||||
|
||||
data = {}
|
||||
@@ -152,19 +151,22 @@ class Command(BaseCommand):
|
||||
host_stats = Command.get_connection_status(hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'Relay websocket connection status from "{my_hostname}" to:')
|
||||
print(f'Broadcast websocket connection status from "{my_hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
host_stats = Command.get_connection_stats(hostnames, data)
|
||||
lines = Command._format_lines(host_stats)
|
||||
|
||||
print(f'\nRelay websocket connection stats from "{my_hostname}" to:')
|
||||
print(f'\nBroadcast websocket connection stats from "{my_hostname}" to:')
|
||||
print('\n'.join(lines))
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
websocket_relay_manager = WebSocketRelayManager()
|
||||
asyncio.run(websocket_relay_manager.run())
|
||||
broadcast_websocket_mgr = BroadcastWebsocketManager()
|
||||
task = broadcast_websocket_mgr.start()
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(task)
|
||||
except KeyboardInterrupt:
|
||||
logger.info('Terminating Websocket Relayer')
|
||||
logger.debug('Terminating Websocket Broadcaster')
|
||||
@@ -79,11 +79,6 @@ class HostManager(models.Manager):
|
||||
return qs
|
||||
|
||||
|
||||
class HostMetricActiveManager(models.Manager):
|
||||
def get_queryset(self):
|
||||
return super().get_queryset().filter(deleted=False)
|
||||
|
||||
|
||||
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
|
||||
# Create IG mapping by union of all groups their instances are members of
|
||||
ig_ig_mapping = {}
|
||||
|
||||
@@ -122,7 +122,7 @@ class URLModificationMiddleware(MiddlewareMixin):
|
||||
field_class=fields.DictField,
|
||||
read_only=True,
|
||||
label=_('Formats of all available named urls'),
|
||||
help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'),
|
||||
help_text=_('Read-only list of key-value pairs that shows the standard format of all ' 'available named URLs.'),
|
||||
category=_('Named URL'),
|
||||
category_slug='named-url',
|
||||
)
|
||||
|
||||
@@ -12,17 +12,22 @@ def migrate_event_data(apps, schema_editor):
|
||||
# https://www.postgresql.org/docs/9.1/datatype-numeric.html)
|
||||
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
|
||||
with connection.cursor() as cursor:
|
||||
# This loop used to do roughly the following:
|
||||
# Rename the table to _old_<tablename>
|
||||
# Create a new table form the old table (it would have no rows)
|
||||
# Drop the old sequence and create a new one tied to the new table and set the sequence to the last number from the old table
|
||||
# This used to work with postgres spitting out a NOTICE and DETAIL
|
||||
# With the django 4.2 upgrade that changed to an ERROR and HINT
|
||||
# By the time we hit the 4.2 upgrade, no one should be upgrading a database this old directly to this new schema
|
||||
# So we no longer really care about having to do all of this work, we only need a table with a bigint ID field
|
||||
# And this can be achieved by just changing the id column type...
|
||||
# rename the current event table
|
||||
cursor.execute(f'ALTER TABLE {tblname} RENAME TO _old_{tblname};')
|
||||
# create a *new* table with the same schema
|
||||
cursor.execute(f'CREATE TABLE {tblname} (LIKE _old_{tblname} INCLUDING ALL);')
|
||||
# alter the *new* table so that the primary key is a big int
|
||||
cursor.execute(f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;')
|
||||
|
||||
# recreate counter for the new table's primary key to
|
||||
# start where the *old* table left off (we have to do this because the
|
||||
# counter changed from an int to a bigint)
|
||||
cursor.execute(f'DROP SEQUENCE IF EXISTS "{tblname}_id_seq" CASCADE;')
|
||||
cursor.execute(f'CREATE SEQUENCE "{tblname}_id_seq";')
|
||||
cursor.execute(f'ALTER TABLE "{tblname}" ALTER COLUMN "id" ' f"SET DEFAULT nextval('{tblname}_id_seq');")
|
||||
cursor.execute(f"SELECT setval('{tblname}_id_seq', (SELECT MAX(id) FROM _old_{tblname}), true);")
|
||||
cursor.execute(f'DROP TABLE _old_{tblname};')
|
||||
|
||||
|
||||
class FakeAlterField(migrations.AlterField):
|
||||
def database_forwards(self, *args):
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
# Generated by Django 3.2.16 on 2023-03-16 15:16
|
||||
from django.db import migrations
|
||||
|
||||
from awx.main.migrations._credentialtypes import migrate_credential_type
|
||||
from awx.main.models import CredentialType
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
def update_cyberark_plugin_names(apps, schema_editor):
|
||||
CredentialType.setup_tower_managed_defaults(apps)
|
||||
migrate_credential_type(apps, 'aim')
|
||||
migrate_credential_type(apps, 'conjur')
|
||||
|
||||
dependencies = [
|
||||
('main', '0178_instance_group_admin_migration'),
|
||||
]
|
||||
|
||||
operations = [migrations.RunPython(update_cyberark_plugin_names)]
|
||||
@@ -1,43 +0,0 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-03 09:40
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0179_change_cyberark_plugin_names'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(model_name='hostmetric', name='hostname', field=models.CharField(max_length=512, primary_key=False, serialize=True, unique=True)),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='last_deleted',
|
||||
field=models.DateTimeField(db_index=True, null=True, help_text='When the host was last deleted'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='automated_counter',
|
||||
field=models.BigIntegerField(default=0, help_text='How many times was the host automated'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='deleted_counter',
|
||||
field=models.IntegerField(default=0, help_text='How many times was the host deleted'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='deleted',
|
||||
field=models.BooleanField(
|
||||
default=False, help_text='Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption'
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric',
|
||||
name='used_in_inventories',
|
||||
field=models.IntegerField(null=True, help_text='How many inventories contain this host'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='hostmetric', name='id', field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
),
|
||||
]
|
||||
@@ -1,33 +0,0 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-10 12:26
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0180_add_hostmetric_fields'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='HostMetricSummaryMonthly',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('date', models.DateField(unique=True)),
|
||||
('license_consumed', models.BigIntegerField(default=0, help_text='How many unique hosts are consumed from the license')),
|
||||
('license_capacity', models.BigIntegerField(default=0, help_text="'License capacity as max. number of unique hosts")),
|
||||
(
|
||||
'hosts_added',
|
||||
models.IntegerField(default=0, help_text='How many hosts were added in the associated month, consuming more license capacity'),
|
||||
),
|
||||
(
|
||||
'hosts_deleted',
|
||||
models.IntegerField(default=0, help_text='How many hosts were deleted in the associated month, freeing the license capacity'),
|
||||
),
|
||||
(
|
||||
'indirectly_managed_hosts',
|
||||
models.IntegerField(default=0, help_text='Manually entered number indirectly managed hosts for a certain month'),
|
||||
),
|
||||
],
|
||||
),
|
||||
]
|
||||
@@ -1,138 +0,0 @@
|
||||
# Generated by Django 3.2.16 on 2022-12-07 14:20
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0181_hostmetricsummarymonthly'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='InventoryConstructedInventoryMembership',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
|
||||
(
|
||||
'constructed_inventory',
|
||||
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory', related_name='constructed_inventory_memberships'),
|
||||
),
|
||||
('input_inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.inventory')),
|
||||
],
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventory',
|
||||
name='input_inventories',
|
||||
field=awx.main.fields.OrderedManyToManyField(
|
||||
blank=True,
|
||||
through_fields=('constructed_inventory', 'input_inventory'),
|
||||
help_text='Only valid for constructed inventories, this links to the inventories that will be used.',
|
||||
related_name='destination_inventories',
|
||||
through='main.InventoryConstructedInventoryMembership',
|
||||
to='main.Inventory',
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventory',
|
||||
name='kind',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
choices=[
|
||||
('', 'Hosts have a direct link to this inventory.'),
|
||||
('smart', 'Hosts for inventory generated using the host_filter property.'),
|
||||
('constructed', 'Parse list of source inventories with the constructed inventory plugin.'),
|
||||
],
|
||||
default='',
|
||||
help_text='Kind of inventory being represented.',
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventorysource',
|
||||
name='limit',
|
||||
field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventoryupdate',
|
||||
name='limit',
|
||||
field=models.TextField(blank=True, default='', help_text='Enter host, group or pattern match'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='host_filter',
|
||||
field=models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='host_filter',
|
||||
field=models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text='This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.',
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='jobhostsummary',
|
||||
name='constructed_host',
|
||||
field=models.ForeignKey(
|
||||
default=None,
|
||||
editable=False,
|
||||
help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
|
||||
null=True,
|
||||
on_delete=django.db.models.deletion.SET_NULL,
|
||||
related_name='constructed_host_summaries',
|
||||
to='main.host',
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -1,30 +0,0 @@
# Generated by Django 3.2.16 on 2023-04-21 14:15

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.OAUTH2_PROVIDER_ID_TOKEN_MODEL),
('main', '0182_constructed_inventory'),
('oauth2_provider', '0005_auto_20211222_2352'),
]

operations = [
migrations.AddField(
model_name='oauth2accesstoken',
name='id_token',
field=models.OneToOneField(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='access_token', to=settings.OAUTH2_PROVIDER_ID_TOKEN_MODEL
),
),
migrations.AddField(
model_name='oauth2application',
name='algorithm',
field=models.CharField(
blank=True, choices=[('', 'No OIDC support'), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5
),
),
]
File diff suppressed because it is too large
@@ -8,7 +8,7 @@ logger = logging.getLogger('awx.main.migrations')
def migrate_org_admin_to_use(apps, schema_editor):
logger.info('Initiated migration from Org admin to use role')
roles_added = 0
for org in Organization.objects.prefetch_related('admin_role__members').iterator(chunk_size=1000):
for org in Organization.objects.prefetch_related('admin_role__members').iterator():
igs = list(org.instance_groups.all())
if not igs:
continue

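The hunk above drops chunk_size=1000 from iterator(). As a rough sketch of what the argument does (the Organization model comes from the migration helper above; process() is a placeholder, not part of this diff):

# .iterator() streams rows instead of caching the whole queryset in memory;
# chunk_size bounds how many rows are pulled from the database cursor per fetch.
for org in Organization.objects.prefetch_related('admin_role__members').iterator(chunk_size=1000):
    process(org)  # placeholder for the per-organization role work done in the migration
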
@@ -1,9 +1,6 @@
import logging

from awx.main.models import CredentialType
from django.db.models import Q

logger = logging.getLogger('awx.main.migrations')

DEPRECATED_CRED_KIND = {
'rax': {
@@ -79,14 +76,3 @@ def add_tower_verify_field(apps, schema_editor):
def remove_become_methods(apps, schema_editor):
# this is no longer necessary; schemas are defined in code
pass


def migrate_credential_type(apps, namespace):
ns_types = apps.get_model('main', 'CredentialType').objects.filter(namespace=namespace).order_by('created')
if ns_types.count() == 2:
original, renamed = ns_types.all()
logger.info(f'There are credential types to migrate in the "{namespace}" namespace: {original.name}')
apps.get_model('main', 'Credential').objects.filter(credential_type_id=original.id).update(credential_type_id=renamed.id)

logger.info(f'Removing old credential type: {renamed.name}')
original.delete()

@@ -158,7 +158,7 @@ class ec2(PluginFileInjector):
|
||||
return {
|
||||
# vars that change
|
||||
'ec2_block_devices': (
|
||||
"dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list))"
|
||||
"dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings " "| map(attribute='ebs.volume_id') | list))"
|
||||
),
|
||||
'ec2_dns_name': 'public_dns_name',
|
||||
'ec2_group_name': 'placement.group_name',
|
||||
@@ -635,7 +635,7 @@ class satellite6(PluginFileInjector):
|
||||
"environment": {
|
||||
"prefix": "{}environment_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')",
|
||||
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | " "regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')",
|
||||
},
|
||||
"location": {
|
||||
"prefix": "{}location_".format(group_prefix),
|
||||
@@ -656,7 +656,7 @@ class satellite6(PluginFileInjector):
|
||||
"content_view": {
|
||||
"prefix": "{}content_view_".format(group_prefix),
|
||||
"separator": "",
|
||||
"key": "foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')",
|
||||
"key": "foreman['content_facet_attributes']['content_view_name'] | " "lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -16,9 +16,7 @@ from awx.main.models.inventory import ( # noqa
|
||||
Group,
|
||||
Host,
|
||||
HostMetric,
|
||||
HostMetricSummaryMonthly,
|
||||
Inventory,
|
||||
InventoryConstructedInventoryMembership,
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
SmartInventoryMembership,
|
||||
|
||||
@@ -91,7 +91,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
|
||||
related_name='credentials',
|
||||
null=False,
|
||||
on_delete=models.CASCADE,
|
||||
help_text=_('Specify the type of credential you want to create. Refer to the documentation for details on each type.'),
|
||||
help_text=_('Specify the type of credential you want to create. Refer ' 'to the documentation for details on each type.'),
|
||||
)
|
||||
managed = models.BooleanField(default=False, editable=False)
|
||||
organization = models.ForeignKey(
|
||||
@@ -103,7 +103,7 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
|
||||
related_name='credentials',
|
||||
)
|
||||
inputs = CredentialInputField(
|
||||
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.')
|
||||
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
|
||||
)
|
||||
admin_role = ImplicitRoleField(
|
||||
parent_role=[
|
||||
@@ -195,9 +195,6 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
|
||||
|
||||
@cached_property
|
||||
def dynamic_input_fields(self):
|
||||
# if the credential is not yet saved we can't access the input_sources
|
||||
if not self.id:
|
||||
return []
|
||||
return [obj.input_field_name for obj in self.input_sources.all()]
|
||||
|
||||
def _password_field_allows_ask(self, field):
|
||||
@@ -346,12 +343,12 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
managed = models.BooleanField(default=False, editable=False)
|
||||
namespace = models.CharField(max_length=1024, null=True, default=None, editable=False)
|
||||
inputs = CredentialTypeInputField(
|
||||
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. Refer to the documentation for example syntax.')
|
||||
blank=True, default=dict, help_text=_('Enter inputs using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.')
|
||||
)
|
||||
injectors = CredentialTypeInjectorField(
|
||||
blank=True,
|
||||
default=dict,
|
||||
help_text=_('Enter injectors using either JSON or YAML syntax. Refer to the documentation for example syntax.'),
|
||||
help_text=_('Enter injectors using either JSON or YAML syntax. ' 'Refer to the documentation for example syntax.'),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -605,7 +602,9 @@ ManagedCredentialType(
|
||||
'id': 'become_method',
|
||||
'label': gettext_noop('Privilege Escalation Method'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('Specify a method for "become" operations. This is equivalent to specifying the --become-method Ansible parameter.'),
|
||||
'help_text': gettext_noop(
|
||||
'Specify a method for "become" operations. This is ' 'equivalent to specifying the --become-method ' 'Ansible parameter.'
|
||||
),
|
||||
},
|
||||
{
|
||||
'id': 'become_username',
|
||||
@@ -747,7 +746,7 @@ ManagedCredentialType(
|
||||
'id': 'host',
|
||||
'label': gettext_noop('Host (Authentication URL)'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('The host to authenticate with. For example, https://openstack.business.com/v2.0/'),
|
||||
'help_text': gettext_noop('The host to authenticate with. For example, ' 'https://openstack.business.com/v2.0/'),
|
||||
},
|
||||
{
|
||||
'id': 'project',
|
||||
@@ -798,7 +797,7 @@ ManagedCredentialType(
|
||||
'id': 'host',
|
||||
'label': gettext_noop('VCenter Host'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('Enter the hostname or IP address that corresponds to your VMware vCenter.'),
|
||||
'help_text': gettext_noop('Enter the hostname or IP address that corresponds ' 'to your VMware vCenter.'),
|
||||
},
|
||||
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
|
||||
{
|
||||
@@ -823,7 +822,7 @@ ManagedCredentialType(
|
||||
'id': 'host',
|
||||
'label': gettext_noop('Satellite 6 URL'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('Enter the URL that corresponds to your Red Hat Satellite 6 server. For example, https://satellite.example.org'),
|
||||
'help_text': gettext_noop('Enter the URL that corresponds to your Red Hat ' 'Satellite 6 server. For example, https://satellite.example.org'),
|
||||
},
|
||||
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
|
||||
{
|
||||
@@ -848,7 +847,7 @@ ManagedCredentialType(
|
||||
'id': 'username',
|
||||
'label': gettext_noop('Service Account Email Address'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('The email address assigned to the Google Compute Engine service account.'),
|
||||
'help_text': gettext_noop('The email address assigned to the Google Compute ' 'Engine service account.'),
|
||||
},
|
||||
{
|
||||
'id': 'project',
|
||||
@@ -868,7 +867,7 @@ ManagedCredentialType(
|
||||
'format': 'ssh_private_key',
|
||||
'secret': True,
|
||||
'multiline': True,
|
||||
'help_text': gettext_noop('Paste the contents of the PEM file associated with the service account email.'),
|
||||
'help_text': gettext_noop('Paste the contents of the PEM file associated ' 'with the service account email.'),
|
||||
},
|
||||
],
|
||||
'required': ['username', 'ssh_key_data'],
|
||||
@@ -886,7 +885,7 @@ ManagedCredentialType(
|
||||
'id': 'subscription',
|
||||
'label': gettext_noop('Subscription ID'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('Subscription ID is an Azure construct, which is mapped to a username.'),
|
||||
'help_text': gettext_noop('Subscription ID is an Azure construct, which is ' 'mapped to a username.'),
|
||||
},
|
||||
{'id': 'username', 'label': gettext_noop('Username'), 'type': 'string'},
|
||||
{
|
||||
@@ -907,7 +906,7 @@ ManagedCredentialType(
|
||||
'id': 'cloud_environment',
|
||||
'label': gettext_noop('Azure Cloud Environment'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('Environment variable AZURE_CLOUD_ENVIRONMENT when using Azure GovCloud or Azure stack.'),
|
||||
'help_text': gettext_noop('Environment variable AZURE_CLOUD_ENVIRONMENT when' ' using Azure GovCloud or Azure stack.'),
|
||||
},
|
||||
],
|
||||
'required': ['subscription'],
|
||||
@@ -1038,7 +1037,7 @@ ManagedCredentialType(
|
||||
'label': gettext_noop('Username'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop(
|
||||
'Red Hat Ansible Automation Platform username id to authenticate as.This should not be set if an OAuth token is being used.'
|
||||
'Red Hat Ansible Automation Platform username id to authenticate as.' 'This should not be set if an OAuth token is being used.'
|
||||
),
|
||||
},
|
||||
{
|
||||
@@ -1052,7 +1051,7 @@ ManagedCredentialType(
|
||||
'label': gettext_noop('OAuth Token'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
'help_text': gettext_noop('An OAuth token to use to authenticate with.This should not be set if username/password are being used.'),
|
||||
'help_text': gettext_noop('An OAuth token to use to authenticate with.' 'This should not be set if username/password are being used.'),
|
||||
},
|
||||
{'id': 'verify_ssl', 'label': gettext_noop('Verify SSL'), 'type': 'boolean', 'secret': False},
|
||||
],
|
||||
@@ -1163,7 +1162,7 @@ ManagedCredentialType(
|
||||
'id': 'auth_url',
|
||||
'label': gettext_noop('Auth Server URL'),
|
||||
'type': 'string',
|
||||
'help_text': gettext_noop('The URL of a Keycloak server token_endpoint, if using SSO auth.'),
|
||||
'help_text': gettext_noop('The URL of a Keycloak server token_endpoint, if using ' 'SSO auth.'),
|
||||
},
|
||||
{
|
||||
'id': 'token',
|
||||
|
||||
@@ -1,17 +1,15 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import datetime
|
||||
from datetime import timezone
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import models, DatabaseError
|
||||
from django.db.models.functions import Cast
|
||||
from django.utils.dateparse import parse_datetime
|
||||
from django.utils.text import Truncator
|
||||
from django.utils.timezone import now
|
||||
from django.utils.timezone import utc, now
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.encoding import force_str
|
||||
|
||||
@@ -423,7 +421,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
if not isinstance(kwargs['created'], datetime.datetime):
|
||||
kwargs['created'] = parse_datetime(kwargs['created'])
|
||||
if not kwargs['created'].tzinfo:
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=timezone.utc)
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
@@ -433,7 +431,7 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
if not isinstance(kwargs['job_created'], datetime.datetime):
|
||||
kwargs['job_created'] = parse_datetime(kwargs['job_created'])
|
||||
if not kwargs['job_created'].tzinfo:
|
||||
kwargs['job_created'] = kwargs['job_created'].replace(tzinfo=timezone.utc)
|
||||
kwargs['job_created'] = kwargs['job_created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('job_created', None)
|
||||
|
||||
@@ -471,11 +469,11 @@ class JobEvent(BasePlaybookEvent):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
indexes = [
|
||||
models.Index(fields=['job', 'job_created', 'event']),
|
||||
models.Index(fields=['job', 'job_created', 'uuid']),
|
||||
models.Index(fields=['job', 'job_created', 'parent_uuid']),
|
||||
models.Index(fields=['job', 'job_created', 'counter']),
|
||||
index_together = [
|
||||
('job', 'job_created', 'event'),
|
||||
('job', 'job_created', 'uuid'),
|
||||
('job', 'job_created', 'parent_uuid'),
|
||||
('job', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
@@ -538,38 +536,25 @@ class JobEvent(BasePlaybookEvent):
|
||||
return
|
||||
job = self.job
|
||||
|
||||
from awx.main.models import Host, JobHostSummary # circular import
|
||||
|
||||
if self.job.inventory.kind == 'constructed':
|
||||
all_hosts = Host.objects.filter(id__in=self.job.inventory.hosts.values_list(Cast('instance_id', output_field=models.IntegerField()))).only(
|
||||
'id', 'name'
|
||||
)
|
||||
constructed_host_map = self.host_map
|
||||
host_map = {host.name: host.id for host in all_hosts}
|
||||
else:
|
||||
all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
|
||||
constructed_host_map = {}
|
||||
host_map = self.host_map
|
||||
from awx.main.models import Host, JobHostSummary, HostMetric # circular import
|
||||
|
||||
all_hosts = Host.objects.filter(pk__in=self.host_map.values()).only('id', 'name')
|
||||
existing_host_ids = set(h.id for h in all_hosts)
|
||||
|
||||
summaries = dict()
|
||||
updated_hosts_list = list()
|
||||
for host in hostnames:
|
||||
updated_hosts_list.append(host.lower())
|
||||
host_id = host_map.get(host)
|
||||
host_id = self.host_map.get(host, None)
|
||||
if host_id not in existing_host_ids:
|
||||
host_id = None
|
||||
constructed_host_id = constructed_host_map.get(host)
|
||||
host_stats = {}
|
||||
for stat in ('changed', 'dark', 'failures', 'ignored', 'ok', 'processed', 'rescued', 'skipped'):
|
||||
try:
|
||||
host_stats[stat] = self.event_data.get(stat, {}).get(host, 0)
|
||||
except AttributeError: # in case event_data[stat] isn't a dict.
|
||||
pass
|
||||
summary = JobHostSummary(
|
||||
created=now(), modified=now(), job_id=job.id, host_id=host_id, constructed_host_id=constructed_host_id, host_name=host, **host_stats
|
||||
)
|
||||
summary = JobHostSummary(created=now(), modified=now(), job_id=job.id, host_id=host_id, host_name=host, **host_stats)
|
||||
summary.failed = bool(summary.dark or summary.failures)
|
||||
summaries[(host_id, host)] = summary
|
||||
|
||||
@@ -590,26 +575,12 @@ class JobEvent(BasePlaybookEvent):
|
||||
|
||||
Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
|
||||
|
||||
# Create/update Host Metrics
|
||||
self._update_host_metrics(updated_hosts_list)
|
||||
|
||||
@staticmethod
|
||||
def _update_host_metrics(updated_hosts_list):
|
||||
from awx.main.models import HostMetric # circular import
|
||||
|
||||
# bulk-create
|
||||
current_time = now()
|
||||
HostMetric.objects.bulk_create(
|
||||
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
|
||||
)
|
||||
# bulk-update
|
||||
batch_start, batch_size = 0, 1000
|
||||
while batch_start <= len(updated_hosts_list):
|
||||
batched_host_list = updated_hosts_list[batch_start : (batch_start + batch_size)]
|
||||
HostMetric.objects.filter(hostname__in=batched_host_list).update(
|
||||
last_automation=current_time, automated_counter=models.F('automated_counter') + 1, deleted=False
|
||||
# bulk-create
|
||||
current_time = now()
|
||||
HostMetric.objects.bulk_create(
|
||||
[HostMetric(hostname=hostname, last_automation=current_time) for hostname in updated_hosts_list], ignore_conflicts=True, batch_size=100
|
||||
)
|
||||
batch_start += batch_size
|
||||
HostMetric.objects.filter(hostname__in=updated_hosts_list).update(last_automation=current_time)
|
||||
|
||||
@property
|
||||
def job_verbosity(self):
|
||||
@@ -633,10 +604,10 @@ class ProjectUpdateEvent(BasePlaybookEvent):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('pk',)
|
||||
indexes = [
|
||||
models.Index(fields=['project_update', 'job_created', 'event']),
|
||||
models.Index(fields=['project_update', 'job_created', 'uuid']),
|
||||
models.Index(fields=['project_update', 'job_created', 'counter']),
|
||||
index_together = [
|
||||
('project_update', 'job_created', 'event'),
|
||||
('project_update', 'job_created', 'uuid'),
|
||||
('project_update', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
@@ -735,7 +706,7 @@ class BaseCommandEvent(CreatedModifiedModel):
|
||||
if not isinstance(kwargs['created'], datetime.datetime):
|
||||
kwargs['created'] = parse_datetime(kwargs['created'])
|
||||
if not kwargs['created'].tzinfo:
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=timezone.utc)
|
||||
kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
|
||||
except (KeyError, ValueError):
|
||||
kwargs.pop('created', None)
|
||||
|
||||
@@ -771,10 +742,10 @@ class AdHocCommandEvent(BaseCommandEvent):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
indexes = [
|
||||
models.Index(fields=['ad_hoc_command', 'job_created', 'event']),
|
||||
models.Index(fields=['ad_hoc_command', 'job_created', 'uuid']),
|
||||
models.Index(fields=['ad_hoc_command', 'job_created', 'counter']),
|
||||
index_together = [
|
||||
('ad_hoc_command', 'job_created', 'event'),
|
||||
('ad_hoc_command', 'job_created', 'uuid'),
|
||||
('ad_hoc_command', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
EVENT_TYPES = [
|
||||
@@ -876,9 +847,9 @@ class InventoryUpdateEvent(BaseCommandEvent):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
indexes = [
|
||||
models.Index(fields=['inventory_update', 'job_created', 'uuid']),
|
||||
models.Index(fields=['inventory_update', 'job_created', 'counter']),
|
||||
index_together = [
|
||||
('inventory_update', 'job_created', 'uuid'),
|
||||
('inventory_update', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
@@ -921,9 +892,9 @@ class SystemJobEvent(BaseCommandEvent):
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ('-pk',)
|
||||
indexes = [
|
||||
models.Index(fields=['system_job', 'job_created', 'uuid']),
|
||||
models.Index(fields=['system_job', 'job_created', 'counter']),
|
||||
index_together = [
|
||||
('system_job', 'job_created', 'uuid'),
|
||||
('system_job', 'job_created', 'counter'),
|
||||
]
|
||||
|
||||
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
|
||||
|
||||
@@ -9,8 +9,6 @@ import re
|
||||
import copy
|
||||
import os.path
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import dateutil.relativedelta
|
||||
import yaml
|
||||
|
||||
# Django
|
||||
@@ -19,7 +17,6 @@ from django.db import models, connection
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.db import transaction
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.urls import resolve
|
||||
from django.utils.timezone import now
|
||||
from django.db.models import Q
|
||||
|
||||
@@ -35,7 +32,7 @@ from awx.main.fields import (
|
||||
SmartFilterField,
|
||||
OrderedManyToManyField,
|
||||
)
|
||||
from awx.main.managers import HostManager, HostMetricActiveManager
|
||||
from awx.main.managers import HostManager
|
||||
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
|
||||
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
|
||||
@@ -52,25 +49,15 @@ from awx.main.models.notifications import (
|
||||
from awx.main.models.credential.injectors import _openstack_data
|
||||
from awx.main.utils import _inventory_updates
|
||||
from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
from awx.main.utils.execution_environments import to_container_path, get_control_plane_execution_environment
|
||||
from awx.main.utils.execution_environments import to_container_path
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
|
||||
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership', 'HostMetric', 'HostMetricSummaryMonthly']
|
||||
__all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'SmartInventoryMembership']
|
||||
|
||||
logger = logging.getLogger('awx.main.models.inventory')
|
||||
|
||||
|
||||
class InventoryConstructedInventoryMembership(models.Model):
|
||||
constructed_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE, related_name='constructed_inventory_memberships')
|
||||
input_inventory = models.ForeignKey('Inventory', on_delete=models.CASCADE)
|
||||
position = models.PositiveIntegerField(
|
||||
null=True,
|
||||
default=None,
|
||||
db_index=True,
|
||||
)
|
||||
|
||||
|
||||
class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
"""
|
||||
an inventory source contains lists and hosts.
|
||||
@@ -80,7 +67,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
KIND_CHOICES = [
|
||||
('', _('Hosts have a direct link to this inventory.')),
|
||||
('smart', _('Hosts for inventory generated using the host_filter property.')),
|
||||
('constructed', _('Parse list of source inventories with the constructed inventory plugin.')),
|
||||
]
|
||||
|
||||
class Meta:
|
||||
@@ -106,28 +92,28 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
has_active_failures = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. Flag indicating whether any hosts in this inventory have failed.'),
|
||||
help_text=_('This field is deprecated and will be removed in a future release. ' 'Flag indicating whether any hosts in this inventory have failed.'),
|
||||
)
|
||||
total_hosts = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. Total number of hosts in this inventory.'),
|
||||
help_text=_('This field is deprecated and will be removed in a future release. ' 'Total number of hosts in this inventory.'),
|
||||
)
|
||||
hosts_with_active_failures = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. Number of hosts in this inventory with active failures.'),
|
||||
help_text=_('This field is deprecated and will be removed in a future release. ' 'Number of hosts in this inventory with active failures.'),
|
||||
)
|
||||
total_groups = models.PositiveIntegerField(
|
||||
default=0,
|
||||
editable=False,
|
||||
help_text=_('This field is deprecated and will be removed in a future release. Total number of groups in this inventory.'),
|
||||
help_text=_('This field is deprecated and will be removed in a future release. ' 'Total number of groups in this inventory.'),
|
||||
)
|
||||
has_inventory_sources = models.BooleanField(
|
||||
default=False,
|
||||
editable=False,
|
||||
help_text=_(
|
||||
'This field is deprecated and will be removed in a future release. Flag indicating whether this inventory has any external inventory sources.'
|
||||
'This field is deprecated and will be removed in a future release. ' 'Flag indicating whether this inventory has any external inventory sources.'
|
||||
),
|
||||
)
|
||||
total_inventory_sources = models.PositiveIntegerField(
|
||||
@@ -153,14 +139,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
default=None,
|
||||
help_text=_('Filter that will be applied to the hosts of this inventory.'),
|
||||
)
|
||||
input_inventories = OrderedManyToManyField(
|
||||
'Inventory',
|
||||
blank=True,
|
||||
through_fields=('constructed_inventory', 'input_inventory'),
|
||||
related_name='destination_inventories',
|
||||
help_text=_('Only valid for constructed inventories, this links to the inventories that will be used.'),
|
||||
through='InventoryConstructedInventoryMembership',
|
||||
)
|
||||
instance_groups = OrderedManyToManyField(
|
||||
'InstanceGroup',
|
||||
blank=True,
|
||||
@@ -209,14 +187,6 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
if request is not None:
|
||||
# circular import
|
||||
from awx.api.urls.inventory import constructed_inventory_urls
|
||||
|
||||
route = resolve(request.path_info)
|
||||
if any(route.url_name == url.name for url in constructed_inventory_urls):
|
||||
return reverse('api:constructed_inventory_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
return reverse('api:inventory_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
variables_dict = VarsDictProperty('variables')
|
||||
@@ -368,12 +338,13 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
for host in hosts:
|
||||
data['_meta']['hostvars'][host.name] = host.variables_dict
|
||||
if towervars:
|
||||
for prefix in ('host', 'tower'):
|
||||
tower_dict = {
|
||||
f'remote_{prefix}_enabled': str(host.enabled).lower(),
|
||||
f'remote_{prefix}_id': host.id,
|
||||
}
|
||||
data['_meta']['hostvars'][host.name].update(tower_dict)
|
||||
tower_dict = dict(
|
||||
remote_tower_enabled=str(host.enabled).lower(),
|
||||
remote_tower_id=host.id,
|
||||
remote_host_enabled=str(host.enabled).lower(),
|
||||
remote_host_id=host.id,
|
||||
)
|
||||
data['_meta']['hostvars'][host.name].update(tower_dict)
|
||||
|
||||
return data
|
||||
|
||||
@@ -424,7 +395,7 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
for t in tasks:
|
||||
t.task_impact = t._get_task_impact()
|
||||
UnifiedJob.objects.bulk_update(tasks, ['task_impact'])
|
||||
logger.debug("Finished updating inventory computed fields, pk={0}, in {1:.3f} seconds".format(self.pk, time.time() - start_time))
|
||||
logger.debug("Finished updating inventory computed fields, pk={0}, in " "{1:.3f} seconds".format(self.pk, time.time() - start_time))
|
||||
|
||||
def websocket_emit_status(self, status):
|
||||
connection.on_commit(
|
||||
@@ -460,24 +431,12 @@ class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
|
||||
connection.on_commit(on_commit)
|
||||
|
||||
def _enforce_constructed_source(self):
|
||||
"""
|
||||
Constructed inventory should always have exactly 1 inventory source, constructed type
|
||||
this enforces that requirement
|
||||
"""
|
||||
if self.kind == 'constructed':
|
||||
if not self.inventory_sources.exists():
|
||||
self.inventory_sources.create(
|
||||
source='constructed', name=f'Auto-created source for: {self.name}'[:512], overwrite=True, overwrite_vars=True, update_on_launch=True
|
||||
)
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
super(Inventory, self).save(*args, **kwargs)
|
||||
if self.kind == 'smart' and 'host_filter' in kwargs.get('update_fields', ['host_filter']) and connection.vendor != 'sqlite':
|
||||
# Minimal update of host_count for smart inventory host filter changes
|
||||
self.update_computed_fields()
|
||||
self._enforce_constructed_source()
|
||||
|
||||
def delete(self, *args, **kwargs):
|
||||
self._update_host_smart_inventory_memeberships()
|
||||
@@ -861,64 +820,9 @@ class Group(CommonModelNameNotUnique, RelatedJobsMixin):
|
||||
|
||||
|
||||
class HostMetric(models.Model):
|
||||
hostname = models.CharField(unique=True, max_length=512)
|
||||
hostname = models.CharField(primary_key=True, max_length=512)
|
||||
first_automation = models.DateTimeField(auto_now_add=True, null=False, db_index=True, help_text=_('When the host was first automated against'))
|
||||
last_automation = models.DateTimeField(db_index=True, help_text=_('When the host was last automated against'))
|
||||
last_deleted = models.DateTimeField(null=True, db_index=True, help_text=_('When the host was last deleted'))
|
||||
automated_counter = models.BigIntegerField(default=0, help_text=_('How many times was the host automated'))
|
||||
deleted_counter = models.IntegerField(default=0, help_text=_('How many times was the host deleted'))
|
||||
deleted = models.BooleanField(
|
||||
default=False, help_text=_('Boolean flag saying whether the host is deleted and therefore not counted into the subscription consumption')
|
||||
)
|
||||
used_in_inventories = models.IntegerField(null=True, help_text=_('How many inventories contain this host'))
|
||||
|
||||
objects = models.Manager()
|
||||
active_objects = HostMetricActiveManager()
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:host_metric_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def soft_delete(self):
|
||||
if not self.deleted:
|
||||
self.deleted_counter = (self.deleted_counter or 0) + 1
|
||||
self.last_deleted = now()
|
||||
self.deleted = True
|
||||
self.save(update_fields=['deleted', 'deleted_counter', 'last_deleted'])
|
||||
|
||||
def soft_restore(self):
|
||||
if self.deleted:
|
||||
self.deleted = False
|
||||
self.save(update_fields=['deleted'])
|
||||
|
||||
@classmethod
|
||||
def cleanup_task(cls, months_ago):
|
||||
try:
|
||||
months_ago = int(months_ago)
|
||||
if months_ago <= 0:
|
||||
raise ValueError()
|
||||
|
||||
last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
|
||||
|
||||
logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
|
||||
HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
|
||||
deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
|
||||
)
|
||||
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
|
||||
except (TypeError, ValueError):
|
||||
logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")
|
||||
|
||||
|
||||
class HostMetricSummaryMonthly(models.Model):
|
||||
"""
|
||||
HostMetric summaries computed by scheduled task <TODO> monthly
|
||||
"""
|
||||
|
||||
date = models.DateField(unique=True)
|
||||
license_consumed = models.BigIntegerField(default=0, help_text=_("How many unique hosts are consumed from the license"))
|
||||
license_capacity = models.BigIntegerField(default=0, help_text=_("'License capacity as max. number of unique hosts"))
|
||||
hosts_added = models.IntegerField(default=0, help_text=_("How many hosts were added in the associated month, consuming more license capacity"))
|
||||
hosts_deleted = models.IntegerField(default=0, help_text=_("How many hosts were deleted in the associated month, freeing the license capacity"))
|
||||
indirectly_managed_hosts = models.IntegerField(default=0, help_text=("Manually entered number indirectly managed hosts for a certain month"))
|
||||
|
||||
|
||||
class InventorySourceOptions(BaseModel):
|
||||
@@ -930,7 +834,6 @@ class InventorySourceOptions(BaseModel):
|
||||
|
||||
SOURCE_CHOICES = [
|
||||
('file', _('File, Directory or Script')),
|
||||
('constructed', _('Template additional groups and hostvars at runtime')),
|
||||
('scm', _('Sourced from a Project')),
|
||||
('ec2', _('Amazon EC2')),
|
||||
('gce', _('Google Compute Engine')),
|
||||
@@ -1010,7 +913,7 @@ class InventorySourceOptions(BaseModel):
|
||||
host_filter = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text=_('This field is deprecated and will be removed in a future release. Regex where only matching hosts will be imported.'),
|
||||
help_text=_('Regex where only matching hosts will be imported.'),
|
||||
)
|
||||
overwrite = models.BooleanField(
|
||||
default=False,
|
||||
@@ -1030,21 +933,6 @@ class InventorySourceOptions(BaseModel):
|
||||
blank=True,
|
||||
default=1,
|
||||
)
|
||||
limit = models.TextField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text=_("Enter host, group or pattern match"),
|
||||
)
|
||||
|
||||
def resolve_execution_environment(self):
|
||||
"""
|
||||
Project updates, themselves, will use the control plane execution environment.
|
||||
Jobs using the project can use the default_environment, but the project updates
|
||||
are not flexible enough to allow customizing the image they use.
|
||||
"""
|
||||
if self.inventory.kind == 'constructed':
|
||||
return get_control_plane_execution_environment()
|
||||
return super().resolve_execution_environment()
|
||||
|
||||
@staticmethod
|
||||
def cloud_credential_validation(source, cred):
|
||||
@@ -1055,16 +943,16 @@ class InventorySourceOptions(BaseModel):
|
||||
# the actual inventory source being used (Amazon requires Amazon
|
||||
# credentials; Rackspace requires Rackspace credentials; etc...)
|
||||
if source.replace('ec2', 'aws') != cred.kind:
|
||||
return _('Cloud-based inventory sources (such as %s) require credentials for the matching cloud service.') % source
|
||||
return _('Cloud-based inventory sources (such as %s) require ' 'credentials for the matching cloud service.') % source
|
||||
# Allow an EC2 source to omit the credential. If Tower is running on
|
||||
# an EC2 instance with an IAM Role assigned, boto will use credentials
|
||||
# from the instance metadata instead of those explicitly provided.
|
||||
elif source in CLOUD_PROVIDERS and source != 'ec2':
|
||||
return _('Credential is required for a cloud source.')
|
||||
elif source == 'custom' and cred and cred.credential_type.kind in ('scm', 'ssh', 'insights', 'vault'):
|
||||
return _('Credentials of type machine, source control, insights and vault are disallowed for custom inventory sources.')
|
||||
return _('Credentials of type machine, source control, insights and vault are ' 'disallowed for custom inventory sources.')
|
||||
elif source == 'scm' and cred and cred.credential_type.kind in ('insights', 'vault'):
|
||||
return _('Credentials of type insights and vault are disallowed for scm inventory sources.')
|
||||
return _('Credentials of type insights and vault are ' 'disallowed for scm inventory sources.')
|
||||
return None
|
||||
|
||||
def get_cloud_credential(self):
|
||||
@@ -1479,8 +1367,8 @@ class PluginFileInjector(object):
|
||||
def build_env(self, inventory_update, env, private_data_dir, private_data_files):
|
||||
injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
env.update(injector_env)
|
||||
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
|
||||
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
|
||||
# Preserves current behavior for Ansible change in default planned for 2.10
|
||||
env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'
|
||||
return env
|
||||
|
||||
def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
@@ -1664,18 +1552,5 @@ class insights(PluginFileInjector):
|
||||
use_fqcn = True
|
||||
|
||||
|
||||
class constructed(PluginFileInjector):
|
||||
plugin_name = 'constructed'
|
||||
namespace = 'ansible'
|
||||
collection = 'builtin'
|
||||
|
||||
def build_env(self, *args, **kwargs):
|
||||
env = super().build_env(*args, **kwargs)
|
||||
# Enable script inventory plugin so we pick up the script files from source inventories
|
||||
env['ANSIBLE_INVENTORY_ENABLED'] += ',script'
|
||||
env['ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED'] = 'True'
|
||||
return env
|
||||
|
||||
|
||||
for cls in PluginFileInjector.__subclasses__():
|
||||
InventorySourceOptions.injectors[cls.__name__] = cls
|
||||
|
||||
@@ -2,8 +2,12 @@
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import codecs
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
import json
|
||||
from urllib.parse import urljoin
|
||||
|
||||
|
||||
@@ -11,9 +15,11 @@ from urllib.parse import urljoin
|
||||
from django.conf import settings
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.db import models
|
||||
from django.db.models.functions import Cast
|
||||
from django.db.models.query import QuerySet
|
||||
|
||||
# from django.core.cache import cache
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.core.exceptions import FieldDoesNotExist
|
||||
|
||||
@@ -22,7 +28,6 @@ from rest_framework.exceptions import ParseError
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import HOST_FACTS_FIELDS
|
||||
from awx.main.models.base import (
|
||||
BaseModel,
|
||||
CreatedModifiedModel,
|
||||
@@ -39,7 +44,7 @@ from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
JobNotificationMixin,
|
||||
)
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
|
||||
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic, log_excess_runtime
|
||||
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
|
||||
from awx.main.models.mixins import (
|
||||
ResourceMixin,
|
||||
@@ -55,6 +60,8 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.models.jobs')
|
||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
||||
|
||||
__all__ = ['JobTemplate', 'JobLaunchConfig', 'Job', 'JobHostSummary', 'SystemJobTemplate', 'SystemJob']
|
||||
|
||||
@@ -101,7 +108,7 @@ class JobOptions(BaseModel):
|
||||
max_length=1024,
|
||||
default='',
|
||||
blank=True,
|
||||
help_text=_('Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true.'),
|
||||
help_text=_('Branch to use in job run. Project default used if blank. ' 'Only allowed if project allow_override field is set to true.'),
|
||||
)
|
||||
forks = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
@@ -253,7 +260,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
job_slice_count = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=1,
|
||||
help_text=_("The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1."),
|
||||
help_text=_("The number of jobs to slice into at runtime. " "Will cause the Job Template to launch a workflow if value is greater than 1."),
|
||||
)
|
||||
|
||||
admin_role = ImplicitRoleField(parent_role=['organization.job_template_admin_role'])
|
||||
@@ -571,7 +578,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
)
|
||||
hosts = models.ManyToManyField('Host', related_name='jobs', editable=False, through='JobHostSummary', through_fields=('job', 'host'))
|
||||
hosts = models.ManyToManyField(
|
||||
'Host',
|
||||
related_name='jobs',
|
||||
editable=False,
|
||||
through='JobHostSummary',
|
||||
)
|
||||
artifacts = JSONBlob(
|
||||
default=dict,
|
||||
blank=True,
|
||||
@@ -596,12 +608,12 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
job_slice_number = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=0,
|
||||
help_text=_("If part of a sliced job, the ID of the inventory slice operated on. If not part of sliced job, parameter is not used."),
|
||||
help_text=_("If part of a sliced job, the ID of the inventory slice operated on. " "If not part of sliced job, parameter is not used."),
|
||||
)
|
||||
job_slice_count = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=1,
|
||||
help_text=_("If ran as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job."),
|
||||
help_text=_("If ran as part of sliced jobs, the total number of slices. " "If 1, job is not part of a sliced job."),
|
||||
)
|
||||
|
||||
def _get_parent_field_name(self):
|
||||
@@ -819,9 +831,6 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
for name in JOB_VARIABLE_PREFIXES:
|
||||
r['{}_job_template_id'.format(name)] = self.job_template.pk
|
||||
r['{}_job_template_name'.format(name)] = self.job_template.name
|
||||
if self.execution_node:
|
||||
for name in JOB_VARIABLE_PREFIXES:
|
||||
r['{}_execution_node'.format(name)] = self.execution_node
|
||||
return r
|
||||
|
||||
'''
|
||||
@@ -836,26 +845,109 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
|
||||
def get_notification_friendly_name(self):
|
||||
return "Job"
|
||||
|
||||
def get_hosts_for_fact_cache(self):
|
||||
"""
|
||||
Builds the queryset to use for writing or finalizing the fact cache
|
||||
these need to be the 'real' hosts associated with the job.
|
||||
For constructed inventories, that means the original (input inventory) hosts
|
||||
when slicing, that means only returning hosts in that slice
|
||||
"""
|
||||
Host = JobHostSummary._meta.get_field('host').related_model
|
||||
if not self.inventory_id:
|
||||
return Host.objects.none()
|
||||
def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id'), **filters):
|
||||
"""Return value is an iterable for the relevant hosts for this job"""
|
||||
if not self.inventory:
|
||||
return []
|
||||
host_queryset = self.inventory.hosts.only(*only)
|
||||
if filters:
|
||||
host_queryset = host_queryset.filter(**filters)
|
||||
host_queryset = self.inventory.get_sliced_hosts(host_queryset, self.job_slice_number, self.job_slice_count)
|
||||
if isinstance(host_queryset, QuerySet):
|
||||
return host_queryset.iterator()
|
||||
return host_queryset
|
||||
|
||||
if self.inventory.kind == 'constructed':
|
||||
id_field = Host._meta.get_field('id')
|
||||
host_qs = Host.objects.filter(id__in=self.inventory.hosts.exclude(instance_id='').values_list(Cast('instance_id', output_field=id_field)))
|
||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Job {job_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||
def start_job_fact_cache(self, destination, log_data, timeout=None):
|
||||
self.log_lifecycle("start_job_fact_cache")
|
||||
log_data['job_id'] = self.id
|
||||
log_data['written_ct'] = 0
|
||||
os.makedirs(destination, mode=0o700)
|
||||
|
||||
if timeout is None:
|
||||
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
||||
if timeout > 0:
|
||||
# exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT seconds`
|
||||
timeout = now() - datetime.timedelta(seconds=timeout)
|
||||
hosts = self._get_inventory_hosts(ansible_facts_modified__gte=timeout)
|
||||
else:
|
||||
host_qs = self.inventory.hosts
|
||||
hosts = self._get_inventory_hosts()
|
||||
|
||||
host_qs = host_qs.only(*HOST_FACTS_FIELDS)
|
||||
host_qs = self.inventory.get_sliced_hosts(host_qs, self.job_slice_number, self.job_slice_count)
|
||||
return host_qs
|
||||
last_filepath_written = None
|
||||
for host in hosts:
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
try:
|
||||
with codecs.open(filepath, 'w', encoding='utf-8') as f:
|
||||
os.chmod(f.name, 0o600)
|
||||
json.dump(host.ansible_facts, f)
|
||||
log_data['written_ct'] += 1
|
||||
last_filepath_written = filepath
|
||||
except IOError:
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
# make note of the time we wrote the last file so we can check if any file changed later
|
||||
if last_filepath_written:
|
||||
return os.path.getmtime(last_filepath_written)
|
||||
return None
|
||||
|
||||
@log_excess_runtime(
|
||||
logger,
|
||||
debug_cutoff=0.01,
|
||||
msg='Job {job_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
||||
add_log_data=True,
|
||||
)
|
||||
def finish_job_fact_cache(self, destination, facts_write_time, log_data):
|
||||
self.log_lifecycle("finish_job_fact_cache")
|
||||
log_data['job_id'] = self.id
|
||||
log_data['updated_ct'] = 0
|
||||
log_data['unmodified_ct'] = 0
|
||||
log_data['cleared_ct'] = 0
|
||||
hosts_to_update = []
|
||||
for host in self._get_inventory_hosts():
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
if os.path.exists(filepath):
|
||||
# If the file changed since we wrote the last facts file, pre-playbook run...
|
||||
modified = os.path.getmtime(filepath)
|
||||
if (not facts_write_time) or modified > facts_write_time:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
try:
|
||||
ansible_facts = json.load(f)
|
||||
except ValueError:
|
||||
continue
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info(
|
||||
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||
extra=dict(
|
||||
inventory_id=host.inventory.id,
|
||||
host_name=host.name,
|
||||
ansible_facts=host.ansible_facts,
|
||||
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
|
||||
job_id=self.id,
|
||||
),
|
||||
)
|
||||
log_data['updated_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
# if the file goes missing, ansible removed it (likely via clear_facts)
|
||||
host.ansible_facts = {}
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
|
||||
log_data['cleared_ct'] += 1
|
||||
if len(hosts_to_update) > 100:
|
||||
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
|
||||
hosts_to_update = []
|
||||
if hosts_to_update:
|
||||
self.inventory.hosts.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
class LaunchTimeConfigBase(BaseModel):
|
||||
@@ -1077,15 +1169,6 @@ class JobHostSummary(CreatedModifiedModel):
|
||||
editable=False,
|
||||
)
|
||||
host = models.ForeignKey('Host', related_name='job_host_summaries', null=True, default=None, on_delete=models.SET_NULL, editable=False)
|
||||
constructed_host = models.ForeignKey(
|
||||
'Host',
|
||||
related_name='constructed_host_summaries',
|
||||
null=True,
|
||||
default=None,
|
||||
on_delete=models.SET_NULL,
|
||||
editable=False,
|
||||
help_text='Only for jobs run against constructed inventories, this links to the host inside the constructed inventory.',
|
||||
)
|
||||
|
||||
host_name = models.CharField(
|
||||
max_length=1024,
|
||||
|
||||
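The start_job_fact_cache / finish_job_fact_cache pair above implements a file-per-host fact cache: facts are dumped to one JSON file per host before the run, and afterwards only files whose mtime changed are read back. An illustrative, standalone sketch of that pattern (not the AWX API itself):

import json
import os


def write_cache(destination, facts_by_host):
    # one JSON file per host; remember the mtime of the last file written
    os.makedirs(destination, exist_ok=True)
    last_write = None
    for name, facts in facts_by_host.items():
        path = os.path.join(destination, name)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(facts, f)
        last_write = os.path.getmtime(path)
    return last_write


def read_back_changed(destination, last_write):
    # after the run, re-read only files the playbook touched (mtime newer than our last write)
    changed = {}
    for name in os.listdir(destination):
        path = os.path.join(destination, name)
        if last_write is None or os.path.getmtime(path) > last_write:
            with open(path, encoding='utf-8') as f:
                changed[name] = json.load(f)
    return changed
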
@@ -675,4 +675,4 @@ class WebhookMixin(models.Model):
if response.status_code < 400:
logger.debug("Webhook status update sent.")
else:
logger.error("Posting webhook status failed, code: {}\n" "{}\nPayload sent: {}".format(response.status_code, response.text, json.dumps(data)))
logger.error("Posting webhook status failed, code: {}\n" "{}\n" "Payload sent: {}".format(response.status_code, response.text, json.dumps(data)))

@@ -284,7 +284,7 @@ class JobNotificationMixin(object):
'workflow_url',
'scm_branch',
'artifacts',
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark', 'processed', 'rescued', 'ignored']},
{'host_status_counts': ['skipped', 'ok', 'changed', 'failed', 'failures', 'dark' 'processed', 'rescued', 'ignored']},
{
'summary_fields': [
{

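Several hunks above split long literals into adjacent strings, and the old host_status_counts line shows the pitfall that motivates joining them: a missing comma between 'dark' and 'processed' silently merges the two items. A small illustration:

# Adjacent string literals are concatenated at compile time.
msg = "Posting webhook status failed, code: {}\n" "{}\n" "Payload sent: {}"
assert msg == "Posting webhook status failed, code: {}\n{}\nPayload sent: {}"

# The same rule makes a missing comma in a list silently merge two items:
counts = ['failures', 'dark' 'processed', 'rescued']
assert counts == ['failures', 'darkprocessed', 'rescued']  # 'dark' and 'processed' collapsed into one entry
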
@@ -74,7 +74,7 @@ class ProjectOptions(models.Model):
return []

local_path = models.CharField(
max_length=1024, blank=True, help_text=_('Local path (relative to PROJECTS_ROOT) containing playbooks and related files for this project.')
max_length=1024, blank=True, help_text=_('Local path (relative to PROJECTS_ROOT) containing ' 'playbooks and related files for this project.')
)

scm_type = models.CharField(
@@ -276,11 +276,11 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
scm_update_cache_timeout = models.PositiveIntegerField(
default=0,
blank=True,
help_text=_('The number of seconds after the last project update ran that a new project update will be launched as a job dependency.'),
help_text=_('The number of seconds after the last project update ran that a new ' 'project update will be launched as a job dependency.'),
)
allow_override = models.BooleanField(
default=False,
help_text=_('Allow changing the SCM branch or revision in a job template that uses this project.'),
help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'),
)

# credential (keys) used to validate content signature

@@ -141,7 +141,7 @@ class Role(models.Model):
app_label = 'main'
verbose_name_plural = _('roles')
db_table = 'main_rbac_roles'
indexes = [models.Index(fields=["content_type", "object_id"])]
index_together = [("content_type", "object_id")]
ordering = ("content_type", "object_id")

role_field = models.TextField(null=False)
@@ -447,10 +447,10 @@ class RoleAncestorEntry(models.Model):
app_label = 'main'
verbose_name_plural = _('role_ancestors')
db_table = 'main_rbac_role_ancestors'
indexes = [
models.Index(fields=["ancestor", "content_type_id", "object_id"]),  # used by get_roles_on_resource
models.Index(fields=["ancestor", "content_type_id", "role_field"]),  # used by accessible_objects
models.Index(fields=["ancestor", "descendent"]),  # used by rebuild_role_ancestor_list in the NOT EXISTS clauses.
index_together = [
("ancestor", "content_type_id", "object_id"),  # used by get_roles_on_resource
("ancestor", "content_type_id", "role_field"),  # used by accessible_objects
("ancestor", "descendent"),  # used by rebuild_role_ancestor_list in the NOT EXISTS clauses.
]

descendent = models.ForeignKey(Role, null=False, on_delete=models.CASCADE, related_name='+')

@@ -32,7 +32,7 @@ from polymorphic.models import PolymorphicModel

# AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.control import Control as ControlDispatcher
from awx.main.registrar import activity_stream_registrar
from awx.main.models.mixins import ResourceMixin, TaskManagerUnifiedJobMixin, ExecutionEnvironmentMixin
@@ -1567,7 +1567,7 @@ class UnifiedJob(
return r

def get_queue_name(self):
return self.controller_node or self.execution_node or get_task_queuename()
return self.controller_node or self.execution_node or get_local_queuename()

@property
def is_container_group_task(self):

@@ -82,7 +82,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
related_name='%(class)ss_always',
)
all_parents_must_converge = models.BooleanField(
default=False, help_text=_("If enabled then the node will only run if all of the parent nodes have met the criteria to reach this node")
default=False, help_text=_("If enabled then the node will only run if all of the parent nodes " "have met the criteria to reach this node")
)
unified_job_template = models.ForeignKey(
'UnifiedJobTemplate',
@@ -181,7 +181,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
max_length=512,
default=uuid4,
blank=False,
help_text=_('An identifier for this node that is unique within its workflow. It is copied to workflow job nodes corresponding to this node.'),
help_text=_('An identifier for this node that is unique within its workflow. ' 'It is copied to workflow job nodes corresponding to this node.'),
)
instance_groups = OrderedManyToManyField(
'InstanceGroup',
@@ -334,7 +334,7 @@ class WorkflowJobNode(WorkflowNodeBase):
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**node_prompts_data)
if errors:
logger.info(
_('Bad launch configuration starting template {template_pk} as part of workflow {workflow_pk}. Errors:\n{error_text}').format(
_('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format(
template_pk=ujt_obj.pk, workflow_pk=self.pk, error_text=errors
)
)
@@ -647,7 +647,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
null=True,
default=None,
on_delete=models.SET_NULL,
help_text=_("If automatically created for a sliced job run, the job template the workflow job was created from."),
help_text=_("If automatically created for a sliced job run, the job template " "the workflow job was created from."),
)
is_sliced_job = models.BooleanField(default=False)
is_bulk_job = models.BooleanField(default=False)
@@ -714,7 +714,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
wj = self.get_workflow_job()
while wj and wj.workflow_job_template_id:
if wj.pk in wj_ids:
logger.critical('Cycles detected in the workflow jobs graph, this is not normal and suggests task manager degeneracy.')
logger.critical('Cycles detected in the workflow jobs graph, ' 'this is not normal and suggests task manager degeneracy.')
break
wj_ids.add(wj.pk)
ancestors.append(wj.workflow_job_template)

@@ -8,7 +8,7 @@ class CustomNotificationBase(object):

DEFAULT_APPROVAL_RUNNING_MSG = 'The approval node "{{ approval_node_name }}" needs review. This node can be viewed at: {{ workflow_url }}'
DEFAULT_APPROVAL_RUNNING_BODY = (
'The approval node "{{ approval_node_name }}" needs review. This approval node can be viewed at: {{ workflow_url }}\n\n{{ job_metadata }}'
'The approval node "{{ approval_node_name }}" needs review. ' 'This approval node can be viewed at: {{ workflow_url }}\n\n{{ job_metadata }}'
)

DEFAULT_APPROVAL_APPROVED_MSG = 'The approval node "{{ approval_node_name }}" was approved. {{ workflow_url }}'

@@ -32,7 +32,7 @@ class WebhookBackend(AWXBaseEmailBackend, CustomNotificationBase):
"success": {"body": DEFAULT_BODY},
"error": {"body": DEFAULT_BODY},
"workflow_approval": {
"running": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" needs review. This node can be viewed at: {{ workflow_url }}"}'},
"running": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" needs review. ' 'This node can be viewed at: {{ workflow_url }}"}'},
"approved": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" was approved. {{ workflow_url }}"}'},
"timed_out": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" has timed out. {{ workflow_url }}"}'},
"denied": {"body": '{"body": "The approval node \\"{{ approval_node_name }}\\" was denied. {{ workflow_url }}"}'},

@@ -28,7 +28,7 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):

websocket_urlpatterns = [
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
]

application = AWXProtocolTypeRouter(

@@ -8,7 +8,7 @@ from django.conf import settings
from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename

logger = logging.getLogger('awx.main.scheduler')

@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
manager().schedule()

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def task_manager():
run_manager(TaskManager, "task")

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def dependency_manager():
run_manager(DependencyManager, "dependency")

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def workflow_manager():
run_manager(WorkflowManager, "workflow")

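The scheduler hunks above only swap which callable is handed to @task(queue=...): get_task_queuename on one side, get_local_queuename on the other. Below is a simplified, hypothetical illustration of why a callable is accepted there, namely that the queue name can be resolved when the task is published rather than when the module is imported. This is not the actual awx.main.dispatch implementation.

```python
# Simplified illustration of a task decorator that accepts either a fixed queue
# name or a callable resolved at publish time (hypothetical; not the AWX dispatcher).
from typing import Callable, Union


def task(queue: Union[str, Callable[[], str]]):
    def decorator(fn):
        def apply_async(*args, **kwargs):
            queue_name = queue() if callable(queue) else queue
            print(f"publishing {fn.__name__} to queue {queue_name}")
            # a real dispatcher would serialize the arguments and hand them to a broker here
        fn.apply_async = apply_async
        return fn
    return decorator


def get_queuename() -> str:
    # stand-in for a per-node lookup such as get_task_queuename / get_local_queuename
    return "node-1"


@task(queue=get_queuename)
def heartbeat():
    print("heartbeat ran")


heartbeat.apply_async()  # the queue is resolved only now, at publish time
```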
@@ -1,117 +0,0 @@
import codecs
import datetime
import os
import json
import logging

# Django
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now

# AWX
from awx.main.utils.common import log_excess_runtime
from awx.main.models.inventory import Host

logger = logging.getLogger('awx.main.tasks.facts')
system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')

@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
log_data['inventory_id'] = inventory_id
log_data['written_ct'] = 0
try:
os.makedirs(destination, mode=0o700)
except FileExistsError:
pass

if timeout is None:
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT

if isinstance(hosts, QuerySet):
hosts = hosts.iterator()

last_filepath_written = None
for host in hosts:
if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
continue  # facts are expired - do not write them
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
try:
with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1
last_filepath_written = filepath
except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
# make note of the time we wrote the last file so we can check if any file changed later
if last_filepath_written:
return os.path.getmtime(last_filepath_written)
return None

@log_excess_runtime(
logger,
debug_cutoff=0.01,
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True,
)
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
log_data['inventory_id'] = inventory_id
log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0

if isinstance(hosts, QuerySet):
hosts = hosts.iterator()

hosts_to_update = []
for host in hosts:
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue
if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run...
modified = os.path.getmtime(filepath)
if (not facts_write_time) or modified > facts_write_time:
with codecs.open(filepath, 'r', encoding='utf-8') as f:
try:
ansible_facts = json.load(f)
except ValueError:
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info(
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict(
inventory_id=host.inventory.id,
host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
job_id=job_id,
),
)
log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else:
# if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {}
host.ansible_facts_modified = now()
hosts_to_update.append(host)
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1
if len(hosts_to_update) > 100:
Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = []
if hosts_to_update:
Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
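For reference, the two functions in the removed fact-cache module above implement a simple round trip: write one JSON file per host before the playbook runs, remember the newest write time, and afterwards reload only the files Ansible rewrote, treating a missing file as cleared facts. A condensed standalone sketch, using plain dicts instead of Host objects and illustrative paths:

```python
# Condensed sketch of the fact-cache round trip shown above (illustrative only).
import json
import os
import tempfile
import time


def write_cache(hosts, destination):
    """Write one JSON file per host and return the newest write time."""
    os.makedirs(destination, exist_ok=True)
    newest = None
    for host in hosts:
        path = os.path.join(destination, host["name"])
        with open(path, "w", encoding="utf-8") as f:
            json.dump(host["facts"], f)
        newest = os.path.getmtime(path)
    return newest


def read_back_changes(hosts, destination, write_time):
    """Reload only files touched after write_time; a missing file means facts were cleared."""
    for host in hosts:
        path = os.path.join(destination, host["name"])
        if not os.path.exists(path):
            host["facts"] = {}
            continue
        if write_time is None or os.path.getmtime(path) > write_time:
            with open(path, "r", encoding="utf-8") as f:
                host["facts"] = json.load(f)


hosts = [{"name": "web1", "facts": {"os": "linux"}}]
cache_dir = tempfile.mkdtemp()
stamp = write_cache(hosts, cache_dir)
time.sleep(0.05)  # pretend a playbook ran and rewrote web1's facts
with open(os.path.join(cache_dir, "web1"), "w", encoding="utf-8") as f:
    json.dump({"os": "linux", "uptime": 42}, f)
read_back_changes(hosts, cache_dir, stamp)
print(hosts[0]["facts"])  # now includes the refreshed facts
```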
@@ -29,7 +29,7 @@ from gitdb.exc import BadName as BadGitName

# AWX
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename
from awx.main.constants import (
PRIVILEGE_ESCALATION_METHODS,
STANDARD_INVENTORY_UPDATE_ENV,
@@ -37,7 +37,6 @@ from awx.main.constants import (
MAX_ISOLATED_PATH_COLON_DELIMITER,
CONTAINER_VOLUMES_MOUNT_TYPES,
ACTIVE_STATES,
HOST_FACTS_FIELDS,
)
from awx.main.models import (
Instance,
@@ -64,7 +63,6 @@ from awx.main.tasks.callback import (
)
from awx.main.tasks.signals import with_signal_handling, signal_callback
from awx.main.tasks.receptor import AWXReceptorJob
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.execution_environments import CONTAINER_ROOT, to_container_path
@@ -317,22 +315,17 @@ class BaseTask(object):

return env

def write_inventory_file(self, inventory, private_data_dir, file_name, script_params):
script_data = inventory.get_script_data(**script_params)
for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items():
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.runner_callback.host_map[hostname] = hv.get('remote_tower_id', '')
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
return self.write_private_data_file(private_data_dir, file_name, file_content, sub_dir='inventory', file_permissions=0o700)

def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count

return self.write_inventory_file(instance.inventory, private_data_dir, 'hosts', script_params)
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.runner_callback.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
file_content = '#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json.dumps(script_data)
return self.write_private_data_file(private_data_dir, 'hosts', file_content, sub_dir='inventory', file_permissions=0o700)

def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
@@ -457,9 +450,6 @@ class BaseTask(object):
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])

def should_use_fact_cache(self):
return False

@with_path_cleanup
@with_signal_handling
def run(self, pk, **kwargs):
@@ -558,8 +548,7 @@ class BaseTask(object):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)

# TODO: refactor into a better BasTask method
if self.should_use_fact_cache():
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
@@ -806,7 +795,7 @@ class SourceControlMixin(BaseTask):
self.release_lock(project)

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
class RunJob(SourceControlMixin, BaseTask):
"""
Run a job using ansible-playbook.
@@ -1014,9 +1003,6 @@ class RunJob(SourceControlMixin, BaseTask):

return args

def should_use_fact_cache(self):
return self.instance.use_fact_cache

def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook

@@ -1082,11 +1068,8 @@ class RunJob(SourceControlMixin, BaseTask):

# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if self.should_use_fact_cache():
job.log_lifecycle("start_job_fact_cache")
self.facts_write_time = start_fact_cache(
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
)
if job.use_fact_cache:
self.facts_write_time = self.instance.start_job_fact_cache(os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'))

def build_project_dir(self, job, private_data_dir):
self.sync_and_copy(job.project, private_data_dir, scm_branch=job.scm_branch)
@@ -1100,14 +1083,10 @@ class RunJob(SourceControlMixin, BaseTask):
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if self.should_use_fact_cache():
job.log_lifecycle("finish_job_fact_cache")
finish_fact_cache(
job.get_hosts_for_fact_cache(),
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
facts_write_time=self.facts_write_time,
job_id=job.id,
inventory_id=job.inventory_id,
self.facts_write_time,
)

def final_run_hook(self, job, status, private_data_dir):
@@ -1121,7 +1100,7 @@ class RunJob(SourceControlMixin, BaseTask):
update_inventory_computed_fields.delay(inventory.id)

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
@@ -1443,7 +1422,7 @@ class RunProjectUpdate(BaseTask):
return params

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
@@ -1490,6 +1469,8 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'

if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
@@ -1542,22 +1523,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

args = ['ansible-inventory', '--list', '--export']

# special case for constructed inventories, we pass source inventories from database
# these must come in order, and in order _before_ the constructed inventory itself
if inventory_update.inventory.kind == 'constructed':
inventory_update.log_lifecycle("start_job_fact_cache")
for input_inventory in inventory_update.inventory.input_inventories.all():
args.append('-i')
script_params = dict(hostvars=True, towervars=True)
source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)
args.append(to_container_path(source_inv_path, private_data_dir))
# Include any facts from input inventories so they can be used in filters
start_fact_cache(
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
inventory_id=input_inventory.id,
)

# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join(CONTAINER_ROOT, rel_path)
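The constructed-inventory branch above builds the ansible-inventory command so that every input inventory is passed with -i before the constructed definition itself, and pre-seeds the fact cache from those inputs so facts can be used in constructed filters. A rough sketch of just the argument ordering follows; the paths are illustrative, and the real code writes generated inventory scripts and uses container paths.

```python
# Rough sketch of how the ansible-inventory argument list is ordered for a
# constructed inventory: input inventories first, then the constructed source,
# then --limit and --output (illustrative names and paths only).

def build_args(input_inventory_paths, constructed_source_path, output_path, limit=None):
    args = ["ansible-inventory", "--list", "--export"]
    for path in input_inventory_paths:  # order matters: inputs come first
        args += ["-i", path]
    args += ["-i", constructed_source_path]  # the constructed definition comes last
    if limit:
        args += ["--limit", limit]
    args += ["--output", output_path]
    return args


print(build_args(["/runner/inventory/hosts_1", "/runner/inventory/hosts_2"],
                 "/runner/inventory/constructed.yml",
                 "/runner/artifacts/42/output.json"))
```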
@@ -1565,11 +1530,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

args.append('-i')
args.append(container_location)
# Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596
# limit should be usable in ansible-inventory 2.15+
if inventory_update.limit:
args.append('--limit')
args.append(inventory_update.limit)

args.append('--output')
args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))
@@ -1585,9 +1545,6 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):

return args

def should_use_fact_cache(self):
return bool(self.instance.source == 'constructed')

def build_inventory(self, inventory_update, private_data_dir):
return None  # what runner expects in order to not deal with inventory

@@ -1706,7 +1663,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
@@ -1859,7 +1816,7 @@ class RunAdHocCommand(BaseTask):
return d

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent

@@ -28,7 +28,7 @@ from awx.main.utils.common import (
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename
from awx.main.dispatch.publish import task

# Receptorctl
@@ -639,7 +639,7 @@ class AWXReceptorJob:
#
RECEPTOR_CONFIG_STARTER = (
{'local-only': None},
{'log-level': 'info'},
{'log-level': 'debug'},
{'node': {'firewallrules': [{'action': 'reject', 'tonode': settings.CLUSTER_HOST_ID, 'toservice': 'control'}]}},
{'control-service': {'service': 'control', 'filename': '/var/run/receptor/receptor.sock', 'permissions': '0660'}},
{'work-command': {'worktype': 'local', 'command': 'ansible-runner', 'params': 'worker', 'allowruntimeparams': True}},
@@ -668,7 +668,6 @@ RECEPTOR_CONFIG_STARTER = (
'rootcas': '/etc/receptor/tls/ca/receptor-ca.crt',
'cert': '/etc/receptor/tls/receptor.crt',
'key': '/etc/receptor/tls/receptor.key',
'mintls13': False,
}
},
)
@@ -713,7 +712,7 @@ def write_receptor_config():
links.update(link_state=InstanceLink.States.ESTABLISHED)

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def remove_deprovisioned_node(hostname):
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)

@@ -47,11 +47,10 @@ from awx.main.models import (
Inventory,
SmartInventoryMembership,
Job,
HostMetric,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils.common import (
get_type_for_model,
ignore_inventory_computed_fields,
@@ -60,6 +59,7 @@ from awx.main.utils.common import (
ScheduleTaskManager,
)

from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
@@ -115,6 +115,9 @@ def dispatch_startup():
m = Metrics()
m.reset_values()

# Update Tower's rsyslog.conf file based on loggins settings in the db
reconfigure_rsyslog()

def inform_cluster_of_shutdown():
try:
@@ -129,7 +132,7 @@ def inform_cluster_of_shutdown():
logger.exception('Encountered problem with normal shutdown signal.')

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
from awx.main.signals import disable_activity_stream

@@ -241,10 +244,8 @@ def apply_cluster_membership_policies():
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))

@task(queue='tower_settings_change')
def clear_setting_cache(setting_keys):
# log that cache is being cleared
logger.info(f"clear_setting_cache of keys {setting_keys}")
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
@@ -253,6 +254,9 @@ def clear_setting_cache(setting_keys):
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)

if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()

@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
@@ -282,7 +286,7 @@ def profile_sql(threshold=1, minutes=1):
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
@@ -313,7 +317,7 @@ def send_notifications(notification_list, job_id=None):
logger.exception('Error saving notification {} result.'.format(notification.id))

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
@@ -326,7 +330,7 @@ def gather_analytics():
analytics.gather()

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
@@ -374,26 +378,12 @@ def handle_removed_image(remove_images=None):
_cleanup_images_and_files(remove_images=remove_images, file_pattern='')

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def cleanup_images_and_files():
_cleanup_images_and_files()

@task(queue=get_task_queuename)
def cleanup_host_metrics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField

last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None

cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
HostMetric.cleanup_task(months_ago)

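The cleanup_host_metrics task above gates its work on a stored last-run timestamp and a configurable interval (days converted to seconds), so the cleanup runs at most once per interval. A minimal sketch of that gate with illustrative names:

```python
# Minimal sketch of the "run at most once per interval" gate used above
# (illustrative constants and names; not the AWX settings machinery).
from datetime import datetime, timedelta, timezone
from typing import Optional

CLEANUP_INTERVAL_DAYS = 30


def should_run(last_run: Optional[datetime], now: datetime) -> bool:
    if last_run is None:  # never ran before
        return True
    return (now - last_run) > timedelta(days=CLEANUP_INTERVAL_DAYS)


now = datetime.now(timezone.utc)
print(should_run(None, now))                      # True: never ran
print(should_run(now - timedelta(days=40), now))  # True: interval elapsed
print(should_run(now - timedelta(days=5), now))   # False: ran recently
```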
@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def cluster_node_health_check(node):
"""
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
@@ -412,7 +402,7 @@ def cluster_node_health_check(node):
this_inst.local_health_check()

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def execution_node_health_check(node):
if node == '':
logger.warning('Remote health check incorrectly called with blank string')
@@ -506,7 +496,7 @@ def inspect_execution_nodes(instance_list):
execution_node_health_check.apply_async([hostname])

@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
@task(queue=get_local_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
logger.debug("Cluster node heartbeat task.")
nowtime = now()
@@ -596,7 +586,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def awx_receptor_workunit_reaper():
"""
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
@@ -632,7 +622,7 @@ def awx_receptor_workunit_reaper():
administrative_workunit_reaper(receptor_work_list)

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
@@ -652,7 +642,7 @@ def awx_k8s_reaper():
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
@@ -718,7 +708,7 @@ def schedule_manager_success_or_error(instance):
ScheduleWorkflowManager().schedule()

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -730,7 +720,7 @@ def handle_work_success(task_actual):
schedule_manager_success_or_error(instance)

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def handle_work_error(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
@@ -770,7 +760,7 @@ def handle_work_error(task_actual):
schedule_manager_success_or_error(instance)

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
@@ -811,7 +801,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
return False

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
@@ -827,7 +817,7 @@ def update_host_smart_inventory_memberships():
smart_inventory.update_computed_fields()

@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
@@ -892,9 +882,16 @@ def _reconstruct_relationships(copy_mapping):
new_obj.save()

@task(queue=get_task_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return

logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream

model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
@@ -906,28 +903,6 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return

o2m_to_preserve = {}
fields_to_preserve = set(getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []))

for field in model._meta.get_fields():
if field.name in fields_to_preserve:
if field.one_to_many:
try:
field_val = getattr(obj, field.name)
except AttributeError:
continue
o2m_to_preserve[field.name] = field_val

sub_obj_list = []
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
sub_model = type(sub_obj)
sub_obj_list.append((sub_model.__module__, sub_model.__name__, sub_obj.pk))

from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream

with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:

@@ -1,8 +1,9 @@
{
"ANSIBLE_JINJA2_NATIVE": "True",
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AZURE_CLIENT_ID": "fooo",
"AZURE_CLOUD_ENVIRONMENT": "fooo",
"AZURE_SECRET": "fooo",
"AZURE_SUBSCRIPTION_ID": "fooo",
"AZURE_TENANT": "fooo"
}
}
@@ -1,4 +1,5 @@
{
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"TOWER_HOST": "https://foo.invalid",
"TOWER_PASSWORD": "fooo",
"TOWER_USERNAME": "fooo",
@@ -9,4 +10,4 @@
"CONTROLLER_USERNAME": "fooo",
"CONTROLLER_OAUTH_TOKEN": "",
"CONTROLLER_VERIFY_SSL": "False"
}
}
@@ -1,7 +1,8 @@
{
"ANSIBLE_JINJA2_NATIVE": "True",
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"AWS_ACCESS_KEY_ID": "fooo",
"AWS_SECRET_ACCESS_KEY": "fooo",
"AWS_SECURITY_TOKEN": "fooo",
"AWS_SESSION_TOKEN": "fooo"
}
}
@@ -1,5 +1,6 @@
{
"ANSIBLE_JINJA2_NATIVE": "True",
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
"GOOGLE_APPLICATION_CREDENTIALS": "{{ file_reference }}",
"GCP_AUTH_KIND": "serviceaccount",

Some files were not shown because too many files have changed in this diff.